file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
QTofflineTemplate.py | from pyqtgraph.Qt import QtGui, QtCore
from sklearn import preprocessing
import numpy as np
import pyqtgraph as pg
import random
import pickle
import collections
from os import listdir
from os.path import isfile, join
import fastdtw
from scipy.spatial.distance import euclidean
import scipy.stats
from scipy import signal
import DTW
import time
import matplotlib.pyplot as plt
import pickle
from scipy import signal
from numpy.fft import fft, ifft, fft2, ifft2, fftshift
from scipy import stats
import re
DirSetting ='./Jay/' #base gesture Dit
ComplexDirSetting ='./Jay/Complex/' #complex gesture Dir
Name = 'Jay' #the prefix of filename
'''
MyRealTimePlot: create UI and construct the DTW Model
setMyData : a real-time ploter
DrawPic : draw the specificed file data
compare : compare two specificed file data by DTW and return the Distance
JudgeAll : Recognize all the file include base gesture and complex gesture.
This function is for getting accuracy for test set.
*** If you need GUI only, you do not need this function ***
'''
def absDist(A,B):
return np.sum(np.abs(A-B))
class MyRealTimePlot():
def __init__(self,dataSource = None,nameList=['Plot1','Plot2','Plot3','Plot4','Plot5','Plot6']):
'''
construct GUI
'''
self.numOfDataToPlot = 500 #nuber of point of x
self.ScalerNum = 2 # every self.ScalerNum we sample once - not used
self.numofPlotWidget=3
self.plotNum = 3 # how many line to plot in a plot widget
self.plotWidgetList = []
self.penStyleList= [[(0,0,200),(200,200,100),(195,46,212)],[(237,177,32),(126,47,142),(43,128,200)],[(0,0,200),(200,200,100),(195,46,212)]]
self.index=0
self.dataListLen = []
self.ROI1 = None # region of interest
self.ROI2 = None
self.dataTotolLen = 0
self.curveList = []
self.curveList2 = []
self.curveXData =[i for i in range(0,self.numOfDataToPlot) ] #initial x value
self.curveYDataList=[]
self.curveYDataList2=[]
self.app = QtGui.QApplication([])
self.mainWindow = QtGui.QMainWindow()
self.mainWindow.setWindowTitle('pyqtgraph example: PlotWidget')
self.mainWindow.resize(720,640)
self.GuiWiget = QtGui.QWidget()
self.mainWindow.setCentralWidget(self.GuiWiget)
layout = QtGui.QVBoxLayout()
secondLayout = QtGui.QHBoxLayout()
thirdLayout = QtGui.QHBoxLayout()
self.GuiWiget.setLayout(layout)
layout.addLayout(secondLayout)
layout.addLayout(thirdLayout)
pg.setConfigOption('background', 'w')
# create plot widgets by pg.PlotWidget(name=name) and we can draw multiple curve lines on it
for i,name in zip(range(0,self.numofPlotWidget),nameList):
plotWidget = pg.PlotWidget(name=name)
# set X range
plotWidget.setXRange(0, self.numOfDataToPlot)
# set Y range
if i == 0 :
plotWidget.setYRange(-2, 2)
elif i == 1:
plotWidget.setYRange(-180, 180)
else:
plotWidget.setYRange(-2, 2)
layout.addWidget(plotWidget)
self.plotWidgetList.append(plotWidget)
self.startLabel= QtGui.QLabel("Start:")
self.startWindows = QtGui.QLineEdit()
self.endLabel= QtGui.QLabel("End:")
self.endWindows = QtGui.QLineEdit()
self.button = QtGui.QPushButton('Split')
self.button.clicked.connect(self.DrawPic)
self.fileName= QtGui.QLabel("fileName:")
self.fileInputName = QtGui.QComboBox()
#self.fileInputName.setText("UpStraight0.dat")
self.Readbutton = QtGui.QPushButton('Read')
self.Readbutton.clicked.connect(self.ReadFile)
secondLayout.addWidget(self.startLabel)
secondLayout.addWidget(self.startWindows)
secondLayout.addWidget(self.endLabel)
secondLayout.addWidget(self.endWindows)
secondLayout.addWidget(self.button)
secondLayout.addWidget(self.fileName)
secondLayout.addWidget(self.fileInputName)
secondLayout.addWidget(self.Readbutton)
self.Aobj= QtGui.QLabel("A:")
self.comboA = QtGui.QComboBox()
self.AstartLabel= QtGui.QLabel("Start:")
self.AstartWindows = QtGui.QLineEdit()
self.AendLabel= QtGui.QLabel("End:")
self.AendWindows = QtGui.QLineEdit()
self.Comparebutton = QtGui.QPushButton('Compare')
#register a callback funtion - when button is pressed, we execute it
self.Comparebutton.clicked.connect(self.compare)
thirdLayout.addWidget(self.AstartLabel)
thirdLayout.addWidget(self.AstartWindows)
thirdLayout.addWidget(self.AendLabel)
thirdLayout.addWidget(self.AendWindows)
thirdLayout.addWidget(self.Aobj)
thirdLayout.addWidget(self.comboA)
thirdLayout.addWidget(self.Comparebutton)
# read file from Directory
self.readDir()
# Display the whole GUI architecture
self.mainWindow.show()
#Create plot instance by plotWidget.plot() and initial the Y value
for plotWidget,penStyle in zip(self.plotWidgetList,self.penStyleList):
for i in range(0,self.plotNum):
curve = plotWidget.plot()
curve.setPen(penStyle[i])
curveYData =[np.NAN for i in range(0,self.numOfDataToPlot) ] #initial y value
self.curveList.append(curve)
self.curveYDataList.append(curveYData)
for i in range(0,self.plotNum):
curve = self.plotWidgetList[2].plot()
curve.setPen(penStyle[i])
curveYData =[np.NAN for i in range(0,self.numOfDataToPlot) ]
self.curveList2.append(curve)
self.curveYDataList2.append(curveYData)
self.SettingModel()
print "init ok"
self.writeout = True
self.logfp = open('log.txt', "w")
self.timeLogfp = open('Timelog.txt', "w")
def SettingModel(self):
'''load model here'''
pass
def close(self):
self.app.closeAllWindows()
self.app.quit()
def ResetGraph(self):
for i in range(0, len(self.curveYDataList) ):
self.curveYDataList[i] =[np.NAN for j in range(0,self.numOfDataToPlot) ]
for i in range(0, len(self.curveYDataList2) ):
self.curveYDataList2[i] =[np.NAN for j in range(0,self.numOfDataToPlot) ]
self.dataListLen = []
self.dataTotolLen = 0
try:
self.plotWidgetList[0].removeItem(self.ROI1)
self.plotWidgetList[1].removeItem(self.ROI2)
except:
pass
self.ROI1 = None
self.ROI2 = None
def RegionWindows(self,upBbound):
axes = ['X','Y','Z']
Dirs = ['P2N','N2P']
ret = {}
ret['X'] ={}
ret['Y'] ={}
ret['Z'] ={}
ret['X']['N2P'] = []
ret['X']['P2N'] = []
ret['Y']['N2P'] = []
ret['Y']['P2N'] = []
ret['Z']['N2P'] = []
ret['Z']['P2N'] = []
for axis in axes:
for Dir in Dirs:
for boundry in self.windowsCrossDataIndex[axis][Dir]:
if boundry[1] > upBbound:
break
else:
ret[axis][Dir].append(boundry)
return ret
def GetInfo(self,ret):
axes = ['X','Y','Z']
Dirs = ['P2N','N2P']
pos = {}
pos['X'] =[0,1]
pos['Y'] =[1,2]
pos['Z'] =[2,3]
for axis in axes:
|
def DrawPic(self):
self.ResetGraph()
startWindowsIdx = int(self.startWindows.text())
endWindowsIdx = int(self.endWindows.text())
startIDX = self.workingIdx[startWindowsIdx][0]
endIDX = self.workingIdx[endWindowsIdx][1]
#start:stop:step
ret = self.RegionWindows(endIDX)
print ret,endIDX
dataList = np.concatenate((self.Acc[startIDX:endIDX,:],self.Angle[startIDX:endIDX,:]),axis=1)
# print "scipy.stats.skewtest:",scipy.stats.skewtest(self.Acc[startIDX:endIDX,:],axis=0)
# print "mean:",np.mean( self.Acc[self.workingIdx[endWindowsIdx][0]:endIDX,:] ,axis=0)
# print "start angle:",self.Angle[startIDX,:],"Last angle:",self.Angle[endIDX-1,:]
# print "local Min X:",scipy.signal.argrelmin(self.Acc[startIDX:endIDX,0:1] ,axis=0)[0],"local Min Y:",scipy.signal.argrelmin(self.Acc[startIDX:endIDX,1:2] ,axis=0)[0],"local Min Z:",scipy.signal.argrelmin(self.Acc[startIDX:endIDX,2:3] ,axis=0)[0]
# print "local Max X:",scipy.signal.argrelmax(self.Acc[startIDX:endIDX,0:1] ,axis=0)[0],"local Max Y:",scipy.signal.argrelmax(self.Acc[startIDX:endIDX,1:2] ,axis=0)[0],"local Max Z:",scipy.signal.argrelmax(self.Acc[startIDX:endIDX,2:3] ,axis=0)[0]
dataList = dataList[::self.ScalerNum,:]
self.GetInfo(ret)
dataList = dataList.transpose()
self.ROI1 = pg.LinearRegionItem([startIDX,endIDX])
self.ROI2 = pg.LinearRegionItem([startIDX,endIDX])
self.plotWidgetList[0].addItem(self.ROI1)
self.plotWidgetList[1].addItem(self.ROI2)
# print endIDX-startIDX,dataList.shape
for data,curve,yData,i in zip (dataList,self.curveList2,self.curveYDataList2 ,range(0,7)):
# print len(yData)
yData[0:dataList.shape[1]] = dataList[i,:].tolist()[0]
# print len(dataList[i,:].tolist())
curve.setData(y=yData, x=self.curveXData)
self.app.processEvents()
def diffAngle(self,data):
ret = np.mat([0.0,0.0,0.0])
for i,j in zip(range(0,data.shape[0]-1), range(1,data.shape[0]) ):
ret =np.concatenate ( (ret,data[j,:]-data[i,:]),axis=0 )
return ret
def ReadFile(self):
self.ResetGraph()
self.filename = str(self.fileInputName.currentText())
self.FileJudge(self.filename,paint=True)
def FileJudge(self,filename,paint=False,writeout=False):
print filename,DirSetting,ComplexDirSetting
if writeout==True:
self.logfp.write(filename+ ' '+DirSetting+'\n')
try:
fp = open(ComplexDirSetting+filename, "rb")
except:
fp = open(DirSetting+filename, "rb")
tempDict = pickle.load(fp)
# print tempDict
self.filteredIdx = tempDict['filteredIdx']
self.Acc = tempDict['Acc']
self.Gyo = tempDict['Gyo']
self.Mag = tempDict['Mag']
self.Angle = tempDict['Angle']
# self.Angle = self.Angle - np.mean(self.Angle,axis=1)
self.windowsCrossDataIndex = tempDict['windowsCrossDataIndex']
self.AccRawState = tempDict['AccRawState']
self.GyoRawState = tempDict['GyoRawStata']
self.workingIdx = tempDict['workingIdx']
self.MayBeValid = []
self.VarDataIdx = tempDict['VarDataIdx']
self.seq = tempDict['seq']
self.timestamp = tempDict['timestamp']
offset = self.workingIdx[0][0]
for i in range (0,len(self.workingIdx)):
self.workingIdx[i][0] = self.workingIdx[i][0] - offset
self.workingIdx[i][1] = self.workingIdx[i][1] - offset
for axis in ['X','Y','Z']:
for Dir in ['N2P','P2N']:
for i in range(0,len(self.windowsCrossDataIndex[axis][Dir])):
self.windowsCrossDataIndex[axis][Dir][i][0] = self.windowsCrossDataIndex[axis][Dir][i][0] - offset
self.windowsCrossDataIndex[axis][Dir][i][1] = self.windowsCrossDataIndex[axis][Dir][i][1] - offset
# self.windowsCrossDataIndex = self.getCrossingWindowsIDX(self.Acc)
else:
self.logfp.write("workingIdx:"+str(len(self.workingIdx))+'\n')
startIDX = self.workingIdx[0][0]
endIDX = self.workingIdx[-1][1]
dataList = np.concatenate((self.Acc,self.Angle),axis=1)
dataList = dataList.transpose()
if paint == True:
# Plot Data
for data,curve,yData,i in zip (dataList,self.curveList,self.curveYDataList ,range(0,7)):
# print len(yData)
yData[0:endIDX-startIDX] = dataList[i,:].tolist()[0]
# print len(dataList[i,:].tolist())
curve.setData(y=yData, x=self.curveXData)
self.app.processEvents()
# classify or somewhat you can implement in Judge function
self.Judge()
def readDir(self):
''' read Dir and catch all file contained the keywords which defined in complexG & keywords'''
global DirSetting,ComplexDirSetting,Name
self.fileList = []
complexG =['ForwardBackward','BackwardForward','DownUp','UpDown','RightLeft','LeftRight','LeftForward','RightForward','UpRight','UpLeft','RightUpForward','ForwardRight','V','VII','LeftbackForward','RightbackForward','ForwardLeft','LeftLeftForward','RightRightForward','ForwardUp','DownBackward','DownLeft','DownRight','ForwardBackwardForward','LeftRightUp','BackwardRightLeftforward','DownLeftRightforward','RightUpForward']
keywords = ['GoStraight','BackStraight','DownStraight','UpStraight','LeftStraight','RightStraight','RightUpStraight','LeftGoStraight','RightGoStraight','LeftBackStraight','RightBackStraight']
for i in range (0,len(complexG)):
complexG[i] = Name + complexG[i]
for i in range (0,len(keywords)):
keywords[i] = Name + keywords[i]
for keyword in complexG:
for fileName in listdir(ComplexDirSetting):
# if keyword == 'Circle':s
# print fileName,fileName[0:len(keyword)]
if keyword in fileName[0:len(keyword)]:
if fileName not in self.fileList:
self.fileList.append(fileName)
for keyword in keywords:
for fileName in listdir(DirSetting):
# if keyword == 'Circle':s
# print fileName,fileName[0:len(keyword)]
if keyword in fileName[0:len(keyword)]:
if fileName not in self.fileList:
self.fileList.append(fileName)
self.fileInputName.addItems(self.fileList)
self.comboA.addItems(self.fileList)
def Judge(self,writeout=False):
pass
# print "\033[1;31m",data[3],"len:",data[2] - data[1] + 1 ,data[2],data[1],"\033[1;m"
# print "Dis:",disOrder
# print "Angle:",angleOrder
def compare(self):
startWindowsIdx = int(self.startWindows.text())
endWindowsIdx = int(self.endWindows.text())
startIDX = self.workingIdx[startWindowsIdx][0]
endIDX = self.workingIdx[endWindowsIdx][1]
AstartWindowsIdx = None
AendWindowsIdx = None
AstartIDX = None
AendIDX = None
try:
AstartWindowsIdx = int(self.AstartWindows.text())
AendWindowsIdx = int(self.AendWindows.text())
except:
pass
filename = str(self.comboA.currentText())
try:
fp = open(DirSetting+filename, "rb")
except:
fp = open(ComplexDirSetting+filename, "rb")
tempDict = pickle.load(fp)
Acc = tempDict['Acc']
Gyo = tempDict['Gyo']
Mag = tempDict['Mag']
Angle = tempDict['Angle']
windowsCrossDataIndex = tempDict['windowsCrossDataIndex']
AccRawState = tempDict['AccRawState']
GyoRawState = tempDict['GyoRawStata']
workingIdx = tempDict['workingIdx']
VarDataIdx = tempDict['VarDataIdx']
seq = tempDict['seq']
timestamp = tempDict['timestamp']
AstartIDX = workingIdx[AstartWindowsIdx][0]
AendIDX = workingIdx[AendWindowsIdx][1]
print startWindowsIdx,endWindowsIdx,AstartWindowsIdx,AendWindowsIdx
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
#QtGui.QApplication.instance().exec_()
A = MyRealTimePlot()
# A.update()
A.app.instance().exec_()
# QtGui.QApplication.instance().exec_()
# def cross_correlation_using_fft(x, y):
# f1 = fft(x)
# f2 = fft(np.flipud(y))
# cc = np.real(ifft(f1 * f2))
# return fftshift(cc)
# # print self.windowsCrossDataIndex
# # for gesture in FinalS:
# # print "\033[1;31m",gesture,"\033[1;m"
# def compute_shift(x, y):
# assert len(x) == len(y)
# c = cross_correlation_using_fft(x, y)
# assert len(c) == len(x)
# zero_index = int(len(x) / 2) - 1
# shift = zero_index - np.argmax(c)
# return shift | for Dir in Dirs:
# print axis,Dir,"------------------------"
for idx in ret[axis][Dir]:
if idx[1] - idx[0] < 40:
# print idx[0],idx[1],"not enough"
idx.append("not enough")
else:
# print idx[0],idx[1],np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0))
if np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0)) < 0.15:
idx.append(None)
# print np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0))
else:
idx.append(np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0)) ) | conditional_block |
QTofflineTemplate.py | from pyqtgraph.Qt import QtGui, QtCore
from sklearn import preprocessing
import numpy as np
import pyqtgraph as pg
import random
import pickle
import collections
from os import listdir
from os.path import isfile, join
import fastdtw
from scipy.spatial.distance import euclidean
import scipy.stats
from scipy import signal
import DTW
import time
import matplotlib.pyplot as plt
import pickle
from scipy import signal
from numpy.fft import fft, ifft, fft2, ifft2, fftshift
from scipy import stats
import re
DirSetting ='./Jay/' #base gesture Dit
ComplexDirSetting ='./Jay/Complex/' #complex gesture Dir
Name = 'Jay' #the prefix of filename
'''
MyRealTimePlot: create UI and construct the DTW Model
setMyData : a real-time ploter
DrawPic : draw the specificed file data
compare : compare two specificed file data by DTW and return the Distance
JudgeAll : Recognize all the file include base gesture and complex gesture.
This function is for getting accuracy for test set.
*** If you need GUI only, you do not need this function ***
'''
def absDist(A,B):
return np.sum(np.abs(A-B))
class MyRealTimePlot():
def __init__(self,dataSource = None,nameList=['Plot1','Plot2','Plot3','Plot4','Plot5','Plot6']):
|
def SettingModel(self):
'''load model here'''
pass
def close(self):
self.app.closeAllWindows()
self.app.quit()
def ResetGraph(self):
for i in range(0, len(self.curveYDataList) ):
self.curveYDataList[i] =[np.NAN for j in range(0,self.numOfDataToPlot) ]
for i in range(0, len(self.curveYDataList2) ):
self.curveYDataList2[i] =[np.NAN for j in range(0,self.numOfDataToPlot) ]
self.dataListLen = []
self.dataTotolLen = 0
try:
self.plotWidgetList[0].removeItem(self.ROI1)
self.plotWidgetList[1].removeItem(self.ROI2)
except:
pass
self.ROI1 = None
self.ROI2 = None
def RegionWindows(self,upBbound):
axes = ['X','Y','Z']
Dirs = ['P2N','N2P']
ret = {}
ret['X'] ={}
ret['Y'] ={}
ret['Z'] ={}
ret['X']['N2P'] = []
ret['X']['P2N'] = []
ret['Y']['N2P'] = []
ret['Y']['P2N'] = []
ret['Z']['N2P'] = []
ret['Z']['P2N'] = []
for axis in axes:
for Dir in Dirs:
for boundry in self.windowsCrossDataIndex[axis][Dir]:
if boundry[1] > upBbound:
break
else:
ret[axis][Dir].append(boundry)
return ret
def GetInfo(self,ret):
axes = ['X','Y','Z']
Dirs = ['P2N','N2P']
pos = {}
pos['X'] =[0,1]
pos['Y'] =[1,2]
pos['Z'] =[2,3]
for axis in axes:
for Dir in Dirs:
# print axis,Dir,"------------------------"
for idx in ret[axis][Dir]:
if idx[1] - idx[0] < 40:
# print idx[0],idx[1],"not enough"
idx.append("not enough")
else:
# print idx[0],idx[1],np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0))
if np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0)) < 0.15:
idx.append(None)
# print np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0))
else:
idx.append(np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0)) )
def DrawPic(self):
self.ResetGraph()
startWindowsIdx = int(self.startWindows.text())
endWindowsIdx = int(self.endWindows.text())
startIDX = self.workingIdx[startWindowsIdx][0]
endIDX = self.workingIdx[endWindowsIdx][1]
#start:stop:step
ret = self.RegionWindows(endIDX)
print ret,endIDX
dataList = np.concatenate((self.Acc[startIDX:endIDX,:],self.Angle[startIDX:endIDX,:]),axis=1)
# print "scipy.stats.skewtest:",scipy.stats.skewtest(self.Acc[startIDX:endIDX,:],axis=0)
# print "mean:",np.mean( self.Acc[self.workingIdx[endWindowsIdx][0]:endIDX,:] ,axis=0)
# print "start angle:",self.Angle[startIDX,:],"Last angle:",self.Angle[endIDX-1,:]
# print "local Min X:",scipy.signal.argrelmin(self.Acc[startIDX:endIDX,0:1] ,axis=0)[0],"local Min Y:",scipy.signal.argrelmin(self.Acc[startIDX:endIDX,1:2] ,axis=0)[0],"local Min Z:",scipy.signal.argrelmin(self.Acc[startIDX:endIDX,2:3] ,axis=0)[0]
# print "local Max X:",scipy.signal.argrelmax(self.Acc[startIDX:endIDX,0:1] ,axis=0)[0],"local Max Y:",scipy.signal.argrelmax(self.Acc[startIDX:endIDX,1:2] ,axis=0)[0],"local Max Z:",scipy.signal.argrelmax(self.Acc[startIDX:endIDX,2:3] ,axis=0)[0]
dataList = dataList[::self.ScalerNum,:]
self.GetInfo(ret)
dataList = dataList.transpose()
self.ROI1 = pg.LinearRegionItem([startIDX,endIDX])
self.ROI2 = pg.LinearRegionItem([startIDX,endIDX])
self.plotWidgetList[0].addItem(self.ROI1)
self.plotWidgetList[1].addItem(self.ROI2)
# print endIDX-startIDX,dataList.shape
for data,curve,yData,i in zip (dataList,self.curveList2,self.curveYDataList2 ,range(0,7)):
# print len(yData)
yData[0:dataList.shape[1]] = dataList[i,:].tolist()[0]
# print len(dataList[i,:].tolist())
curve.setData(y=yData, x=self.curveXData)
self.app.processEvents()
def diffAngle(self,data):
ret = np.mat([0.0,0.0,0.0])
for i,j in zip(range(0,data.shape[0]-1), range(1,data.shape[0]) ):
ret =np.concatenate ( (ret,data[j,:]-data[i,:]),axis=0 )
return ret
def ReadFile(self):
self.ResetGraph()
self.filename = str(self.fileInputName.currentText())
self.FileJudge(self.filename,paint=True)
def FileJudge(self,filename,paint=False,writeout=False):
print filename,DirSetting,ComplexDirSetting
if writeout==True:
self.logfp.write(filename+ ' '+DirSetting+'\n')
try:
fp = open(ComplexDirSetting+filename, "rb")
except:
fp = open(DirSetting+filename, "rb")
tempDict = pickle.load(fp)
# print tempDict
self.filteredIdx = tempDict['filteredIdx']
self.Acc = tempDict['Acc']
self.Gyo = tempDict['Gyo']
self.Mag = tempDict['Mag']
self.Angle = tempDict['Angle']
# self.Angle = self.Angle - np.mean(self.Angle,axis=1)
self.windowsCrossDataIndex = tempDict['windowsCrossDataIndex']
self.AccRawState = tempDict['AccRawState']
self.GyoRawState = tempDict['GyoRawStata']
self.workingIdx = tempDict['workingIdx']
self.MayBeValid = []
self.VarDataIdx = tempDict['VarDataIdx']
self.seq = tempDict['seq']
self.timestamp = tempDict['timestamp']
offset = self.workingIdx[0][0]
for i in range (0,len(self.workingIdx)):
self.workingIdx[i][0] = self.workingIdx[i][0] - offset
self.workingIdx[i][1] = self.workingIdx[i][1] - offset
for axis in ['X','Y','Z']:
for Dir in ['N2P','P2N']:
for i in range(0,len(self.windowsCrossDataIndex[axis][Dir])):
self.windowsCrossDataIndex[axis][Dir][i][0] = self.windowsCrossDataIndex[axis][Dir][i][0] - offset
self.windowsCrossDataIndex[axis][Dir][i][1] = self.windowsCrossDataIndex[axis][Dir][i][1] - offset
# self.windowsCrossDataIndex = self.getCrossingWindowsIDX(self.Acc)
else:
self.logfp.write("workingIdx:"+str(len(self.workingIdx))+'\n')
startIDX = self.workingIdx[0][0]
endIDX = self.workingIdx[-1][1]
dataList = np.concatenate((self.Acc,self.Angle),axis=1)
dataList = dataList.transpose()
if paint == True:
# Plot Data
for data,curve,yData,i in zip (dataList,self.curveList,self.curveYDataList ,range(0,7)):
# print len(yData)
yData[0:endIDX-startIDX] = dataList[i,:].tolist()[0]
# print len(dataList[i,:].tolist())
curve.setData(y=yData, x=self.curveXData)
self.app.processEvents()
# classify or somewhat you can implement in Judge function
self.Judge()
def readDir(self):
''' read Dir and catch all file contained the keywords which defined in complexG & keywords'''
global DirSetting,ComplexDirSetting,Name
self.fileList = []
complexG =['ForwardBackward','BackwardForward','DownUp','UpDown','RightLeft','LeftRight','LeftForward','RightForward','UpRight','UpLeft','RightUpForward','ForwardRight','V','VII','LeftbackForward','RightbackForward','ForwardLeft','LeftLeftForward','RightRightForward','ForwardUp','DownBackward','DownLeft','DownRight','ForwardBackwardForward','LeftRightUp','BackwardRightLeftforward','DownLeftRightforward','RightUpForward']
keywords = ['GoStraight','BackStraight','DownStraight','UpStraight','LeftStraight','RightStraight','RightUpStraight','LeftGoStraight','RightGoStraight','LeftBackStraight','RightBackStraight']
for i in range (0,len(complexG)):
complexG[i] = Name + complexG[i]
for i in range (0,len(keywords)):
keywords[i] = Name + keywords[i]
for keyword in complexG:
for fileName in listdir(ComplexDirSetting):
# if keyword == 'Circle':s
# print fileName,fileName[0:len(keyword)]
if keyword in fileName[0:len(keyword)]:
if fileName not in self.fileList:
self.fileList.append(fileName)
for keyword in keywords:
for fileName in listdir(DirSetting):
# if keyword == 'Circle':s
# print fileName,fileName[0:len(keyword)]
if keyword in fileName[0:len(keyword)]:
if fileName not in self.fileList:
self.fileList.append(fileName)
self.fileInputName.addItems(self.fileList)
self.comboA.addItems(self.fileList)
def Judge(self,writeout=False):
pass
# print "\033[1;31m",data[3],"len:",data[2] - data[1] + 1 ,data[2],data[1],"\033[1;m"
# print "Dis:",disOrder
# print "Angle:",angleOrder
def compare(self):
startWindowsIdx = int(self.startWindows.text())
endWindowsIdx = int(self.endWindows.text())
startIDX = self.workingIdx[startWindowsIdx][0]
endIDX = self.workingIdx[endWindowsIdx][1]
AstartWindowsIdx = None
AendWindowsIdx = None
AstartIDX = None
AendIDX = None
try:
AstartWindowsIdx = int(self.AstartWindows.text())
AendWindowsIdx = int(self.AendWindows.text())
except:
pass
filename = str(self.comboA.currentText())
try:
fp = open(DirSetting+filename, "rb")
except:
fp = open(ComplexDirSetting+filename, "rb")
tempDict = pickle.load(fp)
Acc = tempDict['Acc']
Gyo = tempDict['Gyo']
Mag = tempDict['Mag']
Angle = tempDict['Angle']
windowsCrossDataIndex = tempDict['windowsCrossDataIndex']
AccRawState = tempDict['AccRawState']
GyoRawState = tempDict['GyoRawStata']
workingIdx = tempDict['workingIdx']
VarDataIdx = tempDict['VarDataIdx']
seq = tempDict['seq']
timestamp = tempDict['timestamp']
AstartIDX = workingIdx[AstartWindowsIdx][0]
AendIDX = workingIdx[AendWindowsIdx][1]
print startWindowsIdx,endWindowsIdx,AstartWindowsIdx,AendWindowsIdx
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
#QtGui.QApplication.instance().exec_()
A = MyRealTimePlot()
# A.update()
A.app.instance().exec_()
# QtGui.QApplication.instance().exec_()
# def cross_correlation_using_fft(x, y):
# f1 = fft(x)
# f2 = fft(np.flipud(y))
# cc = np.real(ifft(f1 * f2))
# return fftshift(cc)
# # print self.windowsCrossDataIndex
# # for gesture in FinalS:
# # print "\033[1;31m",gesture,"\033[1;m"
# def compute_shift(x, y):
# assert len(x) == len(y)
# c = cross_correlation_using_fft(x, y)
# assert len(c) == len(x)
# zero_index = int(len(x) / 2) - 1
# shift = zero_index - np.argmax(c)
# return shift | '''
construct GUI
'''
self.numOfDataToPlot = 500 #nuber of point of x
self.ScalerNum = 2 # every self.ScalerNum we sample once - not used
self.numofPlotWidget=3
self.plotNum = 3 # how many line to plot in a plot widget
self.plotWidgetList = []
self.penStyleList= [[(0,0,200),(200,200,100),(195,46,212)],[(237,177,32),(126,47,142),(43,128,200)],[(0,0,200),(200,200,100),(195,46,212)]]
self.index=0
self.dataListLen = []
self.ROI1 = None # region of interest
self.ROI2 = None
self.dataTotolLen = 0
self.curveList = []
self.curveList2 = []
self.curveXData =[i for i in range(0,self.numOfDataToPlot) ] #initial x value
self.curveYDataList=[]
self.curveYDataList2=[]
self.app = QtGui.QApplication([])
self.mainWindow = QtGui.QMainWindow()
self.mainWindow.setWindowTitle('pyqtgraph example: PlotWidget')
self.mainWindow.resize(720,640)
self.GuiWiget = QtGui.QWidget()
self.mainWindow.setCentralWidget(self.GuiWiget)
layout = QtGui.QVBoxLayout()
secondLayout = QtGui.QHBoxLayout()
thirdLayout = QtGui.QHBoxLayout()
self.GuiWiget.setLayout(layout)
layout.addLayout(secondLayout)
layout.addLayout(thirdLayout)
pg.setConfigOption('background', 'w')
# create plot widgets by pg.PlotWidget(name=name) and we can draw multiple curve lines on it
for i,name in zip(range(0,self.numofPlotWidget),nameList):
plotWidget = pg.PlotWidget(name=name)
# set X range
plotWidget.setXRange(0, self.numOfDataToPlot)
# set Y range
if i == 0 :
plotWidget.setYRange(-2, 2)
elif i == 1:
plotWidget.setYRange(-180, 180)
else:
plotWidget.setYRange(-2, 2)
layout.addWidget(plotWidget)
self.plotWidgetList.append(plotWidget)
self.startLabel= QtGui.QLabel("Start:")
self.startWindows = QtGui.QLineEdit()
self.endLabel= QtGui.QLabel("End:")
self.endWindows = QtGui.QLineEdit()
self.button = QtGui.QPushButton('Split')
self.button.clicked.connect(self.DrawPic)
self.fileName= QtGui.QLabel("fileName:")
self.fileInputName = QtGui.QComboBox()
#self.fileInputName.setText("UpStraight0.dat")
self.Readbutton = QtGui.QPushButton('Read')
self.Readbutton.clicked.connect(self.ReadFile)
secondLayout.addWidget(self.startLabel)
secondLayout.addWidget(self.startWindows)
secondLayout.addWidget(self.endLabel)
secondLayout.addWidget(self.endWindows)
secondLayout.addWidget(self.button)
secondLayout.addWidget(self.fileName)
secondLayout.addWidget(self.fileInputName)
secondLayout.addWidget(self.Readbutton)
self.Aobj= QtGui.QLabel("A:")
self.comboA = QtGui.QComboBox()
self.AstartLabel= QtGui.QLabel("Start:")
self.AstartWindows = QtGui.QLineEdit()
self.AendLabel= QtGui.QLabel("End:")
self.AendWindows = QtGui.QLineEdit()
self.Comparebutton = QtGui.QPushButton('Compare')
#register a callback funtion - when button is pressed, we execute it
self.Comparebutton.clicked.connect(self.compare)
thirdLayout.addWidget(self.AstartLabel)
thirdLayout.addWidget(self.AstartWindows)
thirdLayout.addWidget(self.AendLabel)
thirdLayout.addWidget(self.AendWindows)
thirdLayout.addWidget(self.Aobj)
thirdLayout.addWidget(self.comboA)
thirdLayout.addWidget(self.Comparebutton)
# read file from Directory
self.readDir()
# Display the whole GUI architecture
self.mainWindow.show()
#Create plot instance by plotWidget.plot() and initial the Y value
for plotWidget,penStyle in zip(self.plotWidgetList,self.penStyleList):
for i in range(0,self.plotNum):
curve = plotWidget.plot()
curve.setPen(penStyle[i])
curveYData =[np.NAN for i in range(0,self.numOfDataToPlot) ] #initial y value
self.curveList.append(curve)
self.curveYDataList.append(curveYData)
for i in range(0,self.plotNum):
curve = self.plotWidgetList[2].plot()
curve.setPen(penStyle[i])
curveYData =[np.NAN for i in range(0,self.numOfDataToPlot) ]
self.curveList2.append(curve)
self.curveYDataList2.append(curveYData)
self.SettingModel()
print "init ok"
self.writeout = True
self.logfp = open('log.txt', "w")
self.timeLogfp = open('Timelog.txt', "w") | identifier_body |
QTofflineTemplate.py | from pyqtgraph.Qt import QtGui, QtCore
from sklearn import preprocessing
import numpy as np
import pyqtgraph as pg
import random
import pickle
import collections
from os import listdir
from os.path import isfile, join
import fastdtw
from scipy.spatial.distance import euclidean
import scipy.stats
from scipy import signal
import DTW
import time
import matplotlib.pyplot as plt
import pickle
from scipy import signal
from numpy.fft import fft, ifft, fft2, ifft2, fftshift
from scipy import stats
import re
DirSetting ='./Jay/' #base gesture Dit
ComplexDirSetting ='./Jay/Complex/' #complex gesture Dir
Name = 'Jay' #the prefix of filename
'''
MyRealTimePlot: create UI and construct the DTW Model
setMyData : a real-time ploter
DrawPic : draw the specificed file data
compare : compare two specificed file data by DTW and return the Distance
JudgeAll : Recognize all the file include base gesture and complex gesture.
This function is for getting accuracy for test set.
*** If you need GUI only, you do not need this function ***
''' | return np.sum(np.abs(A-B))
class MyRealTimePlot():
def __init__(self,dataSource = None,nameList=['Plot1','Plot2','Plot3','Plot4','Plot5','Plot6']):
'''
construct GUI
'''
self.numOfDataToPlot = 500 #nuber of point of x
self.ScalerNum = 2 # every self.ScalerNum we sample once - not used
self.numofPlotWidget=3
self.plotNum = 3 # how many line to plot in a plot widget
self.plotWidgetList = []
self.penStyleList= [[(0,0,200),(200,200,100),(195,46,212)],[(237,177,32),(126,47,142),(43,128,200)],[(0,0,200),(200,200,100),(195,46,212)]]
self.index=0
self.dataListLen = []
self.ROI1 = None # region of interest
self.ROI2 = None
self.dataTotolLen = 0
self.curveList = []
self.curveList2 = []
self.curveXData =[i for i in range(0,self.numOfDataToPlot) ] #initial x value
self.curveYDataList=[]
self.curveYDataList2=[]
self.app = QtGui.QApplication([])
self.mainWindow = QtGui.QMainWindow()
self.mainWindow.setWindowTitle('pyqtgraph example: PlotWidget')
self.mainWindow.resize(720,640)
self.GuiWiget = QtGui.QWidget()
self.mainWindow.setCentralWidget(self.GuiWiget)
layout = QtGui.QVBoxLayout()
secondLayout = QtGui.QHBoxLayout()
thirdLayout = QtGui.QHBoxLayout()
self.GuiWiget.setLayout(layout)
layout.addLayout(secondLayout)
layout.addLayout(thirdLayout)
pg.setConfigOption('background', 'w')
# create plot widgets by pg.PlotWidget(name=name) and we can draw multiple curve lines on it
for i,name in zip(range(0,self.numofPlotWidget),nameList):
plotWidget = pg.PlotWidget(name=name)
# set X range
plotWidget.setXRange(0, self.numOfDataToPlot)
# set Y range
if i == 0 :
plotWidget.setYRange(-2, 2)
elif i == 1:
plotWidget.setYRange(-180, 180)
else:
plotWidget.setYRange(-2, 2)
layout.addWidget(plotWidget)
self.plotWidgetList.append(plotWidget)
self.startLabel= QtGui.QLabel("Start:")
self.startWindows = QtGui.QLineEdit()
self.endLabel= QtGui.QLabel("End:")
self.endWindows = QtGui.QLineEdit()
self.button = QtGui.QPushButton('Split')
self.button.clicked.connect(self.DrawPic)
self.fileName= QtGui.QLabel("fileName:")
self.fileInputName = QtGui.QComboBox()
#self.fileInputName.setText("UpStraight0.dat")
self.Readbutton = QtGui.QPushButton('Read')
self.Readbutton.clicked.connect(self.ReadFile)
secondLayout.addWidget(self.startLabel)
secondLayout.addWidget(self.startWindows)
secondLayout.addWidget(self.endLabel)
secondLayout.addWidget(self.endWindows)
secondLayout.addWidget(self.button)
secondLayout.addWidget(self.fileName)
secondLayout.addWidget(self.fileInputName)
secondLayout.addWidget(self.Readbutton)
self.Aobj= QtGui.QLabel("A:")
self.comboA = QtGui.QComboBox()
self.AstartLabel= QtGui.QLabel("Start:")
self.AstartWindows = QtGui.QLineEdit()
self.AendLabel= QtGui.QLabel("End:")
self.AendWindows = QtGui.QLineEdit()
self.Comparebutton = QtGui.QPushButton('Compare')
#register a callback funtion - when button is pressed, we execute it
self.Comparebutton.clicked.connect(self.compare)
thirdLayout.addWidget(self.AstartLabel)
thirdLayout.addWidget(self.AstartWindows)
thirdLayout.addWidget(self.AendLabel)
thirdLayout.addWidget(self.AendWindows)
thirdLayout.addWidget(self.Aobj)
thirdLayout.addWidget(self.comboA)
thirdLayout.addWidget(self.Comparebutton)
# read file from Directory
self.readDir()
# Display the whole GUI architecture
self.mainWindow.show()
#Create plot instance by plotWidget.plot() and initial the Y value
for plotWidget,penStyle in zip(self.plotWidgetList,self.penStyleList):
for i in range(0,self.plotNum):
curve = plotWidget.plot()
curve.setPen(penStyle[i])
curveYData =[np.NAN for i in range(0,self.numOfDataToPlot) ] #initial y value
self.curveList.append(curve)
self.curveYDataList.append(curveYData)
for i in range(0,self.plotNum):
curve = self.plotWidgetList[2].plot()
curve.setPen(penStyle[i])
curveYData =[np.NAN for i in range(0,self.numOfDataToPlot) ]
self.curveList2.append(curve)
self.curveYDataList2.append(curveYData)
self.SettingModel()
print "init ok"
self.writeout = True
self.logfp = open('log.txt', "w")
self.timeLogfp = open('Timelog.txt', "w")
def SettingModel(self):
'''load model here'''
pass
def close(self):
self.app.closeAllWindows()
self.app.quit()
def ResetGraph(self):
for i in range(0, len(self.curveYDataList) ):
self.curveYDataList[i] =[np.NAN for j in range(0,self.numOfDataToPlot) ]
for i in range(0, len(self.curveYDataList2) ):
self.curveYDataList2[i] =[np.NAN for j in range(0,self.numOfDataToPlot) ]
self.dataListLen = []
self.dataTotolLen = 0
try:
self.plotWidgetList[0].removeItem(self.ROI1)
self.plotWidgetList[1].removeItem(self.ROI2)
except:
pass
self.ROI1 = None
self.ROI2 = None
def RegionWindows(self,upBbound):
axes = ['X','Y','Z']
Dirs = ['P2N','N2P']
ret = {}
ret['X'] ={}
ret['Y'] ={}
ret['Z'] ={}
ret['X']['N2P'] = []
ret['X']['P2N'] = []
ret['Y']['N2P'] = []
ret['Y']['P2N'] = []
ret['Z']['N2P'] = []
ret['Z']['P2N'] = []
for axis in axes:
for Dir in Dirs:
for boundry in self.windowsCrossDataIndex[axis][Dir]:
if boundry[1] > upBbound:
break
else:
ret[axis][Dir].append(boundry)
return ret
def GetInfo(self,ret):
axes = ['X','Y','Z']
Dirs = ['P2N','N2P']
pos = {}
pos['X'] =[0,1]
pos['Y'] =[1,2]
pos['Z'] =[2,3]
for axis in axes:
for Dir in Dirs:
# print axis,Dir,"------------------------"
for idx in ret[axis][Dir]:
if idx[1] - idx[0] < 40:
# print idx[0],idx[1],"not enough"
idx.append("not enough")
else:
# print idx[0],idx[1],np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0))
if np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0)) < 0.15:
idx.append(None)
# print np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0))
else:
idx.append(np.sqrt(np.mean(np.multiply(self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]],self.Acc[idx[0]:idx[1],pos[axis][0]:pos[axis][1]]),axis=0)) )
def DrawPic(self):
self.ResetGraph()
startWindowsIdx = int(self.startWindows.text())
endWindowsIdx = int(self.endWindows.text())
startIDX = self.workingIdx[startWindowsIdx][0]
endIDX = self.workingIdx[endWindowsIdx][1]
#start:stop:step
ret = self.RegionWindows(endIDX)
print ret,endIDX
dataList = np.concatenate((self.Acc[startIDX:endIDX,:],self.Angle[startIDX:endIDX,:]),axis=1)
# print "scipy.stats.skewtest:",scipy.stats.skewtest(self.Acc[startIDX:endIDX,:],axis=0)
# print "mean:",np.mean( self.Acc[self.workingIdx[endWindowsIdx][0]:endIDX,:] ,axis=0)
# print "start angle:",self.Angle[startIDX,:],"Last angle:",self.Angle[endIDX-1,:]
# print "local Min X:",scipy.signal.argrelmin(self.Acc[startIDX:endIDX,0:1] ,axis=0)[0],"local Min Y:",scipy.signal.argrelmin(self.Acc[startIDX:endIDX,1:2] ,axis=0)[0],"local Min Z:",scipy.signal.argrelmin(self.Acc[startIDX:endIDX,2:3] ,axis=0)[0]
# print "local Max X:",scipy.signal.argrelmax(self.Acc[startIDX:endIDX,0:1] ,axis=0)[0],"local Max Y:",scipy.signal.argrelmax(self.Acc[startIDX:endIDX,1:2] ,axis=0)[0],"local Max Z:",scipy.signal.argrelmax(self.Acc[startIDX:endIDX,2:3] ,axis=0)[0]
dataList = dataList[::self.ScalerNum,:]
self.GetInfo(ret)
dataList = dataList.transpose()
self.ROI1 = pg.LinearRegionItem([startIDX,endIDX])
self.ROI2 = pg.LinearRegionItem([startIDX,endIDX])
self.plotWidgetList[0].addItem(self.ROI1)
self.plotWidgetList[1].addItem(self.ROI2)
# print endIDX-startIDX,dataList.shape
for data,curve,yData,i in zip (dataList,self.curveList2,self.curveYDataList2 ,range(0,7)):
# print len(yData)
yData[0:dataList.shape[1]] = dataList[i,:].tolist()[0]
# print len(dataList[i,:].tolist())
curve.setData(y=yData, x=self.curveXData)
self.app.processEvents()
def diffAngle(self,data):
ret = np.mat([0.0,0.0,0.0])
for i,j in zip(range(0,data.shape[0]-1), range(1,data.shape[0]) ):
ret =np.concatenate ( (ret,data[j,:]-data[i,:]),axis=0 )
return ret
def ReadFile(self):
self.ResetGraph()
self.filename = str(self.fileInputName.currentText())
self.FileJudge(self.filename,paint=True)
def FileJudge(self,filename,paint=False,writeout=False):
print filename,DirSetting,ComplexDirSetting
if writeout==True:
self.logfp.write(filename+ ' '+DirSetting+'\n')
try:
fp = open(ComplexDirSetting+filename, "rb")
except:
fp = open(DirSetting+filename, "rb")
tempDict = pickle.load(fp)
# print tempDict
self.filteredIdx = tempDict['filteredIdx']
self.Acc = tempDict['Acc']
self.Gyo = tempDict['Gyo']
self.Mag = tempDict['Mag']
self.Angle = tempDict['Angle']
# self.Angle = self.Angle - np.mean(self.Angle,axis=1)
self.windowsCrossDataIndex = tempDict['windowsCrossDataIndex']
self.AccRawState = tempDict['AccRawState']
self.GyoRawState = tempDict['GyoRawStata']
self.workingIdx = tempDict['workingIdx']
self.MayBeValid = []
self.VarDataIdx = tempDict['VarDataIdx']
self.seq = tempDict['seq']
self.timestamp = tempDict['timestamp']
offset = self.workingIdx[0][0]
for i in range (0,len(self.workingIdx)):
self.workingIdx[i][0] = self.workingIdx[i][0] - offset
self.workingIdx[i][1] = self.workingIdx[i][1] - offset
for axis in ['X','Y','Z']:
for Dir in ['N2P','P2N']:
for i in range(0,len(self.windowsCrossDataIndex[axis][Dir])):
self.windowsCrossDataIndex[axis][Dir][i][0] = self.windowsCrossDataIndex[axis][Dir][i][0] - offset
self.windowsCrossDataIndex[axis][Dir][i][1] = self.windowsCrossDataIndex[axis][Dir][i][1] - offset
# self.windowsCrossDataIndex = self.getCrossingWindowsIDX(self.Acc)
else:
self.logfp.write("workingIdx:"+str(len(self.workingIdx))+'\n')
startIDX = self.workingIdx[0][0]
endIDX = self.workingIdx[-1][1]
dataList = np.concatenate((self.Acc,self.Angle),axis=1)
dataList = dataList.transpose()
if paint == True:
# Plot Data
for data,curve,yData,i in zip (dataList,self.curveList,self.curveYDataList ,range(0,7)):
# print len(yData)
yData[0:endIDX-startIDX] = dataList[i,:].tolist()[0]
# print len(dataList[i,:].tolist())
curve.setData(y=yData, x=self.curveXData)
self.app.processEvents()
# classify or somewhat you can implement in Judge function
self.Judge()
def readDir(self):
''' read Dir and catch all file contained the keywords which defined in complexG & keywords'''
global DirSetting,ComplexDirSetting,Name
self.fileList = []
complexG =['ForwardBackward','BackwardForward','DownUp','UpDown','RightLeft','LeftRight','LeftForward','RightForward','UpRight','UpLeft','RightUpForward','ForwardRight','V','VII','LeftbackForward','RightbackForward','ForwardLeft','LeftLeftForward','RightRightForward','ForwardUp','DownBackward','DownLeft','DownRight','ForwardBackwardForward','LeftRightUp','BackwardRightLeftforward','DownLeftRightforward','RightUpForward']
keywords = ['GoStraight','BackStraight','DownStraight','UpStraight','LeftStraight','RightStraight','RightUpStraight','LeftGoStraight','RightGoStraight','LeftBackStraight','RightBackStraight']
for i in range (0,len(complexG)):
complexG[i] = Name + complexG[i]
for i in range (0,len(keywords)):
keywords[i] = Name + keywords[i]
for keyword in complexG:
for fileName in listdir(ComplexDirSetting):
# if keyword == 'Circle':s
# print fileName,fileName[0:len(keyword)]
if keyword in fileName[0:len(keyword)]:
if fileName not in self.fileList:
self.fileList.append(fileName)
for keyword in keywords:
for fileName in listdir(DirSetting):
# if keyword == 'Circle':s
# print fileName,fileName[0:len(keyword)]
if keyword in fileName[0:len(keyword)]:
if fileName not in self.fileList:
self.fileList.append(fileName)
self.fileInputName.addItems(self.fileList)
self.comboA.addItems(self.fileList)
def Judge(self,writeout=False):
pass
# print "\033[1;31m",data[3],"len:",data[2] - data[1] + 1 ,data[2],data[1],"\033[1;m"
# print "Dis:",disOrder
# print "Angle:",angleOrder
def compare(self):
startWindowsIdx = int(self.startWindows.text())
endWindowsIdx = int(self.endWindows.text())
startIDX = self.workingIdx[startWindowsIdx][0]
endIDX = self.workingIdx[endWindowsIdx][1]
AstartWindowsIdx = None
AendWindowsIdx = None
AstartIDX = None
AendIDX = None
try:
AstartWindowsIdx = int(self.AstartWindows.text())
AendWindowsIdx = int(self.AendWindows.text())
except:
pass
filename = str(self.comboA.currentText())
try:
fp = open(DirSetting+filename, "rb")
except:
fp = open(ComplexDirSetting+filename, "rb")
tempDict = pickle.load(fp)
Acc = tempDict['Acc']
Gyo = tempDict['Gyo']
Mag = tempDict['Mag']
Angle = tempDict['Angle']
windowsCrossDataIndex = tempDict['windowsCrossDataIndex']
AccRawState = tempDict['AccRawState']
GyoRawState = tempDict['GyoRawStata']
workingIdx = tempDict['workingIdx']
VarDataIdx = tempDict['VarDataIdx']
seq = tempDict['seq']
timestamp = tempDict['timestamp']
AstartIDX = workingIdx[AstartWindowsIdx][0]
AendIDX = workingIdx[AendWindowsIdx][1]
print startWindowsIdx,endWindowsIdx,AstartWindowsIdx,AendWindowsIdx
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
#QtGui.QApplication.instance().exec_()
A = MyRealTimePlot()
# A.update()
A.app.instance().exec_()
# QtGui.QApplication.instance().exec_()
# def cross_correlation_using_fft(x, y):
# f1 = fft(x)
# f2 = fft(np.flipud(y))
# cc = np.real(ifft(f1 * f2))
# return fftshift(cc)
# # print self.windowsCrossDataIndex
# # for gesture in FinalS:
# # print "\033[1;31m",gesture,"\033[1;m"
# def compute_shift(x, y):
# assert len(x) == len(y)
# c = cross_correlation_using_fft(x, y)
# assert len(c) == len(x)
# zero_index = int(len(x) / 2) - 1
# shift = zero_index - np.argmax(c)
# return shift |
def absDist(A,B):
| random_line_split |
search.py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
from game import Directions
from pacman import Actions as pacmanActions
from pacman import PacmanRules as pacmanrules
s = Directions.SOUTH
w = Directions.WEST
n = Directions.NORTH
e = Directions.EAST
""" My OWN Class for Analyzing the Consistency of Heuristic used in A* Method """
class Analyzer :
"""
This class is implemented to check whether the path chosen by A* heuristic is consistent or not.
@actions : returned actions by A* method which then will be converted to @corndinates
@coordinates : the tranlsateion of each action from @actions in cordinate form of (x,y) that determines which way to go.
for example corrdinate:(-1,0) means that pacman should go one step to "west"
@states : the states that exists in the path of pacman. These states are created by using start state of pacman and @coordinates
"""
foodCoordinates = []
hasGatheredNeededData = False
foodGridMap = None
def __init__(self , problem , actions ):
self.problem = problem
self.startState = problem.getStartState()
self.actions = actions
self.coordinates = []
self.states = []
self.path = []
def __findFoodOrdering(self):
for node in self.states :
if( (node in self.foodCoordinates) and (not node in self.path) ):
self.path.append(node)
def __createStates(self):
for action in self.actions :
vector = pacmanActions.directionToVector( action, pacmanrules.PACMAN_SPEED )
self.coordinates.append(vector)
self.states.append(self.startState)
for coordinate in self.coordinates:
xCord , yCord = coordinate
try:
currentCordX , currentCordY = self.states[-1][0]
except:
currentCordX , currentCordY = self.states[-1]
nextCordX = xCord + int(currentCordX)
nextCordY = yCord + int(currentCordY)
newState = (nextCordX,nextCordY)
self.states.append(newState)
def __analyze(self):
for state in self.states :
if(len(self.path) > 0 ):
nextGoal = self.path[0]
if(state == nextGoal):
self.path.remove(state)
if(len(self.path) > 0 ):
nextGoal = self.path[0]
tempSearchState = state , Analyzer.foodGridShit
stateSuccessors = self.problem.getSuccessors(tempSearchState)
sumOfManhattans = 0
for successor in stateSuccessors :
successorState , action , cost = successor
sumOfManhattans += util.manhattanDistance(successorState[0],nextGoal)
currentStateManhattanDistance = util.manhattanDistance(state,nextGoal)
if ( currentStateManhattanDistance <= sumOfManhattans + 3):
True
else:
return False
return True
def __isConsistent(self):
isConsistent = self.__analyze()
if(isConsistent):
print("It's Consistent")
return True
print("Not Consistent")
return False
def start(self):
print("========== Analyzer Data ============")
self.__createStates()
self.__findFoodOrdering()
self.__printData()
self.__isConsistent()
print("=======================")
def __printData(self):
print("Actions : " + str(self.actions))
print("Cordinates : " + str(self.coordinates))
print("States : " + str(self.states))
print("Food Coordinates : " + str(Analyzer.foodCoordinates))
print("Path: " + str(self.path))
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def | (self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
Your search algorithm needs to return a list of actions that reaches the
goal. Make sure to implement a graph search algorithm.
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
visitedNodes = []
actions = []
fringe = util.Stack()
cost = 0
if (problem.isGoalState(startState) == True):#if startState is the goalState
return actions
else :
# Data Type Format : (currentState,actions,cost) based on errors I got :\
fringe.push((startState,actions,cost))
while (fringe.isEmpty() == False) :
currentState , actions , cost = fringe.pop()
if(problem.isGoalState(currentState)):
return actions
elif ((currentState in visitedNodes) == False ):
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
state , action , cost = node
if ( (state in visitedNodes) == False ):
newNode = (state , actions + [action] , cost)
fringe.push(newNode)
util.raiseNotDefined()
def breadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
visitedNodes = []
fringe = util.Queue()
cost = 0
if (problem.isGoalState(startState) == True ):
return [] # No Specific Actions
else :
fringe.push((startState , [] , cost ))
while ( fringe.isEmpty() == False ):
currentState , actions , cost = fringe.pop()
""" get the latest node in the Queue """
if ( problem.isGoalState(currentState) == True ):
""" check if the node is our goal or not """
#print("Final Path : " + str(actions))
return actions
else:
if ( (currentState in visitedNodes) == False ):
""" check if this node is alreay visited or needs to be extended ? """
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
if(not node in visitedNodes):
state , action , cost = node
if ( not state in visitedNodes):
fringe.push((state , actions + [action] , cost ))
util.raiseNotDefined()
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
fringe = util.PriorityQueue()
cost = 0
visitedNodes = []
actions = []
"""
Format of Priority Queue :
(item , priority)
item => state , actions , cost
priorityQueue.push ( (state , actions , cost) , cost )
"""
if ( problem.isGoalState(startState) ):
return actions
else :
newNode = startState , actions , cost
priority = cost
fringe.push( newNode , priority )
while ( fringe.isEmpty() == False ):
currentState , actions , cost = fringe.pop()
if ( problem.isGoalState(currentState) == True ) :
#print("Final Path : " + str(actions))
return actions
else :
if ( (currentState in visitedNodes) == False ):
visitedNodes.append(currentState)
currentStateSuccessors = problem.getSuccessors(currentState)
for node in currentStateSuccessors :
state , action , stateCost = node
if( ( state in visitedNodes) == False ) :
newNode = state , actions + [action] , cost + stateCost
priority = cost + stateCost
fringe.push( newNode , priority )
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
print("\t===========================================")
print("\t Processing ... Please Wait for 11 seconds!")
print("\t===========================================")
startState = problem.getStartState();
fringe = util.PriorityQueue()
costs = 0
visitedNodes = []
actions = []
if ( problem.isGoalState(startState) == True):
return actions
else:
newFringeItem = (startState , actions , costs)
fringe.push(newFringeItem,costs)
while(fringe.isEmpty() == False ):
#f(x) = h(x) + g(x)
currentState , actions , costs = fringe.pop()
if ( problem.isGoalState(currentState) == True):
#print("Final Actions : " + str(actions))
"""
If you want the Analyzer Class analizes the chosen path and heuristic ,
Uncomment these two lines of code otherwise leave it be commented cause it increases the run time by 2 seconds.
"""
"""Start : Analyzer Properties """
#analyzer = Analyzer(problem,actions)
#analyzer.start()
"""End : Analyzer Properties """
return actions
else:
if(not currentState in visitedNodes ):
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
state , action , stateCost = node
heuristicAmount = heuristic(state , problem)
newFringeItem = state , actions + [action] , costs + stateCost
priority = costs + heuristicAmount
fringe.push( newFringeItem , priority )
util.raiseNotDefined()
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| getCostOfActions | identifier_name |
search.py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
from game import Directions
from pacman import Actions as pacmanActions
from pacman import PacmanRules as pacmanrules
s = Directions.SOUTH
w = Directions.WEST
n = Directions.NORTH
e = Directions.EAST
""" My OWN Class for Analyzing the Consistency of Heuristic used in A* Method """
class Analyzer :
"""
This class is implemented to check whether the path chosen by A* heuristic is consistent or not.
@actions : returned actions by A* method which then will be converted to @corndinates
@coordinates : the tranlsateion of each action from @actions in cordinate form of (x,y) that determines which way to go.
for example corrdinate:(-1,0) means that pacman should go one step to "west"
@states : the states that exists in the path of pacman. These states are created by using start state of pacman and @coordinates
"""
foodCoordinates = []
hasGatheredNeededData = False
foodGridMap = None
def __init__(self , problem , actions ):
self.problem = problem
self.startState = problem.getStartState()
self.actions = actions
self.coordinates = []
self.states = []
self.path = []
def __findFoodOrdering(self):
for node in self.states :
if( (node in self.foodCoordinates) and (not node in self.path) ):
self.path.append(node)
def __createStates(self):
for action in self.actions :
vector = pacmanActions.directionToVector( action, pacmanrules.PACMAN_SPEED )
self.coordinates.append(vector)
self.states.append(self.startState)
for coordinate in self.coordinates:
xCord , yCord = coordinate
try:
currentCordX , currentCordY = self.states[-1][0]
except:
currentCordX , currentCordY = self.states[-1]
nextCordX = xCord + int(currentCordX)
nextCordY = yCord + int(currentCordY)
newState = (nextCordX,nextCordY)
self.states.append(newState)
def __analyze(self):
for state in self.states :
if(len(self.path) > 0 ):
nextGoal = self.path[0]
if(state == nextGoal):
self.path.remove(state)
if(len(self.path) > 0 ):
nextGoal = self.path[0]
tempSearchState = state , Analyzer.foodGridShit
stateSuccessors = self.problem.getSuccessors(tempSearchState)
sumOfManhattans = 0
for successor in stateSuccessors :
successorState , action , cost = successor
sumOfManhattans += util.manhattanDistance(successorState[0],nextGoal)
currentStateManhattanDistance = util.manhattanDistance(state,nextGoal)
if ( currentStateManhattanDistance <= sumOfManhattans + 3):
True
else:
return False
return True
def __isConsistent(self):
isConsistent = self.__analyze()
if(isConsistent):
print("It's Consistent")
return True
print("Not Consistent")
return False
def start(self):
print("========== Analyzer Data ============")
self.__createStates()
self.__findFoodOrdering()
self.__printData()
self.__isConsistent()
print("=======================")
def __printData(self):
print("Actions : " + str(self.actions))
print("Cordinates : " + str(self.coordinates))
print("States : " + str(self.states))
print("Food Coordinates : " + str(Analyzer.foodCoordinates))
print("Path: " + str(self.path))
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
Your search algorithm needs to return a list of actions that reaches the
goal. Make sure to implement a graph search algorithm.
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
visitedNodes = []
actions = []
fringe = util.Stack()
cost = 0
if (problem.isGoalState(startState) == True):#if startState is the goalState
return actions
else :
# Data Type Format : (currentState,actions,cost) based on errors I got :\
fringe.push((startState,actions,cost))
while (fringe.isEmpty() == False) :
currentState , actions , cost = fringe.pop()
if(problem.isGoalState(currentState)):
return actions
elif ((currentState in visitedNodes) == False ):
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
state , action , cost = node
if ( (state in visitedNodes) == False ):
newNode = (state , actions + [action] , cost)
fringe.push(newNode)
util.raiseNotDefined()
def breadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
visitedNodes = []
fringe = util.Queue()
cost = 0
if (problem.isGoalState(startState) == True ):
return [] # No Specific Actions
else :
fringe.push((startState , [] , cost ))
while ( fringe.isEmpty() == False ):
currentState , actions , cost = fringe.pop()
""" get the latest node in the Queue """
if ( problem.isGoalState(currentState) == True ):
""" check if the node is our goal or not """
#print("Final Path : " + str(actions))
return actions
else:
if ( (currentState in visitedNodes) == False ):
""" check if this node is alreay visited or needs to be extended ? """
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
if(not node in visitedNodes):
state , action , cost = node
if ( not state in visitedNodes):
fringe.push((state , actions + [action] , cost ))
util.raiseNotDefined()
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
fringe = util.PriorityQueue()
cost = 0
visitedNodes = []
actions = []
"""
Format of Priority Queue :
(item , priority)
item => state , actions , cost
priorityQueue.push ( (state , actions , cost) , cost )
"""
if ( problem.isGoalState(startState) ):
return actions
else :
newNode = startState , actions , cost
priority = cost
fringe.push( newNode , priority )
while ( fringe.isEmpty() == False ):
currentState , actions , cost = fringe.pop()
if ( problem.isGoalState(currentState) == True ) :
#print("Final Path : " + str(actions))
return actions
else :
if ( (currentState in visitedNodes) == False ):
visitedNodes.append(currentState)
currentStateSuccessors = problem.getSuccessors(currentState)
for node in currentStateSuccessors :
state , action , stateCost = node
if( ( state in visitedNodes) == False ) :
newNode = state , actions + [action] , cost + stateCost
priority = cost + stateCost
fringe.push( newNode , priority )
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
print("\t===========================================")
print("\t Processing ... Please Wait for 11 seconds!")
print("\t===========================================")
startState = problem.getStartState();
fringe = util.PriorityQueue()
costs = 0
visitedNodes = []
actions = []
if ( problem.isGoalState(startState) == True): | while(fringe.isEmpty() == False ):
#f(x) = h(x) + g(x)
currentState , actions , costs = fringe.pop()
if ( problem.isGoalState(currentState) == True):
#print("Final Actions : " + str(actions))
"""
If you want the Analyzer Class analizes the chosen path and heuristic ,
Uncomment these two lines of code otherwise leave it be commented cause it increases the run time by 2 seconds.
"""
"""Start : Analyzer Properties """
#analyzer = Analyzer(problem,actions)
#analyzer.start()
"""End : Analyzer Properties """
return actions
else:
if(not currentState in visitedNodes ):
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
state , action , stateCost = node
heuristicAmount = heuristic(state , problem)
newFringeItem = state , actions + [action] , costs + stateCost
priority = costs + heuristicAmount
fringe.push( newFringeItem , priority )
util.raiseNotDefined()
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch | return actions
else:
newFringeItem = (startState , actions , costs)
fringe.push(newFringeItem,costs) | random_line_split |
search.py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
from game import Directions
from pacman import Actions as pacmanActions
from pacman import PacmanRules as pacmanrules
s = Directions.SOUTH
w = Directions.WEST
n = Directions.NORTH
e = Directions.EAST
""" My OWN Class for Analyzing the Consistency of Heuristic used in A* Method """
class Analyzer :
"""
This class is implemented to check whether the path chosen by A* heuristic is consistent or not.
@actions : returned actions by A* method which then will be converted to @corndinates
@coordinates : the tranlsateion of each action from @actions in cordinate form of (x,y) that determines which way to go.
for example corrdinate:(-1,0) means that pacman should go one step to "west"
@states : the states that exists in the path of pacman. These states are created by using start state of pacman and @coordinates
"""
foodCoordinates = []
hasGatheredNeededData = False
foodGridMap = None
def __init__(self , problem , actions ):
self.problem = problem
self.startState = problem.getStartState()
self.actions = actions
self.coordinates = []
self.states = []
self.path = []
def __findFoodOrdering(self):
for node in self.states :
if( (node in self.foodCoordinates) and (not node in self.path) ):
self.path.append(node)
def __createStates(self):
for action in self.actions :
vector = pacmanActions.directionToVector( action, pacmanrules.PACMAN_SPEED )
self.coordinates.append(vector)
self.states.append(self.startState)
for coordinate in self.coordinates:
xCord , yCord = coordinate
try:
currentCordX , currentCordY = self.states[-1][0]
except:
currentCordX , currentCordY = self.states[-1]
nextCordX = xCord + int(currentCordX)
nextCordY = yCord + int(currentCordY)
newState = (nextCordX,nextCordY)
self.states.append(newState)
def __analyze(self):
for state in self.states :
if(len(self.path) > 0 ):
nextGoal = self.path[0]
if(state == nextGoal):
self.path.remove(state)
if(len(self.path) > 0 ):
nextGoal = self.path[0]
tempSearchState = state , Analyzer.foodGridShit
stateSuccessors = self.problem.getSuccessors(tempSearchState)
sumOfManhattans = 0
for successor in stateSuccessors :
successorState , action , cost = successor
sumOfManhattans += util.manhattanDistance(successorState[0],nextGoal)
currentStateManhattanDistance = util.manhattanDistance(state,nextGoal)
if ( currentStateManhattanDistance <= sumOfManhattans + 3):
True
else:
return False
return True
def __isConsistent(self):
isConsistent = self.__analyze()
if(isConsistent):
print("It's Consistent")
return True
print("Not Consistent")
return False
def start(self):
print("========== Analyzer Data ============")
self.__createStates()
self.__findFoodOrdering()
self.__printData()
self.__isConsistent()
print("=======================")
def __printData(self):
print("Actions : " + str(self.actions))
print("Cordinates : " + str(self.coordinates))
print("States : " + str(self.states))
print("Food Coordinates : " + str(Analyzer.foodCoordinates))
print("Path: " + str(self.path))
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
Your search algorithm needs to return a list of actions that reaches the
goal. Make sure to implement a graph search algorithm.
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
visitedNodes = []
actions = []
fringe = util.Stack()
cost = 0
if (problem.isGoalState(startState) == True):#if startState is the goalState
return actions
else :
# Data Type Format : (currentState,actions,cost) based on errors I got :\
fringe.push((startState,actions,cost))
while (fringe.isEmpty() == False) :
currentState , actions , cost = fringe.pop()
if(problem.isGoalState(currentState)):
return actions
elif ((currentState in visitedNodes) == False ):
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
state , action , cost = node
if ( (state in visitedNodes) == False ):
newNode = (state , actions + [action] , cost)
fringe.push(newNode)
util.raiseNotDefined()
def breadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
visitedNodes = []
fringe = util.Queue()
cost = 0
if (problem.isGoalState(startState) == True ):
return [] # No Specific Actions
else :
fringe.push((startState , [] , cost ))
while ( fringe.isEmpty() == False ):
currentState , actions , cost = fringe.pop()
""" get the latest node in the Queue """
if ( problem.isGoalState(currentState) == True ):
""" check if the node is our goal or not """
#print("Final Path : " + str(actions))
return actions
else:
if ( (currentState in visitedNodes) == False ):
""" check if this node is alreay visited or needs to be extended ? """
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
if(not node in visitedNodes):
state , action , cost = node
if ( not state in visitedNodes):
fringe.push((state , actions + [action] , cost ))
util.raiseNotDefined()
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
fringe = util.PriorityQueue()
cost = 0
visitedNodes = []
actions = []
"""
Format of Priority Queue :
(item , priority)
item => state , actions , cost
priorityQueue.push ( (state , actions , cost) , cost )
"""
if ( problem.isGoalState(startState) ):
return actions
else :
newNode = startState , actions , cost
priority = cost
fringe.push( newNode , priority )
while ( fringe.isEmpty() == False ):
currentState , actions , cost = fringe.pop()
if ( problem.isGoalState(currentState) == True ) :
#print("Final Path : " + str(actions))
return actions
else :
if ( (currentState in visitedNodes) == False ):
visitedNodes.append(currentState)
currentStateSuccessors = problem.getSuccessors(currentState)
for node in currentStateSuccessors :
|
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
print("\t===========================================")
print("\t Processing ... Please Wait for 11 seconds!")
print("\t===========================================")
startState = problem.getStartState();
fringe = util.PriorityQueue()
costs = 0
visitedNodes = []
actions = []
if ( problem.isGoalState(startState) == True):
return actions
else:
newFringeItem = (startState , actions , costs)
fringe.push(newFringeItem,costs)
while(fringe.isEmpty() == False ):
#f(x) = h(x) + g(x)
currentState , actions , costs = fringe.pop()
if ( problem.isGoalState(currentState) == True):
#print("Final Actions : " + str(actions))
"""
If you want the Analyzer Class analizes the chosen path and heuristic ,
Uncomment these two lines of code otherwise leave it be commented cause it increases the run time by 2 seconds.
"""
"""Start : Analyzer Properties """
#analyzer = Analyzer(problem,actions)
#analyzer.start()
"""End : Analyzer Properties """
return actions
else:
if(not currentState in visitedNodes ):
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
state , action , stateCost = node
heuristicAmount = heuristic(state , problem)
newFringeItem = state , actions + [action] , costs + stateCost
priority = costs + heuristicAmount
fringe.push( newFringeItem , priority )
util.raiseNotDefined()
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| state , action , stateCost = node
if( ( state in visitedNodes) == False ) :
newNode = state , actions + [action] , cost + stateCost
priority = cost + stateCost
fringe.push( newNode , priority ) | conditional_block |
search.py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
from game import Directions
from pacman import Actions as pacmanActions
from pacman import PacmanRules as pacmanrules
s = Directions.SOUTH
w = Directions.WEST
n = Directions.NORTH
e = Directions.EAST
""" My OWN Class for Analyzing the Consistency of Heuristic used in A* Method """
class Analyzer :
"""
This class is implemented to check whether the path chosen by A* heuristic is consistent or not.
@actions : returned actions by A* method which then will be converted to @corndinates
@coordinates : the tranlsateion of each action from @actions in cordinate form of (x,y) that determines which way to go.
for example corrdinate:(-1,0) means that pacman should go one step to "west"
@states : the states that exists in the path of pacman. These states are created by using start state of pacman and @coordinates
"""
foodCoordinates = []
hasGatheredNeededData = False
foodGridMap = None
def __init__(self , problem , actions ):
self.problem = problem
self.startState = problem.getStartState()
self.actions = actions
self.coordinates = []
self.states = []
self.path = []
def __findFoodOrdering(self):
for node in self.states :
if( (node in self.foodCoordinates) and (not node in self.path) ):
self.path.append(node)
def __createStates(self):
for action in self.actions :
vector = pacmanActions.directionToVector( action, pacmanrules.PACMAN_SPEED )
self.coordinates.append(vector)
self.states.append(self.startState)
for coordinate in self.coordinates:
xCord , yCord = coordinate
try:
currentCordX , currentCordY = self.states[-1][0]
except:
currentCordX , currentCordY = self.states[-1]
nextCordX = xCord + int(currentCordX)
nextCordY = yCord + int(currentCordY)
newState = (nextCordX,nextCordY)
self.states.append(newState)
def __analyze(self):
for state in self.states :
if(len(self.path) > 0 ):
nextGoal = self.path[0]
if(state == nextGoal):
self.path.remove(state)
if(len(self.path) > 0 ):
nextGoal = self.path[0]
tempSearchState = state , Analyzer.foodGridShit
stateSuccessors = self.problem.getSuccessors(tempSearchState)
sumOfManhattans = 0
for successor in stateSuccessors :
successorState , action , cost = successor
sumOfManhattans += util.manhattanDistance(successorState[0],nextGoal)
currentStateManhattanDistance = util.manhattanDistance(state,nextGoal)
if ( currentStateManhattanDistance <= sumOfManhattans + 3):
True
else:
return False
return True
def __isConsistent(self):
isConsistent = self.__analyze()
if(isConsistent):
print("It's Consistent")
return True
print("Not Consistent")
return False
def start(self):
print("========== Analyzer Data ============")
self.__createStates()
self.__findFoodOrdering()
self.__printData()
self.__isConsistent()
print("=======================")
def __printData(self):
print("Actions : " + str(self.actions))
print("Cordinates : " + str(self.coordinates))
print("States : " + str(self.states))
print("Food Coordinates : " + str(Analyzer.foodCoordinates))
print("Path: " + str(self.path))
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
|
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
Your search algorithm needs to return a list of actions that reaches the
goal. Make sure to implement a graph search algorithm.
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
visitedNodes = []
actions = []
fringe = util.Stack()
cost = 0
if (problem.isGoalState(startState) == True):#if startState is the goalState
return actions
else :
# Data Type Format : (currentState,actions,cost) based on errors I got :\
fringe.push((startState,actions,cost))
while (fringe.isEmpty() == False) :
currentState , actions , cost = fringe.pop()
if(problem.isGoalState(currentState)):
return actions
elif ((currentState in visitedNodes) == False ):
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
state , action , cost = node
if ( (state in visitedNodes) == False ):
newNode = (state , actions + [action] , cost)
fringe.push(newNode)
util.raiseNotDefined()
def breadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
visitedNodes = []
fringe = util.Queue()
cost = 0
if (problem.isGoalState(startState) == True ):
return [] # No Specific Actions
else :
fringe.push((startState , [] , cost ))
while ( fringe.isEmpty() == False ):
currentState , actions , cost = fringe.pop()
""" get the latest node in the Queue """
if ( problem.isGoalState(currentState) == True ):
""" check if the node is our goal or not """
#print("Final Path : " + str(actions))
return actions
else:
if ( (currentState in visitedNodes) == False ):
""" check if this node is alreay visited or needs to be extended ? """
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
if(not node in visitedNodes):
state , action , cost = node
if ( not state in visitedNodes):
fringe.push((state , actions + [action] , cost ))
util.raiseNotDefined()
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
fringe = util.PriorityQueue()
cost = 0
visitedNodes = []
actions = []
"""
Format of Priority Queue :
(item , priority)
item => state , actions , cost
priorityQueue.push ( (state , actions , cost) , cost )
"""
if ( problem.isGoalState(startState) ):
return actions
else :
newNode = startState , actions , cost
priority = cost
fringe.push( newNode , priority )
while ( fringe.isEmpty() == False ):
currentState , actions , cost = fringe.pop()
if ( problem.isGoalState(currentState) == True ) :
#print("Final Path : " + str(actions))
return actions
else :
if ( (currentState in visitedNodes) == False ):
visitedNodes.append(currentState)
currentStateSuccessors = problem.getSuccessors(currentState)
for node in currentStateSuccessors :
state , action , stateCost = node
if( ( state in visitedNodes) == False ) :
newNode = state , actions + [action] , cost + stateCost
priority = cost + stateCost
fringe.push( newNode , priority )
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
print("\t===========================================")
print("\t Processing ... Please Wait for 11 seconds!")
print("\t===========================================")
startState = problem.getStartState();
fringe = util.PriorityQueue()
costs = 0
visitedNodes = []
actions = []
if ( problem.isGoalState(startState) == True):
return actions
else:
newFringeItem = (startState , actions , costs)
fringe.push(newFringeItem,costs)
while(fringe.isEmpty() == False ):
#f(x) = h(x) + g(x)
currentState , actions , costs = fringe.pop()
if ( problem.isGoalState(currentState) == True):
#print("Final Actions : " + str(actions))
"""
If you want the Analyzer Class analizes the chosen path and heuristic ,
Uncomment these two lines of code otherwise leave it be commented cause it increases the run time by 2 seconds.
"""
"""Start : Analyzer Properties """
#analyzer = Analyzer(problem,actions)
#analyzer.start()
"""End : Analyzer Properties """
return actions
else:
if(not currentState in visitedNodes ):
visitedNodes.append(currentState)
currentNodeSuccessors = problem.getSuccessors(currentState)
for node in currentNodeSuccessors :
state , action , stateCost = node
heuristicAmount = heuristic(state , problem)
newFringeItem = state , actions + [action] , costs + stateCost
priority = costs + heuristicAmount
fringe.push( newFringeItem , priority )
util.raiseNotDefined()
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| """
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w] | identifier_body |
controller.go | package configurableroutes
import (
"context"
"fmt"
"strings"
configv1 "github.com/openshift/api/config/v1"
logf "github.com/openshift/cluster-ingress-operator/pkg/log"
operatorcontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller"
util "github.com/openshift/cluster-ingress-operator/pkg/util"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
ControllerName = "configurable_route_controller"
componentRouteHashLabelKey = "ingress.operator.openshift.io/component-route-hash"
)
var (
log = logf.Logger.WithName(ControllerName)
)
// New creates the configurable route controller from configuration. This is the controller
// that handles all the logic for generating roles and rolebindings for operators that
// include routes with configurable hostnames and serving certificates.
//
// Cluster-admins may provide a custom hostname and serving certificate for a route
// by creating a spec.componentRoute entry in the ingresses.config.openshift.io/cluster
// resource. If a componentRoute entry exists in the status.componentRoutes list with
// a matching namespace and name this controller will generate:
// - A role that grants get/list/watch permissions for the secret defined in the spec.
// - A roleBinding that binds the aforementioned role to each consumingUser specified
// in the corresponding status entry.
func New(mgr manager.Manager, config Config, eventRecorder events.Recorder) (controller.Controller, error) {
kubeClient, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
return nil, err
}
operatorCache := mgr.GetCache()
reconciler := &reconciler{
kclient: kubeClient,
config: config,
client: mgr.GetClient(),
cache: operatorCache,
eventRecorder: eventRecorder,
}
c, err := controller.New(ControllerName, mgr, controller.Options{Reconciler: reconciler})
if err != nil {
return nil, err
}
// Trigger reconcile requests for the cluster ingress resource.
clusterNamePredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
clusterIngressResource := operatorcontroller.IngressClusterConfigName()
return o.GetName() == clusterIngressResource.Name && o.GetNamespace() == clusterIngressResource.Namespace
})
if err := c.Watch(source.Kind(operatorCache, &configv1.Ingress{}), &handler.EnqueueRequestForObject{}, clusterNamePredicate); err != nil {
return nil, err
}
// Trigger reconcile requests for the roles and roleBindings with the componentRoute label.
defaultPredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
labels := o.GetLabels()
_, ok := labels[componentRouteHashLabelKey]
return ok
})
if err := c.Watch(source.Kind(operatorCache, &rbacv1.Role{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
return nil, err
}
if err := c.Watch(source.Kind(operatorCache, &rbacv1.RoleBinding{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
return nil, err
}
return c, nil
}
// resourceToClusterIngressConfig is used to only trigger reconciles on the cluster ingress config.
func (r *reconciler) resourceToClusterIngressConfig(ctx context.Context, o client.Object) []reconcile.Request {
return []reconcile.Request{
{
NamespacedName: operatorcontroller.IngressClusterConfigName(),
},
}
}
// Config holds all the things necessary for the controller to run.
type Config struct {
SecretNamespace string
}
// reconciler handles the actual ingress reconciliation logic in response to
// events.
type reconciler struct {
config Config
client client.Client
kclient kubernetes.Interface
cache cache.Cache
eventRecorder events.Recorder
}
// Reconcile expects request to refer to the
// ingresses.config.openshift.io/cluster object and will do all the work to
// ensure that RBAC for any configured component routes is in the desired state.
func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
log.Info("reconciling", "request", request)
// Only proceed if we can get the ingress resource.
ingress := &configv1.Ingress{}
if err := r.cache.Get(ctx, request.NamespacedName, ingress); err != nil {
if apierrors.IsNotFound(err) {
log.Info("ingress cr not found; reconciliation will be skipped", "request", request)
return reconcile.Result{}, nil
}
return reconcile.Result{}, fmt.Errorf("failed to get ingress %q: %w", request.NamespacedName, err)
}
// Get the list of componentRoutes defined in both the spec and status of the ingress resource that require
// roles and roleBindings.
componentRoutes := intersectingComponentRoutes(ingress.Spec.ComponentRoutes, ingress.Status.ComponentRoutes)
// Ensure role and roleBindings exist for each valid componentRoute.
for _, componentRoute := range componentRoutes {
// Ensure role.
roleName, err := r.ensureServiceCertKeyPairSecretRole(componentRoute)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create role: %v", err)
}
// Get the role just created so the UID is available for the ownerReference on the roleBinding.
role := &rbacv1.Role{}
if err := r.client.Get(ctx, types.NamespacedName{Namespace: r.config.SecretNamespace, Name: roleName}, role); err != nil {
return reconcile.Result{}, err
}
// Ensure roleBinding.
if err := r.ensureServiceCertKeyPairSecretRoleBinding(role, componentRoute); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create roleBinding: %v", err)
}
}
existingHashes := sets.String{}
for _, cr := range componentRoutes {
existingHashes.Insert(cr.Hash)
}
// Delete any roles or roleBindings that were generated for componentRoutes that are no longer defined.
// RoleBindings are cleanedup by garbage collector due to owner reference to Role.
if err := utilerrors.NewAggregate(r.deleteOrphanedRoles(componentRoutes, existingHashes)); err != nil {
return reconcile.Result{}, fmt.Errorf("error(s) deleting orphaned roles: %v", err)
}
return reconcile.Result{}, nil
}
// newAggregatedComponentRoute returns an aggregatedComponentRoute.
func newAggregatedComponentRoute(spec configv1.ComponentRouteSpec, status configv1.ComponentRouteStatus) aggregatedComponentRoute |
// aggregatedComponeRoute contains information from the ComponentRouteSpec
// and ComponentRouteStatus to generate the required Role and RoleBinding.
type aggregatedComponentRoute struct {
Name string
Hash string
ServingCertificateName string
ConsumingUsers []configv1.ConsumingUser
}
// getSubjects returns a list of subjects defined in the aggregatedComponentRoute.
func (componentRoute *aggregatedComponentRoute) getSubjects() []rbacv1.Subject {
subjects := []rbacv1.Subject{}
for _, consumingUser := range componentRoute.ConsumingUsers {
splitConsumingUser := strings.Split(string(consumingUser), ":")
// Ignore invalid consuming users.
if len(splitConsumingUser) != 4 {
continue
}
switch splitConsumingUser[1] {
case "serviceaccount":
subjects = append(subjects, rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind,
APIGroup: "",
Name: splitConsumingUser[3],
Namespace: splitConsumingUser[2],
})
}
}
return subjects
}
// requiresRBAC returns a boolean indicating if the componentRoute requires roles or rolebindings to be generated.
func (componentRoute *aggregatedComponentRoute) requiresRBAC() bool {
// Do not generate RBAC if no consuming users exist.
if len(componentRoute.getSubjects()) == 0 {
return false
}
// Do not generate RBAC if no secret is specified.
if componentRoute.ServingCertificateName == "" {
return false
}
return true
}
// intersectingComponentRoutes takes a slice of componentRouteSpec and a slice
// of componentRouteStatus, identifies which (namespace,name) tuples appear in
// both slices, and returns a slice of aggregatedComponentRoute corresponding to
// those tuples if they require Roles and RoleBindings.
func intersectingComponentRoutes(componentRouteSpecs []configv1.ComponentRouteSpec, componentRouteStatuses []configv1.ComponentRouteStatus) []aggregatedComponentRoute {
componentRouteHashToComponentRouteStatus := map[string]configv1.ComponentRouteStatus{}
for _, componentRouteStatus := range componentRouteStatuses {
componentRouteHash := util.Hash(namespacedName(componentRouteStatus.Namespace, componentRouteStatus.Name))
componentRouteHashToComponentRouteStatus[componentRouteHash] = componentRouteStatus
}
componentRoutes := []aggregatedComponentRoute{}
for _, componentRouteSpec := range componentRouteSpecs {
hash := util.Hash(namespacedName(componentRouteSpec.Namespace, componentRouteSpec.Name))
if componentRouteStatus, ok := componentRouteHashToComponentRouteStatus[hash]; ok {
componentRoute := newAggregatedComponentRoute(componentRouteSpec, componentRouteStatus)
if componentRoute.requiresRBAC() {
componentRoutes = append(componentRoutes, componentRoute)
}
}
}
return componentRoutes
}
func namespacedName(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func componentRouteResources(componentRoute aggregatedComponentRoute) []client.ListOption {
return []client.ListOption{
client.MatchingLabels{
componentRouteHashLabelKey: componentRoute.Hash,
},
client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace),
}
}
func allComponentRouteResources() []client.ListOption {
return []client.ListOption{
client.HasLabels{componentRouteHashLabelKey},
client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace),
}
}
func (r *reconciler) deleteOrphanedRoles(componentRoutes []aggregatedComponentRoute, existingHashes sets.String) []error {
errors := []error{}
roleList := &rbacv1.RoleList{}
if err := r.cache.List(context.TODO(), roleList, allComponentRouteResources()...); err != nil {
return append(errors, err)
}
for _, item := range roleList.Items {
expectedHash, ok := item.GetLabels()[componentRouteHashLabelKey]
if !ok {
errors = append(errors, fmt.Errorf("Unable to find componentRoute hash label on role %s/%s", item.GetNamespace(), item.GetName()))
continue
}
if !existingHashes.Has(expectedHash) {
log.Info("deleting role", "name", item.GetName(), "namespace", item.GetNamespace())
if err := r.client.Delete(context.TODO(), &item); err != nil && !apierrors.IsNotFound(err) {
errors = append(errors, err)
}
}
}
return errors
}
func (r *reconciler) ensureServiceCertKeyPairSecretRole(componentRoute aggregatedComponentRoute) (string, error) {
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
GenerateName: componentRoute.Name + "-",
Namespace: r.config.SecretNamespace,
Labels: map[string]string{
componentRouteHashLabelKey: componentRoute.Hash,
},
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get", "list", "watch"},
APIGroups: []string{""},
Resources: []string{"secrets"},
ResourceNames: []string{componentRoute.ServingCertificateName},
},
},
}
roleList := &rbacv1.RoleList{}
if err := r.cache.List(context.TODO(), roleList, componentRouteResources(componentRoute)...); err != nil {
return "", err
}
if len(roleList.Items) == 0 {
if err := r.client.Create(context.TODO(), role); err != nil {
return "", err
}
} else {
role.Name = roleList.Items[0].Name
role.GenerateName = ""
if _, _, err := resourceapply.ApplyRole(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, role); err != nil {
return "", err
}
}
return role.GetName(), nil
}
func (r *reconciler) ensureServiceCertKeyPairSecretRoleBinding(role *rbacv1.Role, componentRoute aggregatedComponentRoute) error {
if role == nil {
return fmt.Errorf("cannot be passed nil role")
}
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: role.GetName(),
Namespace: r.config.SecretNamespace,
Labels: map[string]string{
componentRouteHashLabelKey: componentRoute.Hash,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: rbacv1.SchemeGroupVersion.String(),
Kind: "Role",
Name: role.GetName(),
UID: role.GetUID(),
},
},
},
Subjects: componentRoute.getSubjects(),
RoleRef: rbacv1.RoleRef{
Kind: "Role",
Name: role.GetName(),
APIGroup: rbacv1.GroupName,
},
}
_, _, err := resourceapply.ApplyRoleBinding(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, roleBinding)
return err
}
| {
// Copy the list of consuming users.
consumingUsersCopy := make([]configv1.ConsumingUser, len(status.ConsumingUsers))
copy(consumingUsersCopy, status.ConsumingUsers)
return aggregatedComponentRoute{
Name: spec.Name,
Hash: util.Hash(namespacedName(spec.Namespace, spec.Name)),
ServingCertificateName: spec.ServingCertKeyPairSecret.Name,
ConsumingUsers: consumingUsersCopy,
}
} | identifier_body |
controller.go | package configurableroutes
import (
"context"
"fmt"
"strings"
configv1 "github.com/openshift/api/config/v1"
logf "github.com/openshift/cluster-ingress-operator/pkg/log"
operatorcontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller"
util "github.com/openshift/cluster-ingress-operator/pkg/util"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
ControllerName = "configurable_route_controller"
componentRouteHashLabelKey = "ingress.operator.openshift.io/component-route-hash"
)
var (
log = logf.Logger.WithName(ControllerName)
)
// New creates the configurable route controller from configuration. This is the controller
// that handles all the logic for generating roles and rolebindings for operators that
// include routes with configurable hostnames and serving certificates.
//
// Cluster-admins may provide a custom hostname and serving certificate for a route
// by creating a spec.componentRoute entry in the ingresses.config.openshift.io/cluster
// resource. If a componentRoute entry exists in the status.componentRoutes list with
// a matching namespace and name this controller will generate:
// - A role that grants get/list/watch permissions for the secret defined in the spec.
// - A roleBinding that binds the aforementioned role to each consumingUser specified
// in the corresponding status entry.
func New(mgr manager.Manager, config Config, eventRecorder events.Recorder) (controller.Controller, error) {
kubeClient, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
return nil, err
}
operatorCache := mgr.GetCache()
reconciler := &reconciler{
kclient: kubeClient,
config: config,
client: mgr.GetClient(),
cache: operatorCache,
eventRecorder: eventRecorder,
}
c, err := controller.New(ControllerName, mgr, controller.Options{Reconciler: reconciler})
if err != nil {
return nil, err
}
// Trigger reconcile requests for the cluster ingress resource.
clusterNamePredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
clusterIngressResource := operatorcontroller.IngressClusterConfigName()
return o.GetName() == clusterIngressResource.Name && o.GetNamespace() == clusterIngressResource.Namespace
})
if err := c.Watch(source.Kind(operatorCache, &configv1.Ingress{}), &handler.EnqueueRequestForObject{}, clusterNamePredicate); err != nil {
return nil, err
}
// Trigger reconcile requests for the roles and roleBindings with the componentRoute label.
defaultPredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
labels := o.GetLabels()
_, ok := labels[componentRouteHashLabelKey]
return ok
})
if err := c.Watch(source.Kind(operatorCache, &rbacv1.Role{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
return nil, err
}
if err := c.Watch(source.Kind(operatorCache, &rbacv1.RoleBinding{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
return nil, err
}
return c, nil
}
// resourceToClusterIngressConfig is used to only trigger reconciles on the cluster ingress config.
func (r *reconciler) resourceToClusterIngressConfig(ctx context.Context, o client.Object) []reconcile.Request {
return []reconcile.Request{
{
NamespacedName: operatorcontroller.IngressClusterConfigName(),
},
}
}
// Config holds all the things necessary for the controller to run.
type Config struct {
SecretNamespace string
}
// reconciler handles the actual ingress reconciliation logic in response to
// events.
type reconciler struct {
config Config
client client.Client
kclient kubernetes.Interface
cache cache.Cache
eventRecorder events.Recorder
}
// Reconcile expects request to refer to the
// ingresses.config.openshift.io/cluster object and will do all the work to
// ensure that RBAC for any configured component routes is in the desired state.
func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
log.Info("reconciling", "request", request)
// Only proceed if we can get the ingress resource.
ingress := &configv1.Ingress{}
if err := r.cache.Get(ctx, request.NamespacedName, ingress); err != nil {
if apierrors.IsNotFound(err) {
log.Info("ingress cr not found; reconciliation will be skipped", "request", request)
return reconcile.Result{}, nil
}
return reconcile.Result{}, fmt.Errorf("failed to get ingress %q: %w", request.NamespacedName, err)
}
// Get the list of componentRoutes defined in both the spec and status of the ingress resource that require
// roles and roleBindings.
componentRoutes := intersectingComponentRoutes(ingress.Spec.ComponentRoutes, ingress.Status.ComponentRoutes)
// Ensure role and roleBindings exist for each valid componentRoute.
for _, componentRoute := range componentRoutes {
// Ensure role.
roleName, err := r.ensureServiceCertKeyPairSecretRole(componentRoute)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create role: %v", err)
}
// Get the role just created so the UID is available for the ownerReference on the roleBinding.
role := &rbacv1.Role{}
if err := r.client.Get(ctx, types.NamespacedName{Namespace: r.config.SecretNamespace, Name: roleName}, role); err != nil {
return reconcile.Result{}, err
}
// Ensure roleBinding.
if err := r.ensureServiceCertKeyPairSecretRoleBinding(role, componentRoute); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create roleBinding: %v", err)
}
}
existingHashes := sets.String{}
for _, cr := range componentRoutes {
existingHashes.Insert(cr.Hash)
}
// Delete any roles or roleBindings that were generated for componentRoutes that are no longer defined.
// RoleBindings are cleanedup by garbage collector due to owner reference to Role.
if err := utilerrors.NewAggregate(r.deleteOrphanedRoles(componentRoutes, existingHashes)); err != nil {
return reconcile.Result{}, fmt.Errorf("error(s) deleting orphaned roles: %v", err)
}
return reconcile.Result{}, nil
}
// newAggregatedComponentRoute returns an aggregatedComponentRoute.
func newAggregatedComponentRoute(spec configv1.ComponentRouteSpec, status configv1.ComponentRouteStatus) aggregatedComponentRoute {
// Copy the list of consuming users.
consumingUsersCopy := make([]configv1.ConsumingUser, len(status.ConsumingUsers))
copy(consumingUsersCopy, status.ConsumingUsers)
return aggregatedComponentRoute{
Name: spec.Name,
Hash: util.Hash(namespacedName(spec.Namespace, spec.Name)),
ServingCertificateName: spec.ServingCertKeyPairSecret.Name,
ConsumingUsers: consumingUsersCopy,
}
}
// aggregatedComponeRoute contains information from the ComponentRouteSpec
// and ComponentRouteStatus to generate the required Role and RoleBinding.
type aggregatedComponentRoute struct {
Name string
Hash string
ServingCertificateName string
ConsumingUsers []configv1.ConsumingUser
}
// getSubjects returns a list of subjects defined in the aggregatedComponentRoute.
func (componentRoute *aggregatedComponentRoute) getSubjects() []rbacv1.Subject {
subjects := []rbacv1.Subject{}
for _, consumingUser := range componentRoute.ConsumingUsers {
splitConsumingUser := strings.Split(string(consumingUser), ":")
// Ignore invalid consuming users.
if len(splitConsumingUser) != 4 {
continue
}
switch splitConsumingUser[1] {
case "serviceaccount":
subjects = append(subjects, rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind,
APIGroup: "",
Name: splitConsumingUser[3],
Namespace: splitConsumingUser[2],
})
}
}
return subjects
}
// requiresRBAC returns a boolean indicating if the componentRoute requires roles or rolebindings to be generated.
func (componentRoute *aggregatedComponentRoute) requiresRBAC() bool {
// Do not generate RBAC if no consuming users exist.
if len(componentRoute.getSubjects()) == 0 {
return false
}
// Do not generate RBAC if no secret is specified.
if componentRoute.ServingCertificateName == "" {
return false
}
return true
}
// intersectingComponentRoutes takes a slice of componentRouteSpec and a slice
// of componentRouteStatus, identifies which (namespace,name) tuples appear in
// both slices, and returns a slice of aggregatedComponentRoute corresponding to
// those tuples if they require Roles and RoleBindings.
func intersectingComponentRoutes(componentRouteSpecs []configv1.ComponentRouteSpec, componentRouteStatuses []configv1.ComponentRouteStatus) []aggregatedComponentRoute {
componentRouteHashToComponentRouteStatus := map[string]configv1.ComponentRouteStatus{}
for _, componentRouteStatus := range componentRouteStatuses {
componentRouteHash := util.Hash(namespacedName(componentRouteStatus.Namespace, componentRouteStatus.Name))
componentRouteHashToComponentRouteStatus[componentRouteHash] = componentRouteStatus
}
componentRoutes := []aggregatedComponentRoute{}
for _, componentRouteSpec := range componentRouteSpecs {
hash := util.Hash(namespacedName(componentRouteSpec.Namespace, componentRouteSpec.Name))
if componentRouteStatus, ok := componentRouteHashToComponentRouteStatus[hash]; ok {
componentRoute := newAggregatedComponentRoute(componentRouteSpec, componentRouteStatus)
if componentRoute.requiresRBAC() {
componentRoutes = append(componentRoutes, componentRoute)
}
}
}
return componentRoutes
}
func namespacedName(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func componentRouteResources(componentRoute aggregatedComponentRoute) []client.ListOption {
return []client.ListOption{
client.MatchingLabels{
componentRouteHashLabelKey: componentRoute.Hash,
},
client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace),
}
}
| }
}
func (r *reconciler) deleteOrphanedRoles(componentRoutes []aggregatedComponentRoute, existingHashes sets.String) []error {
errors := []error{}
roleList := &rbacv1.RoleList{}
if err := r.cache.List(context.TODO(), roleList, allComponentRouteResources()...); err != nil {
return append(errors, err)
}
for _, item := range roleList.Items {
expectedHash, ok := item.GetLabels()[componentRouteHashLabelKey]
if !ok {
errors = append(errors, fmt.Errorf("Unable to find componentRoute hash label on role %s/%s", item.GetNamespace(), item.GetName()))
continue
}
if !existingHashes.Has(expectedHash) {
log.Info("deleting role", "name", item.GetName(), "namespace", item.GetNamespace())
if err := r.client.Delete(context.TODO(), &item); err != nil && !apierrors.IsNotFound(err) {
errors = append(errors, err)
}
}
}
return errors
}
func (r *reconciler) ensureServiceCertKeyPairSecretRole(componentRoute aggregatedComponentRoute) (string, error) {
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
GenerateName: componentRoute.Name + "-",
Namespace: r.config.SecretNamespace,
Labels: map[string]string{
componentRouteHashLabelKey: componentRoute.Hash,
},
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get", "list", "watch"},
APIGroups: []string{""},
Resources: []string{"secrets"},
ResourceNames: []string{componentRoute.ServingCertificateName},
},
},
}
roleList := &rbacv1.RoleList{}
if err := r.cache.List(context.TODO(), roleList, componentRouteResources(componentRoute)...); err != nil {
return "", err
}
if len(roleList.Items) == 0 {
if err := r.client.Create(context.TODO(), role); err != nil {
return "", err
}
} else {
role.Name = roleList.Items[0].Name
role.GenerateName = ""
if _, _, err := resourceapply.ApplyRole(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, role); err != nil {
return "", err
}
}
return role.GetName(), nil
}
func (r *reconciler) ensureServiceCertKeyPairSecretRoleBinding(role *rbacv1.Role, componentRoute aggregatedComponentRoute) error {
if role == nil {
return fmt.Errorf("cannot be passed nil role")
}
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: role.GetName(),
Namespace: r.config.SecretNamespace,
Labels: map[string]string{
componentRouteHashLabelKey: componentRoute.Hash,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: rbacv1.SchemeGroupVersion.String(),
Kind: "Role",
Name: role.GetName(),
UID: role.GetUID(),
},
},
},
Subjects: componentRoute.getSubjects(),
RoleRef: rbacv1.RoleRef{
Kind: "Role",
Name: role.GetName(),
APIGroup: rbacv1.GroupName,
},
}
_, _, err := resourceapply.ApplyRoleBinding(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, roleBinding)
return err
} | func allComponentRouteResources() []client.ListOption {
return []client.ListOption{
client.HasLabels{componentRouteHashLabelKey},
client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace), | random_line_split |
controller.go | package configurableroutes
import (
"context"
"fmt"
"strings"
configv1 "github.com/openshift/api/config/v1"
logf "github.com/openshift/cluster-ingress-operator/pkg/log"
operatorcontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller"
util "github.com/openshift/cluster-ingress-operator/pkg/util"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
ControllerName = "configurable_route_controller"
componentRouteHashLabelKey = "ingress.operator.openshift.io/component-route-hash"
)
var (
log = logf.Logger.WithName(ControllerName)
)
// New creates the configurable route controller from configuration. This is the controller
// that handles all the logic for generating roles and rolebindings for operators that
// include routes with configurable hostnames and serving certificates.
//
// Cluster-admins may provide a custom hostname and serving certificate for a route
// by creating a spec.componentRoute entry in the ingresses.config.openshift.io/cluster
// resource. If a componentRoute entry exists in the status.componentRoutes list with
// a matching namespace and name this controller will generate:
// - A role that grants get/list/watch permissions for the secret defined in the spec.
// - A roleBinding that binds the aforementioned role to each consumingUser specified
// in the corresponding status entry.
func New(mgr manager.Manager, config Config, eventRecorder events.Recorder) (controller.Controller, error) {
kubeClient, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
return nil, err
}
operatorCache := mgr.GetCache()
reconciler := &reconciler{
kclient: kubeClient,
config: config,
client: mgr.GetClient(),
cache: operatorCache,
eventRecorder: eventRecorder,
}
c, err := controller.New(ControllerName, mgr, controller.Options{Reconciler: reconciler})
if err != nil {
return nil, err
}
// Trigger reconcile requests for the cluster ingress resource.
clusterNamePredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
clusterIngressResource := operatorcontroller.IngressClusterConfigName()
return o.GetName() == clusterIngressResource.Name && o.GetNamespace() == clusterIngressResource.Namespace
})
if err := c.Watch(source.Kind(operatorCache, &configv1.Ingress{}), &handler.EnqueueRequestForObject{}, clusterNamePredicate); err != nil {
return nil, err
}
// Trigger reconcile requests for the roles and roleBindings with the componentRoute label.
defaultPredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
labels := o.GetLabels()
_, ok := labels[componentRouteHashLabelKey]
return ok
})
if err := c.Watch(source.Kind(operatorCache, &rbacv1.Role{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
return nil, err
}
if err := c.Watch(source.Kind(operatorCache, &rbacv1.RoleBinding{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
return nil, err
}
return c, nil
}
// resourceToClusterIngressConfig is used to only trigger reconciles on the cluster ingress config.
func (r *reconciler) resourceToClusterIngressConfig(ctx context.Context, o client.Object) []reconcile.Request {
return []reconcile.Request{
{
NamespacedName: operatorcontroller.IngressClusterConfigName(),
},
}
}
// Config holds all the things necessary for the controller to run.
type Config struct {
SecretNamespace string
}
// reconciler handles the actual ingress reconciliation logic in response to
// events.
type reconciler struct {
config Config
client client.Client
kclient kubernetes.Interface
cache cache.Cache
eventRecorder events.Recorder
}
// Reconcile expects request to refer to the
// ingresses.config.openshift.io/cluster object and will do all the work to
// ensure that RBAC for any configured component routes is in the desired state.
func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
log.Info("reconciling", "request", request)
// Only proceed if we can get the ingress resource.
ingress := &configv1.Ingress{}
if err := r.cache.Get(ctx, request.NamespacedName, ingress); err != nil {
if apierrors.IsNotFound(err) {
log.Info("ingress cr not found; reconciliation will be skipped", "request", request)
return reconcile.Result{}, nil
}
return reconcile.Result{}, fmt.Errorf("failed to get ingress %q: %w", request.NamespacedName, err)
}
// Get the list of componentRoutes defined in both the spec and status of the ingress resource that require
// roles and roleBindings.
componentRoutes := intersectingComponentRoutes(ingress.Spec.ComponentRoutes, ingress.Status.ComponentRoutes)
// Ensure role and roleBindings exist for each valid componentRoute.
for _, componentRoute := range componentRoutes {
// Ensure role.
roleName, err := r.ensureServiceCertKeyPairSecretRole(componentRoute)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create role: %v", err)
}
// Get the role just created so the UID is available for the ownerReference on the roleBinding.
role := &rbacv1.Role{}
if err := r.client.Get(ctx, types.NamespacedName{Namespace: r.config.SecretNamespace, Name: roleName}, role); err != nil {
return reconcile.Result{}, err
}
// Ensure roleBinding.
if err := r.ensureServiceCertKeyPairSecretRoleBinding(role, componentRoute); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create roleBinding: %v", err)
}
}
existingHashes := sets.String{}
for _, cr := range componentRoutes {
existingHashes.Insert(cr.Hash)
}
// Delete any roles or roleBindings that were generated for componentRoutes that are no longer defined.
// RoleBindings are cleanedup by garbage collector due to owner reference to Role.
if err := utilerrors.NewAggregate(r.deleteOrphanedRoles(componentRoutes, existingHashes)); err != nil {
return reconcile.Result{}, fmt.Errorf("error(s) deleting orphaned roles: %v", err)
}
return reconcile.Result{}, nil
}
// newAggregatedComponentRoute returns an aggregatedComponentRoute.
func newAggregatedComponentRoute(spec configv1.ComponentRouteSpec, status configv1.ComponentRouteStatus) aggregatedComponentRoute {
// Copy the list of consuming users.
consumingUsersCopy := make([]configv1.ConsumingUser, len(status.ConsumingUsers))
copy(consumingUsersCopy, status.ConsumingUsers)
return aggregatedComponentRoute{
Name: spec.Name,
Hash: util.Hash(namespacedName(spec.Namespace, spec.Name)),
ServingCertificateName: spec.ServingCertKeyPairSecret.Name,
ConsumingUsers: consumingUsersCopy,
}
}
// aggregatedComponeRoute contains information from the ComponentRouteSpec
// and ComponentRouteStatus to generate the required Role and RoleBinding.
type aggregatedComponentRoute struct {
Name string
Hash string
ServingCertificateName string
ConsumingUsers []configv1.ConsumingUser
}
// getSubjects returns a list of subjects defined in the aggregatedComponentRoute.
func (componentRoute *aggregatedComponentRoute) getSubjects() []rbacv1.Subject {
subjects := []rbacv1.Subject{}
for _, consumingUser := range componentRoute.ConsumingUsers {
splitConsumingUser := strings.Split(string(consumingUser), ":")
// Ignore invalid consuming users.
if len(splitConsumingUser) != 4 {
continue
}
switch splitConsumingUser[1] {
case "serviceaccount":
subjects = append(subjects, rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind,
APIGroup: "",
Name: splitConsumingUser[3],
Namespace: splitConsumingUser[2],
})
}
}
return subjects
}
// requiresRBAC returns a boolean indicating if the componentRoute requires roles or rolebindings to be generated.
func (componentRoute *aggregatedComponentRoute) requiresRBAC() bool {
// Do not generate RBAC if no consuming users exist.
if len(componentRoute.getSubjects()) == 0 {
return false
}
// Do not generate RBAC if no secret is specified.
if componentRoute.ServingCertificateName == "" {
return false
}
return true
}
// intersectingComponentRoutes takes a slice of componentRouteSpec and a slice
// of componentRouteStatus, identifies which (namespace,name) tuples appear in
// both slices, and returns a slice of aggregatedComponentRoute corresponding to
// those tuples if they require Roles and RoleBindings.
func intersectingComponentRoutes(componentRouteSpecs []configv1.ComponentRouteSpec, componentRouteStatuses []configv1.ComponentRouteStatus) []aggregatedComponentRoute {
componentRouteHashToComponentRouteStatus := map[string]configv1.ComponentRouteStatus{}
for _, componentRouteStatus := range componentRouteStatuses |
componentRoutes := []aggregatedComponentRoute{}
for _, componentRouteSpec := range componentRouteSpecs {
hash := util.Hash(namespacedName(componentRouteSpec.Namespace, componentRouteSpec.Name))
if componentRouteStatus, ok := componentRouteHashToComponentRouteStatus[hash]; ok {
componentRoute := newAggregatedComponentRoute(componentRouteSpec, componentRouteStatus)
if componentRoute.requiresRBAC() {
componentRoutes = append(componentRoutes, componentRoute)
}
}
}
return componentRoutes
}
func namespacedName(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func componentRouteResources(componentRoute aggregatedComponentRoute) []client.ListOption {
return []client.ListOption{
client.MatchingLabels{
componentRouteHashLabelKey: componentRoute.Hash,
},
client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace),
}
}
func allComponentRouteResources() []client.ListOption {
return []client.ListOption{
client.HasLabels{componentRouteHashLabelKey},
client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace),
}
}
func (r *reconciler) deleteOrphanedRoles(componentRoutes []aggregatedComponentRoute, existingHashes sets.String) []error {
errors := []error{}
roleList := &rbacv1.RoleList{}
if err := r.cache.List(context.TODO(), roleList, allComponentRouteResources()...); err != nil {
return append(errors, err)
}
for _, item := range roleList.Items {
expectedHash, ok := item.GetLabels()[componentRouteHashLabelKey]
if !ok {
errors = append(errors, fmt.Errorf("Unable to find componentRoute hash label on role %s/%s", item.GetNamespace(), item.GetName()))
continue
}
if !existingHashes.Has(expectedHash) {
log.Info("deleting role", "name", item.GetName(), "namespace", item.GetNamespace())
if err := r.client.Delete(context.TODO(), &item); err != nil && !apierrors.IsNotFound(err) {
errors = append(errors, err)
}
}
}
return errors
}
func (r *reconciler) ensureServiceCertKeyPairSecretRole(componentRoute aggregatedComponentRoute) (string, error) {
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
GenerateName: componentRoute.Name + "-",
Namespace: r.config.SecretNamespace,
Labels: map[string]string{
componentRouteHashLabelKey: componentRoute.Hash,
},
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get", "list", "watch"},
APIGroups: []string{""},
Resources: []string{"secrets"},
ResourceNames: []string{componentRoute.ServingCertificateName},
},
},
}
roleList := &rbacv1.RoleList{}
if err := r.cache.List(context.TODO(), roleList, componentRouteResources(componentRoute)...); err != nil {
return "", err
}
if len(roleList.Items) == 0 {
if err := r.client.Create(context.TODO(), role); err != nil {
return "", err
}
} else {
role.Name = roleList.Items[0].Name
role.GenerateName = ""
if _, _, err := resourceapply.ApplyRole(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, role); err != nil {
return "", err
}
}
return role.GetName(), nil
}
func (r *reconciler) ensureServiceCertKeyPairSecretRoleBinding(role *rbacv1.Role, componentRoute aggregatedComponentRoute) error {
if role == nil {
return fmt.Errorf("cannot be passed nil role")
}
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: role.GetName(),
Namespace: r.config.SecretNamespace,
Labels: map[string]string{
componentRouteHashLabelKey: componentRoute.Hash,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: rbacv1.SchemeGroupVersion.String(),
Kind: "Role",
Name: role.GetName(),
UID: role.GetUID(),
},
},
},
Subjects: componentRoute.getSubjects(),
RoleRef: rbacv1.RoleRef{
Kind: "Role",
Name: role.GetName(),
APIGroup: rbacv1.GroupName,
},
}
_, _, err := resourceapply.ApplyRoleBinding(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, roleBinding)
return err
}
| {
componentRouteHash := util.Hash(namespacedName(componentRouteStatus.Namespace, componentRouteStatus.Name))
componentRouteHashToComponentRouteStatus[componentRouteHash] = componentRouteStatus
} | conditional_block |
controller.go | package configurableroutes
import (
"context"
"fmt"
"strings"
configv1 "github.com/openshift/api/config/v1"
logf "github.com/openshift/cluster-ingress-operator/pkg/log"
operatorcontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller"
util "github.com/openshift/cluster-ingress-operator/pkg/util"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
ControllerName = "configurable_route_controller"
componentRouteHashLabelKey = "ingress.operator.openshift.io/component-route-hash"
)
var (
log = logf.Logger.WithName(ControllerName)
)
// New creates the configurable route controller from configuration. This is the controller
// that handles all the logic for generating roles and rolebindings for operators that
// include routes with configurable hostnames and serving certificates.
//
// Cluster-admins may provide a custom hostname and serving certificate for a route
// by creating a spec.componentRoute entry in the ingresses.config.openshift.io/cluster
// resource. If a componentRoute entry exists in the status.componentRoutes list with
// a matching namespace and name this controller will generate:
// - A role that grants get/list/watch permissions for the secret defined in the spec.
// - A roleBinding that binds the aforementioned role to each consumingUser specified
// in the corresponding status entry.
func New(mgr manager.Manager, config Config, eventRecorder events.Recorder) (controller.Controller, error) {
kubeClient, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
return nil, err
}
operatorCache := mgr.GetCache()
reconciler := &reconciler{
kclient: kubeClient,
config: config,
client: mgr.GetClient(),
cache: operatorCache,
eventRecorder: eventRecorder,
}
c, err := controller.New(ControllerName, mgr, controller.Options{Reconciler: reconciler})
if err != nil {
return nil, err
}
// Trigger reconcile requests for the cluster ingress resource.
clusterNamePredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
clusterIngressResource := operatorcontroller.IngressClusterConfigName()
return o.GetName() == clusterIngressResource.Name && o.GetNamespace() == clusterIngressResource.Namespace
})
if err := c.Watch(source.Kind(operatorCache, &configv1.Ingress{}), &handler.EnqueueRequestForObject{}, clusterNamePredicate); err != nil {
return nil, err
}
// Trigger reconcile requests for the roles and roleBindings with the componentRoute label.
defaultPredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {
labels := o.GetLabels()
_, ok := labels[componentRouteHashLabelKey]
return ok
})
if err := c.Watch(source.Kind(operatorCache, &rbacv1.Role{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
return nil, err
}
if err := c.Watch(source.Kind(operatorCache, &rbacv1.RoleBinding{}), handler.EnqueueRequestsFromMapFunc(reconciler.resourceToClusterIngressConfig), defaultPredicate); err != nil {
return nil, err
}
return c, nil
}
// resourceToClusterIngressConfig is used to only trigger reconciles on the cluster ingress config.
func (r *reconciler) resourceToClusterIngressConfig(ctx context.Context, o client.Object) []reconcile.Request {
return []reconcile.Request{
{
NamespacedName: operatorcontroller.IngressClusterConfigName(),
},
}
}
// Config holds all the things necessary for the controller to run.
type Config struct {
SecretNamespace string
}
// reconciler handles the actual ingress reconciliation logic in response to
// events.
type reconciler struct {
config Config
client client.Client
kclient kubernetes.Interface
cache cache.Cache
eventRecorder events.Recorder
}
// Reconcile expects request to refer to the
// ingresses.config.openshift.io/cluster object and will do all the work to
// ensure that RBAC for any configured component routes is in the desired state.
func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
log.Info("reconciling", "request", request)
// Only proceed if we can get the ingress resource.
ingress := &configv1.Ingress{}
if err := r.cache.Get(ctx, request.NamespacedName, ingress); err != nil {
if apierrors.IsNotFound(err) {
log.Info("ingress cr not found; reconciliation will be skipped", "request", request)
return reconcile.Result{}, nil
}
return reconcile.Result{}, fmt.Errorf("failed to get ingress %q: %w", request.NamespacedName, err)
}
// Get the list of componentRoutes defined in both the spec and status of the ingress resource that require
// roles and roleBindings.
componentRoutes := intersectingComponentRoutes(ingress.Spec.ComponentRoutes, ingress.Status.ComponentRoutes)
// Ensure role and roleBindings exist for each valid componentRoute.
for _, componentRoute := range componentRoutes {
// Ensure role.
roleName, err := r.ensureServiceCertKeyPairSecretRole(componentRoute)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create role: %v", err)
}
// Get the role just created so the UID is available for the ownerReference on the roleBinding.
role := &rbacv1.Role{}
if err := r.client.Get(ctx, types.NamespacedName{Namespace: r.config.SecretNamespace, Name: roleName}, role); err != nil {
return reconcile.Result{}, err
}
// Ensure roleBinding.
if err := r.ensureServiceCertKeyPairSecretRoleBinding(role, componentRoute); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to create roleBinding: %v", err)
}
}
existingHashes := sets.String{}
for _, cr := range componentRoutes {
existingHashes.Insert(cr.Hash)
}
// Delete any roles or roleBindings that were generated for componentRoutes that are no longer defined.
// RoleBindings are cleanedup by garbage collector due to owner reference to Role.
if err := utilerrors.NewAggregate(r.deleteOrphanedRoles(componentRoutes, existingHashes)); err != nil {
return reconcile.Result{}, fmt.Errorf("error(s) deleting orphaned roles: %v", err)
}
return reconcile.Result{}, nil
}
// newAggregatedComponentRoute returns an aggregatedComponentRoute.
func | (spec configv1.ComponentRouteSpec, status configv1.ComponentRouteStatus) aggregatedComponentRoute {
// Copy the list of consuming users.
consumingUsersCopy := make([]configv1.ConsumingUser, len(status.ConsumingUsers))
copy(consumingUsersCopy, status.ConsumingUsers)
return aggregatedComponentRoute{
Name: spec.Name,
Hash: util.Hash(namespacedName(spec.Namespace, spec.Name)),
ServingCertificateName: spec.ServingCertKeyPairSecret.Name,
ConsumingUsers: consumingUsersCopy,
}
}
// aggregatedComponeRoute contains information from the ComponentRouteSpec
// and ComponentRouteStatus to generate the required Role and RoleBinding.
type aggregatedComponentRoute struct {
Name string
Hash string
ServingCertificateName string
ConsumingUsers []configv1.ConsumingUser
}
// getSubjects returns a list of subjects defined in the aggregatedComponentRoute.
func (componentRoute *aggregatedComponentRoute) getSubjects() []rbacv1.Subject {
subjects := []rbacv1.Subject{}
for _, consumingUser := range componentRoute.ConsumingUsers {
splitConsumingUser := strings.Split(string(consumingUser), ":")
// Ignore invalid consuming users.
if len(splitConsumingUser) != 4 {
continue
}
switch splitConsumingUser[1] {
case "serviceaccount":
subjects = append(subjects, rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind,
APIGroup: "",
Name: splitConsumingUser[3],
Namespace: splitConsumingUser[2],
})
}
}
return subjects
}
// requiresRBAC returns a boolean indicating if the componentRoute requires roles or rolebindings to be generated.
func (componentRoute *aggregatedComponentRoute) requiresRBAC() bool {
// Do not generate RBAC if no consuming users exist.
if len(componentRoute.getSubjects()) == 0 {
return false
}
// Do not generate RBAC if no secret is specified.
if componentRoute.ServingCertificateName == "" {
return false
}
return true
}
// intersectingComponentRoutes takes a slice of componentRouteSpec and a slice
// of componentRouteStatus, identifies which (namespace,name) tuples appear in
// both slices, and returns a slice of aggregatedComponentRoute corresponding to
// those tuples if they require Roles and RoleBindings.
func intersectingComponentRoutes(componentRouteSpecs []configv1.ComponentRouteSpec, componentRouteStatuses []configv1.ComponentRouteStatus) []aggregatedComponentRoute {
componentRouteHashToComponentRouteStatus := map[string]configv1.ComponentRouteStatus{}
for _, componentRouteStatus := range componentRouteStatuses {
componentRouteHash := util.Hash(namespacedName(componentRouteStatus.Namespace, componentRouteStatus.Name))
componentRouteHashToComponentRouteStatus[componentRouteHash] = componentRouteStatus
}
componentRoutes := []aggregatedComponentRoute{}
for _, componentRouteSpec := range componentRouteSpecs {
hash := util.Hash(namespacedName(componentRouteSpec.Namespace, componentRouteSpec.Name))
if componentRouteStatus, ok := componentRouteHashToComponentRouteStatus[hash]; ok {
componentRoute := newAggregatedComponentRoute(componentRouteSpec, componentRouteStatus)
if componentRoute.requiresRBAC() {
componentRoutes = append(componentRoutes, componentRoute)
}
}
}
return componentRoutes
}
func namespacedName(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func componentRouteResources(componentRoute aggregatedComponentRoute) []client.ListOption {
return []client.ListOption{
client.MatchingLabels{
componentRouteHashLabelKey: componentRoute.Hash,
},
client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace),
}
}
func allComponentRouteResources() []client.ListOption {
return []client.ListOption{
client.HasLabels{componentRouteHashLabelKey},
client.InNamespace(operatorcontroller.GlobalUserSpecifiedConfigNamespace),
}
}
func (r *reconciler) deleteOrphanedRoles(componentRoutes []aggregatedComponentRoute, existingHashes sets.String) []error {
errors := []error{}
roleList := &rbacv1.RoleList{}
if err := r.cache.List(context.TODO(), roleList, allComponentRouteResources()...); err != nil {
return append(errors, err)
}
for _, item := range roleList.Items {
expectedHash, ok := item.GetLabels()[componentRouteHashLabelKey]
if !ok {
errors = append(errors, fmt.Errorf("Unable to find componentRoute hash label on role %s/%s", item.GetNamespace(), item.GetName()))
continue
}
if !existingHashes.Has(expectedHash) {
log.Info("deleting role", "name", item.GetName(), "namespace", item.GetNamespace())
if err := r.client.Delete(context.TODO(), &item); err != nil && !apierrors.IsNotFound(err) {
errors = append(errors, err)
}
}
}
return errors
}
func (r *reconciler) ensureServiceCertKeyPairSecretRole(componentRoute aggregatedComponentRoute) (string, error) {
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
GenerateName: componentRoute.Name + "-",
Namespace: r.config.SecretNamespace,
Labels: map[string]string{
componentRouteHashLabelKey: componentRoute.Hash,
},
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get", "list", "watch"},
APIGroups: []string{""},
Resources: []string{"secrets"},
ResourceNames: []string{componentRoute.ServingCertificateName},
},
},
}
roleList := &rbacv1.RoleList{}
if err := r.cache.List(context.TODO(), roleList, componentRouteResources(componentRoute)...); err != nil {
return "", err
}
if len(roleList.Items) == 0 {
if err := r.client.Create(context.TODO(), role); err != nil {
return "", err
}
} else {
role.Name = roleList.Items[0].Name
role.GenerateName = ""
if _, _, err := resourceapply.ApplyRole(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, role); err != nil {
return "", err
}
}
return role.GetName(), nil
}
func (r *reconciler) ensureServiceCertKeyPairSecretRoleBinding(role *rbacv1.Role, componentRoute aggregatedComponentRoute) error {
if role == nil {
return fmt.Errorf("cannot be passed nil role")
}
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: role.GetName(),
Namespace: r.config.SecretNamespace,
Labels: map[string]string{
componentRouteHashLabelKey: componentRoute.Hash,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: rbacv1.SchemeGroupVersion.String(),
Kind: "Role",
Name: role.GetName(),
UID: role.GetUID(),
},
},
},
Subjects: componentRoute.getSubjects(),
RoleRef: rbacv1.RoleRef{
Kind: "Role",
Name: role.GetName(),
APIGroup: rbacv1.GroupName,
},
}
_, _, err := resourceapply.ApplyRoleBinding(context.TODO(), r.kclient.RbacV1(), r.eventRecorder, roleBinding)
return err
}
| newAggregatedComponentRoute | identifier_name |
stream_inference.py | #!/usr/bin/env python3
"""
Stream inference on microphone data.
Forked from text-mode spectrogram using live microphone data from:
https://python-sounddevice.readthedocs.io/en/0.3.15/examples.html#real-time-text-mode-spectrogram
Usage:
python scripts/stream_inference.py --model_dir /home/ilyak/Downloads/1594432670
If you have multiple devices see the number of the relevant one here:
python scripts/stream_inference.py --list-devices
then use the -d parameter
"""
import argparse
import math
import shutil
import sys
from time import sleep, monotonic, time
import cv2
from librosa import amplitude_to_db, db_to_amplitude
from librosa.filters import get_window
from librosa.util import pad_center
import numpy as np
import sounddevice as sd
import soundfile as sf
import tensorflow as tf
import pdb
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
class MovingAvgPerf():
def __init__(self, nticks=10):
self.times = []
self.nticks = nticks
def tick(self, diff):
self.times.append(diff)
if len(self.times) > self.nticks:
self.times.pop(0)
def fps_str(self):
fps = len(self.times) / sum(self.times)
return '%.2f fps' % fps
class | (MovingAvgPerf):
def tick(self):
super().tick(monotonic())
def fps_str(self, text='fps'):
if len(self.times) == 1:
fps = 0
else:
fps = len(self.times) / (self.times[-1] - self.times[0])
return '%.2f %s' % (fps, text)
class RunningMeanStdDev():
"""Online mean and std dev.
"""
def __init__(self, n=0, m=0.0, S=0.0):
self.n = n
self.m = m
self.S = S
def mean(self):
return self.m
def std_dev(self):
return np.sqrt(self.S/self.n)
def update(self, x):
# S grows by 1000 each second
for x_i in x:
self.n = self.n + 1
m_prev = self.m
self.m = self.m + (x_i - self.m) / self.n
self.S = self.S + (x_i - self.m) * (x_i - m_prev)
# if self.n % 10000 == 0:
# print('n: {}, m: {}, S: {}'.format(self.n, self.m, self.S))
class NotificationAudioPlayer():
"""Play a sound and don't accept calls to play for "block" period of time.
"""
def __init__(self, block_s=None):
self.sound, self.fs = sf.read('data/notification.wav', dtype='float32')
self.length_s = len(self.sound) / float(self.fs)
self.block_s = block_s or self.length_s
self.last_play_time = time() - (2*self.block_s)
def is_blocked(self):
return (time() - self.last_play_time) < self.block_s
def play(self, device):
if not self.is_blocked():
sd.play(self.sound, self.fs, device=device)
self.last_play_time = time()
def __draw_label(img, text, bg_color, pos=(0,0)):
font_face = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.4
color = (0, 0, 0)
thickness = cv2.FILLED
margin = 2
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
pos = (pos[0] + 5, pos[1] + int(txt_size[0][1]*2))
end_x = pos[0] + txt_size[0][0] + margin
end_y = pos[1] - txt_size[0][1] - margin
cv2.rectangle(img, pos, (end_x, end_y), bg_color, thickness)
cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)
buffer_size_s = 10
samplerate = 16000
window_length = int(0.025 * samplerate)
n_fft = 512
hop_length = int(0.01 * samplerate)
ma_width = 124
samples_buffer_block_size = hop_length
samples_buffer_nblocks = 1 + n_fft // samples_buffer_block_size
samples_buffer = np.zeros(samples_buffer_nblocks * samples_buffer_block_size)
samples_buffer_p = 0
spec_buffer_w = buffer_size_s * samplerate // samples_buffer_block_size
# Also add some historical padding
# write in 2 locations if within the last N seconds
# of buffer
buffer_pad_size_s = 2
spec_buffer_h = 257
spec_buffer_pad = buffer_pad_size_s * samplerate // samples_buffer_block_size
spec_buffer = np.zeros((spec_buffer_h, spec_buffer_pad + spec_buffer_w))
spec_buffer_p = 0
# modeled after: https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#stft
fft_window = get_window('hann', window_length, fftbins=True)
fft_window = pad_center(fft_window, n_fft)
# normalize audio.
# should match how network was trained.
norm_win = get_window('hann', 5, fftbins=False)
norm_win = norm_win / norm_win.sum()
target_dBFS = -15.0
running_stats = RunningMeanStdDev()
bell = NotificationAudioPlayer(block_s=1.5)
columns = 80
def update_spectrogram(indata, frames, time, status):
global samples_buffer, spec_buffer, samples_buffer_p, spec_buffer_p, running_stats
if status:
text = ' ' + str(status) + ' '
print('\x1b[34;40m', text.center(columns, '#'),
'\x1b[0m', sep='')
if any(indata):
indata = np.array(indata[:,0])
# normalization seems to help prevent false alarms from fricatives
# but is not essential.
cur_dBFS = np.convolve(indata, norm_win, 'same').max()
a = 10**( (target_dBFS-cur_dBFS) / 20.0 )
normed_indata = a * indata
if samples_buffer_p < (samples_buffer_nblocks - 1):
print('buffering' + ('.'*samples_buffer_p))
ss = samples_buffer_p * samples_buffer_block_size
se = (samples_buffer_p + 1) * samples_buffer_block_size
samples_buffer[ss:se] = normed_indata
samples_buffer_p += 1
elif samples_buffer_p == (samples_buffer_nblocks - 1):
ss = samples_buffer_p * samples_buffer_block_size
se = (samples_buffer_p + 1) * samples_buffer_block_size
samples_buffer[ss:se] = normed_indata
# fft
magnitude = np.abs(np.fft.rfft(fft_window * samples_buffer[-n_fft:], n=n_fft))
#
mag_db = amplitude_to_db(magnitude)
mag_db = np.clip(mag_db, -55, 65)
magnitude = db_to_amplitude(mag_db)
magnitude = magnitude**0.3
running_stats.update(magnitude)
magnitude -= running_stats.mean()
magnitude /= running_stats.std_dev()
# primary write
write_idx = (spec_buffer_p % spec_buffer_w)
spec_buffer[:, spec_buffer_pad + write_idx] = magnitude
# secondary buffer write
if spec_buffer_w < write_idx + spec_buffer_pad:
pad_write_idx = (write_idx + spec_buffer_pad) % spec_buffer_w
spec_buffer[:, pad_write_idx] = magnitude
spec_buffer_p += 1
samples_buffer = np.roll(samples_buffer, -samples_buffer_block_size)
else:
raise ValueError('samples_buffer_p out of range, is %i' % samples_buffer_p)
else:
print('no input')
def stream_spectrogram_of_microphone_audio(args):
with sd.InputStream(device=args.device, channels=1, callback=update_spectrogram,
blocksize=samples_buffer_block_size,
samplerate=samplerate):
while True:
sleep(0.02)
cv2.imshow("Press 'q' to quit", np.asarray((spec_buffer * 255).astype(np.uint8)))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def stream_inference_of_microphone_audio(args):
"""
The spectrum sits in a buffer with width spec_buffer_pad + spec_buffer_w .
The first spec_buffer_pad of it is a copy of the last spec_buffer_pad of it.
"""
with sd.InputStream(device=args.device, channels=1, callback=update_spectrogram,
blocksize=samples_buffer_block_size,
samplerate=samplerate):
with tf.Session() as sess:
tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], args.model_dir)
predictor = tf.contrib.predictor.from_saved_model(args.model_dir)
network_spec_w = args.model_input_width
spectrogram_predictions = np.zeros((spec_buffer_w + spec_buffer_pad, 3))
spectrogram_predictions_ma = np.zeros((spec_buffer_w + spec_buffer_pad, 3))
# display buffer that can be overwritten with every new display
display_predictions = np.stack([np.arange(spec_buffer_w), np.zeros(spec_buffer_w)]).astype(int).T
frame = np.zeros((spec_buffer_h, spec_buffer_w, 3), dtype=np.uint8)
alpha = 0.025
N = 90
myfilt = alpha*((1-alpha)**np.arange(0,N))
myfilt /= myfilt[:60].sum()
last_pred_write = 0
perf = MovingWindowPerf()
while True:
# sleep(0.01) # restrict max fps to 100
imageify = spec_buffer[:,spec_buffer_pad:].copy()
imageify = (imageify - imageify.min()) / (1e-5 + imageify.max() - imageify.min())
imageify = (imageify * 255).astype(np.uint8)
frame[:,:,0] = imageify
frame[:,:,1] = imageify
frame[:,:,2] = imageify
idx_now = spec_buffer_p % spec_buffer_w
# we look into the past
se = idx_now + spec_buffer_pad
ss = se - network_spec_w
next_input = np.expand_dims(spec_buffer[:, ss:se], 0)
prediction = predictor({"spectrograms": next_input })['softmax']
perf.tick()
prediction = prediction[0] # batch size of one
spectrogram_predictions[last_pred_write:se,:] = prediction[-1,:] # write latest prediction
latest_ma = spectrogram_predictions[(se-ma_width):se,2].mean()
spectrogram_predictions_ma[last_pred_write:se,:] = latest_ma # write the latest moving average
last_pred_write = se
pred_class = np.argmax(prediction[-1,:])
# erase the future
spectrogram_predictions[se+1:] = 0
spectrogram_predictions_ma[se+1:] = 0
# play a bell on WW detection
if latest_ma >= args.detection_threshold:
bell.play(device=args.device)
### display code
white = (255,255,255)
blue = (255,0,0)
red = (0,0,255)
green = (0,255,0)
colors = [green, blue, red]
activities = ['voice', 'silence', 'alexa']
for i, color in enumerate(colors):
display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)
cv2.polylines(frame, [display_predictions], isClosed=False, color=color)
# display moving average
display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions_ma[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)
cv2.polylines(frame, [display_predictions], isClosed=False, color=white)
cv2.line(frame, (idx_now, 0), (idx_now, spec_buffer_h), green, 2) # moving vertical line
thresh_display_height = spec_buffer_h - int(args.detection_threshold * spec_buffer_h)
cv2.line(frame, (0, thresh_display_height), (spec_buffer_w, thresh_display_height), white, 2) # horizontal line
__draw_label(frame, activities[pred_class], colors[pred_class], (spec_buffer_w//2, 0))
__draw_label(frame, perf.fps_str('inferences/sec'), green)
cv2.imshow("Press 'q' to quit", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def main():
usage_line = ' press q to quit '
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser = argparse.ArgumentParser(
description=__doc__ + '\n\nSupported keys:' + usage_line,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[parser])
parser.add_argument(
'-d', '--device', type=int_or_str,
help='input device (numeric ID or substring)')
parser.add_argument(
'-m', '--model_dir', type=str, default='/Users/ilyak/Downloads/spectrogram03.20.20/1584800014',
help='...')
parser.add_argument(
'-w', '--model_input_width', type=int, default=31,
help='This is the width of the spectrogram input the model expects.')
parser.add_argument(
'-t', '--detection_threshold', type=float, default=0.280,
help='The threshold that the moving average of WW predictions needs to meet to count as a WW detection.')
args = parser.parse_args(remaining)
print(usage_line)
stream_inference_of_microphone_audio(args)
if __name__ == '__main__':
main()
| MovingWindowPerf | identifier_name |
stream_inference.py | #!/usr/bin/env python3
"""
Stream inference on microphone data.
Forked from text-mode spectrogram using live microphone data from:
https://python-sounddevice.readthedocs.io/en/0.3.15/examples.html#real-time-text-mode-spectrogram
Usage:
python scripts/stream_inference.py --model_dir /home/ilyak/Downloads/1594432670
If you have multiple devices see the number of the relevant one here:
python scripts/stream_inference.py --list-devices
then use the -d parameter
"""
import argparse
import math
import shutil
import sys
from time import sleep, monotonic, time
import cv2
from librosa import amplitude_to_db, db_to_amplitude
from librosa.filters import get_window
from librosa.util import pad_center
import numpy as np
import sounddevice as sd
import soundfile as sf
import tensorflow as tf
import pdb
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
class MovingAvgPerf():
def __init__(self, nticks=10):
self.times = []
self.nticks = nticks
def tick(self, diff):
self.times.append(diff)
if len(self.times) > self.nticks:
self.times.pop(0)
def fps_str(self):
fps = len(self.times) / sum(self.times)
return '%.2f fps' % fps
class MovingWindowPerf(MovingAvgPerf): | if len(self.times) == 1:
fps = 0
else:
fps = len(self.times) / (self.times[-1] - self.times[0])
return '%.2f %s' % (fps, text)
class RunningMeanStdDev():
"""Online mean and std dev.
"""
def __init__(self, n=0, m=0.0, S=0.0):
self.n = n
self.m = m
self.S = S
def mean(self):
return self.m
def std_dev(self):
return np.sqrt(self.S/self.n)
def update(self, x):
# S grows by 1000 each second
for x_i in x:
self.n = self.n + 1
m_prev = self.m
self.m = self.m + (x_i - self.m) / self.n
self.S = self.S + (x_i - self.m) * (x_i - m_prev)
# if self.n % 10000 == 0:
# print('n: {}, m: {}, S: {}'.format(self.n, self.m, self.S))
class NotificationAudioPlayer():
"""Play a sound and don't accept calls to play for "block" period of time.
"""
def __init__(self, block_s=None):
self.sound, self.fs = sf.read('data/notification.wav', dtype='float32')
self.length_s = len(self.sound) / float(self.fs)
self.block_s = block_s or self.length_s
self.last_play_time = time() - (2*self.block_s)
def is_blocked(self):
return (time() - self.last_play_time) < self.block_s
def play(self, device):
if not self.is_blocked():
sd.play(self.sound, self.fs, device=device)
self.last_play_time = time()
def __draw_label(img, text, bg_color, pos=(0,0)):
font_face = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.4
color = (0, 0, 0)
thickness = cv2.FILLED
margin = 2
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
pos = (pos[0] + 5, pos[1] + int(txt_size[0][1]*2))
end_x = pos[0] + txt_size[0][0] + margin
end_y = pos[1] - txt_size[0][1] - margin
cv2.rectangle(img, pos, (end_x, end_y), bg_color, thickness)
cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)
buffer_size_s = 10
samplerate = 16000
window_length = int(0.025 * samplerate)
n_fft = 512
hop_length = int(0.01 * samplerate)
ma_width = 124
samples_buffer_block_size = hop_length
samples_buffer_nblocks = 1 + n_fft // samples_buffer_block_size
samples_buffer = np.zeros(samples_buffer_nblocks * samples_buffer_block_size)
samples_buffer_p = 0
spec_buffer_w = buffer_size_s * samplerate // samples_buffer_block_size
# Also add some historical padding
# write in 2 locations if within the last N seconds
# of buffer
buffer_pad_size_s = 2
spec_buffer_h = 257
spec_buffer_pad = buffer_pad_size_s * samplerate // samples_buffer_block_size
spec_buffer = np.zeros((spec_buffer_h, spec_buffer_pad + spec_buffer_w))
spec_buffer_p = 0
# modeled after: https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#stft
fft_window = get_window('hann', window_length, fftbins=True)
fft_window = pad_center(fft_window, n_fft)
# normalize audio.
# should match how network was trained.
norm_win = get_window('hann', 5, fftbins=False)
norm_win = norm_win / norm_win.sum()
target_dBFS = -15.0
running_stats = RunningMeanStdDev()
bell = NotificationAudioPlayer(block_s=1.5)
columns = 80
def update_spectrogram(indata, frames, time, status):
global samples_buffer, spec_buffer, samples_buffer_p, spec_buffer_p, running_stats
if status:
text = ' ' + str(status) + ' '
print('\x1b[34;40m', text.center(columns, '#'),
'\x1b[0m', sep='')
if any(indata):
indata = np.array(indata[:,0])
# normalization seems to help prevent false alarms from fricatives
# but is not essential.
cur_dBFS = np.convolve(indata, norm_win, 'same').max()
a = 10**( (target_dBFS-cur_dBFS) / 20.0 )
normed_indata = a * indata
if samples_buffer_p < (samples_buffer_nblocks - 1):
print('buffering' + ('.'*samples_buffer_p))
ss = samples_buffer_p * samples_buffer_block_size
se = (samples_buffer_p + 1) * samples_buffer_block_size
samples_buffer[ss:se] = normed_indata
samples_buffer_p += 1
elif samples_buffer_p == (samples_buffer_nblocks - 1):
ss = samples_buffer_p * samples_buffer_block_size
se = (samples_buffer_p + 1) * samples_buffer_block_size
samples_buffer[ss:se] = normed_indata
# fft
magnitude = np.abs(np.fft.rfft(fft_window * samples_buffer[-n_fft:], n=n_fft))
#
mag_db = amplitude_to_db(magnitude)
mag_db = np.clip(mag_db, -55, 65)
magnitude = db_to_amplitude(mag_db)
magnitude = magnitude**0.3
running_stats.update(magnitude)
magnitude -= running_stats.mean()
magnitude /= running_stats.std_dev()
# primary write
write_idx = (spec_buffer_p % spec_buffer_w)
spec_buffer[:, spec_buffer_pad + write_idx] = magnitude
# secondary buffer write
if spec_buffer_w < write_idx + spec_buffer_pad:
pad_write_idx = (write_idx + spec_buffer_pad) % spec_buffer_w
spec_buffer[:, pad_write_idx] = magnitude
spec_buffer_p += 1
samples_buffer = np.roll(samples_buffer, -samples_buffer_block_size)
else:
raise ValueError('samples_buffer_p out of range, is %i' % samples_buffer_p)
else:
print('no input')
def stream_spectrogram_of_microphone_audio(args):
with sd.InputStream(device=args.device, channels=1, callback=update_spectrogram,
blocksize=samples_buffer_block_size,
samplerate=samplerate):
while True:
sleep(0.02)
cv2.imshow("Press 'q' to quit", np.asarray((spec_buffer * 255).astype(np.uint8)))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def stream_inference_of_microphone_audio(args):
"""
The spectrum sits in a buffer with width spec_buffer_pad + spec_buffer_w .
The first spec_buffer_pad of it is a copy of the last spec_buffer_pad of it.
"""
with sd.InputStream(device=args.device, channels=1, callback=update_spectrogram,
blocksize=samples_buffer_block_size,
samplerate=samplerate):
with tf.Session() as sess:
tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], args.model_dir)
predictor = tf.contrib.predictor.from_saved_model(args.model_dir)
network_spec_w = args.model_input_width
spectrogram_predictions = np.zeros((spec_buffer_w + spec_buffer_pad, 3))
spectrogram_predictions_ma = np.zeros((spec_buffer_w + spec_buffer_pad, 3))
# display buffer that can be overwritten with every new display
display_predictions = np.stack([np.arange(spec_buffer_w), np.zeros(spec_buffer_w)]).astype(int).T
frame = np.zeros((spec_buffer_h, spec_buffer_w, 3), dtype=np.uint8)
alpha = 0.025
N = 90
myfilt = alpha*((1-alpha)**np.arange(0,N))
myfilt /= myfilt[:60].sum()
last_pred_write = 0
perf = MovingWindowPerf()
while True:
# sleep(0.01) # restrict max fps to 100
imageify = spec_buffer[:,spec_buffer_pad:].copy()
imageify = (imageify - imageify.min()) / (1e-5 + imageify.max() - imageify.min())
imageify = (imageify * 255).astype(np.uint8)
frame[:,:,0] = imageify
frame[:,:,1] = imageify
frame[:,:,2] = imageify
idx_now = spec_buffer_p % spec_buffer_w
# we look into the past
se = idx_now + spec_buffer_pad
ss = se - network_spec_w
next_input = np.expand_dims(spec_buffer[:, ss:se], 0)
prediction = predictor({"spectrograms": next_input })['softmax']
perf.tick()
prediction = prediction[0] # batch size of one
spectrogram_predictions[last_pred_write:se,:] = prediction[-1,:] # write latest prediction
latest_ma = spectrogram_predictions[(se-ma_width):se,2].mean()
spectrogram_predictions_ma[last_pred_write:se,:] = latest_ma # write the latest moving average
last_pred_write = se
pred_class = np.argmax(prediction[-1,:])
# erase the future
spectrogram_predictions[se+1:] = 0
spectrogram_predictions_ma[se+1:] = 0
# play a bell on WW detection
if latest_ma >= args.detection_threshold:
bell.play(device=args.device)
### display code
white = (255,255,255)
blue = (255,0,0)
red = (0,0,255)
green = (0,255,0)
colors = [green, blue, red]
activities = ['voice', 'silence', 'alexa']
for i, color in enumerate(colors):
display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)
cv2.polylines(frame, [display_predictions], isClosed=False, color=color)
# display moving average
display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions_ma[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)
cv2.polylines(frame, [display_predictions], isClosed=False, color=white)
cv2.line(frame, (idx_now, 0), (idx_now, spec_buffer_h), green, 2) # moving vertical line
thresh_display_height = spec_buffer_h - int(args.detection_threshold * spec_buffer_h)
cv2.line(frame, (0, thresh_display_height), (spec_buffer_w, thresh_display_height), white, 2) # horizontal line
__draw_label(frame, activities[pred_class], colors[pred_class], (spec_buffer_w//2, 0))
__draw_label(frame, perf.fps_str('inferences/sec'), green)
cv2.imshow("Press 'q' to quit", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def main():
usage_line = ' press q to quit '
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser = argparse.ArgumentParser(
description=__doc__ + '\n\nSupported keys:' + usage_line,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[parser])
parser.add_argument(
'-d', '--device', type=int_or_str,
help='input device (numeric ID or substring)')
parser.add_argument(
'-m', '--model_dir', type=str, default='/Users/ilyak/Downloads/spectrogram03.20.20/1584800014',
help='...')
parser.add_argument(
'-w', '--model_input_width', type=int, default=31,
help='This is the width of the spectrogram input the model expects.')
parser.add_argument(
'-t', '--detection_threshold', type=float, default=0.280,
help='The threshold that the moving average of WW predictions needs to meet to count as a WW detection.')
args = parser.parse_args(remaining)
print(usage_line)
stream_inference_of_microphone_audio(args)
if __name__ == '__main__':
main() |
def tick(self):
super().tick(monotonic())
def fps_str(self, text='fps'): | random_line_split |
stream_inference.py | #!/usr/bin/env python3
"""
Stream inference on microphone data.
Forked from text-mode spectrogram using live microphone data from:
https://python-sounddevice.readthedocs.io/en/0.3.15/examples.html#real-time-text-mode-spectrogram
Usage:
python scripts/stream_inference.py --model_dir /home/ilyak/Downloads/1594432670
If you have multiple devices see the number of the relevant one here:
python scripts/stream_inference.py --list-devices
then use the -d parameter
"""
import argparse
import math
import shutil
import sys
from time import sleep, monotonic, time
import cv2
from librosa import amplitude_to_db, db_to_amplitude
from librosa.filters import get_window
from librosa.util import pad_center
import numpy as np
import sounddevice as sd
import soundfile as sf
import tensorflow as tf
import pdb
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
class MovingAvgPerf():
def __init__(self, nticks=10):
self.times = []
self.nticks = nticks
def tick(self, diff):
self.times.append(diff)
if len(self.times) > self.nticks:
self.times.pop(0)
def fps_str(self):
fps = len(self.times) / sum(self.times)
return '%.2f fps' % fps
class MovingWindowPerf(MovingAvgPerf):
def tick(self):
super().tick(monotonic())
def fps_str(self, text='fps'):
if len(self.times) == 1:
fps = 0
else:
fps = len(self.times) / (self.times[-1] - self.times[0])
return '%.2f %s' % (fps, text)
class RunningMeanStdDev():
"""Online mean and std dev.
"""
def __init__(self, n=0, m=0.0, S=0.0):
self.n = n
self.m = m
self.S = S
def mean(self):
return self.m
def std_dev(self):
return np.sqrt(self.S/self.n)
def update(self, x):
# S grows by 1000 each second
for x_i in x:
self.n = self.n + 1
m_prev = self.m
self.m = self.m + (x_i - self.m) / self.n
self.S = self.S + (x_i - self.m) * (x_i - m_prev)
# if self.n % 10000 == 0:
# print('n: {}, m: {}, S: {}'.format(self.n, self.m, self.S))
class NotificationAudioPlayer():
"""Play a sound and don't accept calls to play for "block" period of time.
"""
def __init__(self, block_s=None):
self.sound, self.fs = sf.read('data/notification.wav', dtype='float32')
self.length_s = len(self.sound) / float(self.fs)
self.block_s = block_s or self.length_s
self.last_play_time = time() - (2*self.block_s)
def is_blocked(self):
return (time() - self.last_play_time) < self.block_s
def play(self, device):
if not self.is_blocked():
sd.play(self.sound, self.fs, device=device)
self.last_play_time = time()
def __draw_label(img, text, bg_color, pos=(0,0)):
font_face = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.4
color = (0, 0, 0)
thickness = cv2.FILLED
margin = 2
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
pos = (pos[0] + 5, pos[1] + int(txt_size[0][1]*2))
end_x = pos[0] + txt_size[0][0] + margin
end_y = pos[1] - txt_size[0][1] - margin
cv2.rectangle(img, pos, (end_x, end_y), bg_color, thickness)
cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)
buffer_size_s = 10
samplerate = 16000
window_length = int(0.025 * samplerate)
n_fft = 512
hop_length = int(0.01 * samplerate)
ma_width = 124
samples_buffer_block_size = hop_length
samples_buffer_nblocks = 1 + n_fft // samples_buffer_block_size
samples_buffer = np.zeros(samples_buffer_nblocks * samples_buffer_block_size)
samples_buffer_p = 0
spec_buffer_w = buffer_size_s * samplerate // samples_buffer_block_size
# Also add some historical padding
# write in 2 locations if within the last N seconds
# of buffer
buffer_pad_size_s = 2
spec_buffer_h = 257
spec_buffer_pad = buffer_pad_size_s * samplerate // samples_buffer_block_size
spec_buffer = np.zeros((spec_buffer_h, spec_buffer_pad + spec_buffer_w))
spec_buffer_p = 0
# modeled after: https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#stft
fft_window = get_window('hann', window_length, fftbins=True)
fft_window = pad_center(fft_window, n_fft)
# normalize audio.
# should match how network was trained.
norm_win = get_window('hann', 5, fftbins=False)
norm_win = norm_win / norm_win.sum()
target_dBFS = -15.0
running_stats = RunningMeanStdDev()
bell = NotificationAudioPlayer(block_s=1.5)
columns = 80
def update_spectrogram(indata, frames, time, status):
global samples_buffer, spec_buffer, samples_buffer_p, spec_buffer_p, running_stats
if status:
text = ' ' + str(status) + ' '
print('\x1b[34;40m', text.center(columns, '#'),
'\x1b[0m', sep='')
if any(indata):
indata = np.array(indata[:,0])
# normalization seems to help prevent false alarms from fricatives
# but is not essential.
cur_dBFS = np.convolve(indata, norm_win, 'same').max()
a = 10**( (target_dBFS-cur_dBFS) / 20.0 )
normed_indata = a * indata
if samples_buffer_p < (samples_buffer_nblocks - 1):
print('buffering' + ('.'*samples_buffer_p))
ss = samples_buffer_p * samples_buffer_block_size
se = (samples_buffer_p + 1) * samples_buffer_block_size
samples_buffer[ss:se] = normed_indata
samples_buffer_p += 1
elif samples_buffer_p == (samples_buffer_nblocks - 1):
ss = samples_buffer_p * samples_buffer_block_size
se = (samples_buffer_p + 1) * samples_buffer_block_size
samples_buffer[ss:se] = normed_indata
# fft
magnitude = np.abs(np.fft.rfft(fft_window * samples_buffer[-n_fft:], n=n_fft))
#
mag_db = amplitude_to_db(magnitude)
mag_db = np.clip(mag_db, -55, 65)
magnitude = db_to_amplitude(mag_db)
magnitude = magnitude**0.3
running_stats.update(magnitude)
magnitude -= running_stats.mean()
magnitude /= running_stats.std_dev()
# primary write
write_idx = (spec_buffer_p % spec_buffer_w)
spec_buffer[:, spec_buffer_pad + write_idx] = magnitude
# secondary buffer write
if spec_buffer_w < write_idx + spec_buffer_pad:
pad_write_idx = (write_idx + spec_buffer_pad) % spec_buffer_w
spec_buffer[:, pad_write_idx] = magnitude
spec_buffer_p += 1
samples_buffer = np.roll(samples_buffer, -samples_buffer_block_size)
else:
raise ValueError('samples_buffer_p out of range, is %i' % samples_buffer_p)
else:
print('no input')
def stream_spectrogram_of_microphone_audio(args):
with sd.InputStream(device=args.device, channels=1, callback=update_spectrogram,
blocksize=samples_buffer_block_size,
samplerate=samplerate):
while True:
sleep(0.02)
cv2.imshow("Press 'q' to quit", np.asarray((spec_buffer * 255).astype(np.uint8)))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def stream_inference_of_microphone_audio(args):
"""
The spectrum sits in a buffer with width spec_buffer_pad + spec_buffer_w .
The first spec_buffer_pad of it is a copy of the last spec_buffer_pad of it.
"""
with sd.InputStream(device=args.device, channels=1, callback=update_spectrogram,
blocksize=samples_buffer_block_size,
samplerate=samplerate):
with tf.Session() as sess:
tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], args.model_dir)
predictor = tf.contrib.predictor.from_saved_model(args.model_dir)
network_spec_w = args.model_input_width
spectrogram_predictions = np.zeros((spec_buffer_w + spec_buffer_pad, 3))
spectrogram_predictions_ma = np.zeros((spec_buffer_w + spec_buffer_pad, 3))
# display buffer that can be overwritten with every new display
display_predictions = np.stack([np.arange(spec_buffer_w), np.zeros(spec_buffer_w)]).astype(int).T
frame = np.zeros((spec_buffer_h, spec_buffer_w, 3), dtype=np.uint8)
alpha = 0.025
N = 90
myfilt = alpha*((1-alpha)**np.arange(0,N))
myfilt /= myfilt[:60].sum()
last_pred_write = 0
perf = MovingWindowPerf()
while True:
# sleep(0.01) # restrict max fps to 100
imageify = spec_buffer[:,spec_buffer_pad:].copy()
imageify = (imageify - imageify.min()) / (1e-5 + imageify.max() - imageify.min())
imageify = (imageify * 255).astype(np.uint8)
frame[:,:,0] = imageify
frame[:,:,1] = imageify
frame[:,:,2] = imageify
idx_now = spec_buffer_p % spec_buffer_w
# we look into the past
se = idx_now + spec_buffer_pad
ss = se - network_spec_w
next_input = np.expand_dims(spec_buffer[:, ss:se], 0)
prediction = predictor({"spectrograms": next_input })['softmax']
perf.tick()
prediction = prediction[0] # batch size of one
spectrogram_predictions[last_pred_write:se,:] = prediction[-1,:] # write latest prediction
latest_ma = spectrogram_predictions[(se-ma_width):se,2].mean()
spectrogram_predictions_ma[last_pred_write:se,:] = latest_ma # write the latest moving average
last_pred_write = se
pred_class = np.argmax(prediction[-1,:])
# erase the future
spectrogram_predictions[se+1:] = 0
spectrogram_predictions_ma[se+1:] = 0
# play a bell on WW detection
if latest_ma >= args.detection_threshold:
bell.play(device=args.device)
### display code
white = (255,255,255)
blue = (255,0,0)
red = (0,0,255)
green = (0,255,0)
colors = [green, blue, red]
activities = ['voice', 'silence', 'alexa']
for i, color in enumerate(colors):
display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)
cv2.polylines(frame, [display_predictions], isClosed=False, color=color)
# display moving average
display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions_ma[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)
cv2.polylines(frame, [display_predictions], isClosed=False, color=white)
cv2.line(frame, (idx_now, 0), (idx_now, spec_buffer_h), green, 2) # moving vertical line
thresh_display_height = spec_buffer_h - int(args.detection_threshold * spec_buffer_h)
cv2.line(frame, (0, thresh_display_height), (spec_buffer_w, thresh_display_height), white, 2) # horizontal line
__draw_label(frame, activities[pred_class], colors[pred_class], (spec_buffer_w//2, 0))
__draw_label(frame, perf.fps_str('inferences/sec'), green)
cv2.imshow("Press 'q' to quit", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
|
def main():
usage_line = ' press q to quit '
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser = argparse.ArgumentParser(
description=__doc__ + '\n\nSupported keys:' + usage_line,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[parser])
parser.add_argument(
'-d', '--device', type=int_or_str,
help='input device (numeric ID or substring)')
parser.add_argument(
'-m', '--model_dir', type=str, default='/Users/ilyak/Downloads/spectrogram03.20.20/1584800014',
help='...')
parser.add_argument(
'-w', '--model_input_width', type=int, default=31,
help='This is the width of the spectrogram input the model expects.')
parser.add_argument(
'-t', '--detection_threshold', type=float, default=0.280,
help='The threshold that the moving average of WW predictions needs to meet to count as a WW detection.')
args = parser.parse_args(remaining)
print(usage_line)
stream_inference_of_microphone_audio(args)
if __name__ == '__main__':
main()
| break | conditional_block |
stream_inference.py | #!/usr/bin/env python3
"""
Stream inference on microphone data.
Forked from text-mode spectrogram using live microphone data from:
https://python-sounddevice.readthedocs.io/en/0.3.15/examples.html#real-time-text-mode-spectrogram
Usage:
python scripts/stream_inference.py --model_dir /home/ilyak/Downloads/1594432670
If you have multiple devices see the number of the relevant one here:
python scripts/stream_inference.py --list-devices
then use the -d parameter
"""
import argparse
import math
import shutil
import sys
from time import sleep, monotonic, time
import cv2
from librosa import amplitude_to_db, db_to_amplitude
from librosa.filters import get_window
from librosa.util import pad_center
import numpy as np
import sounddevice as sd
import soundfile as sf
import tensorflow as tf
import pdb
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
class MovingAvgPerf():
def __init__(self, nticks=10):
self.times = []
self.nticks = nticks
def tick(self, diff):
self.times.append(diff)
if len(self.times) > self.nticks:
self.times.pop(0)
def fps_str(self):
fps = len(self.times) / sum(self.times)
return '%.2f fps' % fps
class MovingWindowPerf(MovingAvgPerf):
def tick(self):
super().tick(monotonic())
def fps_str(self, text='fps'):
if len(self.times) == 1:
fps = 0
else:
fps = len(self.times) / (self.times[-1] - self.times[0])
return '%.2f %s' % (fps, text)
class RunningMeanStdDev():
"""Online mean and std dev.
"""
def __init__(self, n=0, m=0.0, S=0.0):
self.n = n
self.m = m
self.S = S
def mean(self):
return self.m
def std_dev(self):
return np.sqrt(self.S/self.n)
def update(self, x):
# S grows by 1000 each second
for x_i in x:
self.n = self.n + 1
m_prev = self.m
self.m = self.m + (x_i - self.m) / self.n
self.S = self.S + (x_i - self.m) * (x_i - m_prev)
# if self.n % 10000 == 0:
# print('n: {}, m: {}, S: {}'.format(self.n, self.m, self.S))
class NotificationAudioPlayer():
"""Play a sound and don't accept calls to play for "block" period of time.
"""
def __init__(self, block_s=None):
self.sound, self.fs = sf.read('data/notification.wav', dtype='float32')
self.length_s = len(self.sound) / float(self.fs)
self.block_s = block_s or self.length_s
self.last_play_time = time() - (2*self.block_s)
def is_blocked(self):
return (time() - self.last_play_time) < self.block_s
def play(self, device):
if not self.is_blocked():
sd.play(self.sound, self.fs, device=device)
self.last_play_time = time()
def __draw_label(img, text, bg_color, pos=(0,0)):
font_face = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.4
color = (0, 0, 0)
thickness = cv2.FILLED
margin = 2
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
pos = (pos[0] + 5, pos[1] + int(txt_size[0][1]*2))
end_x = pos[0] + txt_size[0][0] + margin
end_y = pos[1] - txt_size[0][1] - margin
cv2.rectangle(img, pos, (end_x, end_y), bg_color, thickness)
cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)
buffer_size_s = 10
samplerate = 16000
window_length = int(0.025 * samplerate)
n_fft = 512
hop_length = int(0.01 * samplerate)
ma_width = 124
samples_buffer_block_size = hop_length
samples_buffer_nblocks = 1 + n_fft // samples_buffer_block_size
samples_buffer = np.zeros(samples_buffer_nblocks * samples_buffer_block_size)
samples_buffer_p = 0
spec_buffer_w = buffer_size_s * samplerate // samples_buffer_block_size
# Also add some historical padding
# write in 2 locations if within the last N seconds
# of buffer
buffer_pad_size_s = 2
spec_buffer_h = 257
spec_buffer_pad = buffer_pad_size_s * samplerate // samples_buffer_block_size
spec_buffer = np.zeros((spec_buffer_h, spec_buffer_pad + spec_buffer_w))
spec_buffer_p = 0
# modeled after: https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#stft
fft_window = get_window('hann', window_length, fftbins=True)
fft_window = pad_center(fft_window, n_fft)
# normalize audio.
# should match how network was trained.
norm_win = get_window('hann', 5, fftbins=False)
norm_win = norm_win / norm_win.sum()
target_dBFS = -15.0
running_stats = RunningMeanStdDev()
bell = NotificationAudioPlayer(block_s=1.5)
columns = 80
def update_spectrogram(indata, frames, time, status):
global samples_buffer, spec_buffer, samples_buffer_p, spec_buffer_p, running_stats
if status:
text = ' ' + str(status) + ' '
print('\x1b[34;40m', text.center(columns, '#'),
'\x1b[0m', sep='')
if any(indata):
indata = np.array(indata[:,0])
# normalization seems to help prevent false alarms from fricatives
# but is not essential.
cur_dBFS = np.convolve(indata, norm_win, 'same').max()
a = 10**( (target_dBFS-cur_dBFS) / 20.0 )
normed_indata = a * indata
if samples_buffer_p < (samples_buffer_nblocks - 1):
print('buffering' + ('.'*samples_buffer_p))
ss = samples_buffer_p * samples_buffer_block_size
se = (samples_buffer_p + 1) * samples_buffer_block_size
samples_buffer[ss:se] = normed_indata
samples_buffer_p += 1
elif samples_buffer_p == (samples_buffer_nblocks - 1):
ss = samples_buffer_p * samples_buffer_block_size
se = (samples_buffer_p + 1) * samples_buffer_block_size
samples_buffer[ss:se] = normed_indata
# fft
magnitude = np.abs(np.fft.rfft(fft_window * samples_buffer[-n_fft:], n=n_fft))
#
mag_db = amplitude_to_db(magnitude)
mag_db = np.clip(mag_db, -55, 65)
magnitude = db_to_amplitude(mag_db)
magnitude = magnitude**0.3
running_stats.update(magnitude)
magnitude -= running_stats.mean()
magnitude /= running_stats.std_dev()
# primary write
write_idx = (spec_buffer_p % spec_buffer_w)
spec_buffer[:, spec_buffer_pad + write_idx] = magnitude
# secondary buffer write
if spec_buffer_w < write_idx + spec_buffer_pad:
pad_write_idx = (write_idx + spec_buffer_pad) % spec_buffer_w
spec_buffer[:, pad_write_idx] = magnitude
spec_buffer_p += 1
samples_buffer = np.roll(samples_buffer, -samples_buffer_block_size)
else:
raise ValueError('samples_buffer_p out of range, is %i' % samples_buffer_p)
else:
print('no input')
def stream_spectrogram_of_microphone_audio(args):
|
def stream_inference_of_microphone_audio(args):
"""
The spectrum sits in a buffer with width spec_buffer_pad + spec_buffer_w .
The first spec_buffer_pad of it is a copy of the last spec_buffer_pad of it.
"""
with sd.InputStream(device=args.device, channels=1, callback=update_spectrogram,
blocksize=samples_buffer_block_size,
samplerate=samplerate):
with tf.Session() as sess:
tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], args.model_dir)
predictor = tf.contrib.predictor.from_saved_model(args.model_dir)
network_spec_w = args.model_input_width
spectrogram_predictions = np.zeros((spec_buffer_w + spec_buffer_pad, 3))
spectrogram_predictions_ma = np.zeros((spec_buffer_w + spec_buffer_pad, 3))
# display buffer that can be overwritten with every new display
display_predictions = np.stack([np.arange(spec_buffer_w), np.zeros(spec_buffer_w)]).astype(int).T
frame = np.zeros((spec_buffer_h, spec_buffer_w, 3), dtype=np.uint8)
alpha = 0.025
N = 90
myfilt = alpha*((1-alpha)**np.arange(0,N))
myfilt /= myfilt[:60].sum()
last_pred_write = 0
perf = MovingWindowPerf()
while True:
# sleep(0.01) # restrict max fps to 100
imageify = spec_buffer[:,spec_buffer_pad:].copy()
imageify = (imageify - imageify.min()) / (1e-5 + imageify.max() - imageify.min())
imageify = (imageify * 255).astype(np.uint8)
frame[:,:,0] = imageify
frame[:,:,1] = imageify
frame[:,:,2] = imageify
idx_now = spec_buffer_p % spec_buffer_w
# we look into the past
se = idx_now + spec_buffer_pad
ss = se - network_spec_w
next_input = np.expand_dims(spec_buffer[:, ss:se], 0)
prediction = predictor({"spectrograms": next_input })['softmax']
perf.tick()
prediction = prediction[0] # batch size of one
spectrogram_predictions[last_pred_write:se,:] = prediction[-1,:] # write latest prediction
latest_ma = spectrogram_predictions[(se-ma_width):se,2].mean()
spectrogram_predictions_ma[last_pred_write:se,:] = latest_ma # write the latest moving average
last_pred_write = se
pred_class = np.argmax(prediction[-1,:])
# erase the future
spectrogram_predictions[se+1:] = 0
spectrogram_predictions_ma[se+1:] = 0
# play a bell on WW detection
if latest_ma >= args.detection_threshold:
bell.play(device=args.device)
### display code
white = (255,255,255)
blue = (255,0,0)
red = (0,0,255)
green = (0,255,0)
colors = [green, blue, red]
activities = ['voice', 'silence', 'alexa']
for i, color in enumerate(colors):
display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)
cv2.polylines(frame, [display_predictions], isClosed=False, color=color)
# display moving average
display_predictions[:,1] = (spec_buffer_h - (spectrogram_predictions_ma[spec_buffer_pad:, i] * spec_buffer_h)).astype(int)
cv2.polylines(frame, [display_predictions], isClosed=False, color=white)
cv2.line(frame, (idx_now, 0), (idx_now, spec_buffer_h), green, 2) # moving vertical line
thresh_display_height = spec_buffer_h - int(args.detection_threshold * spec_buffer_h)
cv2.line(frame, (0, thresh_display_height), (spec_buffer_w, thresh_display_height), white, 2) # horizontal line
__draw_label(frame, activities[pred_class], colors[pred_class], (spec_buffer_w//2, 0))
__draw_label(frame, perf.fps_str('inferences/sec'), green)
cv2.imshow("Press 'q' to quit", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def main():
usage_line = ' press q to quit '
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser = argparse.ArgumentParser(
description=__doc__ + '\n\nSupported keys:' + usage_line,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[parser])
parser.add_argument(
'-d', '--device', type=int_or_str,
help='input device (numeric ID or substring)')
parser.add_argument(
'-m', '--model_dir', type=str, default='/Users/ilyak/Downloads/spectrogram03.20.20/1584800014',
help='...')
parser.add_argument(
'-w', '--model_input_width', type=int, default=31,
help='This is the width of the spectrogram input the model expects.')
parser.add_argument(
'-t', '--detection_threshold', type=float, default=0.280,
help='The threshold that the moving average of WW predictions needs to meet to count as a WW detection.')
args = parser.parse_args(remaining)
print(usage_line)
stream_inference_of_microphone_audio(args)
if __name__ == '__main__':
main()
| with sd.InputStream(device=args.device, channels=1, callback=update_spectrogram,
blocksize=samples_buffer_block_size,
samplerate=samplerate):
while True:
sleep(0.02)
cv2.imshow("Press 'q' to quit", np.asarray((spec_buffer * 255).astype(np.uint8)))
if cv2.waitKey(1) & 0xFF == ord('q'):
break | identifier_body |
flickr_speech.py | """TODO(rpeloff)
Author: Ryan Eloff
Contact: ryan.peter.eloff@gmail.com
Date: September 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import numpy as np
from moonshot.data import flickr_audio
from moonshot.experiments import base
from moonshot.utils import file_io
# standard scaling mean and variance computed from training data (background_train.csv)
# global statistics across all dimensions
train_global_mean = {} # pylint: disable=invalid-name
train_global_var = {} # pylint: disable=invalid-name
train_global_mean["mfcc"] = -0.0010950847199735192
train_global_var["mfcc"] = 0.3567110590621045
train_global_mean["fbank"] = 17.509969510358815
train_global_var["fbank"] = 14.452916650749247
# statistics per feature dimension
train_features_mean = {} # pylint: disable=invalid-name
train_features_var = {} # pylint: disable=invalid-name
train_features_mean["mfcc"] = np.array([
+1.32153148e-01, +5.34269911e-02, -2.15415947e-02, -4.94970795e-02,
-1.23090949e-01, +1.40466060e-02, +9.75989624e-02, +1.47936022e-02,
-4.13564958e-02, -2.91437051e-02, -5.80961896e-03, -1.96842286e-02,
-4.55897921e-02, +2.44488166e-03, -3.23168103e-03, -6.59671831e-03,
-2.67478198e-03, -3.82002095e-03, -3.47037479e-03, -3.26529530e-03,
+1.05246510e-03, +1.89005922e-03, -2.15055509e-04, +1.25535136e-03,
-1.09850116e-03, +8.23052806e-05, +1.83033459e-03, +1.21922030e-03,
-1.84041176e-03, +6.59660647e-05, -9.16896159e-04, -1.48896116e-03,
-1.10792069e-03, -6.03055868e-04, +7.30406810e-04, +4.05984679e-04,
+2.76476503e-04, -1.50802754e-05, +7.71538868e-05])
train_features_var["mfcc"] = np.array([
0.91296818, 1.00177075, 1.04598884, 0.97943741, 1.10167072, 0.98497306,
1.03107042, 0.99571673, 0.99097156, 0.98902959, 1.00651163, 1.01512197,
1.01092469, 0.04358867, 0.03717848, 0.04696080, 0.03748896, 0.04700193,
0.04922860, 0.05028348, 0.05473835, 0.05797368, 0.05976523, 0.06351620,
0.06509738, 0.06707076, 0.00604690, 0.00509468, 0.00710361, 0.00539015,
0.00699638, 0.00804983, 0.00827325, 0.00931896, 0.01004667, 0.01042689,
0.01145421, 0.01172219, 0.01229539])
train_features_mean["fbank"] = np.array([
14.94743563, 16.45409437, 17.13997318, 17.21678179, 17.46014044,
17.88050357, 18.14726162, 18.33959425, 18.21709028, 18.09417002,
17.90817997, 17.79310673, 17.69135520, 17.57243688, 17.44124024,
17.42636888, 17.58523693, 17.76074110, 17.89012335, 17.92002578,
17.96491240, 18.03654056, 18.12163615, 18.21561456, 18.19172980,
18.14125008, 18.16387385, 18.19773438, 18.19301181, 18.05073942,
17.81676462, 17.59843241, 17.36506128, 17.01731537, 16.67868699,
16.57665983, 16.61765614, 16.59339746, 16.32898486, 15.64291821])
train_features_var["fbank"] = np.array([
+9.80457338, 11.02937828, 12.10755132, 11.95119139, 12.28248128,
13.20537295, 14.11372048, 15.14463048, 15.78990233, 15.72936093,
15.36991563, 15.08655164, 14.52818211, 14.26114153, 14.22071484,
14.07992226, 13.87976692, 13.91862834, 14.09054846, 13.93680669,
13.58307387, 13.43652145, 13.53714681, 13.73358586, 13.91080542,
13.64155872, 13.51368075, 13.79055016, 13.91272786, 13.51251354,
13.36768644, 13.75936469, 14.12302948, 14.35858512, 14.42953534,
14.69363512, 14.73356627, 14.96371951, 14.97985992, 14.632985])
class FlickrSpeech(base.Experiment):
def __init__(self, features="mfcc", keywords_split="one_shot_evaluation",
embed_dir=None, preprocess_func=None, speaker_mode="baseline",
**kwargs):
"""TODO
`features` one of `["mfcc", "fbank"]`.
`keywords_split` one of `['one_shot_evaluation', 'one_shot_development',
'background_train', 'background_dev', 'background_test']`.
`speaker_mode` options:
"baseline"
- randomly choose learning and evaluation samples in episode labels
"difficult":
- choose learning samples as usual
- choose evaluation samples from speakers not seen during learning
"distractor":
- choose a random speaker for evaluation
- choose a random label for evaluation
- choose learning samples from same speaker as evaluation
- except for the evaluation label, sample from different speaker(s)
"""
super().__init__(**kwargs)
logging.log(logging.INFO, f"Creating Flickr audio experiment")
assert features in ["mfcc", "fbank"]
assert keywords_split in [
"one_shot_evaluation", "one_shot_development", "background_train",
"background_dev", "background_test"]
assert speaker_mode in ["baseline", "difficult", "distractor"]
if keywords_split == "background_test":
subset = "test"
elif keywords_split == "background_dev":
subset = "dev"
else: # rest fall under train subset
subset = "train"
self.speaker_mode = speaker_mode
# load Flickr 8k keywords set
keywords_path = os.path.join(
"data", "splits", "flickr8k", f"{keywords_split}.csv")
keywords_set = file_io.read_csv(keywords_path, skip_first=True)
# load aligned Flickr Audio UIDs and metadata
faudio_path = os.path.join(
"data", "splits", "flickr8k", f"faudio_{keywords_split}.txt")
faudio_uids = file_io.read_csv(faudio_path)[0]
self.faudio_uids = np.asarray(faudio_uids)
self.faudio_metadata = flickr_audio.extract_all_uid_metadata(
self.faudio_uids)
# load audio paths
audio_paths = flickr_audio.fetch_audio_paths(
os.path.join("data", "processed", "flickr_audio", features, subset),
self.faudio_uids)
# load audio embedding paths if specified
if embed_dir is not None:
embed_paths = []
for audio_path in audio_paths:
embed_paths.append(
os.path.join(
embed_dir, "flickr_audio", f"{keywords_split}",
f"{os.path.split(audio_path)[1]}.tfrecord"))
assert os.path.exists(embed_paths[-1])
self.keywords_set = tuple(np.asarray(x) for x in keywords_set)
self.audio_paths = np.asarray(audio_paths)
self.embed_paths = None
if embed_dir is not None:
self.embed_paths = np.asarray(embed_paths)
# get unique keywords and keyword class label lookup dict
self.keywords = sorted(np.unique(self.keywords_set[3]).tolist())
self.keyword_labels = {
keyword: idx for idx, keyword in enumerate(self.keywords)}
# get unique speakers and valid distractor speakers and labels
self.speakers = np.unique(self.faudio_metadata[2])
distractor_speaker_labels = {}
for speaker in self.speakers:
speaker_idx = np.where(
self.faudio_metadata[2] == speaker)[0]
unique_keywords, counts = np.unique(
self.keywords_set[3][speaker_idx], return_counts=True)
speaker_labels = []
for keyword, count in zip(unique_keywords, counts):
if count > 5: # constrain min. training samples per keyword
speaker_labels.append(keyword)
if len(speaker_labels) < 10: # constrain min. keywords per speaker
continue
else:
distractor_speaker_labels[speaker] = speaker_labels
self.distractor_speaker_labels = distractor_speaker_labels
# get lookup for unique indices per class label
self.class_unique_indices = {}
for keyword in self.keywords:
cls_idx = np.where(self.keywords_set[3] == keyword)[0]
self.class_unique_indices[keyword] = cls_idx
# set speech data as raw paths or extracted embedding paths
if self.embed_paths is None:
self.speech_data = self.audio_paths
else:
self.speech_data = self.embed_paths
if preprocess_func is not None:
self.speech_data = preprocess_func(self.speech_data)
@property
def data(self):
return self.speech_data
def _sample_episode(self, L, K, N, episode_labels=None):
# sample episode learning task (defined by L-way classes)
if episode_labels is None:
episode_labels = self.rng.choice(self.keywords, L, replace=False)
if self.speaker_mode == "distractor":
# choose a random speaker & label (from valid set) for evaluation
query_speaker = self.rng.choice(
list(self.distractor_speaker_labels.keys()), 1)[0]
episode_labels = self.rng.choice(
self.distractor_speaker_labels[query_speaker], L, replace=False)
query_label = self.rng.choice(episode_labels, 1)[0]
# sample learning examples from episode task
x_train_idx, y_train = [], []
for ep_label in episode_labels:
valid_class_indices = self.class_unique_indices[ep_label]
if self.speaker_mode == "distractor":
if ep_label == query_label:
# choose different speakers
valid_speaker_indices = np.where(np.isin(
self.faudio_metadata[2],
list(self.distractor_speaker_labels.keys())))[0]
valid_speaker_indices = np.intersect1d(
valid_speaker_indices,
np.where(
self.faudio_metadata[2] != query_speaker)[0])
else:
# choose same speaker
valid_speaker_indices = np.where(
self.faudio_metadata[2] == query_speaker)[0]
valid_class_indices = np.intersect1d(
valid_class_indices, valid_speaker_indices)
rand_cls_idx = self.rng.choice(
valid_class_indices, K, replace=False)
x_train_idx.extend(rand_cls_idx)
y_train.extend([ep_label] * K)
# sample evaluation examples from episode task
ep_test_labels_idx = self.rng.choice(
np.arange(len(episode_labels)), N, replace=True)
y_test = episode_labels[ep_test_labels_idx]
if self.speaker_mode == "difficult": # choose different speakers
|
if self.speaker_mode == "distractor": # all evaluation samples same label
y_test = [query_label] * N
# choose same speaker
valid_speaker_indices = np.where(
self.faudio_metadata[2] == query_speaker)[0]
x_test_idx = []
for test_label in y_test:
valid_class_indices = self.class_unique_indices[test_label]
if self.speaker_mode == "difficult" or self.speaker_mode == "distractor":
valid_class_indices = np.intersect1d(
valid_class_indices, valid_speaker_indices)
rand_cls_idx = self.rng.choice(
valid_class_indices, 1, replace=False)
x_test_idx.extend(rand_cls_idx)
curr_episode_train = np.asarray(x_train_idx), np.asarray(y_train)
self.curr_episode_train = curr_episode_train
curr_episode_test = np.asarray(x_test_idx), np.asarray(y_test)
self.curr_episode_test = curr_episode_test
return curr_episode_train, curr_episode_test
@property
def _learning_samples(self):
return (
self.data[self.curr_episode_train[0]], self.curr_episode_train[1])
@property
def _evaluation_samples(self):
return (
self.data[self.curr_episode_test[0]], self.curr_episode_test[1])
| train_speakers = self.faudio_metadata[2][x_train_idx]
valid_speaker_indices = np.where(np.invert(np.isin(
self.faudio_metadata[2], train_speakers)))[0] | conditional_block |
flickr_speech.py | """TODO(rpeloff)
Author: Ryan Eloff
Contact: ryan.peter.eloff@gmail.com
Date: September 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import numpy as np
from moonshot.data import flickr_audio
from moonshot.experiments import base
from moonshot.utils import file_io
# standard scaling mean and variance computed from training data (background_train.csv)
# global statistics across all dimensions
train_global_mean = {} # pylint: disable=invalid-name
train_global_var = {} # pylint: disable=invalid-name
train_global_mean["mfcc"] = -0.0010950847199735192
train_global_var["mfcc"] = 0.3567110590621045
train_global_mean["fbank"] = 17.509969510358815
train_global_var["fbank"] = 14.452916650749247
# statistics per feature dimension
train_features_mean = {} # pylint: disable=invalid-name
train_features_var = {} # pylint: disable=invalid-name
train_features_mean["mfcc"] = np.array([
+1.32153148e-01, +5.34269911e-02, -2.15415947e-02, -4.94970795e-02,
-1.23090949e-01, +1.40466060e-02, +9.75989624e-02, +1.47936022e-02,
-4.13564958e-02, -2.91437051e-02, -5.80961896e-03, -1.96842286e-02,
-4.55897921e-02, +2.44488166e-03, -3.23168103e-03, -6.59671831e-03,
-2.67478198e-03, -3.82002095e-03, -3.47037479e-03, -3.26529530e-03,
+1.05246510e-03, +1.89005922e-03, -2.15055509e-04, +1.25535136e-03,
-1.09850116e-03, +8.23052806e-05, +1.83033459e-03, +1.21922030e-03,
-1.84041176e-03, +6.59660647e-05, -9.16896159e-04, -1.48896116e-03,
-1.10792069e-03, -6.03055868e-04, +7.30406810e-04, +4.05984679e-04,
+2.76476503e-04, -1.50802754e-05, +7.71538868e-05])
train_features_var["mfcc"] = np.array([
0.91296818, 1.00177075, 1.04598884, 0.97943741, 1.10167072, 0.98497306,
1.03107042, 0.99571673, 0.99097156, 0.98902959, 1.00651163, 1.01512197,
1.01092469, 0.04358867, 0.03717848, 0.04696080, 0.03748896, 0.04700193,
0.04922860, 0.05028348, 0.05473835, 0.05797368, 0.05976523, 0.06351620,
0.06509738, 0.06707076, 0.00604690, 0.00509468, 0.00710361, 0.00539015,
0.00699638, 0.00804983, 0.00827325, 0.00931896, 0.01004667, 0.01042689,
0.01145421, 0.01172219, 0.01229539])
train_features_mean["fbank"] = np.array([
14.94743563, 16.45409437, 17.13997318, 17.21678179, 17.46014044,
17.88050357, 18.14726162, 18.33959425, 18.21709028, 18.09417002,
17.90817997, 17.79310673, 17.69135520, 17.57243688, 17.44124024,
17.42636888, 17.58523693, 17.76074110, 17.89012335, 17.92002578,
17.96491240, 18.03654056, 18.12163615, 18.21561456, 18.19172980,
18.14125008, 18.16387385, 18.19773438, 18.19301181, 18.05073942,
17.81676462, 17.59843241, 17.36506128, 17.01731537, 16.67868699,
16.57665983, 16.61765614, 16.59339746, 16.32898486, 15.64291821])
train_features_var["fbank"] = np.array([
+9.80457338, 11.02937828, 12.10755132, 11.95119139, 12.28248128,
13.20537295, 14.11372048, 15.14463048, 15.78990233, 15.72936093,
15.36991563, 15.08655164, 14.52818211, 14.26114153, 14.22071484,
14.07992226, 13.87976692, 13.91862834, 14.09054846, 13.93680669,
13.58307387, 13.43652145, 13.53714681, 13.73358586, 13.91080542,
13.64155872, 13.51368075, 13.79055016, 13.91272786, 13.51251354,
13.36768644, 13.75936469, 14.12302948, 14.35858512, 14.42953534,
14.69363512, 14.73356627, 14.96371951, 14.97985992, 14.632985])
class FlickrSpeech(base.Experiment):
def __init__(self, features="mfcc", keywords_split="one_shot_evaluation",
embed_dir=None, preprocess_func=None, speaker_mode="baseline",
**kwargs):
|
@property
def data(self):
return self.speech_data
def _sample_episode(self, L, K, N, episode_labels=None):
# sample episode learning task (defined by L-way classes)
if episode_labels is None:
episode_labels = self.rng.choice(self.keywords, L, replace=False)
if self.speaker_mode == "distractor":
# choose a random speaker & label (from valid set) for evaluation
query_speaker = self.rng.choice(
list(self.distractor_speaker_labels.keys()), 1)[0]
episode_labels = self.rng.choice(
self.distractor_speaker_labels[query_speaker], L, replace=False)
query_label = self.rng.choice(episode_labels, 1)[0]
# sample learning examples from episode task
x_train_idx, y_train = [], []
for ep_label in episode_labels:
valid_class_indices = self.class_unique_indices[ep_label]
if self.speaker_mode == "distractor":
if ep_label == query_label:
# choose different speakers
valid_speaker_indices = np.where(np.isin(
self.faudio_metadata[2],
list(self.distractor_speaker_labels.keys())))[0]
valid_speaker_indices = np.intersect1d(
valid_speaker_indices,
np.where(
self.faudio_metadata[2] != query_speaker)[0])
else:
# choose same speaker
valid_speaker_indices = np.where(
self.faudio_metadata[2] == query_speaker)[0]
valid_class_indices = np.intersect1d(
valid_class_indices, valid_speaker_indices)
rand_cls_idx = self.rng.choice(
valid_class_indices, K, replace=False)
x_train_idx.extend(rand_cls_idx)
y_train.extend([ep_label] * K)
# sample evaluation examples from episode task
ep_test_labels_idx = self.rng.choice(
np.arange(len(episode_labels)), N, replace=True)
y_test = episode_labels[ep_test_labels_idx]
if self.speaker_mode == "difficult": # choose different speakers
train_speakers = self.faudio_metadata[2][x_train_idx]
valid_speaker_indices = np.where(np.invert(np.isin(
self.faudio_metadata[2], train_speakers)))[0]
if self.speaker_mode == "distractor": # all evaluation samples same label
y_test = [query_label] * N
# choose same speaker
valid_speaker_indices = np.where(
self.faudio_metadata[2] == query_speaker)[0]
x_test_idx = []
for test_label in y_test:
valid_class_indices = self.class_unique_indices[test_label]
if self.speaker_mode == "difficult" or self.speaker_mode == "distractor":
valid_class_indices = np.intersect1d(
valid_class_indices, valid_speaker_indices)
rand_cls_idx = self.rng.choice(
valid_class_indices, 1, replace=False)
x_test_idx.extend(rand_cls_idx)
curr_episode_train = np.asarray(x_train_idx), np.asarray(y_train)
self.curr_episode_train = curr_episode_train
curr_episode_test = np.asarray(x_test_idx), np.asarray(y_test)
self.curr_episode_test = curr_episode_test
return curr_episode_train, curr_episode_test
@property
def _learning_samples(self):
return (
self.data[self.curr_episode_train[0]], self.curr_episode_train[1])
@property
def _evaluation_samples(self):
return (
self.data[self.curr_episode_test[0]], self.curr_episode_test[1])
| """TODO
`features` one of `["mfcc", "fbank"]`.
`keywords_split` one of `['one_shot_evaluation', 'one_shot_development',
'background_train', 'background_dev', 'background_test']`.
`speaker_mode` options:
"baseline"
- randomly choose learning and evaluation samples in episode labels
"difficult":
- choose learning samples as usual
- choose evaluation samples from speakers not seen during learning
"distractor":
- choose a random speaker for evaluation
- choose a random label for evaluation
- choose learning samples from same speaker as evaluation
- except for the evaluation label, sample from different speaker(s)
"""
super().__init__(**kwargs)
logging.log(logging.INFO, f"Creating Flickr audio experiment")
assert features in ["mfcc", "fbank"]
assert keywords_split in [
"one_shot_evaluation", "one_shot_development", "background_train",
"background_dev", "background_test"]
assert speaker_mode in ["baseline", "difficult", "distractor"]
if keywords_split == "background_test":
subset = "test"
elif keywords_split == "background_dev":
subset = "dev"
else: # rest fall under train subset
subset = "train"
self.speaker_mode = speaker_mode
# load Flickr 8k keywords set
keywords_path = os.path.join(
"data", "splits", "flickr8k", f"{keywords_split}.csv")
keywords_set = file_io.read_csv(keywords_path, skip_first=True)
# load aligned Flickr Audio UIDs and metadata
faudio_path = os.path.join(
"data", "splits", "flickr8k", f"faudio_{keywords_split}.txt")
faudio_uids = file_io.read_csv(faudio_path)[0]
self.faudio_uids = np.asarray(faudio_uids)
self.faudio_metadata = flickr_audio.extract_all_uid_metadata(
self.faudio_uids)
# load audio paths
audio_paths = flickr_audio.fetch_audio_paths(
os.path.join("data", "processed", "flickr_audio", features, subset),
self.faudio_uids)
# load audio embedding paths if specified
if embed_dir is not None:
embed_paths = []
for audio_path in audio_paths:
embed_paths.append(
os.path.join(
embed_dir, "flickr_audio", f"{keywords_split}",
f"{os.path.split(audio_path)[1]}.tfrecord"))
assert os.path.exists(embed_paths[-1])
self.keywords_set = tuple(np.asarray(x) for x in keywords_set)
self.audio_paths = np.asarray(audio_paths)
self.embed_paths = None
if embed_dir is not None:
self.embed_paths = np.asarray(embed_paths)
# get unique keywords and keyword class label lookup dict
self.keywords = sorted(np.unique(self.keywords_set[3]).tolist())
self.keyword_labels = {
keyword: idx for idx, keyword in enumerate(self.keywords)}
# get unique speakers and valid distractor speakers and labels
self.speakers = np.unique(self.faudio_metadata[2])
distractor_speaker_labels = {}
for speaker in self.speakers:
speaker_idx = np.where(
self.faudio_metadata[2] == speaker)[0]
unique_keywords, counts = np.unique(
self.keywords_set[3][speaker_idx], return_counts=True)
speaker_labels = []
for keyword, count in zip(unique_keywords, counts):
if count > 5: # constrain min. training samples per keyword
speaker_labels.append(keyword)
if len(speaker_labels) < 10: # constrain min. keywords per speaker
continue
else:
distractor_speaker_labels[speaker] = speaker_labels
self.distractor_speaker_labels = distractor_speaker_labels
# get lookup for unique indices per class label
self.class_unique_indices = {}
for keyword in self.keywords:
cls_idx = np.where(self.keywords_set[3] == keyword)[0]
self.class_unique_indices[keyword] = cls_idx
# set speech data as raw paths or extracted embedding paths
if self.embed_paths is None:
self.speech_data = self.audio_paths
else:
self.speech_data = self.embed_paths
if preprocess_func is not None:
self.speech_data = preprocess_func(self.speech_data) | identifier_body |
flickr_speech.py | """TODO(rpeloff)
Author: Ryan Eloff
Contact: ryan.peter.eloff@gmail.com
Date: September 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import numpy as np
from moonshot.data import flickr_audio
from moonshot.experiments import base
from moonshot.utils import file_io
# standard scaling mean and variance computed from training data (background_train.csv)
# global statistics across all dimensions
train_global_mean = {} # pylint: disable=invalid-name
train_global_var = {} # pylint: disable=invalid-name
train_global_mean["mfcc"] = -0.0010950847199735192 | train_global_mean["fbank"] = 17.509969510358815
train_global_var["fbank"] = 14.452916650749247
# statistics per feature dimension
train_features_mean = {} # pylint: disable=invalid-name
train_features_var = {} # pylint: disable=invalid-name
train_features_mean["mfcc"] = np.array([
+1.32153148e-01, +5.34269911e-02, -2.15415947e-02, -4.94970795e-02,
-1.23090949e-01, +1.40466060e-02, +9.75989624e-02, +1.47936022e-02,
-4.13564958e-02, -2.91437051e-02, -5.80961896e-03, -1.96842286e-02,
-4.55897921e-02, +2.44488166e-03, -3.23168103e-03, -6.59671831e-03,
-2.67478198e-03, -3.82002095e-03, -3.47037479e-03, -3.26529530e-03,
+1.05246510e-03, +1.89005922e-03, -2.15055509e-04, +1.25535136e-03,
-1.09850116e-03, +8.23052806e-05, +1.83033459e-03, +1.21922030e-03,
-1.84041176e-03, +6.59660647e-05, -9.16896159e-04, -1.48896116e-03,
-1.10792069e-03, -6.03055868e-04, +7.30406810e-04, +4.05984679e-04,
+2.76476503e-04, -1.50802754e-05, +7.71538868e-05])
train_features_var["mfcc"] = np.array([
0.91296818, 1.00177075, 1.04598884, 0.97943741, 1.10167072, 0.98497306,
1.03107042, 0.99571673, 0.99097156, 0.98902959, 1.00651163, 1.01512197,
1.01092469, 0.04358867, 0.03717848, 0.04696080, 0.03748896, 0.04700193,
0.04922860, 0.05028348, 0.05473835, 0.05797368, 0.05976523, 0.06351620,
0.06509738, 0.06707076, 0.00604690, 0.00509468, 0.00710361, 0.00539015,
0.00699638, 0.00804983, 0.00827325, 0.00931896, 0.01004667, 0.01042689,
0.01145421, 0.01172219, 0.01229539])
train_features_mean["fbank"] = np.array([
14.94743563, 16.45409437, 17.13997318, 17.21678179, 17.46014044,
17.88050357, 18.14726162, 18.33959425, 18.21709028, 18.09417002,
17.90817997, 17.79310673, 17.69135520, 17.57243688, 17.44124024,
17.42636888, 17.58523693, 17.76074110, 17.89012335, 17.92002578,
17.96491240, 18.03654056, 18.12163615, 18.21561456, 18.19172980,
18.14125008, 18.16387385, 18.19773438, 18.19301181, 18.05073942,
17.81676462, 17.59843241, 17.36506128, 17.01731537, 16.67868699,
16.57665983, 16.61765614, 16.59339746, 16.32898486, 15.64291821])
train_features_var["fbank"] = np.array([
+9.80457338, 11.02937828, 12.10755132, 11.95119139, 12.28248128,
13.20537295, 14.11372048, 15.14463048, 15.78990233, 15.72936093,
15.36991563, 15.08655164, 14.52818211, 14.26114153, 14.22071484,
14.07992226, 13.87976692, 13.91862834, 14.09054846, 13.93680669,
13.58307387, 13.43652145, 13.53714681, 13.73358586, 13.91080542,
13.64155872, 13.51368075, 13.79055016, 13.91272786, 13.51251354,
13.36768644, 13.75936469, 14.12302948, 14.35858512, 14.42953534,
14.69363512, 14.73356627, 14.96371951, 14.97985992, 14.632985])
class FlickrSpeech(base.Experiment):
def __init__(self, features="mfcc", keywords_split="one_shot_evaluation",
embed_dir=None, preprocess_func=None, speaker_mode="baseline",
**kwargs):
"""TODO
`features` one of `["mfcc", "fbank"]`.
`keywords_split` one of `['one_shot_evaluation', 'one_shot_development',
'background_train', 'background_dev', 'background_test']`.
`speaker_mode` options:
"baseline"
- randomly choose learning and evaluation samples in episode labels
"difficult":
- choose learning samples as usual
- choose evaluation samples from speakers not seen during learning
"distractor":
- choose a random speaker for evaluation
- choose a random label for evaluation
- choose learning samples from same speaker as evaluation
- except for the evaluation label, sample from different speaker(s)
"""
super().__init__(**kwargs)
logging.log(logging.INFO, f"Creating Flickr audio experiment")
assert features in ["mfcc", "fbank"]
assert keywords_split in [
"one_shot_evaluation", "one_shot_development", "background_train",
"background_dev", "background_test"]
assert speaker_mode in ["baseline", "difficult", "distractor"]
if keywords_split == "background_test":
subset = "test"
elif keywords_split == "background_dev":
subset = "dev"
else: # rest fall under train subset
subset = "train"
self.speaker_mode = speaker_mode
# load Flickr 8k keywords set
keywords_path = os.path.join(
"data", "splits", "flickr8k", f"{keywords_split}.csv")
keywords_set = file_io.read_csv(keywords_path, skip_first=True)
# load aligned Flickr Audio UIDs and metadata
faudio_path = os.path.join(
"data", "splits", "flickr8k", f"faudio_{keywords_split}.txt")
faudio_uids = file_io.read_csv(faudio_path)[0]
self.faudio_uids = np.asarray(faudio_uids)
self.faudio_metadata = flickr_audio.extract_all_uid_metadata(
self.faudio_uids)
# load audio paths
audio_paths = flickr_audio.fetch_audio_paths(
os.path.join("data", "processed", "flickr_audio", features, subset),
self.faudio_uids)
# load audio embedding paths if specified
if embed_dir is not None:
embed_paths = []
for audio_path in audio_paths:
embed_paths.append(
os.path.join(
embed_dir, "flickr_audio", f"{keywords_split}",
f"{os.path.split(audio_path)[1]}.tfrecord"))
assert os.path.exists(embed_paths[-1])
self.keywords_set = tuple(np.asarray(x) for x in keywords_set)
self.audio_paths = np.asarray(audio_paths)
self.embed_paths = None
if embed_dir is not None:
self.embed_paths = np.asarray(embed_paths)
# get unique keywords and keyword class label lookup dict
self.keywords = sorted(np.unique(self.keywords_set[3]).tolist())
self.keyword_labels = {
keyword: idx for idx, keyword in enumerate(self.keywords)}
# get unique speakers and valid distractor speakers and labels
self.speakers = np.unique(self.faudio_metadata[2])
distractor_speaker_labels = {}
for speaker in self.speakers:
speaker_idx = np.where(
self.faudio_metadata[2] == speaker)[0]
unique_keywords, counts = np.unique(
self.keywords_set[3][speaker_idx], return_counts=True)
speaker_labels = []
for keyword, count in zip(unique_keywords, counts):
if count > 5: # constrain min. training samples per keyword
speaker_labels.append(keyword)
if len(speaker_labels) < 10: # constrain min. keywords per speaker
continue
else:
distractor_speaker_labels[speaker] = speaker_labels
self.distractor_speaker_labels = distractor_speaker_labels
# get lookup for unique indices per class label
self.class_unique_indices = {}
for keyword in self.keywords:
cls_idx = np.where(self.keywords_set[3] == keyword)[0]
self.class_unique_indices[keyword] = cls_idx
# set speech data as raw paths or extracted embedding paths
if self.embed_paths is None:
self.speech_data = self.audio_paths
else:
self.speech_data = self.embed_paths
if preprocess_func is not None:
self.speech_data = preprocess_func(self.speech_data)
@property
def data(self):
return self.speech_data
def _sample_episode(self, L, K, N, episode_labels=None):
# sample episode learning task (defined by L-way classes)
if episode_labels is None:
episode_labels = self.rng.choice(self.keywords, L, replace=False)
if self.speaker_mode == "distractor":
# choose a random speaker & label (from valid set) for evaluation
query_speaker = self.rng.choice(
list(self.distractor_speaker_labels.keys()), 1)[0]
episode_labels = self.rng.choice(
self.distractor_speaker_labels[query_speaker], L, replace=False)
query_label = self.rng.choice(episode_labels, 1)[0]
# sample learning examples from episode task
x_train_idx, y_train = [], []
for ep_label in episode_labels:
valid_class_indices = self.class_unique_indices[ep_label]
if self.speaker_mode == "distractor":
if ep_label == query_label:
# choose different speakers
valid_speaker_indices = np.where(np.isin(
self.faudio_metadata[2],
list(self.distractor_speaker_labels.keys())))[0]
valid_speaker_indices = np.intersect1d(
valid_speaker_indices,
np.where(
self.faudio_metadata[2] != query_speaker)[0])
else:
# choose same speaker
valid_speaker_indices = np.where(
self.faudio_metadata[2] == query_speaker)[0]
valid_class_indices = np.intersect1d(
valid_class_indices, valid_speaker_indices)
rand_cls_idx = self.rng.choice(
valid_class_indices, K, replace=False)
x_train_idx.extend(rand_cls_idx)
y_train.extend([ep_label] * K)
# sample evaluation examples from episode task
ep_test_labels_idx = self.rng.choice(
np.arange(len(episode_labels)), N, replace=True)
y_test = episode_labels[ep_test_labels_idx]
if self.speaker_mode == "difficult": # choose different speakers
train_speakers = self.faudio_metadata[2][x_train_idx]
valid_speaker_indices = np.where(np.invert(np.isin(
self.faudio_metadata[2], train_speakers)))[0]
if self.speaker_mode == "distractor": # all evaluation samples same label
y_test = [query_label] * N
# choose same speaker
valid_speaker_indices = np.where(
self.faudio_metadata[2] == query_speaker)[0]
x_test_idx = []
for test_label in y_test:
valid_class_indices = self.class_unique_indices[test_label]
if self.speaker_mode == "difficult" or self.speaker_mode == "distractor":
valid_class_indices = np.intersect1d(
valid_class_indices, valid_speaker_indices)
rand_cls_idx = self.rng.choice(
valid_class_indices, 1, replace=False)
x_test_idx.extend(rand_cls_idx)
curr_episode_train = np.asarray(x_train_idx), np.asarray(y_train)
self.curr_episode_train = curr_episode_train
curr_episode_test = np.asarray(x_test_idx), np.asarray(y_test)
self.curr_episode_test = curr_episode_test
return curr_episode_train, curr_episode_test
@property
def _learning_samples(self):
return (
self.data[self.curr_episode_train[0]], self.curr_episode_train[1])
@property
def _evaluation_samples(self):
return (
self.data[self.curr_episode_test[0]], self.curr_episode_test[1]) | train_global_var["mfcc"] = 0.3567110590621045 | random_line_split |
flickr_speech.py | """TODO(rpeloff)
Author: Ryan Eloff
Contact: ryan.peter.eloff@gmail.com
Date: September 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import numpy as np
from moonshot.data import flickr_audio
from moonshot.experiments import base
from moonshot.utils import file_io
# standard scaling mean and variance computed from training data (background_train.csv)
# global statistics across all dimensions
train_global_mean = {} # pylint: disable=invalid-name
train_global_var = {} # pylint: disable=invalid-name
train_global_mean["mfcc"] = -0.0010950847199735192
train_global_var["mfcc"] = 0.3567110590621045
train_global_mean["fbank"] = 17.509969510358815
train_global_var["fbank"] = 14.452916650749247
# statistics per feature dimension
train_features_mean = {} # pylint: disable=invalid-name
train_features_var = {} # pylint: disable=invalid-name
train_features_mean["mfcc"] = np.array([
+1.32153148e-01, +5.34269911e-02, -2.15415947e-02, -4.94970795e-02,
-1.23090949e-01, +1.40466060e-02, +9.75989624e-02, +1.47936022e-02,
-4.13564958e-02, -2.91437051e-02, -5.80961896e-03, -1.96842286e-02,
-4.55897921e-02, +2.44488166e-03, -3.23168103e-03, -6.59671831e-03,
-2.67478198e-03, -3.82002095e-03, -3.47037479e-03, -3.26529530e-03,
+1.05246510e-03, +1.89005922e-03, -2.15055509e-04, +1.25535136e-03,
-1.09850116e-03, +8.23052806e-05, +1.83033459e-03, +1.21922030e-03,
-1.84041176e-03, +6.59660647e-05, -9.16896159e-04, -1.48896116e-03,
-1.10792069e-03, -6.03055868e-04, +7.30406810e-04, +4.05984679e-04,
+2.76476503e-04, -1.50802754e-05, +7.71538868e-05])
train_features_var["mfcc"] = np.array([
0.91296818, 1.00177075, 1.04598884, 0.97943741, 1.10167072, 0.98497306,
1.03107042, 0.99571673, 0.99097156, 0.98902959, 1.00651163, 1.01512197,
1.01092469, 0.04358867, 0.03717848, 0.04696080, 0.03748896, 0.04700193,
0.04922860, 0.05028348, 0.05473835, 0.05797368, 0.05976523, 0.06351620,
0.06509738, 0.06707076, 0.00604690, 0.00509468, 0.00710361, 0.00539015,
0.00699638, 0.00804983, 0.00827325, 0.00931896, 0.01004667, 0.01042689,
0.01145421, 0.01172219, 0.01229539])
train_features_mean["fbank"] = np.array([
14.94743563, 16.45409437, 17.13997318, 17.21678179, 17.46014044,
17.88050357, 18.14726162, 18.33959425, 18.21709028, 18.09417002,
17.90817997, 17.79310673, 17.69135520, 17.57243688, 17.44124024,
17.42636888, 17.58523693, 17.76074110, 17.89012335, 17.92002578,
17.96491240, 18.03654056, 18.12163615, 18.21561456, 18.19172980,
18.14125008, 18.16387385, 18.19773438, 18.19301181, 18.05073942,
17.81676462, 17.59843241, 17.36506128, 17.01731537, 16.67868699,
16.57665983, 16.61765614, 16.59339746, 16.32898486, 15.64291821])
train_features_var["fbank"] = np.array([
+9.80457338, 11.02937828, 12.10755132, 11.95119139, 12.28248128,
13.20537295, 14.11372048, 15.14463048, 15.78990233, 15.72936093,
15.36991563, 15.08655164, 14.52818211, 14.26114153, 14.22071484,
14.07992226, 13.87976692, 13.91862834, 14.09054846, 13.93680669,
13.58307387, 13.43652145, 13.53714681, 13.73358586, 13.91080542,
13.64155872, 13.51368075, 13.79055016, 13.91272786, 13.51251354,
13.36768644, 13.75936469, 14.12302948, 14.35858512, 14.42953534,
14.69363512, 14.73356627, 14.96371951, 14.97985992, 14.632985])
class | (base.Experiment):
def __init__(self, features="mfcc", keywords_split="one_shot_evaluation",
embed_dir=None, preprocess_func=None, speaker_mode="baseline",
**kwargs):
"""TODO
`features` one of `["mfcc", "fbank"]`.
`keywords_split` one of `['one_shot_evaluation', 'one_shot_development',
'background_train', 'background_dev', 'background_test']`.
`speaker_mode` options:
"baseline"
- randomly choose learning and evaluation samples in episode labels
"difficult":
- choose learning samples as usual
- choose evaluation samples from speakers not seen during learning
"distractor":
- choose a random speaker for evaluation
- choose a random label for evaluation
- choose learning samples from same speaker as evaluation
- except for the evaluation label, sample from different speaker(s)
"""
super().__init__(**kwargs)
logging.log(logging.INFO, f"Creating Flickr audio experiment")
assert features in ["mfcc", "fbank"]
assert keywords_split in [
"one_shot_evaluation", "one_shot_development", "background_train",
"background_dev", "background_test"]
assert speaker_mode in ["baseline", "difficult", "distractor"]
if keywords_split == "background_test":
subset = "test"
elif keywords_split == "background_dev":
subset = "dev"
else: # rest fall under train subset
subset = "train"
self.speaker_mode = speaker_mode
# load Flickr 8k keywords set
keywords_path = os.path.join(
"data", "splits", "flickr8k", f"{keywords_split}.csv")
keywords_set = file_io.read_csv(keywords_path, skip_first=True)
# load aligned Flickr Audio UIDs and metadata
faudio_path = os.path.join(
"data", "splits", "flickr8k", f"faudio_{keywords_split}.txt")
faudio_uids = file_io.read_csv(faudio_path)[0]
self.faudio_uids = np.asarray(faudio_uids)
self.faudio_metadata = flickr_audio.extract_all_uid_metadata(
self.faudio_uids)
# load audio paths
audio_paths = flickr_audio.fetch_audio_paths(
os.path.join("data", "processed", "flickr_audio", features, subset),
self.faudio_uids)
# load audio embedding paths if specified
if embed_dir is not None:
embed_paths = []
for audio_path in audio_paths:
embed_paths.append(
os.path.join(
embed_dir, "flickr_audio", f"{keywords_split}",
f"{os.path.split(audio_path)[1]}.tfrecord"))
assert os.path.exists(embed_paths[-1])
self.keywords_set = tuple(np.asarray(x) for x in keywords_set)
self.audio_paths = np.asarray(audio_paths)
self.embed_paths = None
if embed_dir is not None:
self.embed_paths = np.asarray(embed_paths)
# get unique keywords and keyword class label lookup dict
self.keywords = sorted(np.unique(self.keywords_set[3]).tolist())
self.keyword_labels = {
keyword: idx for idx, keyword in enumerate(self.keywords)}
# get unique speakers and valid distractor speakers and labels
self.speakers = np.unique(self.faudio_metadata[2])
distractor_speaker_labels = {}
for speaker in self.speakers:
speaker_idx = np.where(
self.faudio_metadata[2] == speaker)[0]
unique_keywords, counts = np.unique(
self.keywords_set[3][speaker_idx], return_counts=True)
speaker_labels = []
for keyword, count in zip(unique_keywords, counts):
if count > 5: # constrain min. training samples per keyword
speaker_labels.append(keyword)
if len(speaker_labels) < 10: # constrain min. keywords per speaker
continue
else:
distractor_speaker_labels[speaker] = speaker_labels
self.distractor_speaker_labels = distractor_speaker_labels
# get lookup for unique indices per class label
self.class_unique_indices = {}
for keyword in self.keywords:
cls_idx = np.where(self.keywords_set[3] == keyword)[0]
self.class_unique_indices[keyword] = cls_idx
# set speech data as raw paths or extracted embedding paths
if self.embed_paths is None:
self.speech_data = self.audio_paths
else:
self.speech_data = self.embed_paths
if preprocess_func is not None:
self.speech_data = preprocess_func(self.speech_data)
@property
def data(self):
return self.speech_data
def _sample_episode(self, L, K, N, episode_labels=None):
# sample episode learning task (defined by L-way classes)
if episode_labels is None:
episode_labels = self.rng.choice(self.keywords, L, replace=False)
if self.speaker_mode == "distractor":
# choose a random speaker & label (from valid set) for evaluation
query_speaker = self.rng.choice(
list(self.distractor_speaker_labels.keys()), 1)[0]
episode_labels = self.rng.choice(
self.distractor_speaker_labels[query_speaker], L, replace=False)
query_label = self.rng.choice(episode_labels, 1)[0]
# sample learning examples from episode task
x_train_idx, y_train = [], []
for ep_label in episode_labels:
valid_class_indices = self.class_unique_indices[ep_label]
if self.speaker_mode == "distractor":
if ep_label == query_label:
# choose different speakers
valid_speaker_indices = np.where(np.isin(
self.faudio_metadata[2],
list(self.distractor_speaker_labels.keys())))[0]
valid_speaker_indices = np.intersect1d(
valid_speaker_indices,
np.where(
self.faudio_metadata[2] != query_speaker)[0])
else:
# choose same speaker
valid_speaker_indices = np.where(
self.faudio_metadata[2] == query_speaker)[0]
valid_class_indices = np.intersect1d(
valid_class_indices, valid_speaker_indices)
rand_cls_idx = self.rng.choice(
valid_class_indices, K, replace=False)
x_train_idx.extend(rand_cls_idx)
y_train.extend([ep_label] * K)
# sample evaluation examples from episode task
ep_test_labels_idx = self.rng.choice(
np.arange(len(episode_labels)), N, replace=True)
y_test = episode_labels[ep_test_labels_idx]
if self.speaker_mode == "difficult": # choose different speakers
train_speakers = self.faudio_metadata[2][x_train_idx]
valid_speaker_indices = np.where(np.invert(np.isin(
self.faudio_metadata[2], train_speakers)))[0]
if self.speaker_mode == "distractor": # all evaluation samples same label
y_test = [query_label] * N
# choose same speaker
valid_speaker_indices = np.where(
self.faudio_metadata[2] == query_speaker)[0]
x_test_idx = []
for test_label in y_test:
valid_class_indices = self.class_unique_indices[test_label]
if self.speaker_mode == "difficult" or self.speaker_mode == "distractor":
valid_class_indices = np.intersect1d(
valid_class_indices, valid_speaker_indices)
rand_cls_idx = self.rng.choice(
valid_class_indices, 1, replace=False)
x_test_idx.extend(rand_cls_idx)
curr_episode_train = np.asarray(x_train_idx), np.asarray(y_train)
self.curr_episode_train = curr_episode_train
curr_episode_test = np.asarray(x_test_idx), np.asarray(y_test)
self.curr_episode_test = curr_episode_test
return curr_episode_train, curr_episode_test
@property
def _learning_samples(self):
return (
self.data[self.curr_episode_train[0]], self.curr_episode_train[1])
@property
def _evaluation_samples(self):
return (
self.data[self.curr_episode_test[0]], self.curr_episode_test[1])
| FlickrSpeech | identifier_name |
UserController.go | package HomeControllers
import (
"fmt"
"strings"
"time"
"os"
"github.com/TruthHun/DocHub/helper"
"github.com/TruthHun/DocHub/helper/conv"
"github.com/TruthHun/DocHub/models"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"github.com/astaxie/beego/validation"
)
type UserController struct {
BaseController
}
func (this *UserController) Prepare() {
this.BaseController.Prepare()
this.Xsrf()
}
//会员中心
func (this *UserController) Get() {
uid, _ := this.GetInt(":uid")
path := this.GetString(":splat")
params := conv.Path2Map(path)
//排序
sort := "new"
if param, ok := params["sort"]; ok {
sort = param
}
//页码
p := 1
if page, ok := params["p"]; ok {
p = helper.Interface2Int(page)
if p < 1 {
p = 1
}
}
switch sort {
case "dcnt":
sort = "dcnt"
case "score":
sort = "score"
case "vcnt":
sort = "vcnt"
case "ccnt":
sort = "ccnt"
default:
sort = "new"
}
//显示风格
style := "list"
if s, ok := params["style"]; ok {
style = s
}
if style != "th" {
style = "list"
}
//cid:collect folder id ,收藏夹id
cid := 0
if s, ok := params["cid"]; ok {
cid = helper.Interface2Int(s)
}
if p < 1 {
p = 1
}
if uid < 1 {
uid = this.IsLogin
}
this.Data["Uid"] = uid
if uid <= 0 {
this.Redirect("/user/login", 302)
return
}
listRows := 16
user, rows, err := models.NewUser().GetById(uid)
if err != nil {
helper.Logger.Error(err.Error())
}
if rows == 0 {
this.Redirect("/", 302)
return
}
if cid > 0 {
sql := fmt.Sprintf("select Title,Cnt from %v where Id=? limit 1", models.GetTableCollectFolder())
var params []orm.Params
orm.NewOrm().Raw(sql, cid).Values(¶ms)
if len(params) == 0 {
this.Redirect(fmt.Sprintf("/user/%v/collect", uid), 302)
return
}
this.Data["Folder"] = params[0]
fields := "di.Id,di.`Uid`, di.`Cid`, di.`TimeCreate`, di.`Dcnt`, di.`Vcnt`, di.`Ccnt`, di.`Score`, di.`Status`, di.`ChanelId`, di.`Pid`,c.Title Category,u.Username,d.Title,ds.`Md5`, ds.`Ext`, ds.`ExtCate`, ds.`ExtNum`, ds.`Page`, ds.`Size`"
sqlFormat := `
select %v from %v di left join %v u on di.Uid=u.Id
left join %v clt on clt.Did=di.Id
left join %v d on d.Id=di.Id
left join %v c on c.Id=di.cid
left join %v ds on ds.Id=di.DsId
where %v order by %v limit %v,%v
`
sql = fmt.Sprintf(sqlFormat,
fields,
models.GetTableDocumentInfo(),
models.GetTableUser(),
models.GetTableCollect(),
models.GetTableDocument(),
models.GetTableCategory(),
models.GetTableDocumentStore(),
fmt.Sprintf("clt.Cid=%v", cid),
"clt.Id desc",
(p-1)*listRows, listRows,
)
var data []orm.Params
orm.NewOrm().Raw(sql).Values(&data)
this.Data["Lists"] = data
this.Data["Page"] = helper.Paginations(6, helper.Interface2Int(params[0]["Cnt"]), listRows, p, fmt.Sprintf("/user/%v/doc/cid/%v", user["Id"], cid), "sort", sort, "style", style)
} else {
this.Data["Lists"], _, _ = models.GetDocList(uid, 0, 0, 0, p, listRows, sort, 1)
this.Data["Page"] = helper.Paginations(6, helper.Interface2Int(user["Document"]), listRows, p, fmt.Sprintf("/user/%v/doc", user["Id"]), "sort", sort, "style", style)
}
this.Data["Tab"] = "doc"
this.Data["Cid"] = cid
this.Data["User"] = user
this.Data["PageId"] = "wenku-user"
this.Data["IsUser"] = true
this.Data["Sort"] = sort
this.Data["Style"] = style
this.Data["P"] = p
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Doc", "文档列表-会员中心-"+user["Username"].(string), "会员中心,文档列表,"+user["Username"].(string), "文档列表-会员中心-"+user["Username"].(string), this.Sys.Site)
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
if err != nil {
helper.Logger.Error(err.Error())
}
this.TplName = "index.html"
}
//金币记录
func (this *UserController) Coin() {
uid, _ := this.GetInt(":uid")
p, _ := this.GetInt("p", 1)
if p < 1 {
p = 1
}
if uid < 1 {
uid = this.IsLogin
}
if uid <= 0 {
this.Redirect("/user/login", 302)
return
}
listRows := 16
lists, _, _ := models.GetList(models.GetTableCoinLog(), p, listRows, orm.NewCondition().And("Uid", uid), "-Id")
if p > 1 { // 当页码大于0,则以 JSON 返回数据
this.ResponseJson(true, "数据获取成功", lists)
}
user, rows, err := models.NewUser().GetById(uid)
if err != nil {
helper.Logger.Error(err.Error())
}
if rows == 0 {
this.Redirect("/", 302)
return
}
this.Data["Lists"] = lists
this.Data["User"] = user
this.Data["PageId"] = "wenku-user"
this.Data["Tab"] = "coin"
this.Data["IsUser"] = true
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
if err != nil {
helper.Logger.Error(err.Error())
}
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Coin", "财富记录—会员中心-"+user["Username"].(string), "会员中心,财富记录,"+user["Username"].(string), "财富记录—会员中心-"+user["Username"].(string), this.Sys.Site)
this.TplName = "coin.html"
}
// 收藏夹
func (this *UserController) Collect() {
this.Data["Tab"] = "collect"
action := this.GetString("action")
uid, _ := this.GetInt(":uid")
p, _ := this.GetInt("p", 1)
if p < 1 {
p = 1
}
if uid < 1 {
uid = this.IsLogin
}
if uid <= 0 {
this.Redirect("/user/login", 302)
return
}
listRows := 100
lists, _, _ := models.GetList(models.GetTableCollectFolder(), p, listRows, orm.NewCondition().And("Uid", uid), "-Id")
if p > 1 { // 页码大于1,以 JSON 返回数据
this.ResponseJson(true, "数据获取成功", lists)
}
user, rows, err := models.NewUser().GetById(uid)
if err != nil {
helper.Logger.Error(err.Error())
}
if rows == 0 {
this.Redirect("/", 302)
return
}
this.Data["Lists"] = lists
this.Data["User"] = user
this.Data["PageId"] = "wenku-user"
this.Data["IsUser"] = true
this.Data["Uid"] = uid
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
if err != nil {
helper.Logger.Error(err.Error())
}
this.TplName = "collect.html"
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Folder", "收藏夹—会员中心-"+user["Username"].(string), "会员中心,收藏夹,"+user["Username"].(string), "收藏夹—会员中心-"+user["Username"].(string), this.Sys.Site)
if action == "edit" {
this.Data["Edit"] = true
} else {
this.Data["Edit"] = false
}
}
// Login handles member sign-in [GET/POST]. GET renders the login page;
// POST validates the email/password pair (passwords are stored as MD5
// hashes), rejects banned accounts, and sets the login cookie.
func (this *UserController) Login() {
	// Already signed in — go straight to the user center.
	if this.IsLogin > 0 {
		this.Redirect("/user", 302)
		return
	}
	// GET request: render the login template.
	if this.Ctx.Request.Method == "GET" {
		this.Data["Seo"] = models.NewSeo().GetByPage("PC-Login", "会员登录", "会员登录", "会员登录", this.Sys.Site)
		this.Data["IsUser"] = true
		this.Data["PageId"] = "wenku-reg"
		this.TplName = "login.html"
		return
	}
	// POST: parse credentials. (Removed the named `Post` struct type that
	// was declared but never used — dead code.)
	var post struct {
		Email, Password string
	}
	this.ParseForm(&post)
	valid := validation.Validation{}
	res := valid.Email(post.Email, "Email")
	if !res.Ok {
		this.ResponseJson(false, "登录失败,邮箱格式不正确")
	}
	ModelUser := models.NewUser()
	// Match on email + MD5(password).
	users, rows, err := ModelUser.UserList(1, 1, "", "", "u.`email`=? and u.`password`=?", post.Email, helper.MyMD5(post.Password))
	if rows == 0 || err != nil {
		if err != nil {
			helper.Logger.Error(err.Error())
		}
		this.ResponseJson(false, "登录失败,邮箱或密码不正确")
	}
	user := users[0]
	this.IsLogin = helper.Interface2Int(user["Id"])
	if this.IsLogin > 0 {
		// Reject accounts disabled by an administrator.
		if info := ModelUser.UserInfo(this.IsLogin); info.Status == false { // banned
			this.ResponseJson(false, "登录失败,您的账号已被管理员禁用")
		}
		this.BaseController.SetCookieLogin(this.IsLogin)
		this.ResponseJson(true, "登录成功")
	}
	this.ResponseJson(false, "登录失败,未知错误!")
}
// Logout drops the login cookie and sends the visitor home;
// XMLHttpRequest callers receive a JSON acknowledgement instead.
func (this *UserController) Logout() {
	this.ResetCookie()
	xrw, ok := this.Ctx.Request.Header["X-Requested-With"]
	isAjax := ok && xrw[0] == "XMLHttpRequest"
	if isAjax {
		this.ResponseJson(true, "退出登录成功")
	}
	this.Redirect("/", 302)
}
//会员注册[GET/POST]
func (this *UserController) Reg() {
if this.IsLogin > 0 {
this.Redirect("/user", 302)
return
}
if this.Ctx.Request.Method == "GET" {
this.Data["IsUser"] = true
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Login", "会员注册", "会员注册", "会员注册", this.Sys.Site)
this.Data["PageId"] = "wenku-reg"
if this.Sys.IsCloseReg {
this.TplName = "regclose.html"
} else {
this.TplName = "reg.html"
}
return
}
if this.Sys.IsCloseReg {
this.ResponseJson(false, "注册失败,站点已关闭注册功能")
}
//先验证邮箱验证码是否正确
email := this.GetString("email")
code := this.GetString("code")
sessEmail := fmt.Sprintf("%v", this.GetSession("RegMail"))
sessCode := fmt.Sprintf("%v", this.GetSession("RegCode"))
if sessEmail != email || sessCode != code {
this.ResponseJson(false, "邮箱验证码不正确,请重新输入或重新获取")
}
// 注册
err, uid := models.NewUser().Reg(
email,
this.GetString("username"),
this.GetString("password"),
this.GetString("repassword"),
this.GetString("intro"),
)
if err != nil || uid == 0 {
if err != nil {
helper.Logger.Error(err.Error())
}
this.ResponseJson(false, "注册失败")
}
models.Regulate(models.GetTableSys(), "CntUser", 1, "Id=1") //站点用户数量增加
this.IsLogin = uid | }
// 发送邮件
func (this *UserController) SendMail() {
if len(this.Ctx.GetCookie(beego.AppConfig.String("SessionName"))) == 0 {
this.Redirect("/", 302)
return
}
//发送邮件的类型:注册(reg)和找回密码(findpwd)
t := this.GetString("type")
if t != "reg" && t != "findpwd" {
this.ResponseJson(false, "邮件发送类型不正确")
}
valid := validation.Validation{}
email := this.GetString("email")
res := valid.Email(email, "mail")
if res.Error != nil || !res.Ok {
this.ResponseJson(false, "邮箱格式不正确")
}
//检测邮箱是否已被注册
ModelUser := models.NewUser()
user := ModelUser.GetUserField(orm.NewCondition().And("email", email))
//注册邮件
if t == "reg" {
if user.Id > 0 {
this.ResponseJson(false, "该邮箱已经被注册会员")
}
code := helper.RandStr(6, 0)
fmt.Print(code)
//发送验证是否成功注释掉了
err := models.SendMail(email, fmt.Sprintf("%v会员注册验证码", this.Sys.Site), strings.Replace(this.Sys.TplEmailReg, "{code}", code, -1))
if err != nil {
helper.Logger.Error("邮件发送失败:%v", err.Error())
this.ResponseJson(false, "邮件发送失败,请联系管理员检查邮箱配置是否正确")
}
this.SetSession("RegMail", email)
this.SetSession("RegCode", code)
this.ResponseJson(true, "邮件发送成功,请打开邮箱查看验证码")
}
// 找回密码
if user.Id == 0 {
this.ResponseJson(false, "邮箱不存在")
}
code := helper.RandStr(6, 0)
err := models.SendMail(email, fmt.Sprintf("%v找回密码验证码", this.Sys.Site), strings.Replace(this.Sys.TplEmailFindPwd, "{code}", code, -1))
if err != nil {
helper.Logger.Error("邮件发送失败:%v", err.Error())
this.ResponseJson(false, "邮件发送失败,请联系管理员检查邮箱配置是否正确")
}
this.SetSession("FindPwdMail", email)
this.SetSession("FindPwdCode", code)
this.ResponseJson(true, "邮件发送成功,请打开邮箱查看验证码")
}
// Sign handles the member's daily check-in. A (Uid, Date) row is
// inserted; if that fails the member is assumed to have signed already
// today (presumably enforced by a unique index — TODO confirm). On
// success, this.Sys.Sign coins are credited and logged.
func (this *UserController) Sign() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "签到失败,请先登录")
	}
	var data = models.Sign{
		Uid:  this.IsLogin,
		Date: time.Now().Format("20060102"), // yyyymmdd
	}
	_, err := orm.NewOrm().Insert(&data)
	if err != nil {
		this.ResponseJson(false, "签到失败,您今天已签到")
	}
	// Only write the coin-log entry if the coin credit succeeded.
	if err = models.Regulate(models.GetTableUserInfo(), "Coin", this.Sys.Sign, fmt.Sprintf("Id=%v", this.IsLogin)); err == nil {
		log := models.CoinLog{
			Uid:  this.IsLogin,
			Coin: this.Sys.Sign,
			Log:  fmt.Sprintf("于%v签到成功,增加 %v 个金币", time.Now().Format("2006-01-02 15:04:05"), this.Sys.Sign),
		}
		models.NewCoinLog().LogRecord(log)
	}
	this.ResponseJson(true, fmt.Sprintf("恭喜您,今日签到成功,领取了 %v 个金币", this.Sys.Sign))
}
// CheckLogin reports the current sign-in state as a JSON payload.
func (this *UserController) CheckLogin() {
	authenticated := this.BaseController.IsLogin > 0
	if authenticated {
		this.ResponseJson(true, "已登录")
	}
	this.ResponseJson(false, "您当前处于未登录状态,请先登录")
}
// 创建收藏夹
func (this *UserController) CreateCollectFolder() {
if this.IsLogin == 0 {
this.ResponseJson(false, "您当前未登录,请先登录")
}
cover := ""
timestamp := int(time.Now().Unix())
//文件在文档库中未存在,则接收文件并做处理
f, fh, err := this.GetFile("Cover")
if err == nil {
defer f.Close()
slice := strings.Split(fh.Filename, ".")
ext := slice[len(slice)-1]
dir := fmt.Sprintf("./uploads/%v/%v/", time.Now().Format("2006-01-02"), this.IsLogin)
os.MkdirAll(dir, 0777)
file := helper.MyMD5(fmt.Sprintf("%v-%v-%v", timestamp, this.IsLogin, fh.Filename)) + "." + ext
err = this.SaveToFile("Cover", dir+file)
if err == nil {
//将图片移动到OSS
err = models.NewOss().MoveToOss(dir+file, file, true, true)
helper.Logger.Debug(dir + file)
if err != nil {
helper.Logger.Error(err.Error())
}
cover = file
}
}
// 收藏夹
folder := models.CollectFolder{
Uid: this.IsLogin,
Title: this.GetString("Title"),
Description: this.GetString("Description"),
TimeCreate: int(time.Now().Unix()),
Cnt: 0,
Cover: cover,
}
// 收藏夹 Id 大于0,则表示编辑收藏夹
folder.Id, _ = this.GetInt("Id")
if folder.Id > 0 { // 编辑收藏夹
cols := []string{"Title", "Description"}
if len(cover) > 0 {
cols = append(cols, "Cover")
}
if _, err = orm.NewOrm().Update(&folder, cols...); err == nil {
this.ResponseJson(true, "收藏夹编辑成功")
}
} else { // 创建收藏夹
if _, err = orm.NewOrm().Insert(&folder); err == nil { //收藏夹数量+1
models.Regulate(models.GetTableUserInfo(), "Collect", 1, "Id=?", this.IsLogin)
this.ResponseJson(true, "收藏夹创建成功")
}
}
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "操作失败,请重试")
}
this.ResponseJson(true, "操作成功")
}
// 找回密码
func (this *UserController) FindPwd() {
if this.IsLogin > 0 {
this.Redirect("/user", 302)
return
}
if this.Ctx.Request.Method == "GET" {
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Findpwd", "找回密码", "找回密码", "找回密码", this.Sys.Site)
this.Data["IsUser"] = true
this.Data["PageId"] = "wenku-reg"
this.TplName = "findpwd.html"
return
}
rules := map[string][]string{
"username": {"required", "mincount:2", "maxcount:16"},
"email": {"required", "email"},
"code": {"required", "len:6"},
"password": {"required", "mincount:6"},
"repassword": {"required", "mincount:6"},
}
params, errs := helper.Valid(this.Ctx.Request.Form, rules)
if len(errs) > 0 {
if _, ok := errs["username"]; ok {
this.ResponseJson(false, "用户名限2-16个字符")
}
if _, ok := errs["email"]; ok {
this.ResponseJson(false, "邮箱格式不正确")
}
if _, ok := errs["code"]; ok {
this.ResponseJson(false, "请输入6位验证码")
}
if _, ok := errs["password"]; ok {
this.ResponseJson(false, "密码长度,至少6个字符")
}
if _, ok := errs["repassword"]; ok {
this.ResponseJson(false, "密码长度,至少6个字符")
}
}
//校验验证码和邮箱是否匹配
if fmt.Sprintf("%v", this.GetSession("FindPwdMail")) != params["email"].(string) || fmt.Sprintf("%v", this.GetSession("FindPwdCode")) != params["code"].(string) {
this.ResponseJson(false, "验证码不正确,修改密码失败")
}
pwd := helper.MyMD5(params["password"].(string))
repwd := helper.MyMD5(params["repassword"].(string))
if pwd != repwd {
this.ResponseJson(false, "确认密码和密码不一致")
}
user := models.NewUser().GetUserField(orm.NewCondition().And("Email", params["email"]))
if user.Id == 0 || user.Username != params["username"].(string) {
this.ResponseJson(false, "重置密码失败,用户名与邮箱不匹配")
}
_, err := models.UpdateByIds("user", "Password", pwd, user.Id)
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "重置密码失败,请刷新页面重试")
}
this.DelSession("FindPwdMail")
this.DelSession("FindPwdCode")
this.ResponseJson(true, "重置密码成功,请重新登录")
}
// DocDel soft-deletes a document by moving it to the recycle bin.
// NOTE(review): assumed RemoveToRecycle checks that this.IsLogin owns
// the document — verify in models.
func (this *UserController) DocDel() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "请先登录")
	}
	docid, _ := this.GetInt(":doc")
	if docid == 0 {
		this.ResponseJson(false, "删除失败,文档不存在")
	}
	errs := models.NewDocumentRecycle().RemoveToRecycle(this.IsLogin, true, docid)
	if len(errs) > 0 {
		helper.Logger.Error("删除失败:%v", strings.Join(errs, "; "))
		this.ResponseJson(false, "删除失败,文档不存在")
	}
	this.ResponseJson(true, "删除成功")
}
// DocEdit shows and processes the document-edit form [GET/POST] for a
// document owned by the current member. POST updates title/keywords/
// description plus the classification (channel/parent/child category)
// and rebalances the category document counters.
func (this *UserController) DocEdit() {
	if this.IsLogin == 0 {
		this.Redirect("/user", 302)
		return // Fix: beego's Redirect does not stop execution; file convention elsewhere is Redirect+return
	}
	docId, _ := this.GetInt(":doc")
	if docId == 0 {
		this.Redirect("/user", 302)
		return
	}
	info := models.DocumentInfo{Id: docId}
	err := orm.NewOrm().Read(&info)
	if err != nil {
		helper.Logger.Error(err.Error())
		this.Redirect("/user", 302)
		return
	}
	// The document must belong to the logged-in member.
	if info.Uid != this.IsLogin {
		this.Redirect("/user", 302)
		return
	}
	doc := models.Document{Id: docId}
	// POST: apply the edit.
	if this.Ctx.Request.Method == "POST" {
		rules := map[string][]string{ // (typo `ruels` renamed)
			"Title":  {"required", "unempty"},
			"Chanel": {"required", "gt:0", "int"},
			"Pid":    {"required", "gt:0", "int"},
			"Cid":    {"required", "gt:0", "int"},
			"Tags":   {"required"},
			"Intro":  {"required"},
			"Price":  {"required", "int"},
		}
		params, errs := helper.Valid(this.Ctx.Request.Form, rules)
		if len(errs) > 0 {
			this.ResponseJson(false, "参数错误")
		}
		doc.Title = params["Title"].(string)
		doc.Keywords = params["Tags"].(string)
		doc.Description = params["Intro"].(string)
		// Fix: remember the OLD classification before overwriting `info`.
		// Previously the "-1" Regulate below used the already-overwritten
		// new ids, so the old categories were never decremented and the
		// new ones got -1 then +1 (net zero).
		oldChanelId, oldCid, oldPid := info.ChanelId, info.Cid, info.Pid
		info.Pid = params["Pid"].(int)
		info.Cid = params["Cid"].(int)
		info.ChanelId = params["Chanel"].(int)
		info.Price = params["Price"].(int)
		info.TimeUpdate = int(time.Now().Unix())
		orm.NewOrm().Update(&doc, "Title", "Keywords", "Description")
		// Fix: persist TimeUpdate — it was set above but missing from the column list.
		orm.NewOrm().Update(&info, "Pid", "Cid", "ChanelId", "Price", "TimeUpdate")
		// Old categories: counter -1.
		models.Regulate(models.GetTableCategory(), "Cnt", -1, fmt.Sprintf("Id in(%v,%v,%v)", oldChanelId, oldCid, oldPid))
		// New categories: counter +1.
		models.Regulate(models.GetTableCategory(), "Cnt", 1, fmt.Sprintf("Id in(%v,%v,%v)", params["Chanel"], params["Cid"], params["Pid"]))
		this.ResponseJson(true, "文档编辑成功")
	}
	// GET: render the edit form.
	err = orm.NewOrm().Read(&doc)
	if err != nil {
		helper.Logger.Error(err.Error())
		this.Redirect("/user", 302)
		return
	}
	cond := orm.NewCondition().And("status", 1)
	data, _, _ := models.GetList(models.GetTableCategory(), 1, 2000, cond, "sort")
	this.Data["User"], _, _ = models.NewUser().GetById(this.IsLogin)
	this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
	this.Data["IsUser"] = true
	this.Data["Cates"], _ = conv.InterfaceToJson(data)
	this.Data["json"] = data
	this.Data["PageId"] = "wenku-user"
	this.Data["Info"] = info
	this.Data["Doc"] = doc
	this.Data["Tab"] = "doc"
	this.TplName = "edit.html"
}
// CollectFolderDel removes a collect folder (:cid) belonging to the
// current member; the uid is passed to DelFolder for ownership checking.
func (this *UserController) CollectFolderDel() {
	cid, _ := this.GetInt(":cid")
	if cid > 0 && this.IsLogin > 0 {
		err := models.NewCollect().DelFolder(cid, this.IsLogin)
		if err != nil {
			helper.Logger.Error(err.Error())
			this.ResponseJson(false, err.Error())
		}
		this.ResponseJson(true, "收藏夹删除成功")
	}
	this.ResponseJson(false, "删除失败,参数错误")
}
// CollectCancel removes document :did from collect folder :cid for the
// current member.
func (this *UserController) CollectCancel() {
	cid, _ := this.GetInt(":cid")
	did, _ := this.GetInt(":did")
	if err := models.NewCollect().Cancel(did, cid, this.IsLogin); err != nil {
		helper.Logger.Error(err.Error())
		// Fix: corrected typo in the user-facing message (为 → 未).
		this.ResponseJson(false, "移除收藏失败,可能您未收藏该文档")
	}
	this.ResponseJson(true, "移除收藏成功")
}
// Avatar replaces the member's avatar image: validates the extension,
// saves the upload under ./static/header with an MD5-hashed name,
// mirrors it to OSS, asynchronously deletes the old avatar, and updates
// the user record.
func (this *UserController) Avatar() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "请先登录")
	}
	//dir := fmt.Sprintf("./uploads/%v/%v", time.Now().Format("2006-01-02"), this.IsLogin)
	// Fix: fmt.Sprintf without formatting verbs (go vet warning) replaced
	// by a plain literal.
	dir := "./static/header"
	os.MkdirAll(dir, 0777)
	f, fh, err := this.GetFile("Avatar")
	if err != nil {
		helper.Logger.Error("用户(%v)更新头像失败:%v", this.IsLogin, err.Error())
		this.ResponseJson(false, "头像文件上传失败")
	}
	defer f.Close()
	slice := strings.Split(fh.Filename, ".")
	ext := strings.ToLower(slice[len(slice)-1])
	if !(ext == "jpg" || ext == "jpeg" || ext == "png" || ext == "gif") {
		this.ResponseJson(false, "头像图片格式只支持jpg、jpeg、png和gif")
	}
	// Fix: hash computed once so the temp filename and the OSS object name
	// always agree (two separate time.Now() calls could straddle a second).
	name := helper.MyMD5(fmt.Sprintf("%v-%v-%v", fh.Filename, this.IsLogin, time.Now().Unix())) + "." + ext
	tmpFile := dir + "/" + name
	saveFile := name
	err = this.SaveToFile("Avatar", tmpFile)
	if err != nil {
		helper.Logger.Error("用户(%v)头像保存失败:%v", this.IsLogin, err.Error())
		this.ResponseJson(false, "头像文件保存失败")
	}
	err = models.NewOss().MoveToOss(tmpFile, saveFile, true, true)
	if err != nil {
		helper.Logger.Error(err.Error())
		this.ResponseJson(false, "头像文件保存失败")
	}
	// Load the current record so the previous avatar can be removed.
	var user = models.User{Id: this.IsLogin}
	orm.NewOrm().Read(&user)
	if len(user.Avatar) > 0 {
		// Remove the old avatar from OSS in the background.
		go models.NewOss().DelFromOss(true, user.Avatar)
	}
	user.Avatar = saveFile
	rows, err := orm.NewOrm().Update(&user, "Avatar")
	if rows > 0 && err == nil {
		this.ResponseJson(true, "头像更新成功")
	}
	if err != nil {
		helper.Logger.Error(err.Error())
	}
	this.ResponseJson(false, "头像更新失败")
}
//编辑个人信息
func (this *UserController) Edit() {
if this.IsLogin == 0 {
this.ResponseJson(false, "请先登录")
}
changepwd := false
cols := []string{"Intro"}
rules := map[string][]string{
"OldPassword": {"required"},
"NewPassword": {"required"},
"RePassword": {"required"},
"Intro": {"required"},
}
params, errs := helper.Valid(this.Ctx.Request.Form, rules)
if len(errs) > 0 {
this.ResponseJson(false, "参数不正确")
}
var user = models.User{Id: this.IsLogin}
orm.NewOrm().Read(&user)
if len(params["OldPassword"].(string)) > 0 || len(params["NewPassword"].(string)) > 0 || len(params["RePassword"].(string)) > 0 {
if len(params["NewPassword"].(string)) < 6 || len(params["RePassword"].(string)) < 6 {
this.ResponseJson(false, "密码长度必须至少6个字符")
}
opwd := helper.MyMD5(params["OldPassword"].(string))
npwd := helper.MyMD5(params["NewPassword"].(string))
rpwd := helper.MyMD5(params["RePassword"].(string))
if user.Password != opwd {
this.ResponseJson(false, "原密码不正确")
}
if npwd != rpwd {
this.ResponseJson(false, "确认密码和新密码必须一致")
}
if opwd == npwd {
this.ResponseJson(false, "确认密码不能与原密码相同")
}
user.Password = rpwd
cols = append(cols, "Password")
changepwd = true
}
user.Intro = params["Intro"].(string)
affected, err := orm.NewOrm().Update(&user, cols...)
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "设置失败,请刷新页面重试")
}
if affected == 0 {
this.ResponseJson(true, "设置失败,可能您未对内容做更改")
}
if changepwd {
this.ResetCookie()
this.ResponseJson(true, "设置成功,您设置了新密码,请重新登录")
}
this.ResponseJson(true, "设置成功")
} | this.SetCookieLogin(uid)
this.ResponseJson(true, "会员注册成功") | random_line_split |
UserController.go | package HomeControllers
import (
"fmt"
"strings"
"time"
"os"
"github.com/TruthHun/DocHub/helper"
"github.com/TruthHun/DocHub/helper/conv"
"github.com/TruthHun/DocHub/models"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"github.com/astaxie/beego/validation"
)
// UserController serves the front-site member-center pages:
// profile/document list, coin log, collect folders, login, registration,
// password recovery, avatar upload and document management.
type UserController struct {
	BaseController
}

// Prepare runs before every action: base-controller preparation plus an
// XSRF token check for form posts.
func (this *UserController) Prepare() {
	this.BaseController.Prepare()
	this.Xsrf()
}
// Get renders the member center's document-list page for user :uid.
// Extra options travel in the :splat path segment as key/value pairs:
// sort (new|dcnt|score|vcnt|ccnt), p (page), style (list|th) and cid
// (collect-folder id; when > 0 the folder's documents are listed via a
// raw join query instead of the user's own uploads).
// NOTE(review): handlers in this file issue ResponseJson without a
// following return — assumed ResponseJson calls StopRun; confirm.
func (this *UserController) Get() {
	uid, _ := this.GetInt(":uid")
	path := this.GetString(":splat")
	params := conv.Path2Map(path)
	// Sort order (whitelisted by the switch below).
	sort := "new"
	if param, ok := params["sort"]; ok {
		sort = param
	}
	// Page number.
	p := 1
	if page, ok := params["p"]; ok {
		p = helper.Interface2Int(page)
		if p < 1 {
			p = 1
		}
	}
	// Whitelist the sort key; anything unknown falls back to "new".
	switch sort {
	case "dcnt":
		sort = "dcnt"
	case "score":
		sort = "score"
	case "vcnt":
		sort = "vcnt"
	case "ccnt":
		sort = "ccnt"
	default:
		sort = "new"
	}
	// Display style: "th" (thumbnails) or the default "list".
	style := "list"
	if s, ok := params["style"]; ok {
		style = s
	}
	if style != "th" {
		style = "list"
	}
	// cid: collect-folder id.
	cid := 0
	if s, ok := params["cid"]; ok {
		cid = helper.Interface2Int(s)
	}
	if p < 1 {
		p = 1
	}
	// No uid in the URL: show the logged-in user's own page.
	if uid < 1 {
		uid = this.IsLogin
	}
	this.Data["Uid"] = uid
	if uid <= 0 {
		this.Redirect("/user/login", 302)
		return
	}
	listRows := 16
	user, rows, err := models.NewUser().GetById(uid)
	if err != nil {
		helper.Logger.Error(err.Error())
	}
	if rows == 0 {
		this.Redirect("/", 302)
		return
	}
	if cid > 0 {
		// Folder view: fetch the folder's title and document count first.
		sql := fmt.Sprintf("select Title,Cnt from %v where Id=? limit 1", models.GetTableCollectFolder())
		var params []orm.Params // NOTE: shadows the path-params map above
		orm.NewOrm().Raw(sql, cid).Values(&params)
		if len(params) == 0 {
			this.Redirect(fmt.Sprintf("/user/%v/collect", uid), 302)
			return
		}
		this.Data["Folder"] = params[0]
		// Join document info with user, collect, document, category and
		// document-store tables to build the folder's document list.
		fields := "di.Id,di.`Uid`, di.`Cid`, di.`TimeCreate`, di.`Dcnt`, di.`Vcnt`, di.`Ccnt`, di.`Score`, di.`Status`, di.`ChanelId`, di.`Pid`,c.Title Category,u.Username,d.Title,ds.`Md5`, ds.`Ext`, ds.`ExtCate`, ds.`ExtNum`, ds.`Page`, ds.`Size`"
		sqlFormat := `
select %v from %v di left join %v u on di.Uid=u.Id
left join %v clt on clt.Did=di.Id
left join %v d on d.Id=di.Id
left join %v c on c.Id=di.cid
left join %v ds on ds.Id=di.DsId
where %v order by %v limit %v,%v
`
		sql = fmt.Sprintf(sqlFormat,
			fields,
			models.GetTableDocumentInfo(),
			models.GetTableUser(),
			models.GetTableCollect(),
			models.GetTableDocument(),
			models.GetTableCategory(),
			models.GetTableDocumentStore(),
			fmt.Sprintf("clt.Cid=%v", cid),
			"clt.Id desc",
			(p-1)*listRows, listRows,
		)
		var data []orm.Params
		orm.NewOrm().Raw(sql).Values(&data)
		this.Data["Lists"] = data
		this.Data["Page"] = helper.Paginations(6, helper.Interface2Int(params[0]["Cnt"]), listRows, p, fmt.Sprintf("/user/%v/doc/cid/%v", user["Id"], cid), "sort", sort, "style", style)
	} else {
		// Default view: the user's own documents.
		this.Data["Lists"], _, _ = models.GetDocList(uid, 0, 0, 0, p, listRows, sort, 1)
		this.Data["Page"] = helper.Paginations(6, helper.Interface2Int(user["Document"]), listRows, p, fmt.Sprintf("/user/%v/doc", user["Id"]), "sort", sort, "style", style)
	}
	this.Data["Tab"] = "doc"
	this.Data["Cid"] = cid
	this.Data["User"] = user
	this.Data["PageId"] = "wenku-user"
	this.Data["IsUser"] = true
	this.Data["Sort"] = sort
	this.Data["Style"] = style
	this.Data["P"] = p
	this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Doc", "文档列表-会员中心-"+user["Username"].(string), "会员中心,文档列表,"+user["Username"].(string), "文档列表-会员中心-"+user["Username"].(string), this.Sys.Site)
	// Sidebar ranking: top-8 uploaders by document count.
	this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
	if err != nil {
		helper.Logger.Error(err.Error())
	}
	this.TplName = "index.html"
}
// Coin renders the coin (wealth) transaction log of user :uid.
// Requests for pages beyond the first are answered with JSON so the
// front end can load further pages via ajax.
func (this *UserController) Coin() {
	uid, _ := this.GetInt(":uid")
	p, _ := this.GetInt("p", 1)
	if p < 1 {
		p = 1
	}
	// Default to the logged-in user when no uid is given.
	if uid < 1 {
		uid = this.IsLogin
	}
	if uid <= 0 {
		this.Redirect("/user/login", 302)
		return
	}
	listRows := 16
	lists, _, _ := models.GetList(models.GetTableCoinLog(), p, listRows, orm.NewCondition().And("Uid", uid), "-Id")
	if p > 1 { // page > 1: return the list as JSON (ajax paging)
		this.ResponseJson(true, "数据获取成功", lists)
	}
	user, rows, err := models.NewUser().GetById(uid)
	if err != nil {
		helper.Logger.Error(err.Error())
	}
	if rows == 0 {
		this.Redirect("/", 302)
		return
	}
	this.Data["Lists"] = lists
	this.Data["User"] = user
	this.Data["PageId"] = "wenku-user"
	this.Data["Tab"] = "coin"
	this.Data["IsUser"] = true
	// Sidebar ranking: top-8 uploaders by document count.
	this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
	if err != nil {
		helper.Logger.Error(err.Error())
	}
	this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Coin", "财富记录—会员中心-"+user["Username"].(string), "会员中心,财富记录,"+user["Username"].(string), "财富记录—会员中心-"+user["Username"].(string), this.Sys.Site)
	this.TplName = "coin.html"
}
// Collect renders the collect-folder list of user :uid; ?action=edit
// switches the template into edit mode. Pages beyond the first are
// returned as JSON for ajax paging.
func (this *UserController) Collect() {
	this.Data["Tab"] = "collect"
	action := this.GetString("action")
	uid, _ := this.GetInt(":uid")
	p, _ := this.GetInt("p", 1)
	if p < 1 {
		p = 1
	}
	// Default to the logged-in user when no uid is given.
	if uid < 1 {
		uid = this.IsLogin
	}
	if uid <= 0 {
		this.Redirect("/user/login", 302)
		return
	}
	listRows := 100
	lists, _, _ := models.GetList(models.GetTableCollectFolder(), p, listRows, orm.NewCondition().And("Uid", uid), "-Id")
	if p > 1 { // page > 1: return the list as JSON (ajax paging)
		this.ResponseJson(true, "数据获取成功", lists)
	}
	user, rows, err := models.NewUser().GetById(uid)
	if err != nil {
		helper.Logger.Error(err.Error())
	}
	if rows == 0 {
		this.Redirect("/", 302)
		return
	}
	this.Data["Lists"] = lists
	this.Data["User"] = user
	this.Data["PageId"] = "wenku-user"
	this.Data["IsUser"] = true
	this.Data["Uid"] = uid
	// Sidebar ranking: top-8 uploaders by document count.
	this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
	if err != nil {
		helper.Logger.Error(err.Error())
	}
	this.TplName = "collect.html"
	this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Folder", "收藏夹—会员中心-"+user["Username"].(string), "会员中心,收藏夹,"+user["Username"].(string), "收藏夹—会员中心-"+user["Username"].(string), this.Sys.Site)
	// Edit mode toggles extra controls in the template.
	if action == "edit" {
		this.Data["Edit"] = true
	} else {
		this.Data["Edit"] = false
	}
}
// Login handles member sign-in [GET/POST]. GET renders the login page;
// POST validates the email/password pair (passwords are stored as MD5
// hashes), rejects banned accounts, and sets the login cookie.
// NOTE(review): code relies on ResponseJson terminating the request
// (StopRun) — confirm.
func (this *UserController) Login() {
	// Already signed in — go straight to the user center.
	if this.IsLogin > 0 {
		this.Redirect("/user", 302)
		return
	}
	// GET request: render the login template.
	if this.Ctx.Request.Method == "GET" {
		this.Data["Seo"] = models.NewSeo().GetByPage("PC-Login", "会员登录", "会员登录", "会员登录", this.Sys.Site)
		this.Data["IsUser"] = true
		this.Data["PageId"] = "wenku-reg"
		this.TplName = "login.html"
		return
	}
	// POST: parse credentials. (The previously declared named `Post`
	// struct type was unused dead code and has been removed.)
	var post struct {
		Email, Password string
	}
	this.ParseForm(&post)
	valid := validation.Validation{}
	res := valid.Email(post.Email, "Email")
	if !res.Ok {
		this.ResponseJson(false, "登录失败,邮箱格式不正确")
	}
	ModelUser := models.NewUser()
	// Match on email + MD5(password).
	users, rows, err := ModelUser.UserList(1, 1, "", "", "u.`email`=? and u.`password`=?", post.Email, helper.MyMD5(post.Password))
	if rows == 0 || err != nil {
		if err != nil {
			helper.Logger.Error(err.Error())
		}
		this.ResponseJson(false, "登录失败,邮箱或密码不正确")
	}
	user := users[0]
	this.IsLogin = helper.Interface2Int(user["Id"])
	if this.IsLogin > 0 {
		// Reject accounts disabled by an administrator.
		if info := ModelUser.UserInfo(this.IsLogin); info.Status == false { // banned
			this.ResponseJson(false, "登录失败,您的账号已被管理员禁用")
		}
		this.BaseController.SetCookieLogin(this.IsLogin)
		this.ResponseJson(true, "登录成功")
	}
	this.ResponseJson(false, "登录失败,未知错误!")
}
// Logout resets the login cookie. Ajax requests receive a JSON
// confirmation; ordinary requests are redirected to the home page.
func (this *UserController) Logout() {
	this.ResetCookie()
	reqHeaders := this.Ctx.Request.Header
	if vals, present := reqHeaders["X-Requested-With"]; present && vals[0] == "XMLHttpRequest" {
		this.ResponseJson(true, "退出登录成功")
	}
	this.Redirect("/", 302)
}
// Reg handles member registration [GET/POST]. GET renders the
// registration (or registration-closed) page; POST checks the email
// verification code stored in the session by SendMail, creates the
// account, bumps the site user counter, and signs the new member in.
func (this *UserController) Reg() {
	// Already signed in — nothing to register.
	if this.IsLogin > 0 {
		this.Redirect("/user", 302)
		return
	}
	if this.Ctx.Request.Method == "GET" {
		this.Data["IsUser"] = true
		this.Data["Seo"] = models.NewSeo().GetByPage("PC-Login", "会员注册", "会员注册", "会员注册", this.Sys.Site)
		this.Data["PageId"] = "wenku-reg"
		if this.Sys.IsCloseReg {
			this.TplName = "regclose.html"
		} else {
			this.TplName = "reg.html"
		}
		return
	}
	if this.Sys.IsCloseReg {
		this.ResponseJson(false, "注册失败,站点已关闭注册功能")
	}
	// Verify the emailed code against the values SendMail stored in session.
	email := this.GetString("email")
	code := this.GetString("code")
	sessEmail := fmt.Sprintf("%v", this.GetSession("RegMail"))
	sessCode := fmt.Sprintf("%v", this.GetSession("RegCode"))
	if sessEmail != email || sessCode != code {
		this.ResponseJson(false, "邮箱验证码不正确,请重新输入或重新获取")
	}
	// Create the account. NOTE: Reg returns (err, uid) in that order.
	err, uid := models.NewUser().Reg(
		email,
		this.GetString("username"),
		this.GetString("password"),
		this.GetString("repassword"),
		this.GetString("intro"),
	)
	if err != nil || uid == 0 {
		if err != nil {
			helper.Logger.Error(err.Error())
		}
		this.ResponseJson(false, "注册失败")
	}
	models.Regulate(models.GetTableSys(), "CntUser", 1, "Id=1") // bump site-wide user counter
	// Sign the new member in immediately.
	this.IsLogin = uid
	this.SetCookieLogin(uid)
	this.ResponseJson(true, "会员注册成功")
}
// SendMail emails a 6-digit verification code for registration
// (type=reg) or password recovery (type=findpwd), and stores the code
// plus the address in the session for later verification by Reg/FindPwd.
func (this *UserController) SendMail() {
	// Require an existing session cookie so bare bots can't trigger mail.
	if len(this.Ctx.GetCookie(beego.AppConfig.String("SessionName"))) == 0 {
		this.Redirect("/", 302)
		return
	}
	// Mail type: registration (reg) or find-password (findpwd).
	t := this.GetString("type")
	if t != "reg" && t != "findpwd" {
		this.ResponseJson(false, "邮件发送类型不正确")
	}
	valid := validation.Validation{}
	email := this.GetString("email")
	res := valid.Email(email, "mail")
	if res.Error != nil || !res.Ok {
		this.ResponseJson(false, "邮箱格式不正确")
	}
	// Check whether the address already belongs to a member.
	ModelUser := models.NewUser()
	user := ModelUser.GetUserField(orm.NewCondition().And("email", email))
	// Registration mail: the address must not be registered yet.
	if t == "reg" {
		if user.Id > 0 {
			this.ResponseJson(false, "该邮箱已经被注册会员")
		}
		code := helper.RandStr(6, 0)
		// NOTE(review): debug print leaks the code to stdout — consider removing.
		fmt.Print(code)
		// (original note said mail sending was once commented out for testing)
		err := models.SendMail(email, fmt.Sprintf("%v会员注册验证码", this.Sys.Site), strings.Replace(this.Sys.TplEmailReg, "{code}", code, -1))
		if err != nil {
			helper.Logger.Error("邮件发送失败:%v", err.Error())
			this.ResponseJson(false, "邮件发送失败,请联系管理员检查邮箱配置是否正确")
		}
		this.SetSession("RegMail", email)
		this.SetSession("RegCode", code)
		this.ResponseJson(true, "邮件发送成功,请打开邮箱查看验证码")
	}
	// Find-password mail: the address must belong to an existing member.
	if user.Id == 0 {
		this.ResponseJson(false, "邮箱不存在")
	}
	code := helper.RandStr(6, 0)
	err := models.SendMail(email, fmt.Sprintf("%v找回密码验证码", this.Sys.Site), strings.Replace(this.Sys.TplEmailFindPwd, "{code}", code, -1))
	if err != nil {
		helper.Logger.Error("邮件发送失败:%v", err.Error())
		this.ResponseJson(false, "邮件发送失败,请联系管理员检查邮箱配置是否正确")
	}
	this.SetSession("FindPwdMail", email)
	this.SetSession("FindPwdCode", code)
	this.ResponseJson(true, "邮件发送成功,请打开邮箱查看验证码")
}
// Sign performs the daily check-in: inserts a (Uid, Date) row — the
// insert failing is treated as "already signed today" (presumably a
// unique index on Uid+Date; TODO confirm) — then credits this.Sys.Sign
// coins and records a coin-log entry.
func (this *UserController) Sign() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "签到失败,请先登录")
	}
	var data = models.Sign{
		Uid:  this.IsLogin,
		Date: time.Now().Format("20060102"), // yyyymmdd — one row per user per day
	}
	_, err := orm.NewOrm().Insert(&data)
	if err != nil {
		this.ResponseJson(false, "签到失败,您今天已签到")
	}
	// Credit coins; only write the log entry if the credit succeeded.
	if err = models.Regulate(models.GetTableUserInfo(), "Coin", this.Sys.Sign, fmt.Sprintf("Id=%v", this.IsLogin)); err == nil {
		log := models.CoinLog{
			Uid:  this.IsLogin,
			Coin: this.Sys.Sign,
			Log:  fmt.Sprintf("于%v签到成功,增加 %v 个金币", time.Now().Format("2006-01-02 15:04:05"), this.Sys.Sign),
		}
		models.NewCoinLog().LogRecord(log)
	}
	this.ResponseJson(true, fmt.Sprintf("恭喜您,今日签到成功,领取了 %v 个金币", this.Sys.Sign))
}
// CheckLogin answers, as JSON, whether the visitor is signed in.
func (this *UserController) CheckLogin() {
	if loggedIn := this.BaseController.IsLogin > 0; loggedIn {
		this.ResponseJson(true, "已登录")
	}
	this.ResponseJson(false, "您当前处于未登录状态,请先登录")
}
// CreateCollectFolder creates a collect folder, or edits an existing
// one when the posted Id is > 0. An optional "Cover" upload is saved
// locally under ./uploads/<date>/<uid>/ and mirrored to OSS.
func (this *UserController) CreateCollectFolder() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "您当前未登录,请先登录")
	}
	cover := ""
	timestamp := int(time.Now().Unix())
	// Accept an optional cover image upload; absence is not an error.
	f, fh, err := this.GetFile("Cover")
	if err == nil {
		defer f.Close()
		slice := strings.Split(fh.Filename, ".")
		ext := slice[len(slice)-1]
		dir := fmt.Sprintf("./uploads/%v/%v/", time.Now().Format("2006-01-02"), this.IsLogin)
		os.MkdirAll(dir, 0777)
		// Hashed filename avoids collisions between users/uploads.
		file := helper.MyMD5(fmt.Sprintf("%v-%v-%v", timestamp, this.IsLogin, fh.Filename)) + "." + ext
		err = this.SaveToFile("Cover", dir+file)
		if err == nil {
			// Move the saved image to OSS (object storage).
			err = models.NewOss().MoveToOss(dir+file, file, true, true)
			helper.Logger.Debug(dir + file)
			if err != nil {
				helper.Logger.Error(err.Error())
			}
			cover = file
		}
	}
	// Folder record to insert or update.
	folder := models.CollectFolder{
		Uid:         this.IsLogin,
		Title:       this.GetString("Title"),
		Description: this.GetString("Description"),
		TimeCreate:  int(time.Now().Unix()),
		Cnt:         0,
		Cover:       cover,
	}
	// Id > 0 means editing an existing folder.
	folder.Id, _ = this.GetInt("Id")
	if folder.Id > 0 { // edit: only update Title/Description (+Cover if uploaded)
		cols := []string{"Title", "Description"}
		if len(cover) > 0 {
			cols = append(cols, "Cover")
		}
		if _, err = orm.NewOrm().Update(&folder, cols...); err == nil {
			this.ResponseJson(true, "收藏夹编辑成功")
		}
	} else { // create: on success bump the user's folder counter
		if _, err = orm.NewOrm().Insert(&folder); err == nil {
			models.Regulate(models.GetTableUserInfo(), "Collect", 1, "Id=?", this.IsLogin)
			this.ResponseJson(true, "收藏夹创建成功")
		}
	}
	if err != nil {
		helper.Logger.Error(err.Error())
		this.ResponseJson(false, "操作失败,请重试")
	}
	this.ResponseJson(true, "操作成功")
}
// FindPwd resets a forgotten password [GET/POST]. POST validates the
// form, checks the emailed code stored in session (FindPwdMail/Code),
// verifies that username and email belong to the same account, writes
// the new MD5 password, then invalidates the session code.
func (this *UserController) FindPwd() {
	// Signed-in users don't need password recovery.
	if this.IsLogin > 0 {
		this.Redirect("/user", 302)
		return
	}
	if this.Ctx.Request.Method == "GET" {
		this.Data["Seo"] = models.NewSeo().GetByPage("PC-Findpwd", "找回密码", "找回密码", "找回密码", this.Sys.Site)
		this.Data["IsUser"] = true
		this.Data["PageId"] = "wenku-reg"
		this.TplName = "findpwd.html"
		return
	}
	// Form validation rules.
	rules := map[string][]string{
		"username":   {"required", "mincount:2", "maxcount:16"},
		"email":      {"required", "email"},
		"code":       {"required", "len:6"},
		"password":   {"required", "mincount:6"},
		"repassword": {"required", "mincount:6"},
	}
	params, errs := helper.Valid(this.Ctx.Request.Form, rules)
	if len(errs) > 0 {
		// Report the first matching field error.
		if _, ok := errs["username"]; ok {
			this.ResponseJson(false, "用户名限2-16个字符")
		}
		if _, ok := errs["email"]; ok {
			this.ResponseJson(false, "邮箱格式不正确")
		}
		if _, ok := errs["code"]; ok {
			this.ResponseJson(false, "请输入6位验证码")
		}
		if _, ok := errs["password"]; ok {
			this.ResponseJson(false, "密码长度,至少6个字符")
		}
		if _, ok := errs["repassword"]; ok {
			this.ResponseJson(false, "密码长度,至少6个字符")
		}
	}
	// The emailed code and address must match the session values.
	if fmt.Sprintf("%v", this.GetSession("FindPwdMail")) != params["email"].(string) || fmt.Sprintf("%v", this.GetSession("FindPwdCode")) != params["code"].(string) {
		this.ResponseJson(false, "验证码不正确,修改密码失败")
	}
	pwd := helper.MyMD5(params["password"].(string))
	repwd := helper.MyMD5(params["repassword"].(string))
	if pwd != repwd {
		this.ResponseJson(false, "确认密码和密码不一致")
	}
	// Username and email must identify the same account.
	user := models.NewUser().GetUserField(orm.NewCondition().And("Email", params["email"]))
	if user.Id == 0 || user.Username != params["username"].(string) {
		this.ResponseJson(false, "重置密码失败,用户名与邮箱不匹配")
	}
	_, err := models.UpdateByIds("user", "Password", pwd, user.Id)
	if err != nil {
		helper.Logger.Error(err.Error())
		this.ResponseJson(false, "重置密码失败,请刷新页面重试")
	}
	// Invalidate the code after use.
	this.DelSession("FindPwdMail")
	this.DelSession("FindPwdCode")
	this.ResponseJson(true, "重置密码成功,请重新登录")
}
// DocDel moves a document into the recycle bin (soft delete).
// NOTE(review): ownership enforcement is assumed to happen inside
// RemoveToRecycle via this.IsLogin — confirm.
func (this *UserController) DocDel() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "请先登录")
	}
	docid, _ := this.GetInt(":doc")
	if docid == 0 {
		this.ResponseJson(false, "删除失败,文档不存在")
	}
	errs := models.NewDocumentRecycle().RemoveToRecycle(this.IsLogin, true, docid)
	if len(errs) > 0 {
		helper.Logger.Error("删除失败:%v", strings.Join(errs, "; "))
		this.ResponseJson(false, "删除失败,文档不存在")
	}
	this.ResponseJson(true, "删除成功")
}
//文档编辑
func (this *UserController) DocEdit() {
if this.IsLogin == 0 {
this.Redirect("/user", 302)
}
docId, _ := this.GetInt(":doc")
if docId == 0 {
this.Redirect("/user", 302)
}
info := models.DocumentInfo{Id: docId}
err := orm.NewOrm().Read(&info)
if err != nil {
helper.Logger.Error(err.Error())
this.Redirect("/user", 302)
}
if info.Uid != this.IsLogin { // 文档所属用户id与登录的用户id不一致
this.Redirect("/user", 302)
}
doc := models.Document{Id: docId}
// POST
if this.Ctx.Request.Method == "POST" {
ruels := map[string][]string{
"Title": {"required", "unempty"},
"Chanel": {"required", "gt:0", "int"},
"Pid": {"required", "gt:0", "int"},
"Cid": {"required", "gt:0", "int"},
"Tags": {"required"},
"Intro": {"required"},
"Price": {"required", "int"},
}
params, errs := helper.Valid(this.Ctx.Request.Form, ruels)
if len(errs) > 0 {
this.ResponseJson(false, "参数错误")
}
doc.Title = params["Title"].(string)
doc.Keywords = params["Tags"].(string)
doc.Description = params["Intro"].(string)
info.Pid = params["Pid"].(int)
info.Cid = params["Cid"].(int)
info.ChanelId = params["Chanel"].(int)
info.Price = params["Price"].(int)
info.TimeUpdate = int(time.Now().U | = orm.NewOrm().Read(&doc)
if err != nil {
helper.Logger.Error(err.Error())
this.Redirect("/user", 302)
}
cond := orm.NewCondition().And("status", 1)
data, _, _ := models.GetList(models.GetTableCategory(), 1, 2000, cond, "sort")
this.Data["User"], _, _ = models.NewUser().GetById(this.IsLogin)
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
this.Data["IsUser"] = true
this.Data["Cates"], _ = conv.InterfaceToJson(data)
this.Data["json"] = data
this.Data["PageId"] = "wenku-user"
this.Data["Info"] = info
this.Data["Doc"] = doc
this.Data["Tab"] = "doc"
this.TplName = "edit.html"
}
// CollectFolderDel deletes one of the current member's collect folders
// (:cid). DelFolder is passed the login uid so members can only delete
// their own folders.
func (this *UserController) CollectFolderDel() {
	cid, _ := this.GetInt(":cid")
	if cid > 0 && this.IsLogin > 0 {
		err := models.NewCollect().DelFolder(cid, this.IsLogin)
		if err != nil {
			helper.Logger.Error(err.Error())
			this.ResponseJson(false, err.Error())
		}
		this.ResponseJson(true, "收藏夹删除成功")
	}
	this.ResponseJson(false, "删除失败,参数错误")
}
// CollectCancel removes a document (:did) from one of the current
// member's collect folders (:cid).
func (this *UserController) CollectCancel() {
	cid, _ := this.GetInt(":cid")
	did, _ := this.GetInt(":did")
	if err := models.NewCollect().Cancel(did, cid, this.IsLogin); err != nil {
		helper.Logger.Error(err.Error())
		// Fix: message previously read "可能您为收藏该文档" (typo 为 → 未).
		this.ResponseJson(false, "移除收藏失败,可能您未收藏该文档")
	}
	this.ResponseJson(true, "移除收藏成功")
}
//更换头像
func (this *UserController) Avatar() {
if this.IsLogin == 0 {
this.ResponseJson(false, "请先登录")
}
//dir := fmt.Sprintf("./uploads/%v/%v", time.Now().Format("2006-01-02"), this.IsLogin)
dir := fmt.Sprintf("./static/header")
os.MkdirAll(dir, 0777)
f, fh, err := this.GetFile("Avatar")
if err != nil {
helper.Logger.Error("用户(%v)更新头像失败:%v", this.IsLogin, err.Error())
this.ResponseJson(false, "头像文件上传失败")
}
defer f.Close()
slice := strings.Split(fh.Filename, ".")
ext := strings.ToLower(slice[len(slice)-1])
if !(ext == "jpg" || ext == "jpeg" || ext == "png" || ext == "gif") {
this.ResponseJson(false, "头像图片格式只支持jpg、jpeg、png和gif")
}
tmpFile := dir + "/" + helper.MyMD5(fmt.Sprintf("%v-%v-%v", fh.Filename, this.IsLogin, time.Now().Unix())) + "." + ext
//saveFile := helper.MyMD5(tmpFile) + "." + ext 进行了二次加密
saveFile := helper.MyMD5(fmt.Sprintf("%v-%v-%v", fh.Filename, this.IsLogin, time.Now().Unix())) + "." + ext
err = this.SaveToFile("Avatar", tmpFile)
if err != nil {
helper.Logger.Error("用户(%v)头像保存失败:%v", this.IsLogin, err.Error())
this.ResponseJson(false, "头像文件保存失败")
}
err = models.NewOss().MoveToOss(tmpFile, saveFile, true, true)
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "头像文件保存失败")
}
//查询数据库用户数据
var user = models.User{Id: this.IsLogin}
orm.NewOrm().Read(&user)
if len(user.Avatar) > 0 {
//删除原头像图片
go models.NewOss().DelFromOss(true, user.Avatar)
}
user.Avatar = saveFile
rows, err := orm.NewOrm().Update(&user, "Avatar")
if rows > 0 && err == nil {
this.ResponseJson(true, "头像更新成功")
}
if err != nil {
helper.Logger.Error(err.Error())
}
this.ResponseJson(false, "头像更新失败")
}
//编辑个人信息
func (this *UserController) Edit() {
if this.IsLogin == 0 {
this.ResponseJson(false, "请先登录")
}
changepwd := false
cols := []string{"Intro"}
rules := map[string][]string{
"OldPassword": {"required"},
"NewPassword": {"required"},
"RePassword": {"required"},
"Intro": {"required"},
}
params, errs := helper.Valid(this.Ctx.Request.Form, rules)
if len(errs) > 0 {
this.ResponseJson(false, "参数不正确")
}
var user = models.User{Id: this.IsLogin}
orm.NewOrm().Read(&user)
if len(params["OldPassword"].(string)) > 0 || len(params["NewPassword"].(string)) > 0 || len(params["RePassword"].(string)) > 0 {
if len(params["NewPassword"].(string)) < 6 || len(params["RePassword"].(string)) < 6 {
this.ResponseJson(false, "密码长度必须至少6个字符")
}
opwd := helper.MyMD5(params["OldPassword"].(string))
npwd := helper.MyMD5(params["NewPassword"].(string))
rpwd := helper.MyMD5(params["RePassword"].(string))
if user.Password != opwd {
this.ResponseJson(false, "原密码不正确")
}
if npwd != rpwd {
this.ResponseJson(false, "确认密码和新密码必须一致")
}
if opwd == npwd {
this.ResponseJson(false, "确认密码不能与原密码相同")
}
user.Password = rpwd
cols = append(cols, "Password")
changepwd = true
}
user.Intro = params["Intro"].(string)
affected, err := orm.NewOrm().Update(&user, cols...)
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "设置失败,请刷新页面重试")
}
if affected == 0 {
this.ResponseJson(true, "设置失败,可能您未对内容做更改")
}
if changepwd {
this.ResetCookie()
this.ResponseJson(true, "设置成功,您设置了新密码,请重新登录")
}
this.ResponseJson(true, "设置成功")
}
| nix())
orm.NewOrm().Update(&doc, "Title", "Keywords", "Description")
orm.NewOrm().Update(&info, "Pid", "Cid", "ChanelId", "Price")
//原分类-1
models.Regulate(models.GetTableCategory(), "Cnt", -1, fmt.Sprintf("Id in(%v,%v,%v)", info.ChanelId, info.Cid, info.Pid))
//新分类+1
models.Regulate(models.GetTableCategory(), "Cnt", 1, fmt.Sprintf("Id in(%v,%v,%v)", params["Chanel"], params["Cid"], params["Pid"]))
this.ResponseJson(true, "文档编辑成功")
}
// GET
err | identifier_body |
UserController.go | package HomeControllers
import (
"fmt"
"strings"
"time"
"os"
"github.com/TruthHun/DocHub/helper"
"github.com/TruthHun/DocHub/helper/conv"
"github.com/TruthHun/DocHub/models"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"github.com/astaxie/beego/validation"
)
type UserController struct {
BaseController
}
func (this *UserController) Prepare() {
this.BaseController.Prepare()
this.Xsrf()
}
//会员中心
func (this *UserController) Get() {
uid, _ := this.GetInt(":uid")
path := this.GetString(":splat")
params := conv.Path2Map(path)
//排序
sort := "new"
if param, ok := params["sort"]; ok {
sort = param
}
//页码
p := 1
if page, ok := params["p"]; ok {
p = helper.Interface2Int(page)
if p < 1 {
p = 1
}
}
switch sort {
case "dcnt":
sort = "dcnt"
case "score":
sort = "score"
case "vcnt":
sort = "vcnt"
case "ccnt":
sort = "ccnt"
default:
sort = "new"
}
//显示风格
style := "list"
if s, ok := params["style"]; ok {
style = s
}
if style != "th" {
style = "list"
}
//cid:collect folder id ,收藏夹id
cid := 0
if s, ok := params["cid"]; ok {
cid = helper.Interface2Int(s)
}
if p < 1 {
p = 1
}
if uid < 1 {
uid = this.IsLogin
}
this.Data["Uid"] = uid
if uid <= 0 {
this.Redirect("/user/login", 302)
return
}
listRows := 16
user, rows, err := models.NewUser().GetById(uid)
if err != nil {
helper.Logger.Error(err.Error())
}
if rows == 0 {
this.Redirect("/", 302)
return
}
if cid > 0 {
sql := fmt.Sprintf("select Title,Cnt from %v where Id=? limit 1", models.GetTableCollectFolder())
var params []orm.Params
orm.NewOrm().Raw(sql, cid).Values(¶ms)
if len(params) == 0 {
this.Redirect(fmt.Sprintf("/user/%v/collect", uid), 302)
return
}
this.Data["Folder"] = params[0]
fields := "di.Id,di.`Uid`, di.`Cid`, di.`TimeCreate`, di.`Dcnt`, di.`Vcnt`, di.`Ccnt`, di.`Score`, di.`Status`, di.`ChanelId`, di.`Pid`,c.Title Category,u.Username,d.Title,ds.`Md5`, ds.`Ext`, ds.`ExtCate`, ds.`ExtNum`, ds.`Page`, ds.`Size`"
sqlFormat := `
select %v from %v di left join %v u on di.Uid=u.Id
left join %v clt on clt.Did=di.Id
left join %v d on d.Id=di.Id
left join %v c on c.Id=di.cid
left join %v ds on ds.Id=di.DsId
where %v order by %v limit %v,%v
`
sql = fmt.Sprintf(sqlFormat,
fields,
models.GetTableDocumentInfo(),
models.GetTableUser(),
models.GetTableCollect(),
models.GetTableDocument(),
models.GetTableCategory(),
models.GetTableDocumentStore(),
fmt.Sprintf("clt.Cid=%v", cid),
"clt.Id desc",
(p-1)*listRows, listRows,
)
var data []orm.Params
orm.NewOrm().Raw(sql).Values(&data)
this.Data["Lists"] = data
this.Data["Page"] = helper.Paginations(6, helper.Interface2Int(params[0]["Cnt"]), listRows, p, fmt.Sprintf("/user/%v/doc/cid/%v", user["Id"], cid), "sort", sort, "style", style)
} else {
this.Data["Lists"], _, _ = models.GetDocList(uid, 0, 0, 0, p, listRows, sort, 1)
this.Data["Page"] = helper.Paginations(6, helper.Interface2Int(user["Document"]), listRows, p, fmt.Sprintf("/user/%v/doc", user["Id"]), "sort", sort, "style", style)
}
this.Data["Tab"] = "doc"
this.Data["Cid"] = cid
this.Data["User"] = user
this.Data["PageId"] = "wenku-user"
this.Data["IsUser"] = true
this.Data["Sort"] = sort
this.Data["Style"] = style
this.Data["P"] = p
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Doc", "文档列表-会员中心-"+user["Username"].(string), "会员中心,文档列表,"+user["Username"].(string), "文档列表-会员中心-"+user["Username"].(string), this.Sys.Site)
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
if err != nil {
helper.Logger.Error(err.Error())
}
this.TplName = "index.html"
}
//金币记录
func (this *UserController) Coin() {
uid, _ := this.GetInt(":uid")
p, _ := this.GetInt("p", 1)
if p < 1 {
p = 1
}
if uid < 1 {
uid = this.IsLogin
}
if uid <= 0 {
this.Redirect("/user/login", 302)
return
}
listRows := 16
lists, _, _ := models.GetList(models.GetTableCoinLog(), p, listRows, orm.NewCondition().And("Uid", uid), "-Id")
if p > 1 { // 当页码大于0,则以 JSON 返回数据
this.ResponseJson(true, "数据获取成功", lists)
}
user, rows, err := models.NewUser().GetById(uid)
if err != nil {
helper.Logger.Error(err.Error())
}
if rows == 0 {
this.Redirect("/", 302)
return
}
this.Data["Lists"] = lists
this.Data["User"] = user
this.Data["PageId"] = "wenku-user"
this.Data["Tab"] = "coin"
this.Data["IsUser"] = true
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
if err != nil {
helper.Logger.Error(err.Error())
}
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Coin", "财富记录—会员中心-"+user["Username"].(string), "会员中心,财富记录,"+user["Username"].(string), "财富记录—会员中心-"+user["Username"].(string), this.Sys.Site)
this.TplName = "coin.html"
}
// 收藏夹
func (this *UserController) Collect() {
this.Data["Tab"] = "collect"
action := this.GetString("action")
uid, _ := this.GetInt(":uid")
p, _ := this.GetInt("p", 1)
if p < 1 {
p = 1
}
if uid < 1 {
uid = this.IsLogin
}
if uid <= 0 {
this.Redirect("/user/login", 302)
return
}
listRows := 100
lists, _, _ := models.GetList(models.GetTableCollectFolder(), p, listRows, orm.NewCondition().And("Uid", uid), "-Id")
if p > 1 { // 页码大于1,以 JSON 返回数据
this.ResponseJson(true, "数据获取成功", lists)
}
user, rows, err := models.NewUser().GetById(uid)
if err != nil {
helper.Logger.Error(err.Error())
}
if rows == 0 {
this.Redirect("/", 302)
return
}
this.Data["Lists"] = lists
this.Data["User"] = user
this.Data["PageId"] = "wenku-user"
this.Data["IsUser"] = true
this.Data["Uid"] = uid
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
if err != nil {
helper.Logger.Error(err.Error())
}
this.TplName = "collect.html"
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Folder", "收藏夹—会员中心-"+user["Username"].(string), "会员中心,收藏夹,"+user["Username"].(string), "收藏夹—会员中心-"+user["Username"].(string), this.Sys.Site)
if action == "edit" {
this.Data["Edit"] = true
} else {
this.Data["Edit"] = false
}
}
//用户登录
func (this *UserController) Login() {
if this.IsLogin > 0 {
this.Redirect("/user", 302)
return
}
// GET 请求
if this.Ctx.Request.Method == "GET" {
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Login", "会员登录", "会员登录", "会员登录", this.Sys.Site)
this.Data["IsUser"] = true
this.Data["PageId"] = "wenku-reg"
this.TplName = "login.html"
return
}
type Post struct {
Email, Password string
}
var post struct {
Email, Password string
}
this.ParseForm(&post)
valid := validation.Validation{}
res := valid.Email(post.Email, "Email")
if !res.Ok {
this.ResponseJson(false, "登录失败,邮箱格式不正确")
}
ModelUser := models.NewUser()
users, rows, err := ModelUser.UserList(1, 1, "", "", "u.`email`=? and u.`password`=?", post.Email, helper.MyMD5(post.Password))
if rows == 0 || err != nil {
if err != nil {
helper.Logger.Error(err.Error())
}
this.ResponseJson(false, "登录失败,邮箱或密码不正确")
}
user := users[0]
this.IsLogin = helper.Interface2Int(user["Id"])
if this.IsLogin > 0 {
//查询用户有没有被封禁
if info := ModelUser.UserInfo(this.IsLogin); info.Status == false { //被封禁了
this.ResponseJson(false, "登录失败,您的账号已被管理员禁用")
}
this.BaseController.SetCookieLogin(this.IsLogin)
this.ResponseJson(true, "登录成功")
}
this.ResponseJson(false, "登录失败,未知错误!")
}
//用户退出登录
func (this *UserController) Logout() {
this.ResetCookie()
if v, ok := this.Ctx.Request.Header["X-Requested-With"]; ok && v[0] == "XMLHttpRequest" {
this.ResponseJson(true, "退出登录成功")
}
this.Redirect("/", 302)
}
//会员注册[GET/POST]
func (this *UserController) Reg() {
if this.IsLogin > 0 {
this.Redirect("/user", 302)
return
}
if this.Ctx.Request.Method == "GET" {
this.Data["IsUser"] = true
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Login", "会员注册", "会员注册", "会员注册", this.Sys.Site)
this.Data["PageId"] = "wenku-reg"
if this.Sys.IsCloseReg {
this.TplName = "regclose.html"
} else {
this.TplName = "reg.html"
}
return
}
if this.Sys.IsCloseReg {
this.ResponseJson(false, "注册失败,站点已关闭注册功能")
}
//先验证邮箱验证码是否正确
email := this.GetString("email")
code := this.GetString("code")
sessEmail := fmt.Sprintf("%v", this.GetSession("RegMail"))
sessCode := fmt.Sprintf("%v", this.GetSession("RegCode"))
if sessEmail != email || sessCode != code {
this.ResponseJson(false, "邮箱验证码不正确,请重新输入或重新获取")
}
// 注册
err, uid := models.NewUser().Reg(
email,
this.GetString("username"),
this.GetString("password"),
this.GetString("repassword"),
this.GetString("intro"),
)
if err != nil || uid == 0 {
if err != nil {
helper.Logger.Error(err.Error())
}
this.ResponseJson(false, "注册失败")
}
models.Regulate(models.GetTableSys(), "CntUser", 1, "Id=1") //站点用户数量增加
this.IsLogin = uid
this.SetCookieLogin(uid)
this.ResponseJson(true, "会员注册成功")
}
// 发送邮件
func (this *UserController) SendMail() {
if len(this.Ctx.GetCookie(beego.AppConfig.String("SessionName"))) == 0 {
this.Redirect("/", 302)
return
}
//发送邮件的类型:注册(reg)和找回密码(findpwd)
t := this.GetString("type")
if t != "reg" && t != "findpwd" {
this.ResponseJson(false, "邮件发送类型不正确")
}
valid := validation.Validation{}
email := this.GetString("email")
res := valid.Email(email, "mail")
if res.Error != nil || !res.Ok {
this.ResponseJson(false, "邮箱格式不正确")
}
//检测邮箱是否已被注册
ModelUser := models.NewUser()
user := ModelUser.GetUserField(orm.NewCondition().And("email", email))
//注册邮件
if t == "reg" {
if user.Id > 0 {
this.ResponseJson(false, "该邮箱已经被注册会员")
}
code := helper.RandStr(6, 0)
fmt.Print(code)
//发送验证是否成功注释掉了
err := models.SendMail(email, fmt.Sprintf("%v会员注册验证码", this.Sys.Site), strings.Replace(this.Sys.TplEmailReg, "{code}", code, -1))
if err != nil {
helper.Logger.Error("邮件发送失败:%v", err.Error())
this.ResponseJson(false, "邮件发送失败,请联系管理员检查邮箱配置是否正确")
}
this.SetSession("RegMail", email)
this.SetSession("RegCode", code)
this.ResponseJson(true, "邮件发送成功,请打开邮箱查看验证码")
}
// 找回密码
if user.Id == 0 {
this.ResponseJson(false, "邮箱不存在")
}
code := helper.RandStr(6, 0)
err := models.SendMail(email, fmt.Sprintf("%v找回密码验证码", this.Sys.Site), strings.Replace(this.Sys.TplEmailFindPwd, "{code}", code, -1))
if err != nil {
helper.Logger.Error("邮件发送失败:%v", err.Error())
this.ResponseJson(false, "邮件发送失败,请联系管理员检查邮箱配置是否正确")
}
this.SetSession("FindPwdMail", email)
this.SetSession("FindPwdCode", code)
this.ResponseJson(true, "邮件发送成功,请打开邮箱查看验证码")
}
//会员签到,增加金币
func (this *UserController) Sign() {
if this.IsLogin == 0 {
this.ResponseJson(false, "签到失败,请先登录")
}
var data = models.Sign{
Uid: this.IsLogin,
Date: time.Now().Format("20060102"),
}
_, err := orm.NewOrm().Insert(&data)
if err != nil {
this.ResponseJson(false, "签到失败,您今天已签到")
}
if err = models.Regulate(models.GetTableUserInfo(), "Coin", this.Sys.Sign, fmt.Sprintf("Id=%v", this.IsLogin)); err == nil {
log := models.CoinLog{
Uid: this.IsLogin,
Coin: this.Sys.Sign,
Log: fmt.Sprintf("于%v签到成功,增加 %v 个金币", time.Now().Format("2006-01-02 15:04:05"), this.Sys.Sign),
}
models.NewCoinLog().LogRecord(log)
}
this.ResponseJson(true, fmt.Sprintf("恭喜您,今日签到成功,领取了 %v 个金币", this.Sys.Sign))
}
// 检测用户是否已登录
func (this *UserController) CheckLogin() {
if this.BaseController.IsLogin > 0 {
this.ResponseJson(true, "已登录")
}
this.ResponseJson(false, "您当前处于未登录状态,请先登录")
}
// 创建收藏夹
func (this *UserController) CreateCollectFolder() {
if this.IsLogin == 0 {
this.ResponseJson(false, "您当前未登录,请先登录")
}
cover := ""
timestamp := int(time.Now().Unix())
//文件在文档库中未存在,则接收文件并做处理
f, fh, err := this.GetFile("Cover")
if err == nil {
defer f.Close()
slice := strings.Split(fh.Filename, ".")
ext := slice[len(slice)-1]
dir := fmt.Sprintf("./uploads/%v/%v/", time.Now().Format("2006-01-02"), this.IsLogin)
os.MkdirAll(dir, 0777)
file := helper.MyMD5(fmt.Sprintf("%v-%v-%v", timestamp, this.IsLogin, fh.Filename)) + "." + ext
err = this.SaveToFile("Cover", dir+file)
if err == nil {
//将图片移动到OSS
err = models.NewOss().MoveToOss(dir+file, file, true, true)
helper.Logger.Debug(dir + file)
if err != nil {
helper.Logger.Error(err.Error())
}
cover = file
}
}
// 收藏夹
folder := models.CollectFolder{
Uid: this.IsLogin,
Title: this.GetString("Title"),
Description: this.GetString("Description"),
TimeCreate: int(time.Now().Unix()),
Cnt: 0,
Cover: cover,
}
// 收藏夹 Id 大于0,则表示编辑收藏夹
folder.Id, _ = this.GetInt("Id")
if folder.Id > 0 { // 编辑收藏夹
cols := []string{"Title", "Description"}
if len(cover) > 0 {
cols = append(cols, "Cover")
}
if _, err = orm.NewOrm().Update(&folder, cols...); err == nil {
this.ResponseJson(true, "收藏夹编辑成功")
}
} else { // 创建收藏夹
if _, err = orm.NewOrm().Insert(&folder); err == nil { //收藏夹数量+1
models.Regulate(models.GetTableUserInfo(), "Collect", 1, "Id=?", this.IsLogin)
this.ResponseJson(true, "收藏夹创建成功")
}
}
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "操作失败,请重试")
}
this.ResponseJson(true, "操作成功")
}
// 找回密码
func (this *UserController) FindPwd() {
if this.IsLogin > 0 {
this.Redirect("/user", 302)
return
}
if this.Ctx.Request.Method == "GET" {
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Findpwd", "找回密码", "找回密码", "找回密码", this.Sys.Site)
this.Data["IsUser"] = true
this.Data["PageId"] = "wenku-reg"
this.TplName = "findpwd.html"
return
}
rules := map[string][]string{
"username": {"required", "mincount:2", "maxcount:16"},
"email": {"required", "email"},
"code": {"required", "len:6"},
"password": {"required", "mincount:6"},
"repassword": {"required", "mincount:6"},
}
params, errs := helper.Valid(this.Ctx.Request.Form, rules)
if len(errs) > 0 {
if _, ok := errs["username"]; ok {
this.ResponseJson(false, "用户名限2-16个字符")
}
if _, ok := errs["email"]; ok {
this.ResponseJson(false, "邮箱格式不正确")
}
if _, ok := errs["code"]; ok {
this.ResponseJson(false, "请输入6位验证码")
}
if _, ok := errs["password"]; ok {
this.ResponseJson(false, "密码长度,至少6个字符")
}
if _, ok := errs["repassword"]; ok {
this.ResponseJson(false, "密码长度,至少6个字符")
}
}
//校验验证码和邮箱是否匹配
if fmt.Sprintf("%v", this.GetSession("FindPwdMail")) != params["email"].(string) || fmt.Sprintf("%v", this.GetSession("FindPwdCode")) != params["code"].(string) {
this.ResponseJson(false, "验证码不正确,修改密码失败")
}
pwd := helper.MyMD5(params["password"].(string))
repwd := helper.MyMD5(params["repassword"].(string))
if pwd != repwd {
this.ResponseJson(false, "确认密码和密码不一致")
}
user := models.NewUser().GetUserField(orm.NewCondition().And("Email", params["email"]))
if user.Id == 0 || user.Username != params["username"].(string) {
this.ResponseJson(false, "重置密码失败,用户名与邮箱不匹配")
}
_, err := models.UpdateByIds("user", "Password", pwd, user.Id)
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "重置密码失败,请刷新页面重试")
}
this.DelSession("FindPwdMail")
this.DelSession("FindPwdCode")
this.ResponseJson(true, "重置密码成功,请重新登录")
}
//删除文档
func (this *UserController) DocDel() {
if this.IsLogin == 0 {
this.ResponseJson(false, "请先登录")
}
docid, _ := this.GetInt(":doc")
if docid == 0 {
this.ResponseJson(false, "删除失败,文档不存在")
}
errs := models.NewDocumentRecycle().RemoveToRecycle(this.IsLogin, true, docid)
if len(errs) > 0 {
helper.Logger.Error("删除失败:%v", strings.Join(errs, "; "))
this.ResponseJson(false, "删除失败,文档不存在")
}
this.ResponseJson(true, "删除成功")
}
//文档编辑
func (this *UserController) DocEdit() {
if this.IsLogin == 0 {
this.Redirect("/user", 302)
}
docId, _ := this.GetInt(":doc")
if docId == 0 {
this.Redirect("/user", 302)
}
info := models.DocumentInfo{Id: docId}
err := orm.NewOrm().Read(&info)
if err != nil {
helper.Logger.Error(err.Error())
this.Redirect("/user", 302)
}
if info.Uid != this.IsLogin { // 文档所属用户id与登录的用户id不一致
this.Redirect("/user", 302)
}
doc := models.Document{Id: docId}
// POST
if this.Ctx.Request.Method == "POST" {
ruels := map[string][]string{
"Title": {"required", "unempty"},
"Chanel": {"required", "gt:0", "int"},
"Pid": {"required", "gt:0", "int"},
"Cid": {"required", "gt:0", "int"},
"Tags": {"required"},
"Intro": {"required"},
"Price": {"required", "int"},
}
params, errs := helper.Valid(this.Ctx.Request.Form, ruels)
if len(errs) > 0 {
this.ResponseJson(false, "参数错误")
}
doc.Title = params["Title"].(string)
doc.Keywords = params["Tags"].(string)
doc.Description = params["Intro"].(string)
info.Pid = params["Pid"].(int)
info.Cid = params["Cid"].(int)
info.ChanelId = params["Chanel"].(int)
info.Price = params["Price"].(int)
info.TimeUpdate = int(tim | ).Unix())
orm.NewOrm().Update(&doc, "Title", "Keywords", "Description")
orm.NewOrm().Update(&info, "Pid", "Cid", "ChanelId", "Price")
//原分类-1
models.Regulate(models.GetTableCategory(), "Cnt", -1, fmt.Sprintf("Id in(%v,%v,%v)", info.ChanelId, info.Cid, info.Pid))
//新分类+1
models.Regulate(models.GetTableCategory(), "Cnt", 1, fmt.Sprintf("Id in(%v,%v,%v)", params["Chanel"], params["Cid"], params["Pid"]))
this.ResponseJson(true, "文档编辑成功")
}
// GET
err = orm.NewOrm().Read(&doc)
if err != nil {
helper.Logger.Error(err.Error())
this.Redirect("/user", 302)
}
cond := orm.NewCondition().And("status", 1)
data, _, _ := models.GetList(models.GetTableCategory(), 1, 2000, cond, "sort")
this.Data["User"], _, _ = models.NewUser().GetById(this.IsLogin)
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
this.Data["IsUser"] = true
this.Data["Cates"], _ = conv.InterfaceToJson(data)
this.Data["json"] = data
this.Data["PageId"] = "wenku-user"
this.Data["Info"] = info
this.Data["Doc"] = doc
this.Data["Tab"] = "doc"
this.TplName = "edit.html"
}
//删除收藏(针对收藏夹)
func (this *UserController) CollectFolderDel() {
cid, _ := this.GetInt(":cid")
if cid > 0 && this.IsLogin > 0 {
err := models.NewCollect().DelFolder(cid, this.IsLogin)
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, err.Error())
}
this.ResponseJson(true, "收藏夹删除成功")
}
this.ResponseJson(false, "删除失败,参数错误")
}
//取消收藏(针对文档)
func (this *UserController) CollectCancel() {
cid, _ := this.GetInt(":cid")
did, _ := this.GetInt(":did")
if err := models.NewCollect().Cancel(did, cid, this.IsLogin); err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "移除收藏失败,可能您为收藏该文档")
}
this.ResponseJson(true, "移除收藏成功")
}
//更换头像
func (this *UserController) Avatar() {
if this.IsLogin == 0 {
this.ResponseJson(false, "请先登录")
}
//dir := fmt.Sprintf("./uploads/%v/%v", time.Now().Format("2006-01-02"), this.IsLogin)
dir := fmt.Sprintf("./static/header")
os.MkdirAll(dir, 0777)
f, fh, err := this.GetFile("Avatar")
if err != nil {
helper.Logger.Error("用户(%v)更新头像失败:%v", this.IsLogin, err.Error())
this.ResponseJson(false, "头像文件上传失败")
}
defer f.Close()
slice := strings.Split(fh.Filename, ".")
ext := strings.ToLower(slice[len(slice)-1])
if !(ext == "jpg" || ext == "jpeg" || ext == "png" || ext == "gif") {
this.ResponseJson(false, "头像图片格式只支持jpg、jpeg、png和gif")
}
tmpFile := dir + "/" + helper.MyMD5(fmt.Sprintf("%v-%v-%v", fh.Filename, this.IsLogin, time.Now().Unix())) + "." + ext
//saveFile := helper.MyMD5(tmpFile) + "." + ext 进行了二次加密
saveFile := helper.MyMD5(fmt.Sprintf("%v-%v-%v", fh.Filename, this.IsLogin, time.Now().Unix())) + "." + ext
err = this.SaveToFile("Avatar", tmpFile)
if err != nil {
helper.Logger.Error("用户(%v)头像保存失败:%v", this.IsLogin, err.Error())
this.ResponseJson(false, "头像文件保存失败")
}
err = models.NewOss().MoveToOss(tmpFile, saveFile, true, true)
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "头像文件保存失败")
}
//查询数据库用户数据
var user = models.User{Id: this.IsLogin}
orm.NewOrm().Read(&user)
if len(user.Avatar) > 0 {
//删除原头像图片
go models.NewOss().DelFromOss(true, user.Avatar)
}
user.Avatar = saveFile
rows, err := orm.NewOrm().Update(&user, "Avatar")
if rows > 0 && err == nil {
this.ResponseJson(true, "头像更新成功")
}
if err != nil {
helper.Logger.Error(err.Error())
}
this.ResponseJson(false, "头像更新失败")
}
//编辑个人信息
func (this *UserController) Edit() {
if this.IsLogin == 0 {
this.ResponseJson(false, "请先登录")
}
changepwd := false
cols := []string{"Intro"}
rules := map[string][]string{
"OldPassword": {"required"},
"NewPassword": {"required"},
"RePassword": {"required"},
"Intro": {"required"},
}
params, errs := helper.Valid(this.Ctx.Request.Form, rules)
if len(errs) > 0 {
this.ResponseJson(false, "参数不正确")
}
var user = models.User{Id: this.IsLogin}
orm.NewOrm().Read(&user)
if len(params["OldPassword"].(string)) > 0 || len(params["NewPassword"].(string)) > 0 || len(params["RePassword"].(string)) > 0 {
if len(params["NewPassword"].(string)) < 6 || len(params["RePassword"].(string)) < 6 {
this.ResponseJson(false, "密码长度必须至少6个字符")
}
opwd := helper.MyMD5(params["OldPassword"].(string))
npwd := helper.MyMD5(params["NewPassword"].(string))
rpwd := helper.MyMD5(params["RePassword"].(string))
if user.Password != opwd {
this.ResponseJson(false, "原密码不正确")
}
if npwd != rpwd {
this.ResponseJson(false, "确认密码和新密码必须一致")
}
if opwd == npwd {
this.ResponseJson(false, "确认密码不能与原密码相同")
}
user.Password = rpwd
cols = append(cols, "Password")
changepwd = true
}
user.Intro = params["Intro"].(string)
affected, err := orm.NewOrm().Update(&user, cols...)
if err != nil {
helper.Logger.Error(err.Error())
this.ResponseJson(false, "设置失败,请刷新页面重试")
}
if affected == 0 {
this.ResponseJson(true, "设置失败,可能您未对内容做更改")
}
if changepwd {
this.ResetCookie()
this.ResponseJson(true, "设置成功,您设置了新密码,请重新登录")
}
this.ResponseJson(true, "设置成功")
}
| e.Now( | identifier_name |
UserController.go | package HomeControllers
import (
"fmt"
"strings"
"time"
"os"
"github.com/TruthHun/DocHub/helper"
"github.com/TruthHun/DocHub/helper/conv"
"github.com/TruthHun/DocHub/models"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"github.com/astaxie/beego/validation"
)
type UserController struct {
BaseController
}
func (this *UserController) Prepare() {
this.BaseController.Prepare()
this.Xsrf()
}
//会员中心
func (this *UserController) Get() {
uid, _ := this.GetInt(":uid")
path := this.GetString(":splat")
params := conv.Path2Map(path)
//排序
sort := "new"
if param, ok := params["sort"]; ok {
sort = param
}
//页码
p := 1
if page, ok := params["p"]; ok {
p = helper.Interface2Int(page)
if p < 1 {
p = 1
}
}
switch sort {
case "dcnt":
sort = "dcnt"
case "score":
sort = "score"
case "vcnt":
sort = "vcnt"
case "ccnt":
sort = "ccnt"
default:
sort = "new"
}
//显示风格
style := "list"
if s, ok := params["style"]; ok {
style = s
}
if style != "th" {
style = "list"
}
//cid:collect folder id ,收藏夹id
cid := 0
if s, ok := params["cid"]; ok {
cid = helper.Interface2Int(s)
}
if p < 1 {
p = 1
}
if uid < 1 {
uid = this.IsLogin
}
this.Data["Uid"] = uid
if uid <= 0 {
this.Redirect("/user/login", 302)
return
}
listRows := 16
user, rows, err := models.NewUser().GetById(uid)
if err != nil {
helper.Logger.Error(err.Error())
}
if rows == 0 {
this.Redirect("/", 302)
return
}
if cid > 0 {
sql := fmt.Sprintf("select Title,Cnt from %v where Id=? limit 1", models.GetTableCollectFolder())
var params []orm.Params
orm.NewOrm().Raw(sql, cid).Values(¶ms)
if len(params) == 0 {
this.Redirect(fmt.Sprintf("/user/%v/collect", uid), 302)
return
}
this.Data["Folder"] = params[0]
fields := "di.Id,di.`Uid`, di.`Cid`, di.`TimeCreate`, di.`Dcnt`, di.`Vcnt`, di.`Ccnt`, di.`Score`, di.`Status`, di.`ChanelId`, di.`Pid`,c.Title Category,u.Username,d.Title,ds.`Md5`, ds.`Ext`, ds.`ExtCate`, ds.`ExtNum`, ds.`Page`, ds.`Size`"
sqlFormat := `
select %v from %v di left join %v u on di.Uid=u.Id
left join %v clt on clt.Did=di.Id
left join %v d on d.Id=di.Id
left join %v c on c.Id=di.cid
left join %v ds on ds.Id=di.DsId
where %v order by %v limit %v,%v
`
sql = fmt.Sprintf(sqlFormat,
fields,
models.GetTableDocumentInfo(),
models.GetTableUser(),
models.GetTableCollect(),
models.GetTableDocument(),
models.GetTableCategory(),
models.GetTableDocumentStore(),
fmt.Sprintf("clt.Cid=%v", cid),
"clt.Id desc",
(p-1)*listRows, listRows,
)
var data []orm.Params
orm.NewOrm().Raw(sql).Values(&data)
this.Data["Lists"] = data
this.Data["Page"] = helper.Paginations(6, helper.Interface2Int(params[0]["Cnt"]), listRows, p, fmt.Sprintf("/user/%v/doc/cid/%v", user["Id"], cid), "sort", sort, "style", style)
} else {
this.Data["Lists"], _, _ = models.GetDocList(uid, 0, 0, 0, p, listRows, sort, 1)
this.Data["Page"] = helper.Paginations(6, helper.Interface2Int(user["Document"]), listRows, p, fmt.Sprintf("/user/%v/doc", user["Id"]), "sort", sort, "style", style)
}
this.Data["Tab"] = "doc"
this.Data["Cid"] = cid
this.Data["User"] = user
this.Data["PageId"] = "wenku-user"
this.Data["IsUser"] = true
this.Data["Sort"] = sort
this.Data["Style"] = style
this.Data["P"] = p
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Doc", "文档列表-会员中心-"+user["Username"].(string), "会员中心,文档列表,"+user["Username"].(string), "文档列表-会员中心-"+user["Username"].(string), this.Sys.Site)
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
if err != nil {
helper.Logger.Error(err.Error())
}
this.TplName = "index.html"
}
//金币记录
func (this *UserController) Coin() {
uid, _ := this.GetInt(":uid")
p, _ := this.GetInt("p", 1)
if p < 1 {
p = 1
}
if uid < 1 {
uid = this.IsLogin
}
if uid <= 0 {
this.Redirect("/user/login", 302)
return
}
listRows := 16
lists, _, _ := models.GetList(models.GetTableCoinLog(), p, listRows, orm.NewCondition().And("Uid", uid), "-Id")
if p > 1 { // 当页码大于0,则以 JSON 返回数据
this.ResponseJson(true, "数据获取成功", lists)
}
user, rows, err := models.NewUser().GetById(uid)
if err != nil {
helper.Logger.Error(err.Error())
}
if rows == 0 {
this.Redirect("/", 302)
return
}
this.Data["Lists"] = lists
this.Data["User"] = user
this.Data["PageId"] = "wenku-user"
this.Data["Tab"] = "coin"
this.Data["IsUser"] = true
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
if err != nil {
helper.Logger.Error(err.Error())
}
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Coin", "财富记录—会员中心-"+user["Username"].(string), "会员中心,财富记录,"+user["Username"].(string), "财富记录—会员中心-"+user["Username"].(string), this.Sys.Site)
this.TplName = "coin.html"
}
// 收藏夹
func (this *UserController) Collect() {
this.Data["Tab"] = "collect"
action := this.GetString("action")
uid, _ := this.GetInt(":uid")
p, _ := this.GetInt("p", 1)
if p < 1 {
p = 1
}
if uid < 1 {
uid = this.IsLogin
}
if uid <= 0 {
this.Redirect("/user/login", 302)
return
}
listRows := 100
lists, _, _ := models.GetList(models.GetTableCollectFolder(), p, listRows, orm.NewCondition().And("Uid", uid), "-Id")
if p > 1 { // 页码大于1,以 JSON 返回数据
this.ResponseJson(true, "数据获取成功", lists)
}
user, rows, err := models.NewUser().GetById(uid)
if err != nil {
helper.Logger.Error(err.Error())
}
if rows == 0 {
this.Redirect("/", 302)
return
}
this.Data["Lists"] = lists
this.Data["User"] = user
this.Data["PageId"] = "wenku-user"
this.Data["IsUser"] = true
this.Data["Uid"] = uid
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
if err != nil {
helper.Logger.Error(err.Error())
}
this.TplName = "collect.html"
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Ucenter-Folder", "收藏夹—会员中心-"+user["Username"].(string), "会员中心,收藏夹,"+user["Username"].(string), "收藏夹—会员中心-"+user["Username"].(string), this.Sys.Site)
if action == "edit" {
this.Data["Edit"] = true
} else {
this.Data["Edit"] = false
}
}
//用户登录
func (this *UserController) Login() {
if this.IsLogin > 0 {
this.Redirect("/user", 302)
return
}
// GET 请求
if this.Ctx.Request.Method == "GET" {
this.Data["Seo"] = models.NewSeo().GetByPage("PC-Login", "会员登录", "会员登录", "会员登录", this.Sys.Site)
this.Data["IsUser"] = true
this.Data["PageId"] = "wenku-reg"
this.TplName = "login.html"
return
}
type Post struct {
Email, Password string
}
var post struct {
Email, Password string
}
this.ParseForm(&post)
valid := validation.Validation{}
res := valid.Email(post.Email, "Email")
if !res.Ok {
this.ResponseJson(false, "登录失败,邮箱格式不正确")
}
ModelUser := models.NewUser()
users, rows, err := ModelUser.UserList(1, 1, "", "", "u.`email`=? and u.`password`=?", post.Email, helper.MyMD5(post.Password))
if rows == 0 || err != nil {
if err != nil {
helper.Logger.Error(err.Error())
}
this.ResponseJson(false, "登录失败,邮箱或密码不正确")
}
user := users[0]
this.IsLogin = helper.Interface2Int(user["Id"])
if this.IsLogin > 0 {
//查询用户有没有被封禁
if info := ModelUser.UserInfo(this.IsLogin); info.Status == false { //被封禁了
this.ResponseJson(false, "登录失败,您的账号已被管理员禁用")
}
this.BaseController.SetCookieLogin(this.IsLogin)
this.ResponseJson(true, "登录成功")
}
this.ResponseJson(false, "登录失败,未知错误!")
}
//用户退出登录
// Logout clears the login cookie, answers AJAX callers with JSON and
// redirects everyone else back to the home page.
func (this *UserController) Logout() {
	this.ResetCookie()
	headerValues, present := this.Ctx.Request.Header["X-Requested-With"]
	isAjax := present && headerValues[0] == "XMLHttpRequest"
	if isAjax {
		this.ResponseJson(true, "退出登录成功")
	}
	this.Redirect("/", 302)
}
//会员注册[GET/POST]
// Reg renders the registration page (GET) or creates a new account (POST).
// POST flow: verify the emailed code stored in the session by SendMail,
// create the user, bump the site-wide user counter, and log the member in.
// NOTE(review): failure branches rely on ResponseJson aborting the request.
func (this *UserController) Reg() {
	// Already logged in — nothing to register.
	if this.IsLogin > 0 {
		this.Redirect("/user", 302)
		return
	}
	if this.Ctx.Request.Method == "GET" {
		this.Data["IsUser"] = true
		this.Data["Seo"] = models.NewSeo().GetByPage("PC-Login", "会员注册", "会员注册", "会员注册", this.Sys.Site)
		this.Data["PageId"] = "wenku-reg"
		// Admins can close registration; show the "closed" page instead.
		if this.Sys.IsCloseReg {
			this.TplName = "regclose.html"
		} else {
			this.TplName = "reg.html"
		}
		return
	}
	if this.Sys.IsCloseReg {
		this.ResponseJson(false, "注册失败,站点已关闭注册功能")
	}
	//先验证邮箱验证码是否正确 — compare against the session values SendMail stored.
	email := this.GetString("email")
	code := this.GetString("code")
	sessEmail := fmt.Sprintf("%v", this.GetSession("RegMail"))
	sessCode := fmt.Sprintf("%v", this.GetSession("RegCode"))
	if sessEmail != email || sessCode != code {
		this.ResponseJson(false, "邮箱验证码不正确,请重新输入或重新获取")
	}
	// 注册 — create the account; uid == 0 also counts as failure.
	err, uid := models.NewUser().Reg(
		email,
		this.GetString("username"),
		this.GetString("password"),
		this.GetString("repassword"),
		this.GetString("intro"),
	)
	if err != nil || uid == 0 {
		if err != nil {
			helper.Logger.Error(err.Error())
		}
		this.ResponseJson(false, "注册失败")
	}
	models.Regulate(models.GetTableSys(), "CntUser", 1, "Id=1") //站点用户数量增加
	this.IsLogin = uid
	this.SetCookieLogin(uid)
	this.ResponseJson(true, "会员注册成功")
}
// 发送邮件
// SendMail emails a 6-digit verification code for registration ("reg") or
// password recovery ("findpwd"), and stores the code + address in the
// session for the follow-up request (Reg / FindPwd) to check.
// A missing session cookie redirects home instead.
func (this *UserController) SendMail() {
	if len(this.Ctx.GetCookie(beego.AppConfig.String("SessionName"))) == 0 {
		this.Redirect("/", 302)
		return
	}
	//发送邮件的类型:注册(reg)和找回密码(findpwd)
	t := this.GetString("type")
	if t != "reg" && t != "findpwd" {
		this.ResponseJson(false, "邮件发送类型不正确")
	}
	valid := validation.Validation{}
	email := this.GetString("email")
	res := valid.Email(email, "mail")
	if res.Error != nil || !res.Ok {
		this.ResponseJson(false, "邮箱格式不正确")
	}
	//检测邮箱是否已被注册
	ModelUser := models.NewUser()
	user := ModelUser.GetUserField(orm.NewCondition().And("email", email))
	//注册邮件 — for registration the address must NOT already belong to a member.
	if t == "reg" {
		if user.Id > 0 {
			this.ResponseJson(false, "该邮箱已经被注册会员")
		}
		code := helper.RandStr(6, 0)
		fmt.Print(code) // NOTE(review): debug print of the secret code; consider removing.
		//发送验证是否成功注释掉了
		err := models.SendMail(email, fmt.Sprintf("%v会员注册验证码", this.Sys.Site), strings.Replace(this.Sys.TplEmailReg, "{code}", code, -1))
		if err != nil {
			helper.Logger.Error("邮件发送失败:%v", err.Error())
			this.ResponseJson(false, "邮件发送失败,请联系管理员检查邮箱配置是否正确")
		}
		this.SetSession("RegMail", email)
		this.SetSession("RegCode", code)
		this.ResponseJson(true, "邮件发送成功,请打开邮箱查看验证码")
	}
	// 找回密码 — for recovery the address MUST belong to an existing member.
	if user.Id == 0 {
		this.ResponseJson(false, "邮箱不存在")
	}
	code := helper.RandStr(6, 0)
	err := models.SendMail(email, fmt.Sprintf("%v找回密码验证码", this.Sys.Site), strings.Replace(this.Sys.TplEmailFindPwd, "{code}", code, -1))
	if err != nil {
		helper.Logger.Error("邮件发送失败:%v", err.Error())
		this.ResponseJson(false, "邮件发送失败,请联系管理员检查邮箱配置是否正确")
	}
	this.SetSession("FindPwdMail", email)
	this.SetSession("FindPwdCode", code)
	this.ResponseJson(true, "邮件发送成功,请打开邮箱查看验证码")
}
//会员签到,增加金币
// Sign performs the daily check-in for the logged-in user and rewards
// Sys.Sign coins, recording the grant in the coin log.
// NOTE(review): assumes a uniqueness constraint on the sign row per
// (Uid, Date) — any Insert error is reported as "already signed today";
// confirm against the models.Sign schema.
func (this *UserController) Sign() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "签到失败,请先登录")
	}
	var data = models.Sign{
		Uid:  this.IsLogin,
		Date: time.Now().Format("20060102"), // one row per user per calendar day
	}
	_, err := orm.NewOrm().Insert(&data)
	if err != nil {
		// Insert conflict ⇒ the user already signed in today.
		this.ResponseJson(false, "签到失败,您今天已签到")
	}
	// Credit the coins; only log the grant when the balance update succeeded.
	if err = models.Regulate(models.GetTableUserInfo(), "Coin", this.Sys.Sign, fmt.Sprintf("Id=%v", this.IsLogin)); err == nil {
		log := models.CoinLog{
			Uid:  this.IsLogin,
			Coin: this.Sys.Sign,
			Log:  fmt.Sprintf("于%v签到成功,增加 %v 个金币", time.Now().Format("2006-01-02 15:04:05"), this.Sys.Sign),
		}
		models.NewCoinLog().LogRecord(log)
	}
	this.ResponseJson(true, fmt.Sprintf("恭喜您,今日签到成功,领取了 %v 个金币", this.Sys.Sign))
}
// 检测用户是否已登录
// CheckLogin reports the current session's login state as a JSON reply.
func (this *UserController) CheckLogin() {
	loggedIn := this.BaseController.IsLogin > 0
	if loggedIn {
		this.ResponseJson(true, "已登录")
	}
	this.ResponseJson(false, "您当前处于未登录状态,请先登录")
}
// 创建收藏夹
// CreateCollectFolder creates a new collect folder or, when the posted Id
// is > 0, edits an existing one. An optional "Cover" upload is saved under
// ./uploads/<date>/<uid>/ and then moved to OSS; its stored name replaces
// the old cover on edit.
func (this *UserController) CreateCollectFolder() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "您当前未登录,请先登录")
	}
	cover := ""
	timestamp := int(time.Now().Unix())
	//文件在文档库中未存在,则接收文件并做处理
	f, fh, err := this.GetFile("Cover")
	if err == nil { // a cover image was uploaded with the form
		defer f.Close()
		slice := strings.Split(fh.Filename, ".")
		ext := slice[len(slice)-1]
		dir := fmt.Sprintf("./uploads/%v/%v/", time.Now().Format("2006-01-02"), this.IsLogin)
		os.MkdirAll(dir, 0777)
		// Content-addressed file name: md5(timestamp-uid-original name).ext
		file := helper.MyMD5(fmt.Sprintf("%v-%v-%v", timestamp, this.IsLogin, fh.Filename)) + "." + ext
		err = this.SaveToFile("Cover", dir+file)
		if err == nil {
			//将图片移动到OSS — best effort: failure is logged but not fatal.
			err = models.NewOss().MoveToOss(dir+file, file, true, true)
			helper.Logger.Debug(dir + file)
			if err != nil {
				helper.Logger.Error(err.Error())
			}
			cover = file
		}
	}
	// 收藏夹 — the row to insert or update.
	folder := models.CollectFolder{
		Uid:         this.IsLogin,
		Title:       this.GetString("Title"),
		Description: this.GetString("Description"),
		TimeCreate:  int(time.Now().Unix()),
		Cnt:         0,
		Cover:       cover,
	}
	// 收藏夹 Id 大于0,则表示编辑收藏夹
	folder.Id, _ = this.GetInt("Id")
	if folder.Id > 0 { // 编辑收藏夹 — only replace the cover if a new one arrived
		cols := []string{"Title", "Description"}
		if len(cover) > 0 {
			cols = append(cols, "Cover")
		}
		if _, err = orm.NewOrm().Update(&folder, cols...); err == nil {
			this.ResponseJson(true, "收藏夹编辑成功")
		}
	} else { // 创建收藏夹
		if _, err = orm.NewOrm().Insert(&folder); err == nil { //收藏夹数量+1
			models.Regulate(models.GetTableUserInfo(), "Collect", 1, "Id=?", this.IsLogin)
			this.ResponseJson(true, "收藏夹创建成功")
		}
	}
	// Shared failure path for both branches above.
	if err != nil {
		helper.Logger.Error(err.Error())
		this.ResponseJson(false, "操作失败,请重试")
	}
	this.ResponseJson(true, "操作成功")
}
// 找回密码
// FindPwd resets a member's password. GET renders the form; POST validates
// the fields, checks the emailed code stored in the session (see SendMail),
// verifies username and email belong to the same account, then writes the
// new MD5 password and clears the one-time session keys.
func (this *UserController) FindPwd() {
	if this.IsLogin > 0 {
		this.Redirect("/user", 302)
		return
	}
	if this.Ctx.Request.Method == "GET" {
		this.Data["Seo"] = models.NewSeo().GetByPage("PC-Findpwd", "找回密码", "找回密码", "找回密码", this.Sys.Site)
		this.Data["IsUser"] = true
		this.Data["PageId"] = "wenku-reg"
		this.TplName = "findpwd.html"
		return
	}
	// Field validation rules; each failing field gets its own message below.
	rules := map[string][]string{
		"username":   {"required", "mincount:2", "maxcount:16"},
		"email":      {"required", "email"},
		"code":       {"required", "len:6"},
		"password":   {"required", "mincount:6"},
		"repassword": {"required", "mincount:6"},
	}
	params, errs := helper.Valid(this.Ctx.Request.Form, rules)
	if len(errs) > 0 {
		if _, ok := errs["username"]; ok {
			this.ResponseJson(false, "用户名限2-16个字符")
		}
		if _, ok := errs["email"]; ok {
			this.ResponseJson(false, "邮箱格式不正确")
		}
		if _, ok := errs["code"]; ok {
			this.ResponseJson(false, "请输入6位验证码")
		}
		if _, ok := errs["password"]; ok {
			this.ResponseJson(false, "密码长度,至少6个字符")
		}
		if _, ok := errs["repassword"]; ok {
			this.ResponseJson(false, "密码长度,至少6个字符")
		}
	}
	//校验验证码和邮箱是否匹配
	if fmt.Sprintf("%v", this.GetSession("FindPwdMail")) != params["email"].(string) || fmt.Sprintf("%v", this.GetSession("FindPwdCode")) != params["code"].(string) {
		this.ResponseJson(false, "验证码不正确,修改密码失败")
	}
	pwd := helper.MyMD5(params["password"].(string))
	repwd := helper.MyMD5(params["repassword"].(string))
	if pwd != repwd {
		this.ResponseJson(false, "确认密码和密码不一致")
	}
	// The username supplied must match the account owning this email.
	user := models.NewUser().GetUserField(orm.NewCondition().And("Email", params["email"]))
	if user.Id == 0 || user.Username != params["username"].(string) {
		this.ResponseJson(false, "重置密码失败,用户名与邮箱不匹配")
	}
	_, err := models.UpdateByIds("user", "Password", pwd, user.Id)
	if err != nil {
		helper.Logger.Error(err.Error())
		this.ResponseJson(false, "重置密码失败,请刷新页面重试")
	}
	// Invalidate the one-time verification code.
	this.DelSession("FindPwdMail")
	this.DelSession("FindPwdCode")
	this.ResponseJson(true, "重置密码成功,请重新登录")
}
//删除文档
// DocDel moves one of the current user's documents into the recycle bin.
// The document id comes from the ":doc" route parameter.
func (this *UserController) DocDel() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "请先登录")
	}
	docId, _ := this.GetInt(":doc")
	if docId == 0 {
		this.ResponseJson(false, "删除失败,文档不存在")
	}
	if errs := models.NewDocumentRecycle().RemoveToRecycle(this.IsLogin, true, docId); len(errs) > 0 {
		helper.Logger.Error("删除失败:%v", strings.Join(errs, "; "))
		this.ResponseJson(false, "删除失败,文档不存在")
	}
	this.ResponseJson(true, "删除成功")
}
//文档编辑
func (this *UserController) DocEdit() {
if this.IsLogin == 0 {
this.Redirect("/user", 302)
}
docId, _ := this.GetInt(":doc")
if docId == 0 {
this.Redirect("/user", 302)
}
info := models.DocumentInfo{Id: docId}
err := orm.NewOrm().Read(&info)
if err != nil {
helper.Logger.Error(err.Error())
this.Redirect("/user", 302)
}
if info.Uid != this.IsLogin { // 文档所属用户id与登录的用户id不一致
this.Redirect("/user", 302)
}
doc := models.Document{Id: docId}
// POST
if this.Ctx.Request.Method == "POST" {
ruels := map[string][]string{
"Title": {"required", "unempty"},
"Chanel": {"required", "gt:0", "int"},
"Pid": {"required", "gt:0", "int"},
"Cid": {"required", "gt:0", "int"},
"Tags": {"required"},
"Intro": {"required"},
"Price": {"required", "int"},
}
params, errs := helper.Valid(this.Ctx.Request.Form, ruels)
if len(errs) > 0 {
this.ResponseJson(false, "参数错误")
}
doc.Title = params["Title"].(string)
doc.Keywords = params["Tags"].(string)
doc.Description = params["Intro"].(string)
info.Pid = params["Pid"].(int)
info.Cid = params["Cid"].(int)
info.ChanelId = params["Chanel"].(int)
info.Price = params["Price"].(int)
info.TimeUpdate = int(time.Now().Unix())
orm.NewOrm().Update(&doc, "Title", "Keywords", "Description")
orm.NewOrm().Update(&info, "Pid", "Cid", "ChanelId", "Price")
//原分类-1
models.Regulate(models.GetTableCategory(), "Cnt", -1, fmt.Sprintf("Id in(%v,%v,%v)", info.ChanelId, info.Cid, info.Pid))
//新分类+1
models.Regulate(models.GetTableCategory(), "Cnt", 1, fmt.Sprintf("Id in(%v,%v,%v)", params["Chanel"], params["Cid"], params["Pid"]))
this.ResponseJson(true, "文档编辑成功")
}
// GET
err = orm.NewOrm().Read(&doc)
if err != nil {
helper.Logger.Error(err.Error())
| cond := orm.NewCondition().And("status", 1)
data, _, _ := models.GetList(models.GetTableCategory(), 1, 2000, cond, "sort")
this.Data["User"], _, _ = models.NewUser().GetById(this.IsLogin)
this.Data["Ranks"], _, err = models.NewUser().UserList(1, 8, "i.Document desc", "u.Id,u.Username,u.Avatar,u.Intro,i.Document", "i.Status=1")
this.Data["IsUser"] = true
this.Data["Cates"], _ = conv.InterfaceToJson(data)
this.Data["json"] = data
this.Data["PageId"] = "wenku-user"
this.Data["Info"] = info
this.Data["Doc"] = doc
this.Data["Tab"] = "doc"
this.TplName = "edit.html"
}
//删除收藏(针对收藏夹)
// CollectFolderDel deletes one of the logged-in user's collect folders.
// The folder id comes from the ":cid" route parameter.
func (this *UserController) CollectFolderDel() {
	folderId, _ := this.GetInt(":cid")
	if folderId > 0 && this.IsLogin > 0 {
		if err := models.NewCollect().DelFolder(folderId, this.IsLogin); err != nil {
			helper.Logger.Error(err.Error())
			this.ResponseJson(false, err.Error())
		}
		this.ResponseJson(true, "收藏夹删除成功")
	}
	this.ResponseJson(false, "删除失败,参数错误")
}
//取消收藏(针对文档)
// CollectCancel removes a single document (":did") from one of the
// current user's collect folders (":cid").
func (this *UserController) CollectCancel() {
	folderId, _ := this.GetInt(":cid")
	docId, _ := this.GetInt(":did")
	err := models.NewCollect().Cancel(docId, folderId, this.IsLogin)
	if err != nil {
		helper.Logger.Error(err.Error())
		this.ResponseJson(false, "移除收藏失败,可能您为收藏该文档")
	}
	this.ResponseJson(true, "移除收藏成功")
}
//更换头像
// Avatar replaces the logged-in user's avatar image: validate the upload,
// store it under ./static/header, push it to OSS, delete the previous
// avatar asynchronously and update the user record.
// Fixes: the target dir used fmt.Sprintf with no formatting verbs
// (staticcheck S1039), and the file name hash was computed twice with two
// separate time.Now().Unix() calls, so the local temp name and the OSS
// object name could diverge across a second boundary — it is now computed
// exactly once and shared.
func (this *UserController) Avatar() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "请先登录")
	}
	//dir := fmt.Sprintf("./uploads/%v/%v", time.Now().Format("2006-01-02"), this.IsLogin)
	dir := "./static/header" // fixed destination directory
	os.MkdirAll(dir, 0777)
	f, fh, err := this.GetFile("Avatar")
	if err != nil {
		helper.Logger.Error("用户(%v)更新头像失败:%v", this.IsLogin, err.Error())
		this.ResponseJson(false, "头像文件上传失败")
	}
	defer f.Close()
	// Whitelist the extension taken from the original filename.
	slice := strings.Split(fh.Filename, ".")
	ext := strings.ToLower(slice[len(slice)-1])
	if !(ext == "jpg" || ext == "jpeg" || ext == "png" || ext == "gif") {
		this.ResponseJson(false, "头像图片格式只支持jpg、jpeg、png和gif")
	}
	// Hash once so the temp file and the OSS object share the same name.
	saveFile := helper.MyMD5(fmt.Sprintf("%v-%v-%v", fh.Filename, this.IsLogin, time.Now().Unix())) + "." + ext
	tmpFile := dir + "/" + saveFile
	err = this.SaveToFile("Avatar", tmpFile)
	if err != nil {
		helper.Logger.Error("用户(%v)头像保存失败:%v", this.IsLogin, err.Error())
		this.ResponseJson(false, "头像文件保存失败")
	}
	err = models.NewOss().MoveToOss(tmpFile, saveFile, true, true)
	if err != nil {
		helper.Logger.Error(err.Error())
		this.ResponseJson(false, "头像文件保存失败")
	}
	//查询数据库用户数据
	var user = models.User{Id: this.IsLogin}
	orm.NewOrm().Read(&user)
	if len(user.Avatar) > 0 {
		//删除原头像图片 — fire-and-forget removal of the superseded object.
		go models.NewOss().DelFromOss(true, user.Avatar)
	}
	user.Avatar = saveFile
	rows, err := orm.NewOrm().Update(&user, "Avatar")
	if rows > 0 && err == nil {
		this.ResponseJson(true, "头像更新成功")
	}
	if err != nil {
		helper.Logger.Error(err.Error())
	}
	this.ResponseJson(false, "头像更新失败")
}
//编辑个人信息
// Edit updates the logged-in member's intro and, optionally, password.
// A password change requires old/new/confirm: old must match the stored
// MD5, new must equal confirm and differ from old. On a successful
// password change the cookie is reset to force re-login.
// NOTE(review): the rules mark all fields "required", yet the code treats
// empty password fields as "no password change" — confirm helper.Valid's
// semantics for empty values.
func (this *UserController) Edit() {
	if this.IsLogin == 0 {
		this.ResponseJson(false, "请先登录")
	}
	changepwd := false
	cols := []string{"Intro"} // columns to update; "Password" appended on demand
	rules := map[string][]string{
		"OldPassword": {"required"},
		"NewPassword": {"required"},
		"RePassword":  {"required"},
		"Intro":       {"required"},
	}
	params, errs := helper.Valid(this.Ctx.Request.Form, rules)
	if len(errs) > 0 {
		this.ResponseJson(false, "参数不正确")
	}
	var user = models.User{Id: this.IsLogin}
	orm.NewOrm().Read(&user)
	// Any password field filled in ⇒ the user wants a password change.
	if len(params["OldPassword"].(string)) > 0 || len(params["NewPassword"].(string)) > 0 || len(params["RePassword"].(string)) > 0 {
		if len(params["NewPassword"].(string)) < 6 || len(params["RePassword"].(string)) < 6 {
			this.ResponseJson(false, "密码长度必须至少6个字符")
		}
		opwd := helper.MyMD5(params["OldPassword"].(string))
		npwd := helper.MyMD5(params["NewPassword"].(string))
		rpwd := helper.MyMD5(params["RePassword"].(string))
		if user.Password != opwd {
			this.ResponseJson(false, "原密码不正确")
		}
		if npwd != rpwd {
			this.ResponseJson(false, "确认密码和新密码必须一致")
		}
		if opwd == npwd {
			this.ResponseJson(false, "确认密码不能与原密码相同")
		}
		user.Password = rpwd
		cols = append(cols, "Password")
		changepwd = true
	}
	user.Intro = params["Intro"].(string)
	affected, err := orm.NewOrm().Update(&user, cols...)
	if err != nil {
		helper.Logger.Error(err.Error())
		this.ResponseJson(false, "设置失败,请刷新页面重试")
	}
	if affected == 0 {
		this.ResponseJson(true, "设置失败,可能您未对内容做更改")
	}
	if changepwd {
		// Force re-login with the new password.
		this.ResetCookie()
		this.ResponseJson(true, "设置成功,您设置了新密码,请重新登录")
	}
	this.ResponseJson(true, "设置成功")
}
| this.Redirect("/user", 302)
}
| conditional_block |
nosuicide.py | import os
import multiprocessing
import datetime as dt
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.feature_selection import chi2, SelectPercentile, mutual_info_classif, SelectFromModel
from scipy.sparse import lil_matrix, vstack, hstack
import pymorphy2
from sklearn.decomposition import TruncatedSVD
def write_data(data):
    """Write (id, probability) rows to out.csv next to this script.

    Each element of ``data`` is an indexable pair: data[i][0] is the
    sample id, data[i][1] the predicted probability.
    Fix: the explicit outf.close() was removed — the ``with`` block
    already closes the file.
    """
    out_path = os.path.abspath(os.path.dirname(__file__) + './out.csv')
    with open(out_path, 'w') as outf:
        outf.write('id,prob\n')
        for itm in data:
            outf.write(str(int(itm[0])) + ',' + str(float(itm[1])) + '\n')
def normalize_text(txt):
    """Lemmatize a UTF-8 byte string with pymorphy2, word by word.

    Fix: the original concatenated normal forms with NO separator,
    destroying word boundaries in the output (the normalized corpora
    files are named "withspaces", so boundaries are clearly wanted);
    words are now re-joined with single spaces.
    NOTE(review): MorphAnalyzer is constructed on every call, which is
    expensive — hoist it to module level if this ever becomes hot.
    """
    myan = pymorphy2.MorphAnalyzer()
    normalized = []
    for w in txt.decode('utf8').split(' '):
        normalized.append(myan.parse(w.lower())[0].normal_form)
    return ' '.join(normalized).encode('utf8')
def normalize_array(data):
    """Lemmatize every text entry of ``data`` in place and return it."""
    for position in range(len(data)):
        data[position] = normalize_text(data[position])
    return data
def geld_data(X, Y, vectorizer, Classifier):
    """Cull training samples the classifier itself gets badly wrong.

    Fits ``Classifier`` on (X, Y), predicts probabilities for the same X,
    and keeps only rows whose predicted probability lies within
    ``threshold`` of the true label.  Returns the filtered (X, Y) pair
    with Y as a fresh numpy array.

    NOTE(review): threshold = 1 - 1/n_features**2 is ~1.0 for any
    realistic feature count, so in practice almost every row passes —
    confirm this is the intended culling strength.
    """
    # culling?
    #return (X, Y)
    print('WTF2', X.shape, Y.shape)
    # Self-prediction: train and score on the very same matrix.
    myTmpClassifier = Classifier(X = X, Y = Y, test_data = X, vectorizer = vectorizer)
    new_X_train = None  # rows kept so far (sparse, stacked incrementally)
    new_Y_train = []
    threshold = 1 - 1 / X.shape[1] ** 2
    my_full_estimates = myTmpClassifier.run_crosscheck()
    my_estimates = my_full_estimates['proba']
    for idx, row in enumerate(my_estimates):
        if abs(row - Y[idx]) <= threshold:
            # Prediction close enough to the label: keep the sample.
            if new_X_train is None:
                new_X_train = X[idx]
            else:
                new_X_train = vstack([new_X_train, X[idx]])
            new_Y_train.append(Y[idx])
        else:
            pass
            #print(row)
            #print(td[1][idx])
    new_Y_train = np.array(new_Y_train)
    print('SHAPE', new_X_train.shape)
    return (new_X_train, new_Y_train)
def load_text_data(total_pts, labels = True, **args):
    """Load id/text (and optional label) rows from a CSV next to this script.

    args['filename'] names the file.  ``index`` starts at -1 so the header
    line is skipped; at most ``total_pts`` rows are stored.  Returns a
    structured numpy array X with fields ('id', 'text'); with labels=True
    also returns a float label array Y as the tuple (X, Y).
    NOTE(review): the naive split(',') assumes the text column itself
    contains no commas — confirm against how the CSVs were generated.
    """
    with open(os.path.abspath(os.path.dirname(__file__) + './' + args['filename']), 'r') as td:
        index = -1
        X = np.zeros(
            (total_pts),
            dtype = [('id', 'u2'), ('text', 'S16000')]
        )
        if labels:
            Y = np.zeros(total_pts)
        for line in td:
            sl = line.split(',')
            if index > -1 and index < total_pts:
                X[index] = (sl[0], sl[1].encode('utf8'))
                if labels:
                    Y[index] = sl[2]
            index += 1
        td.close()  # redundant inside "with"; kept as-is
    if labels:
        return (X, Y)
    else:
        return X
def vectorize_data(**args):
    """Vectorize train and test text with one shared vocabulary.

    Expects args['td'] / args['testd'] as equally sized structured arrays
    (fields 'id', 'text'), plus optional 'stop_words' and a pre-configured
    'vectorizer'.  The vectorizer is fit on the concatenation of BOTH
    corpora (inside CustomTextVectorizer), then each set is transformed
    separately.  Returns (train_vectors, test_vectors, fitted_vectorizer).
    """
    total_data_pts = len(args['td'])
    # Stack train and test into one array so they share the vocabulary.
    all_docs = np.zeros(
        total_data_pts * 2,
        dtype = [('id', 'u2'), ('text', 'S16000')]
    )
    all_docs[:total_data_pts] = args['td']
    all_docs[total_data_pts:] = args['testd']
    myvec = CustomTextVectorizer(
        data = all_docs,
        stop_words = args.get('stop_words'),
        vectorizer = args.get('vectorizer'),
    )
    current_vectorizer = myvec.vectorize(args['td']['text'])
    tv = myvec.dump()
    myvec.vectorize(args['testd']['text'])
    testv = myvec.dump()
    return (tv, testv, current_vectorizer)
class CustomTextVectorizer():
    """Wrapper around an sklearn text vectorizer with CSV dump support.

    Fits the vectorizer on args['data']['text'] at construction time so
    that later vectorize() calls (train and test) share one vocabulary.
    """

    def __init__(self, **args):
        # data: structured array with 'id' and 'text' fields (train+test).
        self.data = args['data']
        self.labels = args.get('labels')          # optional labels for write()
        self.stop_words = args.get('stop_words')
        self.fn = args.get('fn')                  # output filename for write()
        self.vecs = None                          # last vectorize() result
        if self.fn is None:
            self.fn = 'training_vecs.csv'
        # best so far
        # stop_words = self.stop_words,
        # ngram_range = (2, 3),
        # max_df = .2,
        # max_features = 16000
        # after normalization & new stop words:
        # stop_words = self.stop_words,
        # ngram_range = (2, 3),
        # max_df = .7,
        # max_features = 128000
        # ~.5 precision & recall on test data
        # works with culling
        # analyzer = 'char',
        # stop_words = self.stop_words,
        # ngram_range = (3, 4),
        # max_df = .7,
        # max_features = 6000
        # 48k features best on train set.
        if args.get('vectorizer') is not None:
            self.vectorizer = args.get('vectorizer')
        else:
            # Default configuration used when no vectorizer is injected.
            self.vectorizer = TfidfVectorizer(
                stop_words = self.stop_words,
                ngram_range = (2, 3),
                max_df = .9,
                max_features = 16000
            )
        self.vectorizer.fit(self.data['text'])
        print(len(self.vectorizer.vocabulary_.items()))

    def vectorize(self, data):
        """Transform ``data`` with the fitted vectorizer; returns the vectorizer."""
        self.vecs = self.vectorizer.transform(data)
        return self.vectorizer

    def write(self):
        """Dump the last vectorize() result (densified) as a CSV file."""
        with open(os.path.abspath(os.path.dirname(__file__) + './' + self.fn), 'w') as outf:
            outf.write('id,' + ','.join(['f' + str(i) for i in range(len(self.vecs.toarray()[0]))]) + ',label\n')
            for index, itm in enumerate(self.vecs.toarray()):
                current_row = str(self.data['id'][index]) + ',' + ','.join(list(str(f) for f in itm))
                if self.labels is not None:
                    current_row += ',' + str(int(self.labels[index]))
                outf.write(current_row + '\n')
            outf.close()  # redundant inside "with"; kept as-is

    def dump(self):
        """Return the most recent vectorize() result (sparse matrix)."""
        return self.vecs
class StrangeClassifier():
    """Fit a classifier on (X, Y) and predict probabilities for test_data.

    With ``ids`` supplied, run() returns an (n, 2) array of [id, proba]
    rows for submission; without ids, run_crosscheck() returns a dict of
    predicted 'labels' and positive-class 'proba' for evaluation.
    """

    def __init__(self, **args):
        self.ids = args.get('ids')            # sample ids => submission mode
        self.X = args['X']
        self.Y = args['Y']
        self.test_data = args['test_data']
        print('EFFIN_TEST_DATA', self.test_data.shape)
        self.total_data_pts = self.test_data.shape[0]
        self.estimates = np.zeros((self.total_data_pts, 2))  # [id, proba]
        self.crosscheck_estimates = None
        self.calc_start = dt.datetime.now()   # reference point for timing logs
        self.vectorizer = args['vectorizer']

    def estimate(self):
        """Fit BernoulliNB and fill estimates or crosscheck_estimates."""
        # class_prior = [.9, .1] - we dunnno
        classifier = BernoulliNB()
        #classifier = MultinomialNB(alpha = 0.02)
        #classifier = DecisionTreeClassifier(class_weight = { 0: 1, 1: 9 })
        #classifier = KNeighborsClassifier(n_neighbors=50, metric='minkowski', p=3)
        # classifier = RandomForestClassifier(
        #     max_depth = 32,
        #     n_estimators = 64,
        #     max_features = 0.25,
        #     class_weight = { 0: 1, 1: 9 },
        #     n_jobs = 3
        # )
        classifier.fit(self.X, self.Y)
        if self.calc_start is not None:
            print('Fitting time: ' + str((dt.datetime.now() - self.calc_start).total_seconds()) + 's')
        #if self.ids is not None:
        results_proba = classifier.predict_proba(self.test_data)
        if self.calc_start is not None:
            print('Prediction time: ' + str((dt.datetime.now() - self.calc_start).total_seconds()) + 's')
        print(results_proba[:100])
        if self.ids is not None:
            # Submission mode: pair each id with its positive-class proba.
            self.estimates[:, 0] = self.ids
            self.estimates[:, 1] = results_proba[:, 1]
        else:
            # Evaluation mode: keep both hard labels and probabilities.
            self.crosscheck_estimates = {
                'labels': classifier.predict(self.test_data),
                'proba': results_proba[:, 1]
            }
        # inverted_vocab = {_id:w for (w,_id) in self.vectorizer.vocabulary_.items() }
        # for _id in classifier.coef_.argsort()[0][-50:]:
        #     print(inverted_vocab[_id], classifier.coef_[0][_id])

    def run(self):
        """Submission entry point: estimate and return the [id, proba] array."""
        self.estimate()
        print(self.estimates)
        return self.estimates

    def run_crosscheck(self):
        """Evaluation entry point: estimate and return the labels/proba dict."""
        self.estimate()
        print(self.crosscheck_estimates)
        return self.crosscheck_estimates
def run_process(X, Y, test_data, ids, vectorizer):
    """Worker entry point: fit a StrangeClassifier and return [id, proba] estimates."""
    return StrangeClassifier(
        X = X, Y = Y, test_data = test_data, ids = ids, vectorizer = vectorizer
    ).run()
def run_process_crosscheck(X, Y, test_data, vectorizer):
    """Worker entry point for evaluation: return the labels/proba dict."""
    return StrangeClassifier(
        X = X, Y = Y, test_data = test_data, vectorizer = vectorizer
    ).run_crosscheck()
def run_normalizer(vec):
    # Multiprocessing-friendly wrapper so normalize_array can be handed
    # to Pool.apply_async / map as a top-level picklable function.
    return normalize_array(vec)
if __name__ == '__main__':
total_data_pts = 13944
crosscheck = True
stop_words = [
'а',
#'атьс',
'ах',
'бы',
'быть',
'в',
'вать',
'во',
'вот',
'всего',
'всё',
'вы',
'для',
'да',
'до',
'если',
'ещё',
'ение',
'же',
'за',
'и',
#'иват',
'из',
'ие',
'ия',
'или',
'к',
'ки',
'как',
'ко',
'который',
'кто',
'ку',
'ли',
'лишь',
'между',
'мы',
'на',
'над',
'нибудь',
'никак',
'нный',
'но',
'ну'
#'нять',
'о',
'об',
'овать',
'оват',
'ой',
'ок',
'около',
'он',
'она',
'они',
#'ость',
'от',
'по',
'под',
'практически',
'при',
'про',
'просто',
'с',
#'сить',
'совсем',
'среди',
#'ство',
'так',
'таки',
'тать',
'тем',
'то',
'ты',
'ть',
'тьс',
'ться',
'тот',
'у',
'уже',
'чем',
'что',
'чтобы',
'ься',
#'ывать',
#'ыват',
'это',
'этот',
'src',
'https',
'figure'
]
neg_stop_words = []
for w in stop_words:
neg_stop_words.append('не_' + w)
stop_words = stop_words + neg_stop_words
calc_start = dt.datetime.now()
td = load_text_data(total_data_pts, True, filename='train_normalized_withspaces.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_withspaces.csv')
(tv_norm, testv_norm, vectorizer_norm) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (1, 3),
max_df = .7,
min_df = 3,
#max_features = 96000
),
)
td = load_text_data(total_data_pts, True, filename='train_normalized_just4grams.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_just4grams.csv')
(tv_grams, testv_grams, vectorizer_grams) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (1, 2),
max_df = .9,
min_df = 3,
# max_features = 16000
),
)
td = load_text_data(total_data_pts, True, filename='train_normalized_justpos.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_justpos.csv')
(tv_pos, testv_pos, vectorizer_pos) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (2, 4),
max_df = .9,
min_df = 3,
#max_features = 16000
),
)
tv = hstack([tv_norm, tv_grams, tv_pos])
testv = lil_matrix(hstack([testv_norm, testv_grams, testv_pos]))
# tv = tv_norm
# testv = testv_norm
print('Vectorization time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
X_train, X_test, Y_train, Y_test = train_test_split(tv, td[1], test_size = .3)
# X_test = X_train
# Y_test = Y_train
test_data_pts = Y_test.shape[0]
# test_data_pts = Y_train.shape[0]
current_vectorizer = None
if crosscheck:
print('WTF???', X_train.shape, Y_train.shape)
# minimizer = TruncatedSVD(n_components=250, n_iter=100)
# minimizer.fit(vstack([X_train, X_test]))
# X_train_whooshed = minimizer.transform(X_train)
# X_test_whooshed = minimizer.transform(X_test)
#tmpClassifier = DecisionTreeClassifier(max_depth = 32, class_weight = { 0: 1, 1: 9 })
#tmpClassifier = RandomForestClassifier(
# max_depth = 32,
# n_estimators = 64,
# max_features = 0.25,
# class_weight = { 0: 1, 1: 9 },
# n_jobs = 3
#)
#tmpClassifier = MultinomialNB(alpha = 0.02)
# tmpClassifier = BernoulliNB()
# tmpClassifier.fit(X_train, Y_train)
feature_selector = SelectPercentile(chi2, percentile = 10)
#feature_selector = SelectFromModel(tmpClassifier, prefit=True, threshold = 'median')
feature_selector.fit(X_train, Y_train)
X_train_whooshed = feature_selector.transform(X_train)
X_test_whooshed = feature_selector.transform(X_test)
print('Minification time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
(new_X, new_Y) = geld_data(X_train_whooshed, Y_train, current_vectorizer, StrangeClassifier)
else:
# minimizer = TruncatedSVD(n_components=250, n_iter=100)
# minimizer.fit(vstack([tv, testv]))
# X_train_whooshed = minimizer.transform(tv)
# X_test_whooshed = minimizer.transform(testv)
tmpClassifier = BernoulliNB()
tmpClassifier.fit(tv, td[1])
#feature_selector = SelectPercentile(mutual_info_classif, percentile = 25)
feature_selector = SelectFromModel(tmpClassifier, prefit=True, threshold = 'median')
#feature_selector.fit(X_train, Y_train)
X_train_whooshed = feature_selector.transform(tv)
X_test_whooshed = feature_selector.transform(testv)
#X_train_whooshed = tv
#X_test_whooshed = testv
print('Minification time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
(new_X, new_Y) = geld_data(X_train_whooshed, td[1], current_vectorizer, StrangeClassifier)
print('Culling time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
process_arrays = []
data_arrays = []
thread_cnt = 3 #1
pool = multiprocessing.Pool(processes = thread_cnt)
for cpu in range(thread_cnt):
if crosscheck:
start_index = cpu * test_data_pts // thread_cnt
end_index = (cpu + 1) * test_data_pts // thread_cnt
# start_index = cpu * total_data_pts // thread_cnt
# end_index = (cpu + 1) * total_data_pts // thread_cnt
process_arrays.append(pool.apply_async(run_process_crosscheck, [
new_X,
new_Y,
# X_train[start_index:end_index],
X_test_whooshed[start_index:end_index],
# tv[start_index:end_index],
current_vectorizer
]))
else:
start_index = cpu * total_data_pts // thread_cnt
end_index = (cpu + 1) * total_data_pts // thread_cnt | new_X,
new_Y,
# tv[start_index:end_index],
# td[0]['id'][start_index:end_index],
X_test_whooshed[start_index:end_index],
testd['id'][start_index:end_index],
current_vectorizer
]))
for p in process_arrays:
data_arrays.append(p.get())
print('Exec time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
if crosscheck:
Y_pred = np.zeros(Y_test.shape)
Y_proba = np.zeros(Y_test.shape)
for idx, arr in enumerate(data_arrays):
Y_pred[idx * test_data_pts // thread_cnt:(idx + 1) * test_data_pts // thread_cnt] = arr['labels']
Y_proba[idx * test_data_pts // thread_cnt:(idx + 1) * test_data_pts // thread_cnt] = arr['proba']
#print(Y_pred[:100], Y_test[:100])
print(classification_report(y_true = Y_test, y_pred = Y_pred))
print('roc_auc', roc_auc_score(y_true = Y_test, y_score = Y_proba))
#print(classification_report(y_true = td[1], y_pred = Y_pred))
#print('roc_auc', roc_auc_score(y_true = td[1], y_score = Y_proba))
else:
final_results = np.zeros((total_data_pts, 2))
for idx, arr in enumerate(data_arrays):
final_results[idx * total_data_pts // thread_cnt:(idx + 1) * total_data_pts // thread_cnt] = arr
write_data(final_results) | process_arrays.append(pool.apply_async(run_process, [ | random_line_split |
nosuicide.py | import os
import multiprocessing
import datetime as dt
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.feature_selection import chi2, SelectPercentile, mutual_info_classif, SelectFromModel
from scipy.sparse import lil_matrix, vstack, hstack
import pymorphy2
from sklearn.decomposition import TruncatedSVD
def write_data(data):
with open(os.path.abspath(os.path.dirname(__file__) + './out.csv'), 'w') as outf:
outf.write('id,prob\n')
for itm in data:
outf.write(str(int(itm[0])) + ',' + str(float(itm[1])) + '\n')
outf.close()
def normalize_text(txt):
myan = pymorphy2.MorphAnalyzer()
newtxt = ''
for w in txt.decode('utf8').split(' '):
myword = myan.parse(w.lower())
newtxt += myword[0].normal_form
return newtxt.encode('utf8')
def normalize_array(data):
for idx, txt in enumerate(data):
data[idx] = normalize_text(txt)
return data
def geld_data(X, Y, vectorizer, Classifier):
# culling?
#return (X, Y)
print('WTF2', X.shape, Y.shape)
myTmpClassifier = Classifier(X = X, Y = Y, test_data = X, vectorizer = vectorizer)
new_X_train = None
new_Y_train = []
threshold = 1 - 1 / X.shape[1] ** 2
my_full_estimates = myTmpClassifier.run_crosscheck()
my_estimates = my_full_estimates['proba']
for idx, row in enumerate(my_estimates):
if abs(row - Y[idx]) <= threshold:
if new_X_train is None:
new_X_train = X[idx]
else:
new_X_train = vstack([new_X_train, X[idx]])
new_Y_train.append(Y[idx])
else:
pass
#print(row)
#print(td[1][idx])
new_Y_train = np.array(new_Y_train)
print('SHAPE', new_X_train.shape)
return (new_X_train, new_Y_train)
def load_text_data(total_pts, labels = True, **args):
with open(os.path.abspath(os.path.dirname(__file__) + './' + args['filename']), 'r') as td:
index = -1
X = np.zeros(
(total_pts),
dtype = [('id', 'u2'), ('text', 'S16000')]
)
if labels:
Y = np.zeros(total_pts)
for line in td:
sl = line.split(',')
if index > -1 and index < total_pts:
X[index] = (sl[0], sl[1].encode('utf8'))
if labels:
Y[index] = sl[2]
index += 1
td.close()
if labels:
return (X, Y)
else:
return X
def vectorize_data(**args):
total_data_pts = len(args['td'])
all_docs = np.zeros(
total_data_pts * 2,
dtype = [('id', 'u2'), ('text', 'S16000')]
)
all_docs[:total_data_pts] = args['td']
all_docs[total_data_pts:] = args['testd']
myvec = CustomTextVectorizer(
data = all_docs,
stop_words = args.get('stop_words'),
vectorizer = args.get('vectorizer'),
)
current_vectorizer = myvec.vectorize(args['td']['text'])
tv = myvec.dump()
myvec.vectorize(args['testd']['text'])
testv = myvec.dump()
return (tv, testv, current_vectorizer)
class CustomTextVectorizer():
def __init__(self, **args):
self.data = args['data']
self.labels = args.get('labels')
self.stop_words = args.get('stop_words')
self.fn = args.get('fn')
self.vecs = None
if self.fn is None:
self.fn = 'training_vecs.csv'
# best so far
# stop_words = self.stop_words,
# ngram_range = (2, 3),
# max_df = .2,
# max_features = 16000
# after normalization & new stop words:
# stop_words = self.stop_words,
# ngram_range = (2, 3),
# max_df = .7,
# max_features = 128000
# ~.5 precision & recall on test data
# works with culling
# analyzer = 'char',
# stop_words = self.stop_words,
# ngram_range = (3, 4),
# max_df = .7,
# max_features = 6000
# 48k features best on train set.
if args.get('vectorizer') is not None:
self.vectorizer = args.get('vectorizer')
else:
self.vectorizer = TfidfVectorizer(
stop_words = self.stop_words,
ngram_range = (2, 3),
max_df = .9,
max_features = 16000
)
self.vectorizer.fit(self.data['text'])
print(len(self.vectorizer.vocabulary_.items()))
def vectorize(self, data):
self.vecs = self.vectorizer.transform(data)
return self.vectorizer
def write(self):
with open(os.path.abspath(os.path.dirname(__file__) + './' + self.fn), 'w') as outf:
outf.write('id,' + ','.join(['f' + str(i) for i in range(len(self.vecs.toarray()[0]))]) + ',label\n')
for index, itm in enumerate(self.vecs.toarray()):
current_row = str(self.data['id'][index]) + ',' + ','.join(list(str(f) for f in itm))
if self.labels is not None:
current_row += ',' + str(int(self.labels[index]))
outf.write(current_row + '\n')
outf.close()
def dump(self):
return self.vecs
class StrangeClassifier():
def __init__(self, **args):
self.ids = args.get('ids')
self.X = args['X']
self.Y = args['Y']
self.test_data = args['test_data']
print('EFFIN_TEST_DATA', self.test_data.shape)
self.total_data_pts = self.test_data.shape[0]
self.estimates = np.zeros((self.total_data_pts, 2))
self.crosscheck_estimates = None
self.calc_start = dt.datetime.now()
self.vectorizer = args['vectorizer']
def estimate(self):
# class_prior = [.9, .1] - we dunnno
|
def run(self):
self.estimate()
print(self.estimates)
return self.estimates
def run_crosscheck(self):
self.estimate()
print(self.crosscheck_estimates)
return self.crosscheck_estimates
def run_process(X, Y, test_data, ids, vectorizer):
runner = StrangeClassifier(X = X, Y = Y, test_data = test_data, ids = ids, vectorizer = vectorizer)
return runner.run()
def run_process_crosscheck(X, Y, test_data, vectorizer):
runner = StrangeClassifier(X = X, Y = Y, test_data = test_data, vectorizer = vectorizer)
return runner.run_crosscheck()
def run_normalizer(vec):
return normalize_array(vec)
if __name__ == '__main__':
total_data_pts = 13944
crosscheck = True
stop_words = [
'а',
#'атьс',
'ах',
'бы',
'быть',
'в',
'вать',
'во',
'вот',
'всего',
'всё',
'вы',
'для',
'да',
'до',
'если',
'ещё',
'ение',
'же',
'за',
'и',
#'иват',
'из',
'ие',
'ия',
'или',
'к',
'ки',
'как',
'ко',
'который',
'кто',
'ку',
'ли',
'лишь',
'между',
'мы',
'на',
'над',
'нибудь',
'никак',
'нный',
'но',
'ну'
#'нять',
'о',
'об',
'овать',
'оват',
'ой',
'ок',
'около',
'он',
'она',
'они',
#'ость',
'от',
'по',
'под',
'практически',
'при',
'про',
'просто',
'с',
#'сить',
'совсем',
'среди',
#'ство',
'так',
'таки',
'тать',
'тем',
'то',
'ты',
'ть',
'тьс',
'ться',
'тот',
'у',
'уже',
'чем',
'что',
'чтобы',
'ься',
#'ывать',
#'ыват',
'это',
'этот',
'src',
'https',
'figure'
]
neg_stop_words = []
for w in stop_words:
neg_stop_words.append('не_' + w)
stop_words = stop_words + neg_stop_words
calc_start = dt.datetime.now()
td = load_text_data(total_data_pts, True, filename='train_normalized_withspaces.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_withspaces.csv')
(tv_norm, testv_norm, vectorizer_norm) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (1, 3),
max_df = .7,
min_df = 3,
#max_features = 96000
),
)
td = load_text_data(total_data_pts, True, filename='train_normalized_just4grams.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_just4grams.csv')
(tv_grams, testv_grams, vectorizer_grams) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (1, 2),
max_df = .9,
min_df = 3,
# max_features = 16000
),
)
td = load_text_data(total_data_pts, True, filename='train_normalized_justpos.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_justpos.csv')
(tv_pos, testv_pos, vectorizer_pos) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (2, 4),
max_df = .9,
min_df = 3,
#max_features = 16000
),
)
tv = hstack([tv_norm, tv_grams, tv_pos])
testv = lil_matrix(hstack([testv_norm, testv_grams, testv_pos]))
# tv = tv_norm
# testv = testv_norm
print('Vectorization time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
X_train, X_test, Y_train, Y_test = train_test_split(tv, td[1], test_size = .3)
# X_test = X_train
# Y_test = Y_train
test_data_pts = Y_test.shape[0]
# test_data_pts = Y_train.shape[0]
current_vectorizer = None
if crosscheck:
print('WTF???', X_train.shape, Y_train.shape)
# minimizer = TruncatedSVD(n_components=250, n_iter=100)
# minimizer.fit(vstack([X_train, X_test]))
# X_train_whooshed = minimizer.transform(X_train)
# X_test_whooshed = minimizer.transform(X_test)
#tmpClassifier = DecisionTreeClassifier(max_depth = 32, class_weight = { 0: 1, 1: 9 })
#tmpClassifier = RandomForestClassifier(
# max_depth = 32,
# n_estimators = 64,
# max_features = 0.25,
# class_weight = { 0: 1, 1: 9 },
# n_jobs = 3
#)
#tmpClassifier = MultinomialNB(alpha = 0.02)
# tmpClassifier = BernoulliNB()
# tmpClassifier.fit(X_train, Y_train)
feature_selector = SelectPercentile(chi2, percentile = 10)
#feature_selector = SelectFromModel(tmpClassifier, prefit=True, threshold = 'median')
feature_selector.fit(X_train, Y_train)
X_train_whooshed = feature_selector.transform(X_train)
X_test_whooshed = feature_selector.transform(X_test)
print('Minification time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
(new_X, new_Y) = geld_data(X_train_whooshed, Y_train, current_vectorizer, StrangeClassifier)
else:
# minimizer = TruncatedSVD(n_components=250, n_iter=100)
# minimizer.fit(vstack([tv, testv]))
# X_train_whooshed = minimizer.transform(tv)
# X_test_whooshed = minimizer.transform(testv)
tmpClassifier = BernoulliNB()
tmpClassifier.fit(tv, td[1])
#feature_selector = SelectPercentile(mutual_info_classif, percentile = 25)
feature_selector = SelectFromModel(tmpClassifier, prefit=True, threshold = 'median')
#feature_selector.fit(X_train, Y_train)
X_train_whooshed = feature_selector.transform(tv)
X_test_whooshed = feature_selector.transform(testv)
#X_train_whooshed = tv
#X_test_whooshed = testv
print('Minification time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
(new_X, new_Y) = geld_data(X_train_whooshed, td[1], current_vectorizer, StrangeClassifier)
print('Culling time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
process_arrays = []
data_arrays = []
thread_cnt = 3 #1
pool = multiprocessing.Pool(processes = thread_cnt)
for cpu in range(thread_cnt):
if crosscheck:
start_index = cpu * test_data_pts // thread_cnt
end_index = (cpu + 1) * test_data_pts // thread_cnt
# start_index = cpu * total_data_pts // thread_cnt
# end_index = (cpu + 1) * total_data_pts // thread_cnt
process_arrays.append(pool.apply_async(run_process_crosscheck, [
new_X,
new_Y,
# X_train[start_index:end_index],
X_test_whooshed[start_index:end_index],
# tv[start_index:end_index],
current_vectorizer
]))
else:
start_index = cpu * total_data_pts // thread_cnt
end_index = (cpu + 1) * total_data_pts // thread_cnt
process_arrays.append(pool.apply_async(run_process, [
new_X,
new_Y,
# tv[start_index:end_index],
# td[0]['id'][start_index:end_index],
X_test_whooshed[start_index:end_index],
testd['id'][start_index:end_index],
current_vectorizer
]))
for p in process_arrays:
data_arrays.append(p.get())
print('Exec time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
if crosscheck:
Y_pred = np.zeros(Y_test.shape)
Y_proba = np.zeros(Y_test.shape)
for idx, arr in enumerate(data_arrays):
Y_pred[idx * test_data_pts // thread_cnt:(idx + 1) * test_data_pts // thread_cnt] = arr['labels']
Y_proba[idx * test_data_pts // thread_cnt:(idx + 1) * test_data_pts // thread_cnt] = arr['proba']
#print(Y_pred[:100], Y_test[:100])
print(classification_report(y_true = Y_test, y_pred = Y_pred))
print('roc_auc', roc_auc_score(y_true = Y_test, y_score = Y_proba))
#print(classification_report(y_true = td[1], y_pred = Y_pred))
#print('roc_auc', roc_auc_score(y_true = td[1], y_score = Y_proba))
else:
final_results = np.zeros((total_data_pts, 2))
for idx, arr in enumerate(data_arrays):
final_results[idx * total_data_pts // thread_cnt:(idx + 1) * total_data_pts // thread_cnt] = arr
write_data(final_results)
| classifier = BernoulliNB()
#classifier = MultinomialNB(alpha = 0.02)
#classifier = DecisionTreeClassifier(class_weight = { 0: 1, 1: 9 })
#classifier = KNeighborsClassifier(n_neighbors=50, metric='minkowski', p=3)
# classifier = RandomForestClassifier(
# max_depth = 32,
# n_estimators = 64,
# max_features = 0.25,
# class_weight = { 0: 1, 1: 9 },
# n_jobs = 3
# )
classifier.fit(self.X, self.Y)
if self.calc_start is not None:
print('Fitting time: ' + str((dt.datetime.now() - self.calc_start).total_seconds()) + 's')
#if self.ids is not None:
results_proba = classifier.predict_proba(self.test_data)
if self.calc_start is not None:
print('Prediction time: ' + str((dt.datetime.now() - self.calc_start).total_seconds()) + 's')
print(results_proba[:100])
if self.ids is not None:
self.estimates[:, 0] = self.ids
self.estimates[:, 1] = results_proba[:, 1]
else:
self.crosscheck_estimates = {
'labels': classifier.predict(self.test_data),
'proba': results_proba[:, 1]
}
# inverted_vocab = {_id:w for (w,_id) in self.vectorizer.vocabulary_.items() }
# for _id in classifier.coef_.argsort()[0][-50:]:
# print(inverted_vocab[_id], classifier.coef_[0][_id]) | identifier_body |
nosuicide.py | import os
import multiprocessing
import datetime as dt
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.feature_selection import chi2, SelectPercentile, mutual_info_classif, SelectFromModel
from scipy.sparse import lil_matrix, vstack, hstack
import pymorphy2
from sklearn.decomposition import TruncatedSVD
def write_data(data):
with open(os.path.abspath(os.path.dirname(__file__) + './out.csv'), 'w') as outf:
outf.write('id,prob\n')
for itm in data:
outf.write(str(int(itm[0])) + ',' + str(float(itm[1])) + '\n')
outf.close()
def normalize_text(txt):
myan = pymorphy2.MorphAnalyzer()
newtxt = ''
for w in txt.decode('utf8').split(' '):
myword = myan.parse(w.lower())
newtxt += myword[0].normal_form
return newtxt.encode('utf8')
def normalize_array(data):
for idx, txt in enumerate(data):
data[idx] = normalize_text(txt)
return data
def | (X, Y, vectorizer, Classifier):
# culling?
#return (X, Y)
print('WTF2', X.shape, Y.shape)
myTmpClassifier = Classifier(X = X, Y = Y, test_data = X, vectorizer = vectorizer)
new_X_train = None
new_Y_train = []
threshold = 1 - 1 / X.shape[1] ** 2
my_full_estimates = myTmpClassifier.run_crosscheck()
my_estimates = my_full_estimates['proba']
for idx, row in enumerate(my_estimates):
if abs(row - Y[idx]) <= threshold:
if new_X_train is None:
new_X_train = X[idx]
else:
new_X_train = vstack([new_X_train, X[idx]])
new_Y_train.append(Y[idx])
else:
pass
#print(row)
#print(td[1][idx])
new_Y_train = np.array(new_Y_train)
print('SHAPE', new_X_train.shape)
return (new_X_train, new_Y_train)
def load_text_data(total_pts, labels = True, **args):
with open(os.path.abspath(os.path.dirname(__file__) + './' + args['filename']), 'r') as td:
index = -1
X = np.zeros(
(total_pts),
dtype = [('id', 'u2'), ('text', 'S16000')]
)
if labels:
Y = np.zeros(total_pts)
for line in td:
sl = line.split(',')
if index > -1 and index < total_pts:
X[index] = (sl[0], sl[1].encode('utf8'))
if labels:
Y[index] = sl[2]
index += 1
td.close()
if labels:
return (X, Y)
else:
return X
def vectorize_data(**args):
total_data_pts = len(args['td'])
all_docs = np.zeros(
total_data_pts * 2,
dtype = [('id', 'u2'), ('text', 'S16000')]
)
all_docs[:total_data_pts] = args['td']
all_docs[total_data_pts:] = args['testd']
myvec = CustomTextVectorizer(
data = all_docs,
stop_words = args.get('stop_words'),
vectorizer = args.get('vectorizer'),
)
current_vectorizer = myvec.vectorize(args['td']['text'])
tv = myvec.dump()
myvec.vectorize(args['testd']['text'])
testv = myvec.dump()
return (tv, testv, current_vectorizer)
class CustomTextVectorizer():
def __init__(self, **args):
self.data = args['data']
self.labels = args.get('labels')
self.stop_words = args.get('stop_words')
self.fn = args.get('fn')
self.vecs = None
if self.fn is None:
self.fn = 'training_vecs.csv'
# best so far
# stop_words = self.stop_words,
# ngram_range = (2, 3),
# max_df = .2,
# max_features = 16000
# after normalization & new stop words:
# stop_words = self.stop_words,
# ngram_range = (2, 3),
# max_df = .7,
# max_features = 128000
# ~.5 precision & recall on test data
# works with culling
# analyzer = 'char',
# stop_words = self.stop_words,
# ngram_range = (3, 4),
# max_df = .7,
# max_features = 6000
# 48k features best on train set.
if args.get('vectorizer') is not None:
self.vectorizer = args.get('vectorizer')
else:
self.vectorizer = TfidfVectorizer(
stop_words = self.stop_words,
ngram_range = (2, 3),
max_df = .9,
max_features = 16000
)
self.vectorizer.fit(self.data['text'])
print(len(self.vectorizer.vocabulary_.items()))
def vectorize(self, data):
self.vecs = self.vectorizer.transform(data)
return self.vectorizer
def write(self):
with open(os.path.abspath(os.path.dirname(__file__) + './' + self.fn), 'w') as outf:
outf.write('id,' + ','.join(['f' + str(i) for i in range(len(self.vecs.toarray()[0]))]) + ',label\n')
for index, itm in enumerate(self.vecs.toarray()):
current_row = str(self.data['id'][index]) + ',' + ','.join(list(str(f) for f in itm))
if self.labels is not None:
current_row += ',' + str(int(self.labels[index]))
outf.write(current_row + '\n')
outf.close()
def dump(self):
return self.vecs
class StrangeClassifier():
def __init__(self, **args):
self.ids = args.get('ids')
self.X = args['X']
self.Y = args['Y']
self.test_data = args['test_data']
print('EFFIN_TEST_DATA', self.test_data.shape)
self.total_data_pts = self.test_data.shape[0]
self.estimates = np.zeros((self.total_data_pts, 2))
self.crosscheck_estimates = None
self.calc_start = dt.datetime.now()
self.vectorizer = args['vectorizer']
def estimate(self):
# class_prior = [.9, .1] - we dunnno
classifier = BernoulliNB()
#classifier = MultinomialNB(alpha = 0.02)
#classifier = DecisionTreeClassifier(class_weight = { 0: 1, 1: 9 })
#classifier = KNeighborsClassifier(n_neighbors=50, metric='minkowski', p=3)
# classifier = RandomForestClassifier(
# max_depth = 32,
# n_estimators = 64,
# max_features = 0.25,
# class_weight = { 0: 1, 1: 9 },
# n_jobs = 3
# )
classifier.fit(self.X, self.Y)
if self.calc_start is not None:
print('Fitting time: ' + str((dt.datetime.now() - self.calc_start).total_seconds()) + 's')
#if self.ids is not None:
results_proba = classifier.predict_proba(self.test_data)
if self.calc_start is not None:
print('Prediction time: ' + str((dt.datetime.now() - self.calc_start).total_seconds()) + 's')
print(results_proba[:100])
if self.ids is not None:
self.estimates[:, 0] = self.ids
self.estimates[:, 1] = results_proba[:, 1]
else:
self.crosscheck_estimates = {
'labels': classifier.predict(self.test_data),
'proba': results_proba[:, 1]
}
# inverted_vocab = {_id:w for (w,_id) in self.vectorizer.vocabulary_.items() }
# for _id in classifier.coef_.argsort()[0][-50:]:
# print(inverted_vocab[_id], classifier.coef_[0][_id])
def run(self):
self.estimate()
print(self.estimates)
return self.estimates
def run_crosscheck(self):
self.estimate()
print(self.crosscheck_estimates)
return self.crosscheck_estimates
def run_process(X, Y, test_data, ids, vectorizer):
runner = StrangeClassifier(X = X, Y = Y, test_data = test_data, ids = ids, vectorizer = vectorizer)
return runner.run()
def run_process_crosscheck(X, Y, test_data, vectorizer):
runner = StrangeClassifier(X = X, Y = Y, test_data = test_data, vectorizer = vectorizer)
return runner.run_crosscheck()
def run_normalizer(vec):
return normalize_array(vec)
if __name__ == '__main__':
total_data_pts = 13944
crosscheck = True
stop_words = [
'а',
#'атьс',
'ах',
'бы',
'быть',
'в',
'вать',
'во',
'вот',
'всего',
'всё',
'вы',
'для',
'да',
'до',
'если',
'ещё',
'ение',
'же',
'за',
'и',
#'иват',
'из',
'ие',
'ия',
'или',
'к',
'ки',
'как',
'ко',
'который',
'кто',
'ку',
'ли',
'лишь',
'между',
'мы',
'на',
'над',
'нибудь',
'никак',
'нный',
'но',
'ну'
#'нять',
'о',
'об',
'овать',
'оват',
'ой',
'ок',
'около',
'он',
'она',
'они',
#'ость',
'от',
'по',
'под',
'практически',
'при',
'про',
'просто',
'с',
#'сить',
'совсем',
'среди',
#'ство',
'так',
'таки',
'тать',
'тем',
'то',
'ты',
'ть',
'тьс',
'ться',
'тот',
'у',
'уже',
'чем',
'что',
'чтобы',
'ься',
#'ывать',
#'ыват',
'это',
'этот',
'src',
'https',
'figure'
]
neg_stop_words = []
for w in stop_words:
neg_stop_words.append('не_' + w)
stop_words = stop_words + neg_stop_words
calc_start = dt.datetime.now()
td = load_text_data(total_data_pts, True, filename='train_normalized_withspaces.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_withspaces.csv')
(tv_norm, testv_norm, vectorizer_norm) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (1, 3),
max_df = .7,
min_df = 3,
#max_features = 96000
),
)
td = load_text_data(total_data_pts, True, filename='train_normalized_just4grams.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_just4grams.csv')
(tv_grams, testv_grams, vectorizer_grams) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (1, 2),
max_df = .9,
min_df = 3,
# max_features = 16000
),
)
td = load_text_data(total_data_pts, True, filename='train_normalized_justpos.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_justpos.csv')
(tv_pos, testv_pos, vectorizer_pos) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (2, 4),
max_df = .9,
min_df = 3,
#max_features = 16000
),
)
tv = hstack([tv_norm, tv_grams, tv_pos])
testv = lil_matrix(hstack([testv_norm, testv_grams, testv_pos]))
# tv = tv_norm
# testv = testv_norm
print('Vectorization time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
X_train, X_test, Y_train, Y_test = train_test_split(tv, td[1], test_size = .3)
# X_test = X_train
# Y_test = Y_train
test_data_pts = Y_test.shape[0]
# test_data_pts = Y_train.shape[0]
current_vectorizer = None
if crosscheck:
print('WTF???', X_train.shape, Y_train.shape)
# minimizer = TruncatedSVD(n_components=250, n_iter=100)
# minimizer.fit(vstack([X_train, X_test]))
# X_train_whooshed = minimizer.transform(X_train)
# X_test_whooshed = minimizer.transform(X_test)
#tmpClassifier = DecisionTreeClassifier(max_depth = 32, class_weight = { 0: 1, 1: 9 })
#tmpClassifier = RandomForestClassifier(
# max_depth = 32,
# n_estimators = 64,
# max_features = 0.25,
# class_weight = { 0: 1, 1: 9 },
# n_jobs = 3
#)
#tmpClassifier = MultinomialNB(alpha = 0.02)
# tmpClassifier = BernoulliNB()
# tmpClassifier.fit(X_train, Y_train)
feature_selector = SelectPercentile(chi2, percentile = 10)
#feature_selector = SelectFromModel(tmpClassifier, prefit=True, threshold = 'median')
feature_selector.fit(X_train, Y_train)
X_train_whooshed = feature_selector.transform(X_train)
X_test_whooshed = feature_selector.transform(X_test)
print('Minification time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
(new_X, new_Y) = geld_data(X_train_whooshed, Y_train, current_vectorizer, StrangeClassifier)
else:
# minimizer = TruncatedSVD(n_components=250, n_iter=100)
# minimizer.fit(vstack([tv, testv]))
# X_train_whooshed = minimizer.transform(tv)
# X_test_whooshed = minimizer.transform(testv)
tmpClassifier = BernoulliNB()
tmpClassifier.fit(tv, td[1])
#feature_selector = SelectPercentile(mutual_info_classif, percentile = 25)
feature_selector = SelectFromModel(tmpClassifier, prefit=True, threshold = 'median')
#feature_selector.fit(X_train, Y_train)
X_train_whooshed = feature_selector.transform(tv)
X_test_whooshed = feature_selector.transform(testv)
#X_train_whooshed = tv
#X_test_whooshed = testv
print('Minification time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
(new_X, new_Y) = geld_data(X_train_whooshed, td[1], current_vectorizer, StrangeClassifier)
print('Culling time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
process_arrays = []
data_arrays = []
thread_cnt = 3 #1
pool = multiprocessing.Pool(processes = thread_cnt)
for cpu in range(thread_cnt):
if crosscheck:
start_index = cpu * test_data_pts // thread_cnt
end_index = (cpu + 1) * test_data_pts // thread_cnt
# start_index = cpu * total_data_pts // thread_cnt
# end_index = (cpu + 1) * total_data_pts // thread_cnt
process_arrays.append(pool.apply_async(run_process_crosscheck, [
new_X,
new_Y,
# X_train[start_index:end_index],
X_test_whooshed[start_index:end_index],
# tv[start_index:end_index],
current_vectorizer
]))
else:
start_index = cpu * total_data_pts // thread_cnt
end_index = (cpu + 1) * total_data_pts // thread_cnt
process_arrays.append(pool.apply_async(run_process, [
new_X,
new_Y,
# tv[start_index:end_index],
# td[0]['id'][start_index:end_index],
X_test_whooshed[start_index:end_index],
testd['id'][start_index:end_index],
current_vectorizer
]))
for p in process_arrays:
data_arrays.append(p.get())
print('Exec time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
if crosscheck:
Y_pred = np.zeros(Y_test.shape)
Y_proba = np.zeros(Y_test.shape)
for idx, arr in enumerate(data_arrays):
Y_pred[idx * test_data_pts // thread_cnt:(idx + 1) * test_data_pts // thread_cnt] = arr['labels']
Y_proba[idx * test_data_pts // thread_cnt:(idx + 1) * test_data_pts // thread_cnt] = arr['proba']
#print(Y_pred[:100], Y_test[:100])
print(classification_report(y_true = Y_test, y_pred = Y_pred))
print('roc_auc', roc_auc_score(y_true = Y_test, y_score = Y_proba))
#print(classification_report(y_true = td[1], y_pred = Y_pred))
#print('roc_auc', roc_auc_score(y_true = td[1], y_score = Y_proba))
else:
final_results = np.zeros((total_data_pts, 2))
for idx, arr in enumerate(data_arrays):
final_results[idx * total_data_pts // thread_cnt:(idx + 1) * total_data_pts // thread_cnt] = arr
write_data(final_results)
| geld_data | identifier_name |
nosuicide.py | import os
import multiprocessing
import datetime as dt
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.feature_selection import chi2, SelectPercentile, mutual_info_classif, SelectFromModel
from scipy.sparse import lil_matrix, vstack, hstack
import pymorphy2
from sklearn.decomposition import TruncatedSVD
def write_data(data):
with open(os.path.abspath(os.path.dirname(__file__) + './out.csv'), 'w') as outf:
outf.write('id,prob\n')
for itm in data:
outf.write(str(int(itm[0])) + ',' + str(float(itm[1])) + '\n')
outf.close()
def normalize_text(txt):
myan = pymorphy2.MorphAnalyzer()
newtxt = ''
for w in txt.decode('utf8').split(' '):
myword = myan.parse(w.lower())
newtxt += myword[0].normal_form
return newtxt.encode('utf8')
def normalize_array(data):
for idx, txt in enumerate(data):
data[idx] = normalize_text(txt)
return data
def geld_data(X, Y, vectorizer, Classifier):
# culling?
#return (X, Y)
print('WTF2', X.shape, Y.shape)
myTmpClassifier = Classifier(X = X, Y = Y, test_data = X, vectorizer = vectorizer)
new_X_train = None
new_Y_train = []
threshold = 1 - 1 / X.shape[1] ** 2
my_full_estimates = myTmpClassifier.run_crosscheck()
my_estimates = my_full_estimates['proba']
for idx, row in enumerate(my_estimates):
if abs(row - Y[idx]) <= threshold:
|
else:
pass
#print(row)
#print(td[1][idx])
new_Y_train = np.array(new_Y_train)
print('SHAPE', new_X_train.shape)
return (new_X_train, new_Y_train)
def load_text_data(total_pts, labels = True, **args):
with open(os.path.abspath(os.path.dirname(__file__) + './' + args['filename']), 'r') as td:
index = -1
X = np.zeros(
(total_pts),
dtype = [('id', 'u2'), ('text', 'S16000')]
)
if labels:
Y = np.zeros(total_pts)
for line in td:
sl = line.split(',')
if index > -1 and index < total_pts:
X[index] = (sl[0], sl[1].encode('utf8'))
if labels:
Y[index] = sl[2]
index += 1
td.close()
if labels:
return (X, Y)
else:
return X
def vectorize_data(**args):
total_data_pts = len(args['td'])
all_docs = np.zeros(
total_data_pts * 2,
dtype = [('id', 'u2'), ('text', 'S16000')]
)
all_docs[:total_data_pts] = args['td']
all_docs[total_data_pts:] = args['testd']
myvec = CustomTextVectorizer(
data = all_docs,
stop_words = args.get('stop_words'),
vectorizer = args.get('vectorizer'),
)
current_vectorizer = myvec.vectorize(args['td']['text'])
tv = myvec.dump()
myvec.vectorize(args['testd']['text'])
testv = myvec.dump()
return (tv, testv, current_vectorizer)
class CustomTextVectorizer():
def __init__(self, **args):
self.data = args['data']
self.labels = args.get('labels')
self.stop_words = args.get('stop_words')
self.fn = args.get('fn')
self.vecs = None
if self.fn is None:
self.fn = 'training_vecs.csv'
# best so far
# stop_words = self.stop_words,
# ngram_range = (2, 3),
# max_df = .2,
# max_features = 16000
# after normalization & new stop words:
# stop_words = self.stop_words,
# ngram_range = (2, 3),
# max_df = .7,
# max_features = 128000
# ~.5 precision & recall on test data
# works with culling
# analyzer = 'char',
# stop_words = self.stop_words,
# ngram_range = (3, 4),
# max_df = .7,
# max_features = 6000
# 48k features best on train set.
if args.get('vectorizer') is not None:
self.vectorizer = args.get('vectorizer')
else:
self.vectorizer = TfidfVectorizer(
stop_words = self.stop_words,
ngram_range = (2, 3),
max_df = .9,
max_features = 16000
)
self.vectorizer.fit(self.data['text'])
print(len(self.vectorizer.vocabulary_.items()))
def vectorize(self, data):
self.vecs = self.vectorizer.transform(data)
return self.vectorizer
def write(self):
with open(os.path.abspath(os.path.dirname(__file__) + './' + self.fn), 'w') as outf:
outf.write('id,' + ','.join(['f' + str(i) for i in range(len(self.vecs.toarray()[0]))]) + ',label\n')
for index, itm in enumerate(self.vecs.toarray()):
current_row = str(self.data['id'][index]) + ',' + ','.join(list(str(f) for f in itm))
if self.labels is not None:
current_row += ',' + str(int(self.labels[index]))
outf.write(current_row + '\n')
outf.close()
def dump(self):
return self.vecs
class StrangeClassifier():
def __init__(self, **args):
self.ids = args.get('ids')
self.X = args['X']
self.Y = args['Y']
self.test_data = args['test_data']
print('EFFIN_TEST_DATA', self.test_data.shape)
self.total_data_pts = self.test_data.shape[0]
self.estimates = np.zeros((self.total_data_pts, 2))
self.crosscheck_estimates = None
self.calc_start = dt.datetime.now()
self.vectorizer = args['vectorizer']
def estimate(self):
# class_prior = [.9, .1] - we dunnno
classifier = BernoulliNB()
#classifier = MultinomialNB(alpha = 0.02)
#classifier = DecisionTreeClassifier(class_weight = { 0: 1, 1: 9 })
#classifier = KNeighborsClassifier(n_neighbors=50, metric='minkowski', p=3)
# classifier = RandomForestClassifier(
# max_depth = 32,
# n_estimators = 64,
# max_features = 0.25,
# class_weight = { 0: 1, 1: 9 },
# n_jobs = 3
# )
classifier.fit(self.X, self.Y)
if self.calc_start is not None:
print('Fitting time: ' + str((dt.datetime.now() - self.calc_start).total_seconds()) + 's')
#if self.ids is not None:
results_proba = classifier.predict_proba(self.test_data)
if self.calc_start is not None:
print('Prediction time: ' + str((dt.datetime.now() - self.calc_start).total_seconds()) + 's')
print(results_proba[:100])
if self.ids is not None:
self.estimates[:, 0] = self.ids
self.estimates[:, 1] = results_proba[:, 1]
else:
self.crosscheck_estimates = {
'labels': classifier.predict(self.test_data),
'proba': results_proba[:, 1]
}
# inverted_vocab = {_id:w for (w,_id) in self.vectorizer.vocabulary_.items() }
# for _id in classifier.coef_.argsort()[0][-50:]:
# print(inverted_vocab[_id], classifier.coef_[0][_id])
def run(self):
self.estimate()
print(self.estimates)
return self.estimates
def run_crosscheck(self):
self.estimate()
print(self.crosscheck_estimates)
return self.crosscheck_estimates
def run_process(X, Y, test_data, ids, vectorizer):
runner = StrangeClassifier(X = X, Y = Y, test_data = test_data, ids = ids, vectorizer = vectorizer)
return runner.run()
def run_process_crosscheck(X, Y, test_data, vectorizer):
runner = StrangeClassifier(X = X, Y = Y, test_data = test_data, vectorizer = vectorizer)
return runner.run_crosscheck()
def run_normalizer(vec):
return normalize_array(vec)
if __name__ == '__main__':
total_data_pts = 13944
crosscheck = True
stop_words = [
'а',
#'атьс',
'ах',
'бы',
'быть',
'в',
'вать',
'во',
'вот',
'всего',
'всё',
'вы',
'для',
'да',
'до',
'если',
'ещё',
'ение',
'же',
'за',
'и',
#'иват',
'из',
'ие',
'ия',
'или',
'к',
'ки',
'как',
'ко',
'который',
'кто',
'ку',
'ли',
'лишь',
'между',
'мы',
'на',
'над',
'нибудь',
'никак',
'нный',
'но',
'ну'
#'нять',
'о',
'об',
'овать',
'оват',
'ой',
'ок',
'около',
'он',
'она',
'они',
#'ость',
'от',
'по',
'под',
'практически',
'при',
'про',
'просто',
'с',
#'сить',
'совсем',
'среди',
#'ство',
'так',
'таки',
'тать',
'тем',
'то',
'ты',
'ть',
'тьс',
'ться',
'тот',
'у',
'уже',
'чем',
'что',
'чтобы',
'ься',
#'ывать',
#'ыват',
'это',
'этот',
'src',
'https',
'figure'
]
neg_stop_words = []
for w in stop_words:
neg_stop_words.append('не_' + w)
stop_words = stop_words + neg_stop_words
calc_start = dt.datetime.now()
td = load_text_data(total_data_pts, True, filename='train_normalized_withspaces.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_withspaces.csv')
(tv_norm, testv_norm, vectorizer_norm) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (1, 3),
max_df = .7,
min_df = 3,
#max_features = 96000
),
)
td = load_text_data(total_data_pts, True, filename='train_normalized_just4grams.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_just4grams.csv')
(tv_grams, testv_grams, vectorizer_grams) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (1, 2),
max_df = .9,
min_df = 3,
# max_features = 16000
),
)
td = load_text_data(total_data_pts, True, filename='train_normalized_justpos.csv')
testd = load_text_data(total_data_pts, False, filename='test_normalized_justpos.csv')
(tv_pos, testv_pos, vectorizer_pos) = vectorize_data(
td = td[0],
testd = testd,
stop_words = stop_words,
vectorizer = TfidfVectorizer(
stop_words = stop_words,
ngram_range = (2, 4),
max_df = .9,
min_df = 3,
#max_features = 16000
),
)
tv = hstack([tv_norm, tv_grams, tv_pos])
testv = lil_matrix(hstack([testv_norm, testv_grams, testv_pos]))
# tv = tv_norm
# testv = testv_norm
print('Vectorization time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
X_train, X_test, Y_train, Y_test = train_test_split(tv, td[1], test_size = .3)
# X_test = X_train
# Y_test = Y_train
test_data_pts = Y_test.shape[0]
# test_data_pts = Y_train.shape[0]
current_vectorizer = None
if crosscheck:
print('WTF???', X_train.shape, Y_train.shape)
# minimizer = TruncatedSVD(n_components=250, n_iter=100)
# minimizer.fit(vstack([X_train, X_test]))
# X_train_whooshed = minimizer.transform(X_train)
# X_test_whooshed = minimizer.transform(X_test)
#tmpClassifier = DecisionTreeClassifier(max_depth = 32, class_weight = { 0: 1, 1: 9 })
#tmpClassifier = RandomForestClassifier(
# max_depth = 32,
# n_estimators = 64,
# max_features = 0.25,
# class_weight = { 0: 1, 1: 9 },
# n_jobs = 3
#)
#tmpClassifier = MultinomialNB(alpha = 0.02)
# tmpClassifier = BernoulliNB()
# tmpClassifier.fit(X_train, Y_train)
feature_selector = SelectPercentile(chi2, percentile = 10)
#feature_selector = SelectFromModel(tmpClassifier, prefit=True, threshold = 'median')
feature_selector.fit(X_train, Y_train)
X_train_whooshed = feature_selector.transform(X_train)
X_test_whooshed = feature_selector.transform(X_test)
print('Minification time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
(new_X, new_Y) = geld_data(X_train_whooshed, Y_train, current_vectorizer, StrangeClassifier)
else:
# minimizer = TruncatedSVD(n_components=250, n_iter=100)
# minimizer.fit(vstack([tv, testv]))
# X_train_whooshed = minimizer.transform(tv)
# X_test_whooshed = minimizer.transform(testv)
tmpClassifier = BernoulliNB()
tmpClassifier.fit(tv, td[1])
#feature_selector = SelectPercentile(mutual_info_classif, percentile = 25)
feature_selector = SelectFromModel(tmpClassifier, prefit=True, threshold = 'median')
#feature_selector.fit(X_train, Y_train)
X_train_whooshed = feature_selector.transform(tv)
X_test_whooshed = feature_selector.transform(testv)
#X_train_whooshed = tv
#X_test_whooshed = testv
print('Minification time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
(new_X, new_Y) = geld_data(X_train_whooshed, td[1], current_vectorizer, StrangeClassifier)
print('Culling time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
process_arrays = []
data_arrays = []
thread_cnt = 3 #1
pool = multiprocessing.Pool(processes = thread_cnt)
for cpu in range(thread_cnt):
if crosscheck:
start_index = cpu * test_data_pts // thread_cnt
end_index = (cpu + 1) * test_data_pts // thread_cnt
# start_index = cpu * total_data_pts // thread_cnt
# end_index = (cpu + 1) * total_data_pts // thread_cnt
process_arrays.append(pool.apply_async(run_process_crosscheck, [
new_X,
new_Y,
# X_train[start_index:end_index],
X_test_whooshed[start_index:end_index],
# tv[start_index:end_index],
current_vectorizer
]))
else:
start_index = cpu * total_data_pts // thread_cnt
end_index = (cpu + 1) * total_data_pts // thread_cnt
process_arrays.append(pool.apply_async(run_process, [
new_X,
new_Y,
# tv[start_index:end_index],
# td[0]['id'][start_index:end_index],
X_test_whooshed[start_index:end_index],
testd['id'][start_index:end_index],
current_vectorizer
]))
for p in process_arrays:
data_arrays.append(p.get())
print('Exec time: ' + str((dt.datetime.now() - calc_start).total_seconds()) + 's')
if crosscheck:
Y_pred = np.zeros(Y_test.shape)
Y_proba = np.zeros(Y_test.shape)
for idx, arr in enumerate(data_arrays):
Y_pred[idx * test_data_pts // thread_cnt:(idx + 1) * test_data_pts // thread_cnt] = arr['labels']
Y_proba[idx * test_data_pts // thread_cnt:(idx + 1) * test_data_pts // thread_cnt] = arr['proba']
#print(Y_pred[:100], Y_test[:100])
print(classification_report(y_true = Y_test, y_pred = Y_pred))
print('roc_auc', roc_auc_score(y_true = Y_test, y_score = Y_proba))
#print(classification_report(y_true = td[1], y_pred = Y_pred))
#print('roc_auc', roc_auc_score(y_true = td[1], y_score = Y_proba))
else:
final_results = np.zeros((total_data_pts, 2))
for idx, arr in enumerate(data_arrays):
final_results[idx * total_data_pts // thread_cnt:(idx + 1) * total_data_pts // thread_cnt] = arr
write_data(final_results)
| if new_X_train is None:
new_X_train = X[idx]
else:
new_X_train = vstack([new_X_train, X[idx]])
new_Y_train.append(Y[idx]) | conditional_block |
binres_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binres
import (
"bytes"
"encoding"
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"math"
"os"
"sort"
"strings"
"testing"
"golang.org/x/mobile/internal/sdkpath"
)
func init() {
skipSynthesize = true
}
func printrecurse(t *testing.T, pl *Pool, el *Element, ws string) {
t.Logf("%s+elem:ns(%v) name(%s)", ws, el.NS, el.Name.Resolve(pl))
for _, attr := range el.attrs {
ns := ""
if attr.NS != math.MaxUint32 {
ns = pl.strings[int(attr.NS)]
nss := strings.Split(ns, "/")
ns = nss[len(nss)-1]
}
val := ""
if attr.RawValue != NoEntry {
val = pl.strings[int(attr.RawValue)]
} else {
switch attr.TypedValue.Type {
case DataIntDec:
val = fmt.Sprintf("%v", attr.TypedValue.Value)
case DataIntBool:
val = fmt.Sprintf("%v", attr.TypedValue.Value == 0xFFFFFFFF)
default:
val = fmt.Sprintf("0x%08X", attr.TypedValue.Value)
}
}
dt := attr.TypedValue.Type
t.Logf("%s|attr:ns(%v) name(%s) val(%s) valtyp(%s)\n", ws, ns, pl.strings[int(attr.Name)], val, dt)
}
t.Logf("\n")
for _, e := range el.Children {
printrecurse(t, pl, e, ws+" ")
}
}
func compareBytes(a, b []byte) error {
if bytes.Equal(a, b) {
return nil
}
buf := new(bytes.Buffer)
x, y := len(a), len(b)
if x != y {
fmt.Fprintf(buf, "byte length does not match, have %v, want %v\n", y, x)
}
if x > y {
x, y = y, x
}
mismatch := false
for i := 0; i < x; i++ {
if mismatch = a[i] != b[i]; mismatch {
fmt.Fprintf(buf, "first byte mismatch at %v\n", i)
break
}
}
if mismatch {
// print out a reasonable amount of data to help identify issues
truncate := x > 3300
if truncate {
x = 3300
}
buf.WriteString(" HAVE WANT\n")
for i := 0; i < x; i += 4 {
he, we := 4, 4
if i+he >= x {
he = x - i
}
if i+we >= y {
we = y - i
}
notequal := ""
if !bytes.Equal(b[i:i+he], a[i:i+we]) {
notequal = "***"
}
fmt.Fprintf(buf, "%3v | % X % X %s\n", i, b[i:i+he], a[i:i+we], notequal)
}
if truncate {
fmt.Fprint(buf, "... output truncated.\n")
}
}
return fmt.Errorf(buf.String())
}
func TestBootstrap(t *testing.T) {
bin, err := ioutil.ReadFile("testdata/bootstrap.bin")
if err != nil {
log.Fatal(err)
}
// unmarshal binary xml and store byte indices of decoded resources.
debugIndices := make(map[encoding.BinaryMarshaler]int)
trackUnmarshal := func(buf []byte) (*XML, error) {
bx := new(XML)
if err := (&bx.chunkHeader).UnmarshalBinary(buf); err != nil {
return nil, err
}
buf = buf[8:]
debugIndex := 8
for len(buf) > 0 |
return bx, nil
}
checkMarshal := func(res encoding.BinaryMarshaler, bsize int) {
b, err := res.MarshalBinary()
if err != nil {
t.Error(err)
}
idx := debugIndices[res]
a := bin[idx : idx+bsize]
if !bytes.Equal(a, b) {
x, y := len(a), len(b)
if x != y {
t.Errorf("%v: %T: byte length does not match, have %v, want %v", idx, res, y, x)
}
if x > y {
x, y = y, x
}
mismatch := false
for i := 0; i < x; i++ {
if mismatch = a[i] != b[i]; mismatch {
t.Errorf("%v: %T: first byte mismatch at %v of %v", idx, res, i, bsize)
break
}
}
if mismatch {
// print out a reasonable amount of data to help identify issues
truncate := x > 1300
if truncate {
x = 1300
}
t.Log(" HAVE WANT")
for i := 0; i < x; i += 4 {
he, we := 4, 4
if i+he >= x {
he = x - i
}
if i+we >= y {
we = y - i
}
t.Logf("%3v | % X % X\n", i, b[i:i+he], a[i:i+we])
}
if truncate {
t.Log("... output truncated.")
}
}
}
}
bxml, err := trackUnmarshal(bin)
if err != nil {
t.Fatal(err)
}
for i, x := range bxml.Pool.strings {
t.Logf("Pool(%v): %q\n", i, x)
}
for _, e := range bxml.Children {
printrecurse(t, bxml.Pool, e, "")
}
checkMarshal(&bxml.chunkHeader, int(bxml.headerByteSize))
checkMarshal(bxml.Pool, bxml.Pool.size())
checkMarshal(bxml.Map, bxml.Map.size())
checkMarshal(bxml.Namespace, bxml.Namespace.size())
for el := range bxml.iterElements() {
checkMarshal(el, el.size())
checkMarshal(el.end, el.end.size())
}
checkMarshal(bxml.Namespace.end, bxml.Namespace.end.size())
checkMarshal(bxml, bxml.size())
}
func TestEncode(t *testing.T) {
f, err := os.Open("testdata/bootstrap.xml")
if err != nil {
t.Fatal(err)
}
bx, err := UnmarshalXML(f, false)
if err != nil {
t.Fatal(err)
}
bin, err := ioutil.ReadFile("testdata/bootstrap.bin")
if err != nil {
log.Fatal(err)
}
bxml := new(XML)
if err := bxml.UnmarshalBinary(bin); err != nil {
t.Fatal(err)
}
if err := compareStrings(t, bxml.Pool.strings, bx.Pool.strings); err != nil {
t.Error(err)
}
if err := compareUint32s(t, rtou(bxml.Map.rs), rtou(bx.Map.rs)); err != nil {
t.Error(err)
}
if err := compareNamespaces(bx.Namespace, bxml.Namespace); err != nil {
t.Error(err)
}
if err := compareElements(bx, bxml); err != nil {
t.Error(err)
}
// Current output byte-for-byte of pkg binres is close, but not exact, to output of aapt.
// The current exceptions to this are as follows:
// * sort order of certain attributes
// * typed value of minSdkVersion
// The below check will produce an error, listing differences in the byte output of each.
// have, err := bx.MarshalBinary()
// if err != nil {
// t.Fatal(err)
// }
// if err := compareBytes(bin, have); err != nil {
// t.Fatal(err)
// }
}
func TestRawValueByName(t *testing.T) {
f, err := os.Open("testdata/bootstrap.xml")
if err != nil {
t.Fatal(err)
}
bx, err := UnmarshalXML(f, false)
if err != nil {
t.Fatal(err)
}
pkgname, err := bx.RawValueByName("manifest", xml.Name{Local: "package"})
if want := "com.zentus.balloon"; err != nil || pkgname != want {
t.Fatalf("have (%q, %v), want (%q, nil)", pkgname, err, want)
}
}
type byAttrName []*Attribute
func (a byAttrName) Len() int { return len(a) }
func (a byAttrName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byAttrName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func compareElements(have, want *XML) error {
h, w := have.iterElements(), want.iterElements()
buf := new(bytes.Buffer)
for {
a, b := <-h, <-w
if a == nil || b == nil {
break
}
if a.Name.Resolve(have.Pool) == "uses-sdk" {
a = <-h // discard uses-sdk token from tests since it's synthesized internally
}
if a.NS != b.NS ||
a.Name != b.Name {
return fmt.Errorf("elements don't match, have %+v, want %+v", a, b)
}
if a.end.NS != b.end.NS ||
a.end.Name != b.end.Name {
return fmt.Errorf("element ends don't match, have %+v, want %+v", a.end, b.end)
}
if len(a.attrs) != len(b.attrs) {
return fmt.Errorf("element attribute lengths don't match, have %v, want %v", len(a.attrs), len(b.attrs))
}
// discards order of aapt and binres as some sorting details of apt have eluded this package but do not
// affect final output from functioning correctly
sort.Sort(byAttrName(a.attrs))
sort.Sort(byAttrName(b.attrs))
for i, attr := range a.attrs {
bttr := b.attrs[i]
if attr.NS != bttr.NS ||
attr.Name != bttr.Name ||
attr.RawValue != bttr.RawValue ||
attr.TypedValue.Type != bttr.TypedValue.Type ||
attr.TypedValue.Value != bttr.TypedValue.Value {
// single exception to check for minSdkVersion which has peculiar output from aapt
// but following same format of all other like-types appears to work correctly.
// BUG(dskinner) this check is brittle as it will skip over any attribute in
// bootstrap.xml that has value == MinSDK.
if attr.TypedValue.Value == MinSDK {
continue
}
fmt.Fprintf(buf, "attrs don't match\nhave: %+v\nwant: %+v\n", attr, bttr)
}
}
if buf.Len() > 0 {
buf.WriteString("-------------\n")
}
}
if buf.Len() > 0 {
return fmt.Errorf(buf.String())
}
return nil
}
func compareNamespaces(have, want *Namespace) error {
if have == nil || want == nil ||
have.LineNumber != want.LineNumber ||
have.Comment != want.Comment ||
have.prefix != want.prefix ||
have.uri != want.uri {
return fmt.Errorf("namespaces don't match, have %+v, want %+v", have, want)
}
if have.end != nil || want.end != nil {
return compareNamespaces(have.end, want.end)
}
return nil
}
func rtou(a []TableRef) (b []uint32) {
for _, x := range a {
b = append(b, uint32(x))
}
return
}
func compareUint32s(t *testing.T, a, b []uint32) error {
var err error
if len(a) != len(b) {
err = fmt.Errorf("lengths do not match")
}
n := len(a)
if n < len(b) {
n = len(b)
}
var buf bytes.Buffer
buf.WriteString("a.Map.rs b.Map.rs\n")
for i := 0; i < n; i++ {
var c, d string
if i < len(a) {
c = fmt.Sprintf("%0#8x ", a[i])
} else {
c = "__________ "
}
if i < len(b) {
d = fmt.Sprintf("%0#8x ", b[i])
} else {
d = "__________ "
}
if err == nil && c != d {
err = fmt.Errorf("has missing/incorrect values")
}
buf.WriteString(c + " " + d + "\n")
}
if err != nil {
err = fmt.Errorf("%s\n%s", err, buf.String())
}
return err
}
func compareStrings(t *testing.T, a, b []string) error {
var err error
if len(a) != len(b) {
err = fmt.Errorf("lengths do not match")
}
buf := new(bytes.Buffer)
for i, x := range a {
v := "__"
for j, y := range b {
if x == y {
v = fmt.Sprintf("%2v", j)
break
}
}
if err == nil && v == "__" {
if !strings.HasPrefix(x, "4.1.") {
// as of the time of this writing, the current version of build tools being targeted
// reports 4.1.2-1425332.
//
// TODO this check has the potential to hide real errors but can be fixed once more
// of the xml document is unmarshalled and XML can be queried to assure this is related
// to platformBuildVersionName.
err = fmt.Errorf("has missing/incorrect values")
}
}
fmt.Fprintf(buf, "Pool(%2v, %s) %q\n", i, v, x)
}
contains := func(xs []string, a string) bool {
for _, x := range xs {
if x == a {
return true
}
}
return false
}
if err != nil {
buf.WriteString("\n## only in var a\n")
for i, x := range a {
if !contains(b, x) {
fmt.Fprintf(buf, "Pool(%2v) %q\n", i, x)
}
}
buf.WriteString("\n## only in var b\n")
for i, x := range b {
if !contains(a, x) {
fmt.Fprintf(buf, "Pool(%2v) %q\n", i, x)
}
}
}
if err != nil {
err = fmt.Errorf("%s\n%s", err, buf.String())
}
return err
}
func TestOpenTable(t *testing.T) {
if _, err := sdkpath.AndroidHome(); err != nil {
t.Skipf("Could not locate Android SDK: %v", err)
}
tbl, err := OpenTable()
if err != nil {
t.Fatal(err)
}
if len(tbl.pkgs) == 0 {
t.Fatal("failed to decode any resource packages")
}
pkg := tbl.pkgs[0]
t.Log("package name:", pkg.name)
for i, x := range pkg.typePool.strings {
t.Logf("typePool[i=%v]: %s\n", i, x)
}
for i, spec := range pkg.specs {
t.Logf("spec[i=%v]: %v %q\n", i, spec.id, pkg.typePool.strings[spec.id-1])
for j, typ := range spec.types {
t.Logf("\ttype[i=%v]: %v\n", j, typ.id)
for k, nt := range typ.entries {
if nt == nil { // NoEntry
continue
}
t.Logf("\t\tentry[i=%v]: %v %q\n", k, nt.key, pkg.keyPool.strings[nt.key])
if k > 5 {
t.Logf("\t\t... truncating output")
break
}
}
}
}
}
func TestTableRefByName(t *testing.T) {
checkResources(t)
tbl, err := OpenSDKTable()
if err != nil {
t.Fatal(err)
}
if len(tbl.pkgs) == 0 {
t.Fatal("failed to decode any resource packages")
}
ref, err := tbl.RefByName("@android:style/Theme.NoTitleBar.Fullscreen")
if err != nil {
t.Fatal(err)
}
if want := uint32(0x01030007); uint32(ref) != want {
t.Fatalf("RefByName does not match expected result, have %0#8x, want %0#8x", ref, want)
}
}
func TestTableMarshal(t *testing.T) {
checkResources(t)
tbl, err := OpenSDKTable()
if err != nil {
t.Fatal(err)
}
bin, err := tbl.MarshalBinary()
if err != nil {
t.Fatal(err)
}
xtbl := new(Table)
if err := xtbl.UnmarshalBinary(bin); err != nil {
t.Fatal(err)
}
if len(tbl.pool.strings) != len(xtbl.pool.strings) {
t.Fatal("tbl.pool lengths don't match")
}
if len(tbl.pkgs) != len(xtbl.pkgs) {
t.Fatal("tbl.pkgs lengths don't match")
}
pkg, xpkg := tbl.pkgs[0], xtbl.pkgs[0]
if err := compareStrings(t, pkg.typePool.strings, xpkg.typePool.strings); err != nil {
t.Fatal(err)
}
if err := compareStrings(t, pkg.keyPool.strings, xpkg.keyPool.strings); err != nil {
t.Fatal(err)
}
if len(pkg.specs) != len(xpkg.specs) {
t.Fatal("pkg.specs lengths don't match")
}
for i, spec := range pkg.specs {
xspec := xpkg.specs[i]
if spec.id != xspec.id {
t.Fatal("spec.id doesn't match")
}
if spec.entryCount != xspec.entryCount {
t.Fatal("spec.entryCount doesn't match")
}
if len(spec.entries) != len(xspec.entries) {
t.Fatal("spec.entries lengths don't match")
}
for j, mask := range spec.entries {
xmask := xspec.entries[j]
if mask != xmask {
t.Fatal("entry mask doesn't match")
}
}
if len(spec.types) != len(xspec.types) {
t.Fatal("spec.types length don't match")
}
for j, typ := range spec.types {
xtyp := xspec.types[j]
if typ.id != xtyp.id {
t.Fatal("typ.id doesn't match")
}
if typ.entryCount != xtyp.entryCount {
t.Fatal("typ.entryCount doesn't match")
}
// Config size can differ after serialization due to the loss of extended fields
// during reserialization, but the fixed portions of the Type header must not change.
if uint32(typ.headerByteSize)-typ.config.size != uint32(xtyp.headerByteSize)-uint32(xtyp.config.size) {
t.Fatal("fixed size header portions don't match")
}
if len(typ.indices) != len(xtyp.indices) {
t.Fatal("typ.indices length don't match")
}
for k, index := range typ.indices {
xindex := xtyp.indices[k]
if index != xindex {
t.Errorf("type index doesn't match at %v, have %v, want %v", k, xindex, index)
}
}
if len(typ.entries) != len(xtyp.entries) {
t.Fatal("typ.entries lengths don't match")
}
for k, nt := range typ.entries {
xnt := xtyp.entries[k]
if nt == nil {
if xnt != nil {
t.Fatal("nt is nil but xnt is not")
}
continue
}
if nt.size != xnt.size {
t.Fatal("entry.size doesn't match")
}
if nt.flags != xnt.flags {
t.Fatal("entry.flags don't match")
}
if nt.key != xnt.key {
t.Fatal("entry.key doesn't match")
}
if nt.parent != xnt.parent {
t.Fatal("entry.parent doesn't match")
}
if nt.count != xnt.count {
t.Fatal("entry.count doesn't match")
}
for l, val := range nt.values {
xval := xnt.values[l]
if val.name != xval.name {
t.Fatal("value.name doesn't match")
}
}
}
}
}
}
func checkResources(t *testing.T) {
t.Helper()
if _, err := sdkpath.AndroidHome(); err != nil {
t.Skip("Could not locate Android SDK")
}
rscPath, err := apiResourcesPath()
if err != nil {
t.Skipf("failed to find resources: %v", err)
}
if _, err := os.Stat(rscPath); err != nil {
t.Skipf("failed to find resources: %v", err)
}
}
func BenchmarkTableRefByName(b *testing.B) {
if _, err := sdkpath.AndroidHome(); err != nil {
b.Fatal("Could not locate Android SDK")
}
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
tbl, err := OpenTable()
if err != nil {
b.Fatal(err)
}
_, err = tbl.RefByName("@android:style/Theme.NoTitleBar.Fullscreen")
if err != nil {
b.Fatal(err)
}
}
}
| {
k, err := bx.unmarshalBinaryKind(buf)
if err != nil {
return nil, err
}
debugIndices[k.(encoding.BinaryMarshaler)] = debugIndex
debugIndex += k.size()
buf = buf[k.size():]
} | conditional_block |
binres_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binres
import (
"bytes"
"encoding"
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"math"
"os"
"sort"
"strings"
"testing"
"golang.org/x/mobile/internal/sdkpath"
)
func init() {
skipSynthesize = true
}
func printrecurse(t *testing.T, pl *Pool, el *Element, ws string) {
t.Logf("%s+elem:ns(%v) name(%s)", ws, el.NS, el.Name.Resolve(pl))
for _, attr := range el.attrs {
ns := ""
if attr.NS != math.MaxUint32 {
ns = pl.strings[int(attr.NS)]
nss := strings.Split(ns, "/")
ns = nss[len(nss)-1]
}
val := ""
if attr.RawValue != NoEntry {
val = pl.strings[int(attr.RawValue)]
} else {
switch attr.TypedValue.Type {
case DataIntDec:
val = fmt.Sprintf("%v", attr.TypedValue.Value)
case DataIntBool:
val = fmt.Sprintf("%v", attr.TypedValue.Value == 0xFFFFFFFF)
default:
val = fmt.Sprintf("0x%08X", attr.TypedValue.Value)
}
}
dt := attr.TypedValue.Type
t.Logf("%s|attr:ns(%v) name(%s) val(%s) valtyp(%s)\n", ws, ns, pl.strings[int(attr.Name)], val, dt)
}
t.Logf("\n")
for _, e := range el.Children {
printrecurse(t, pl, e, ws+" ")
}
}
func compareBytes(a, b []byte) error {
if bytes.Equal(a, b) {
return nil
}
buf := new(bytes.Buffer)
x, y := len(a), len(b)
if x != y {
fmt.Fprintf(buf, "byte length does not match, have %v, want %v\n", y, x)
}
if x > y {
x, y = y, x
}
mismatch := false
for i := 0; i < x; i++ {
if mismatch = a[i] != b[i]; mismatch {
fmt.Fprintf(buf, "first byte mismatch at %v\n", i)
break
}
}
if mismatch {
// print out a reasonable amount of data to help identify issues
truncate := x > 3300
if truncate {
x = 3300
}
buf.WriteString(" HAVE WANT\n")
for i := 0; i < x; i += 4 {
he, we := 4, 4
if i+he >= x {
he = x - i
}
if i+we >= y {
we = y - i
}
notequal := ""
if !bytes.Equal(b[i:i+he], a[i:i+we]) {
notequal = "***"
}
fmt.Fprintf(buf, "%3v | % X % X %s\n", i, b[i:i+he], a[i:i+we], notequal)
}
if truncate {
fmt.Fprint(buf, "... output truncated.\n")
}
}
return fmt.Errorf(buf.String())
}
func TestBootstrap(t *testing.T) {
bin, err := ioutil.ReadFile("testdata/bootstrap.bin")
if err != nil {
log.Fatal(err)
}
// unmarshal binary xml and store byte indices of decoded resources.
debugIndices := make(map[encoding.BinaryMarshaler]int)
trackUnmarshal := func(buf []byte) (*XML, error) {
bx := new(XML)
if err := (&bx.chunkHeader).UnmarshalBinary(buf); err != nil {
return nil, err
}
buf = buf[8:]
debugIndex := 8
for len(buf) > 0 {
k, err := bx.unmarshalBinaryKind(buf)
if err != nil {
return nil, err
}
debugIndices[k.(encoding.BinaryMarshaler)] = debugIndex
debugIndex += k.size()
buf = buf[k.size():]
}
return bx, nil
}
checkMarshal := func(res encoding.BinaryMarshaler, bsize int) {
b, err := res.MarshalBinary()
if err != nil {
t.Error(err)
}
idx := debugIndices[res]
a := bin[idx : idx+bsize]
if !bytes.Equal(a, b) {
x, y := len(a), len(b)
if x != y {
t.Errorf("%v: %T: byte length does not match, have %v, want %v", idx, res, y, x)
}
if x > y {
x, y = y, x
}
mismatch := false
for i := 0; i < x; i++ {
if mismatch = a[i] != b[i]; mismatch {
t.Errorf("%v: %T: first byte mismatch at %v of %v", idx, res, i, bsize)
break
}
}
if mismatch {
// print out a reasonable amount of data to help identify issues
truncate := x > 1300
if truncate {
x = 1300
}
t.Log(" HAVE WANT")
for i := 0; i < x; i += 4 {
he, we := 4, 4
if i+he >= x {
he = x - i
}
if i+we >= y {
we = y - i
}
t.Logf("%3v | % X % X\n", i, b[i:i+he], a[i:i+we])
}
if truncate {
t.Log("... output truncated.")
}
}
}
}
bxml, err := trackUnmarshal(bin)
if err != nil {
t.Fatal(err)
}
for i, x := range bxml.Pool.strings {
t.Logf("Pool(%v): %q\n", i, x)
}
for _, e := range bxml.Children {
printrecurse(t, bxml.Pool, e, "")
}
checkMarshal(&bxml.chunkHeader, int(bxml.headerByteSize))
checkMarshal(bxml.Pool, bxml.Pool.size())
checkMarshal(bxml.Map, bxml.Map.size())
checkMarshal(bxml.Namespace, bxml.Namespace.size())
for el := range bxml.iterElements() {
checkMarshal(el, el.size())
checkMarshal(el.end, el.end.size())
}
checkMarshal(bxml.Namespace.end, bxml.Namespace.end.size())
checkMarshal(bxml, bxml.size())
}
func TestEncode(t *testing.T) {
f, err := os.Open("testdata/bootstrap.xml")
if err != nil {
t.Fatal(err)
}
bx, err := UnmarshalXML(f, false)
if err != nil {
t.Fatal(err)
}
bin, err := ioutil.ReadFile("testdata/bootstrap.bin")
if err != nil {
log.Fatal(err)
}
bxml := new(XML)
if err := bxml.UnmarshalBinary(bin); err != nil {
t.Fatal(err)
}
if err := compareStrings(t, bxml.Pool.strings, bx.Pool.strings); err != nil {
t.Error(err)
}
if err := compareUint32s(t, rtou(bxml.Map.rs), rtou(bx.Map.rs)); err != nil {
t.Error(err)
}
if err := compareNamespaces(bx.Namespace, bxml.Namespace); err != nil {
t.Error(err)
}
if err := compareElements(bx, bxml); err != nil {
t.Error(err)
}
// Current output byte-for-byte of pkg binres is close, but not exact, to output of aapt.
// The current exceptions to this are as follows:
// * sort order of certain attributes
// * typed value of minSdkVersion
// The below check will produce an error, listing differences in the byte output of each.
// have, err := bx.MarshalBinary()
// if err != nil {
// t.Fatal(err)
// }
// if err := compareBytes(bin, have); err != nil {
// t.Fatal(err)
// }
}
func TestRawValueByName(t *testing.T) {
f, err := os.Open("testdata/bootstrap.xml")
if err != nil {
t.Fatal(err)
}
bx, err := UnmarshalXML(f, false)
if err != nil {
t.Fatal(err)
}
pkgname, err := bx.RawValueByName("manifest", xml.Name{Local: "package"})
if want := "com.zentus.balloon"; err != nil || pkgname != want {
t.Fatalf("have (%q, %v), want (%q, nil)", pkgname, err, want)
}
}
type byAttrName []*Attribute
func (a byAttrName) Len() int { return len(a) }
func (a byAttrName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byAttrName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func compareElements(have, want *XML) error {
h, w := have.iterElements(), want.iterElements()
buf := new(bytes.Buffer)
for {
a, b := <-h, <-w
if a == nil || b == nil {
break
}
if a.Name.Resolve(have.Pool) == "uses-sdk" {
a = <-h // discard uses-sdk token from tests since it's synthesized internally
}
if a.NS != b.NS ||
a.Name != b.Name {
return fmt.Errorf("elements don't match, have %+v, want %+v", a, b)
}
if a.end.NS != b.end.NS ||
a.end.Name != b.end.Name {
return fmt.Errorf("element ends don't match, have %+v, want %+v", a.end, b.end)
}
if len(a.attrs) != len(b.attrs) {
return fmt.Errorf("element attribute lengths don't match, have %v, want %v", len(a.attrs), len(b.attrs))
}
// discards order of aapt and binres as some sorting details of apt have eluded this package but do not
// affect final output from functioning correctly
sort.Sort(byAttrName(a.attrs))
sort.Sort(byAttrName(b.attrs))
for i, attr := range a.attrs {
bttr := b.attrs[i]
if attr.NS != bttr.NS ||
attr.Name != bttr.Name ||
attr.RawValue != bttr.RawValue ||
attr.TypedValue.Type != bttr.TypedValue.Type ||
attr.TypedValue.Value != bttr.TypedValue.Value {
// single exception to check for minSdkVersion which has peculiar output from aapt
// but following same format of all other like-types appears to work correctly.
// BUG(dskinner) this check is brittle as it will skip over any attribute in
// bootstrap.xml that has value == MinSDK.
if attr.TypedValue.Value == MinSDK {
continue
}
fmt.Fprintf(buf, "attrs don't match\nhave: %+v\nwant: %+v\n", attr, bttr)
}
}
if buf.Len() > 0 {
buf.WriteString("-------------\n")
}
}
if buf.Len() > 0 {
return fmt.Errorf(buf.String())
}
return nil
}
func compareNamespaces(have, want *Namespace) error {
if have == nil || want == nil ||
have.LineNumber != want.LineNumber ||
have.Comment != want.Comment ||
have.prefix != want.prefix ||
have.uri != want.uri {
return fmt.Errorf("namespaces don't match, have %+v, want %+v", have, want)
}
if have.end != nil || want.end != nil {
return compareNamespaces(have.end, want.end)
}
return nil
}
func | (a []TableRef) (b []uint32) {
for _, x := range a {
b = append(b, uint32(x))
}
return
}
func compareUint32s(t *testing.T, a, b []uint32) error {
var err error
if len(a) != len(b) {
err = fmt.Errorf("lengths do not match")
}
n := len(a)
if n < len(b) {
n = len(b)
}
var buf bytes.Buffer
buf.WriteString("a.Map.rs b.Map.rs\n")
for i := 0; i < n; i++ {
var c, d string
if i < len(a) {
c = fmt.Sprintf("%0#8x ", a[i])
} else {
c = "__________ "
}
if i < len(b) {
d = fmt.Sprintf("%0#8x ", b[i])
} else {
d = "__________ "
}
if err == nil && c != d {
err = fmt.Errorf("has missing/incorrect values")
}
buf.WriteString(c + " " + d + "\n")
}
if err != nil {
err = fmt.Errorf("%s\n%s", err, buf.String())
}
return err
}
func compareStrings(t *testing.T, a, b []string) error {
var err error
if len(a) != len(b) {
err = fmt.Errorf("lengths do not match")
}
buf := new(bytes.Buffer)
for i, x := range a {
v := "__"
for j, y := range b {
if x == y {
v = fmt.Sprintf("%2v", j)
break
}
}
if err == nil && v == "__" {
if !strings.HasPrefix(x, "4.1.") {
// as of the time of this writing, the current version of build tools being targeted
// reports 4.1.2-1425332.
//
// TODO this check has the potential to hide real errors but can be fixed once more
// of the xml document is unmarshalled and XML can be queried to assure this is related
// to platformBuildVersionName.
err = fmt.Errorf("has missing/incorrect values")
}
}
fmt.Fprintf(buf, "Pool(%2v, %s) %q\n", i, v, x)
}
contains := func(xs []string, a string) bool {
for _, x := range xs {
if x == a {
return true
}
}
return false
}
if err != nil {
buf.WriteString("\n## only in var a\n")
for i, x := range a {
if !contains(b, x) {
fmt.Fprintf(buf, "Pool(%2v) %q\n", i, x)
}
}
buf.WriteString("\n## only in var b\n")
for i, x := range b {
if !contains(a, x) {
fmt.Fprintf(buf, "Pool(%2v) %q\n", i, x)
}
}
}
if err != nil {
err = fmt.Errorf("%s\n%s", err, buf.String())
}
return err
}
func TestOpenTable(t *testing.T) {
if _, err := sdkpath.AndroidHome(); err != nil {
t.Skipf("Could not locate Android SDK: %v", err)
}
tbl, err := OpenTable()
if err != nil {
t.Fatal(err)
}
if len(tbl.pkgs) == 0 {
t.Fatal("failed to decode any resource packages")
}
pkg := tbl.pkgs[0]
t.Log("package name:", pkg.name)
for i, x := range pkg.typePool.strings {
t.Logf("typePool[i=%v]: %s\n", i, x)
}
for i, spec := range pkg.specs {
t.Logf("spec[i=%v]: %v %q\n", i, spec.id, pkg.typePool.strings[spec.id-1])
for j, typ := range spec.types {
t.Logf("\ttype[i=%v]: %v\n", j, typ.id)
for k, nt := range typ.entries {
if nt == nil { // NoEntry
continue
}
t.Logf("\t\tentry[i=%v]: %v %q\n", k, nt.key, pkg.keyPool.strings[nt.key])
if k > 5 {
t.Logf("\t\t... truncating output")
break
}
}
}
}
}
func TestTableRefByName(t *testing.T) {
checkResources(t)
tbl, err := OpenSDKTable()
if err != nil {
t.Fatal(err)
}
if len(tbl.pkgs) == 0 {
t.Fatal("failed to decode any resource packages")
}
ref, err := tbl.RefByName("@android:style/Theme.NoTitleBar.Fullscreen")
if err != nil {
t.Fatal(err)
}
if want := uint32(0x01030007); uint32(ref) != want {
t.Fatalf("RefByName does not match expected result, have %0#8x, want %0#8x", ref, want)
}
}
func TestTableMarshal(t *testing.T) {
checkResources(t)
tbl, err := OpenSDKTable()
if err != nil {
t.Fatal(err)
}
bin, err := tbl.MarshalBinary()
if err != nil {
t.Fatal(err)
}
xtbl := new(Table)
if err := xtbl.UnmarshalBinary(bin); err != nil {
t.Fatal(err)
}
if len(tbl.pool.strings) != len(xtbl.pool.strings) {
t.Fatal("tbl.pool lengths don't match")
}
if len(tbl.pkgs) != len(xtbl.pkgs) {
t.Fatal("tbl.pkgs lengths don't match")
}
pkg, xpkg := tbl.pkgs[0], xtbl.pkgs[0]
if err := compareStrings(t, pkg.typePool.strings, xpkg.typePool.strings); err != nil {
t.Fatal(err)
}
if err := compareStrings(t, pkg.keyPool.strings, xpkg.keyPool.strings); err != nil {
t.Fatal(err)
}
if len(pkg.specs) != len(xpkg.specs) {
t.Fatal("pkg.specs lengths don't match")
}
for i, spec := range pkg.specs {
xspec := xpkg.specs[i]
if spec.id != xspec.id {
t.Fatal("spec.id doesn't match")
}
if spec.entryCount != xspec.entryCount {
t.Fatal("spec.entryCount doesn't match")
}
if len(spec.entries) != len(xspec.entries) {
t.Fatal("spec.entries lengths don't match")
}
for j, mask := range spec.entries {
xmask := xspec.entries[j]
if mask != xmask {
t.Fatal("entry mask doesn't match")
}
}
if len(spec.types) != len(xspec.types) {
t.Fatal("spec.types length don't match")
}
for j, typ := range spec.types {
xtyp := xspec.types[j]
if typ.id != xtyp.id {
t.Fatal("typ.id doesn't match")
}
if typ.entryCount != xtyp.entryCount {
t.Fatal("typ.entryCount doesn't match")
}
// Config size can differ after serialization due to the loss of extended fields
// during reserialization, but the fixed portions of the Type header must not change.
if uint32(typ.headerByteSize)-typ.config.size != uint32(xtyp.headerByteSize)-uint32(xtyp.config.size) {
t.Fatal("fixed size header portions don't match")
}
if len(typ.indices) != len(xtyp.indices) {
t.Fatal("typ.indices length don't match")
}
for k, index := range typ.indices {
xindex := xtyp.indices[k]
if index != xindex {
t.Errorf("type index doesn't match at %v, have %v, want %v", k, xindex, index)
}
}
if len(typ.entries) != len(xtyp.entries) {
t.Fatal("typ.entries lengths don't match")
}
for k, nt := range typ.entries {
xnt := xtyp.entries[k]
if nt == nil {
if xnt != nil {
t.Fatal("nt is nil but xnt is not")
}
continue
}
if nt.size != xnt.size {
t.Fatal("entry.size doesn't match")
}
if nt.flags != xnt.flags {
t.Fatal("entry.flags don't match")
}
if nt.key != xnt.key {
t.Fatal("entry.key doesn't match")
}
if nt.parent != xnt.parent {
t.Fatal("entry.parent doesn't match")
}
if nt.count != xnt.count {
t.Fatal("entry.count doesn't match")
}
for l, val := range nt.values {
xval := xnt.values[l]
if val.name != xval.name {
t.Fatal("value.name doesn't match")
}
}
}
}
}
}
func checkResources(t *testing.T) {
t.Helper()
if _, err := sdkpath.AndroidHome(); err != nil {
t.Skip("Could not locate Android SDK")
}
rscPath, err := apiResourcesPath()
if err != nil {
t.Skipf("failed to find resources: %v", err)
}
if _, err := os.Stat(rscPath); err != nil {
t.Skipf("failed to find resources: %v", err)
}
}
func BenchmarkTableRefByName(b *testing.B) {
if _, err := sdkpath.AndroidHome(); err != nil {
b.Fatal("Could not locate Android SDK")
}
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
tbl, err := OpenTable()
if err != nil {
b.Fatal(err)
}
_, err = tbl.RefByName("@android:style/Theme.NoTitleBar.Fullscreen")
if err != nil {
b.Fatal(err)
}
}
}
| rtou | identifier_name |
binres_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binres
import (
"bytes"
"encoding"
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"math"
"os"
"sort"
"strings"
"testing"
"golang.org/x/mobile/internal/sdkpath"
)
func init() {
skipSynthesize = true
}
func printrecurse(t *testing.T, pl *Pool, el *Element, ws string) {
t.Logf("%s+elem:ns(%v) name(%s)", ws, el.NS, el.Name.Resolve(pl))
for _, attr := range el.attrs {
ns := ""
if attr.NS != math.MaxUint32 {
ns = pl.strings[int(attr.NS)]
nss := strings.Split(ns, "/")
ns = nss[len(nss)-1]
}
val := ""
if attr.RawValue != NoEntry {
val = pl.strings[int(attr.RawValue)]
} else {
switch attr.TypedValue.Type {
case DataIntDec:
val = fmt.Sprintf("%v", attr.TypedValue.Value)
case DataIntBool:
val = fmt.Sprintf("%v", attr.TypedValue.Value == 0xFFFFFFFF)
default:
val = fmt.Sprintf("0x%08X", attr.TypedValue.Value)
}
}
dt := attr.TypedValue.Type
t.Logf("%s|attr:ns(%v) name(%s) val(%s) valtyp(%s)\n", ws, ns, pl.strings[int(attr.Name)], val, dt)
}
t.Logf("\n")
for _, e := range el.Children {
printrecurse(t, pl, e, ws+" ")
}
}
func compareBytes(a, b []byte) error {
if bytes.Equal(a, b) {
return nil
}
buf := new(bytes.Buffer)
x, y := len(a), len(b)
if x != y {
fmt.Fprintf(buf, "byte length does not match, have %v, want %v\n", y, x)
}
if x > y {
x, y = y, x
}
mismatch := false
for i := 0; i < x; i++ {
if mismatch = a[i] != b[i]; mismatch {
fmt.Fprintf(buf, "first byte mismatch at %v\n", i)
break
}
}
if mismatch {
// print out a reasonable amount of data to help identify issues
truncate := x > 3300
if truncate {
x = 3300
}
buf.WriteString(" HAVE WANT\n")
for i := 0; i < x; i += 4 {
he, we := 4, 4
if i+he >= x {
he = x - i
}
if i+we >= y {
we = y - i
}
notequal := ""
if !bytes.Equal(b[i:i+he], a[i:i+we]) {
notequal = "***"
}
fmt.Fprintf(buf, "%3v | % X % X %s\n", i, b[i:i+he], a[i:i+we], notequal)
}
if truncate {
fmt.Fprint(buf, "... output truncated.\n")
}
}
return fmt.Errorf(buf.String())
}
func TestBootstrap(t *testing.T) {
bin, err := ioutil.ReadFile("testdata/bootstrap.bin")
if err != nil {
log.Fatal(err)
}
// unmarshal binary xml and store byte indices of decoded resources.
debugIndices := make(map[encoding.BinaryMarshaler]int)
trackUnmarshal := func(buf []byte) (*XML, error) {
bx := new(XML)
if err := (&bx.chunkHeader).UnmarshalBinary(buf); err != nil {
return nil, err
}
buf = buf[8:]
debugIndex := 8
for len(buf) > 0 {
k, err := bx.unmarshalBinaryKind(buf)
if err != nil {
return nil, err
}
debugIndices[k.(encoding.BinaryMarshaler)] = debugIndex
debugIndex += k.size()
buf = buf[k.size():]
}
return bx, nil
}
checkMarshal := func(res encoding.BinaryMarshaler, bsize int) {
b, err := res.MarshalBinary()
if err != nil {
t.Error(err)
}
idx := debugIndices[res]
a := bin[idx : idx+bsize]
if !bytes.Equal(a, b) {
x, y := len(a), len(b)
if x != y {
t.Errorf("%v: %T: byte length does not match, have %v, want %v", idx, res, y, x)
}
if x > y {
x, y = y, x
}
mismatch := false
for i := 0; i < x; i++ {
if mismatch = a[i] != b[i]; mismatch {
t.Errorf("%v: %T: first byte mismatch at %v of %v", idx, res, i, bsize)
break
}
}
if mismatch {
// print out a reasonable amount of data to help identify issues
truncate := x > 1300
if truncate {
x = 1300
}
t.Log(" HAVE WANT")
for i := 0; i < x; i += 4 {
he, we := 4, 4
if i+he >= x {
he = x - i
}
if i+we >= y {
we = y - i
}
t.Logf("%3v | % X % X\n", i, b[i:i+he], a[i:i+we])
}
if truncate {
t.Log("... output truncated.")
}
}
}
}
bxml, err := trackUnmarshal(bin)
if err != nil {
t.Fatal(err)
}
for i, x := range bxml.Pool.strings {
t.Logf("Pool(%v): %q\n", i, x)
}
for _, e := range bxml.Children {
printrecurse(t, bxml.Pool, e, "")
}
checkMarshal(&bxml.chunkHeader, int(bxml.headerByteSize))
checkMarshal(bxml.Pool, bxml.Pool.size())
checkMarshal(bxml.Map, bxml.Map.size())
checkMarshal(bxml.Namespace, bxml.Namespace.size())
for el := range bxml.iterElements() {
checkMarshal(el, el.size())
checkMarshal(el.end, el.end.size())
}
checkMarshal(bxml.Namespace.end, bxml.Namespace.end.size())
checkMarshal(bxml, bxml.size())
}
func TestEncode(t *testing.T) {
f, err := os.Open("testdata/bootstrap.xml")
if err != nil {
t.Fatal(err)
}
bx, err := UnmarshalXML(f, false)
if err != nil {
t.Fatal(err)
}
bin, err := ioutil.ReadFile("testdata/bootstrap.bin")
if err != nil {
log.Fatal(err)
}
bxml := new(XML)
if err := bxml.UnmarshalBinary(bin); err != nil {
t.Fatal(err)
}
if err := compareStrings(t, bxml.Pool.strings, bx.Pool.strings); err != nil {
t.Error(err)
}
if err := compareUint32s(t, rtou(bxml.Map.rs), rtou(bx.Map.rs)); err != nil {
t.Error(err)
}
if err := compareNamespaces(bx.Namespace, bxml.Namespace); err != nil {
t.Error(err)
}
if err := compareElements(bx, bxml); err != nil {
t.Error(err)
}
// Current output byte-for-byte of pkg binres is close, but not exact, to output of aapt.
// The current exceptions to this are as follows:
// * sort order of certain attributes
// * typed value of minSdkVersion
// The below check will produce an error, listing differences in the byte output of each.
// have, err := bx.MarshalBinary()
// if err != nil {
// t.Fatal(err)
// }
// if err := compareBytes(bin, have); err != nil {
// t.Fatal(err)
// }
}
func TestRawValueByName(t *testing.T) {
f, err := os.Open("testdata/bootstrap.xml")
if err != nil {
t.Fatal(err)
}
bx, err := UnmarshalXML(f, false)
if err != nil {
t.Fatal(err)
}
pkgname, err := bx.RawValueByName("manifest", xml.Name{Local: "package"})
if want := "com.zentus.balloon"; err != nil || pkgname != want {
t.Fatalf("have (%q, %v), want (%q, nil)", pkgname, err, want)
}
}
type byAttrName []*Attribute
func (a byAttrName) Len() int |
func (a byAttrName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byAttrName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func compareElements(have, want *XML) error {
h, w := have.iterElements(), want.iterElements()
buf := new(bytes.Buffer)
for {
a, b := <-h, <-w
if a == nil || b == nil {
break
}
if a.Name.Resolve(have.Pool) == "uses-sdk" {
a = <-h // discard uses-sdk token from tests since it's synthesized internally
}
if a.NS != b.NS ||
a.Name != b.Name {
return fmt.Errorf("elements don't match, have %+v, want %+v", a, b)
}
if a.end.NS != b.end.NS ||
a.end.Name != b.end.Name {
return fmt.Errorf("element ends don't match, have %+v, want %+v", a.end, b.end)
}
if len(a.attrs) != len(b.attrs) {
return fmt.Errorf("element attribute lengths don't match, have %v, want %v", len(a.attrs), len(b.attrs))
}
// discards order of aapt and binres as some sorting details of apt have eluded this package but do not
// affect final output from functioning correctly
sort.Sort(byAttrName(a.attrs))
sort.Sort(byAttrName(b.attrs))
for i, attr := range a.attrs {
bttr := b.attrs[i]
if attr.NS != bttr.NS ||
attr.Name != bttr.Name ||
attr.RawValue != bttr.RawValue ||
attr.TypedValue.Type != bttr.TypedValue.Type ||
attr.TypedValue.Value != bttr.TypedValue.Value {
// single exception to check for minSdkVersion which has peculiar output from aapt
// but following same format of all other like-types appears to work correctly.
// BUG(dskinner) this check is brittle as it will skip over any attribute in
// bootstrap.xml that has value == MinSDK.
if attr.TypedValue.Value == MinSDK {
continue
}
fmt.Fprintf(buf, "attrs don't match\nhave: %+v\nwant: %+v\n", attr, bttr)
}
}
if buf.Len() > 0 {
buf.WriteString("-------------\n")
}
}
if buf.Len() > 0 {
return fmt.Errorf(buf.String())
}
return nil
}
func compareNamespaces(have, want *Namespace) error {
if have == nil || want == nil ||
have.LineNumber != want.LineNumber ||
have.Comment != want.Comment ||
have.prefix != want.prefix ||
have.uri != want.uri {
return fmt.Errorf("namespaces don't match, have %+v, want %+v", have, want)
}
if have.end != nil || want.end != nil {
return compareNamespaces(have.end, want.end)
}
return nil
}
func rtou(a []TableRef) (b []uint32) {
for _, x := range a {
b = append(b, uint32(x))
}
return
}
func compareUint32s(t *testing.T, a, b []uint32) error {
var err error
if len(a) != len(b) {
err = fmt.Errorf("lengths do not match")
}
n := len(a)
if n < len(b) {
n = len(b)
}
var buf bytes.Buffer
buf.WriteString("a.Map.rs b.Map.rs\n")
for i := 0; i < n; i++ {
var c, d string
if i < len(a) {
c = fmt.Sprintf("%0#8x ", a[i])
} else {
c = "__________ "
}
if i < len(b) {
d = fmt.Sprintf("%0#8x ", b[i])
} else {
d = "__________ "
}
if err == nil && c != d {
err = fmt.Errorf("has missing/incorrect values")
}
buf.WriteString(c + " " + d + "\n")
}
if err != nil {
err = fmt.Errorf("%s\n%s", err, buf.String())
}
return err
}
func compareStrings(t *testing.T, a, b []string) error {
var err error
if len(a) != len(b) {
err = fmt.Errorf("lengths do not match")
}
buf := new(bytes.Buffer)
for i, x := range a {
v := "__"
for j, y := range b {
if x == y {
v = fmt.Sprintf("%2v", j)
break
}
}
if err == nil && v == "__" {
if !strings.HasPrefix(x, "4.1.") {
// as of the time of this writing, the current version of build tools being targeted
// reports 4.1.2-1425332.
//
// TODO this check has the potential to hide real errors but can be fixed once more
// of the xml document is unmarshalled and XML can be queried to assure this is related
// to platformBuildVersionName.
err = fmt.Errorf("has missing/incorrect values")
}
}
fmt.Fprintf(buf, "Pool(%2v, %s) %q\n", i, v, x)
}
contains := func(xs []string, a string) bool {
for _, x := range xs {
if x == a {
return true
}
}
return false
}
if err != nil {
buf.WriteString("\n## only in var a\n")
for i, x := range a {
if !contains(b, x) {
fmt.Fprintf(buf, "Pool(%2v) %q\n", i, x)
}
}
buf.WriteString("\n## only in var b\n")
for i, x := range b {
if !contains(a, x) {
fmt.Fprintf(buf, "Pool(%2v) %q\n", i, x)
}
}
}
if err != nil {
err = fmt.Errorf("%s\n%s", err, buf.String())
}
return err
}
func TestOpenTable(t *testing.T) {
if _, err := sdkpath.AndroidHome(); err != nil {
t.Skipf("Could not locate Android SDK: %v", err)
}
tbl, err := OpenTable()
if err != nil {
t.Fatal(err)
}
if len(tbl.pkgs) == 0 {
t.Fatal("failed to decode any resource packages")
}
pkg := tbl.pkgs[0]
t.Log("package name:", pkg.name)
for i, x := range pkg.typePool.strings {
t.Logf("typePool[i=%v]: %s\n", i, x)
}
for i, spec := range pkg.specs {
t.Logf("spec[i=%v]: %v %q\n", i, spec.id, pkg.typePool.strings[spec.id-1])
for j, typ := range spec.types {
t.Logf("\ttype[i=%v]: %v\n", j, typ.id)
for k, nt := range typ.entries {
if nt == nil { // NoEntry
continue
}
t.Logf("\t\tentry[i=%v]: %v %q\n", k, nt.key, pkg.keyPool.strings[nt.key])
if k > 5 {
t.Logf("\t\t... truncating output")
break
}
}
}
}
}
func TestTableRefByName(t *testing.T) {
checkResources(t)
tbl, err := OpenSDKTable()
if err != nil {
t.Fatal(err)
}
if len(tbl.pkgs) == 0 {
t.Fatal("failed to decode any resource packages")
}
ref, err := tbl.RefByName("@android:style/Theme.NoTitleBar.Fullscreen")
if err != nil {
t.Fatal(err)
}
if want := uint32(0x01030007); uint32(ref) != want {
t.Fatalf("RefByName does not match expected result, have %0#8x, want %0#8x", ref, want)
}
}
func TestTableMarshal(t *testing.T) {
checkResources(t)
tbl, err := OpenSDKTable()
if err != nil {
t.Fatal(err)
}
bin, err := tbl.MarshalBinary()
if err != nil {
t.Fatal(err)
}
xtbl := new(Table)
if err := xtbl.UnmarshalBinary(bin); err != nil {
t.Fatal(err)
}
if len(tbl.pool.strings) != len(xtbl.pool.strings) {
t.Fatal("tbl.pool lengths don't match")
}
if len(tbl.pkgs) != len(xtbl.pkgs) {
t.Fatal("tbl.pkgs lengths don't match")
}
pkg, xpkg := tbl.pkgs[0], xtbl.pkgs[0]
if err := compareStrings(t, pkg.typePool.strings, xpkg.typePool.strings); err != nil {
t.Fatal(err)
}
if err := compareStrings(t, pkg.keyPool.strings, xpkg.keyPool.strings); err != nil {
t.Fatal(err)
}
if len(pkg.specs) != len(xpkg.specs) {
t.Fatal("pkg.specs lengths don't match")
}
for i, spec := range pkg.specs {
xspec := xpkg.specs[i]
if spec.id != xspec.id {
t.Fatal("spec.id doesn't match")
}
if spec.entryCount != xspec.entryCount {
t.Fatal("spec.entryCount doesn't match")
}
if len(spec.entries) != len(xspec.entries) {
t.Fatal("spec.entries lengths don't match")
}
for j, mask := range spec.entries {
xmask := xspec.entries[j]
if mask != xmask {
t.Fatal("entry mask doesn't match")
}
}
if len(spec.types) != len(xspec.types) {
t.Fatal("spec.types length don't match")
}
for j, typ := range spec.types {
xtyp := xspec.types[j]
if typ.id != xtyp.id {
t.Fatal("typ.id doesn't match")
}
if typ.entryCount != xtyp.entryCount {
t.Fatal("typ.entryCount doesn't match")
}
// Config size can differ after serialization due to the loss of extended fields
// during reserialization, but the fixed portions of the Type header must not change.
if uint32(typ.headerByteSize)-typ.config.size != uint32(xtyp.headerByteSize)-uint32(xtyp.config.size) {
t.Fatal("fixed size header portions don't match")
}
if len(typ.indices) != len(xtyp.indices) {
t.Fatal("typ.indices length don't match")
}
for k, index := range typ.indices {
xindex := xtyp.indices[k]
if index != xindex {
t.Errorf("type index doesn't match at %v, have %v, want %v", k, xindex, index)
}
}
if len(typ.entries) != len(xtyp.entries) {
t.Fatal("typ.entries lengths don't match")
}
for k, nt := range typ.entries {
xnt := xtyp.entries[k]
if nt == nil {
if xnt != nil {
t.Fatal("nt is nil but xnt is not")
}
continue
}
if nt.size != xnt.size {
t.Fatal("entry.size doesn't match")
}
if nt.flags != xnt.flags {
t.Fatal("entry.flags don't match")
}
if nt.key != xnt.key {
t.Fatal("entry.key doesn't match")
}
if nt.parent != xnt.parent {
t.Fatal("entry.parent doesn't match")
}
if nt.count != xnt.count {
t.Fatal("entry.count doesn't match")
}
for l, val := range nt.values {
xval := xnt.values[l]
if val.name != xval.name {
t.Fatal("value.name doesn't match")
}
}
}
}
}
}
func checkResources(t *testing.T) {
t.Helper()
if _, err := sdkpath.AndroidHome(); err != nil {
t.Skip("Could not locate Android SDK")
}
rscPath, err := apiResourcesPath()
if err != nil {
t.Skipf("failed to find resources: %v", err)
}
if _, err := os.Stat(rscPath); err != nil {
t.Skipf("failed to find resources: %v", err)
}
}
func BenchmarkTableRefByName(b *testing.B) {
if _, err := sdkpath.AndroidHome(); err != nil {
b.Fatal("Could not locate Android SDK")
}
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
tbl, err := OpenTable()
if err != nil {
b.Fatal(err)
}
_, err = tbl.RefByName("@android:style/Theme.NoTitleBar.Fullscreen")
if err != nil {
b.Fatal(err)
}
}
}
| { return len(a) } | identifier_body |
binres_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binres
import (
"bytes"
"encoding"
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"math"
"os"
"sort"
"strings"
"testing"
"golang.org/x/mobile/internal/sdkpath"
)
func init() {
skipSynthesize = true
}
func printrecurse(t *testing.T, pl *Pool, el *Element, ws string) {
t.Logf("%s+elem:ns(%v) name(%s)", ws, el.NS, el.Name.Resolve(pl))
for _, attr := range el.attrs {
ns := ""
if attr.NS != math.MaxUint32 {
ns = pl.strings[int(attr.NS)]
nss := strings.Split(ns, "/")
ns = nss[len(nss)-1]
}
val := ""
if attr.RawValue != NoEntry {
val = pl.strings[int(attr.RawValue)]
} else {
switch attr.TypedValue.Type {
case DataIntDec:
val = fmt.Sprintf("%v", attr.TypedValue.Value)
case DataIntBool:
val = fmt.Sprintf("%v", attr.TypedValue.Value == 0xFFFFFFFF)
default:
val = fmt.Sprintf("0x%08X", attr.TypedValue.Value)
}
}
dt := attr.TypedValue.Type
t.Logf("%s|attr:ns(%v) name(%s) val(%s) valtyp(%s)\n", ws, ns, pl.strings[int(attr.Name)], val, dt)
}
t.Logf("\n")
for _, e := range el.Children {
printrecurse(t, pl, e, ws+" ")
}
}
func compareBytes(a, b []byte) error {
if bytes.Equal(a, b) {
return nil
}
buf := new(bytes.Buffer)
x, y := len(a), len(b)
if x != y {
fmt.Fprintf(buf, "byte length does not match, have %v, want %v\n", y, x)
}
if x > y {
x, y = y, x
}
mismatch := false
for i := 0; i < x; i++ {
if mismatch = a[i] != b[i]; mismatch {
fmt.Fprintf(buf, "first byte mismatch at %v\n", i)
break
}
}
if mismatch {
// print out a reasonable amount of data to help identify issues
truncate := x > 3300
if truncate {
x = 3300
}
buf.WriteString(" HAVE WANT\n")
for i := 0; i < x; i += 4 {
he, we := 4, 4
if i+he >= x {
he = x - i
}
if i+we >= y {
we = y - i
}
notequal := ""
if !bytes.Equal(b[i:i+he], a[i:i+we]) {
notequal = "***"
}
fmt.Fprintf(buf, "%3v | % X % X %s\n", i, b[i:i+he], a[i:i+we], notequal)
}
if truncate {
fmt.Fprint(buf, "... output truncated.\n")
}
}
return fmt.Errorf(buf.String())
}
func TestBootstrap(t *testing.T) {
bin, err := ioutil.ReadFile("testdata/bootstrap.bin")
if err != nil {
log.Fatal(err)
}
// unmarshal binary xml and store byte indices of decoded resources.
debugIndices := make(map[encoding.BinaryMarshaler]int)
trackUnmarshal := func(buf []byte) (*XML, error) {
bx := new(XML)
if err := (&bx.chunkHeader).UnmarshalBinary(buf); err != nil {
return nil, err
}
buf = buf[8:]
debugIndex := 8
for len(buf) > 0 {
k, err := bx.unmarshalBinaryKind(buf)
if err != nil {
return nil, err
}
debugIndices[k.(encoding.BinaryMarshaler)] = debugIndex
debugIndex += k.size()
buf = buf[k.size():]
}
return bx, nil
}
checkMarshal := func(res encoding.BinaryMarshaler, bsize int) {
b, err := res.MarshalBinary()
if err != nil {
t.Error(err)
}
idx := debugIndices[res]
a := bin[idx : idx+bsize]
if !bytes.Equal(a, b) {
x, y := len(a), len(b)
if x != y {
t.Errorf("%v: %T: byte length does not match, have %v, want %v", idx, res, y, x)
}
if x > y {
x, y = y, x
}
mismatch := false
for i := 0; i < x; i++ {
if mismatch = a[i] != b[i]; mismatch {
t.Errorf("%v: %T: first byte mismatch at %v of %v", idx, res, i, bsize)
break
}
}
if mismatch {
// print out a reasonable amount of data to help identify issues
truncate := x > 1300
if truncate {
x = 1300
}
t.Log(" HAVE WANT")
for i := 0; i < x; i += 4 {
he, we := 4, 4
if i+he >= x {
he = x - i
}
if i+we >= y {
we = y - i
}
t.Logf("%3v | % X % X\n", i, b[i:i+he], a[i:i+we])
}
if truncate {
t.Log("... output truncated.")
}
}
}
}
bxml, err := trackUnmarshal(bin)
if err != nil {
t.Fatal(err)
}
for i, x := range bxml.Pool.strings {
t.Logf("Pool(%v): %q\n", i, x)
}
for _, e := range bxml.Children {
printrecurse(t, bxml.Pool, e, "")
}
checkMarshal(&bxml.chunkHeader, int(bxml.headerByteSize))
checkMarshal(bxml.Pool, bxml.Pool.size())
checkMarshal(bxml.Map, bxml.Map.size())
checkMarshal(bxml.Namespace, bxml.Namespace.size())
for el := range bxml.iterElements() {
checkMarshal(el, el.size())
checkMarshal(el.end, el.end.size())
}
checkMarshal(bxml.Namespace.end, bxml.Namespace.end.size())
checkMarshal(bxml, bxml.size())
}
func TestEncode(t *testing.T) {
f, err := os.Open("testdata/bootstrap.xml")
if err != nil {
t.Fatal(err)
}
bx, err := UnmarshalXML(f, false)
if err != nil {
t.Fatal(err)
}
bin, err := ioutil.ReadFile("testdata/bootstrap.bin")
if err != nil {
log.Fatal(err)
}
bxml := new(XML)
if err := bxml.UnmarshalBinary(bin); err != nil {
t.Fatal(err)
}
if err := compareStrings(t, bxml.Pool.strings, bx.Pool.strings); err != nil {
t.Error(err)
}
if err := compareUint32s(t, rtou(bxml.Map.rs), rtou(bx.Map.rs)); err != nil {
t.Error(err)
}
if err := compareNamespaces(bx.Namespace, bxml.Namespace); err != nil {
t.Error(err)
}
if err := compareElements(bx, bxml); err != nil {
t.Error(err)
}
// Current output byte-for-byte of pkg binres is close, but not exact, to output of aapt.
// The current exceptions to this are as follows:
// * sort order of certain attributes
// * typed value of minSdkVersion
// The below check will produce an error, listing differences in the byte output of each.
// have, err := bx.MarshalBinary()
// if err != nil {
// t.Fatal(err)
// }
// if err := compareBytes(bin, have); err != nil {
// t.Fatal(err)
// }
}
func TestRawValueByName(t *testing.T) {
f, err := os.Open("testdata/bootstrap.xml")
if err != nil {
t.Fatal(err)
}
bx, err := UnmarshalXML(f, false)
if err != nil {
t.Fatal(err)
}
pkgname, err := bx.RawValueByName("manifest", xml.Name{Local: "package"})
if want := "com.zentus.balloon"; err != nil || pkgname != want {
t.Fatalf("have (%q, %v), want (%q, nil)", pkgname, err, want)
}
}
type byAttrName []*Attribute
func (a byAttrName) Len() int { return len(a) }
func (a byAttrName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byAttrName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func compareElements(have, want *XML) error {
h, w := have.iterElements(), want.iterElements()
buf := new(bytes.Buffer)
for {
a, b := <-h, <-w
if a == nil || b == nil {
break
}
if a.Name.Resolve(have.Pool) == "uses-sdk" {
a = <-h // discard uses-sdk token from tests since it's synthesized internally
}
if a.NS != b.NS ||
a.Name != b.Name {
return fmt.Errorf("elements don't match, have %+v, want %+v", a, b)
}
if a.end.NS != b.end.NS ||
a.end.Name != b.end.Name {
return fmt.Errorf("element ends don't match, have %+v, want %+v", a.end, b.end)
}
if len(a.attrs) != len(b.attrs) {
return fmt.Errorf("element attribute lengths don't match, have %v, want %v", len(a.attrs), len(b.attrs))
}
// discards order of aapt and binres as some sorting details of apt have eluded this package but do not
// affect final output from functioning correctly
sort.Sort(byAttrName(a.attrs))
sort.Sort(byAttrName(b.attrs))
for i, attr := range a.attrs {
bttr := b.attrs[i]
if attr.NS != bttr.NS ||
attr.Name != bttr.Name ||
attr.RawValue != bttr.RawValue ||
attr.TypedValue.Type != bttr.TypedValue.Type ||
attr.TypedValue.Value != bttr.TypedValue.Value {
// single exception to check for minSdkVersion which has peculiar output from aapt
// but following same format of all other like-types appears to work correctly.
// BUG(dskinner) this check is brittle as it will skip over any attribute in
// bootstrap.xml that has value == MinSDK.
if attr.TypedValue.Value == MinSDK {
continue
}
fmt.Fprintf(buf, "attrs don't match\nhave: %+v\nwant: %+v\n", attr, bttr)
}
}
if buf.Len() > 0 {
buf.WriteString("-------------\n")
}
}
if buf.Len() > 0 {
return fmt.Errorf(buf.String())
}
return nil
}
func compareNamespaces(have, want *Namespace) error {
if have == nil || want == nil ||
have.LineNumber != want.LineNumber ||
have.Comment != want.Comment ||
have.prefix != want.prefix ||
have.uri != want.uri {
return fmt.Errorf("namespaces don't match, have %+v, want %+v", have, want)
}
if have.end != nil || want.end != nil {
return compareNamespaces(have.end, want.end)
}
return nil
}
func rtou(a []TableRef) (b []uint32) {
for _, x := range a {
b = append(b, uint32(x))
}
return
}
func compareUint32s(t *testing.T, a, b []uint32) error {
var err error
if len(a) != len(b) {
err = fmt.Errorf("lengths do not match")
}
n := len(a)
if n < len(b) {
n = len(b)
}
var buf bytes.Buffer
buf.WriteString("a.Map.rs b.Map.rs\n")
for i := 0; i < n; i++ {
var c, d string
if i < len(a) {
c = fmt.Sprintf("%0#8x ", a[i])
} else {
c = "__________ "
}
if i < len(b) {
d = fmt.Sprintf("%0#8x ", b[i])
} else {
d = "__________ "
}
if err == nil && c != d {
err = fmt.Errorf("has missing/incorrect values")
}
buf.WriteString(c + " " + d + "\n")
}
if err != nil {
err = fmt.Errorf("%s\n%s", err, buf.String())
}
return err
}
func compareStrings(t *testing.T, a, b []string) error {
var err error
if len(a) != len(b) {
err = fmt.Errorf("lengths do not match")
}
buf := new(bytes.Buffer)
for i, x := range a {
v := "__"
for j, y := range b {
if x == y {
v = fmt.Sprintf("%2v", j)
break
}
}
if err == nil && v == "__" {
if !strings.HasPrefix(x, "4.1.") {
// as of the time of this writing, the current version of build tools being targeted
// reports 4.1.2-1425332.
//
// TODO this check has the potential to hide real errors but can be fixed once more
// of the xml document is unmarshalled and XML can be queried to assure this is related
// to platformBuildVersionName.
err = fmt.Errorf("has missing/incorrect values")
}
}
fmt.Fprintf(buf, "Pool(%2v, %s) %q\n", i, v, x)
}
contains := func(xs []string, a string) bool {
for _, x := range xs {
if x == a {
return true
}
}
return false
}
if err != nil {
buf.WriteString("\n## only in var a\n")
for i, x := range a {
if !contains(b, x) {
fmt.Fprintf(buf, "Pool(%2v) %q\n", i, x)
}
}
buf.WriteString("\n## only in var b\n")
for i, x := range b {
if !contains(a, x) {
fmt.Fprintf(buf, "Pool(%2v) %q\n", i, x)
}
}
}
if err != nil {
err = fmt.Errorf("%s\n%s", err, buf.String())
}
return err
}
func TestOpenTable(t *testing.T) {
if _, err := sdkpath.AndroidHome(); err != nil {
t.Skipf("Could not locate Android SDK: %v", err)
}
tbl, err := OpenTable()
if err != nil {
t.Fatal(err)
}
if len(tbl.pkgs) == 0 {
t.Fatal("failed to decode any resource packages")
}
pkg := tbl.pkgs[0]
t.Log("package name:", pkg.name)
for i, x := range pkg.typePool.strings {
t.Logf("typePool[i=%v]: %s\n", i, x)
}
for i, spec := range pkg.specs {
t.Logf("spec[i=%v]: %v %q\n", i, spec.id, pkg.typePool.strings[spec.id-1])
for j, typ := range spec.types {
t.Logf("\ttype[i=%v]: %v\n", j, typ.id)
for k, nt := range typ.entries {
if nt == nil { // NoEntry
continue
}
t.Logf("\t\tentry[i=%v]: %v %q\n", k, nt.key, pkg.keyPool.strings[nt.key])
if k > 5 {
t.Logf("\t\t... truncating output")
break
}
}
}
}
}
func TestTableRefByName(t *testing.T) {
checkResources(t)
tbl, err := OpenSDKTable()
if err != nil {
t.Fatal(err)
}
if len(tbl.pkgs) == 0 {
t.Fatal("failed to decode any resource packages")
}
ref, err := tbl.RefByName("@android:style/Theme.NoTitleBar.Fullscreen")
if err != nil {
t.Fatal(err)
}
if want := uint32(0x01030007); uint32(ref) != want {
t.Fatalf("RefByName does not match expected result, have %0#8x, want %0#8x", ref, want)
}
}
func TestTableMarshal(t *testing.T) {
checkResources(t)
tbl, err := OpenSDKTable()
if err != nil {
t.Fatal(err)
}
bin, err := tbl.MarshalBinary()
if err != nil {
t.Fatal(err)
}
xtbl := new(Table)
if err := xtbl.UnmarshalBinary(bin); err != nil {
t.Fatal(err)
}
if len(tbl.pool.strings) != len(xtbl.pool.strings) {
t.Fatal("tbl.pool lengths don't match")
}
if len(tbl.pkgs) != len(xtbl.pkgs) {
t.Fatal("tbl.pkgs lengths don't match")
}
pkg, xpkg := tbl.pkgs[0], xtbl.pkgs[0]
if err := compareStrings(t, pkg.typePool.strings, xpkg.typePool.strings); err != nil {
t.Fatal(err)
}
if err := compareStrings(t, pkg.keyPool.strings, xpkg.keyPool.strings); err != nil {
t.Fatal(err)
}
if len(pkg.specs) != len(xpkg.specs) {
t.Fatal("pkg.specs lengths don't match")
}
for i, spec := range pkg.specs {
xspec := xpkg.specs[i]
if spec.id != xspec.id {
t.Fatal("spec.id doesn't match")
}
if spec.entryCount != xspec.entryCount {
t.Fatal("spec.entryCount doesn't match")
}
if len(spec.entries) != len(xspec.entries) {
t.Fatal("spec.entries lengths don't match")
}
for j, mask := range spec.entries {
xmask := xspec.entries[j]
if mask != xmask {
t.Fatal("entry mask doesn't match")
}
}
if len(spec.types) != len(xspec.types) {
t.Fatal("spec.types length don't match")
}
for j, typ := range spec.types {
xtyp := xspec.types[j]
if typ.id != xtyp.id {
t.Fatal("typ.id doesn't match")
}
if typ.entryCount != xtyp.entryCount {
t.Fatal("typ.entryCount doesn't match")
}
// Config size can differ after serialization due to the loss of extended fields
// during reserialization, but the fixed portions of the Type header must not change. | }
for k, index := range typ.indices {
xindex := xtyp.indices[k]
if index != xindex {
t.Errorf("type index doesn't match at %v, have %v, want %v", k, xindex, index)
}
}
if len(typ.entries) != len(xtyp.entries) {
t.Fatal("typ.entries lengths don't match")
}
for k, nt := range typ.entries {
xnt := xtyp.entries[k]
if nt == nil {
if xnt != nil {
t.Fatal("nt is nil but xnt is not")
}
continue
}
if nt.size != xnt.size {
t.Fatal("entry.size doesn't match")
}
if nt.flags != xnt.flags {
t.Fatal("entry.flags don't match")
}
if nt.key != xnt.key {
t.Fatal("entry.key doesn't match")
}
if nt.parent != xnt.parent {
t.Fatal("entry.parent doesn't match")
}
if nt.count != xnt.count {
t.Fatal("entry.count doesn't match")
}
for l, val := range nt.values {
xval := xnt.values[l]
if val.name != xval.name {
t.Fatal("value.name doesn't match")
}
}
}
}
}
}
func checkResources(t *testing.T) {
t.Helper()
if _, err := sdkpath.AndroidHome(); err != nil {
t.Skip("Could not locate Android SDK")
}
rscPath, err := apiResourcesPath()
if err != nil {
t.Skipf("failed to find resources: %v", err)
}
if _, err := os.Stat(rscPath); err != nil {
t.Skipf("failed to find resources: %v", err)
}
}
func BenchmarkTableRefByName(b *testing.B) {
if _, err := sdkpath.AndroidHome(); err != nil {
b.Fatal("Could not locate Android SDK")
}
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
tbl, err := OpenTable()
if err != nil {
b.Fatal(err)
}
_, err = tbl.RefByName("@android:style/Theme.NoTitleBar.Fullscreen")
if err != nil {
b.Fatal(err)
}
}
} | if uint32(typ.headerByteSize)-typ.config.size != uint32(xtyp.headerByteSize)-uint32(xtyp.config.size) {
t.Fatal("fixed size header portions don't match")
}
if len(typ.indices) != len(xtyp.indices) {
t.Fatal("typ.indices length don't match") | random_line_split |
train_transformer_navigation.py | import json
from jsonargparse import ArgumentParser, ActionConfigFile
import yaml
from typing import List, Dict
import glob
import os
import pathlib
import pdb
import subprocess
import copy
from io import StringIO
from collections import defaultdict
import torch
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
from einops import rearrange
import logging
from tqdm import tqdm
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib import gridspec
import numpy as np
import torch.autograd.profiler as profiler
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from allennlp.training.scheduler import Scheduler
from allennlp.training.learning_rate_schedulers import NoamLR
import pandas as pd
from transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image
from metrics import MSEMetric, AccuracyMetric, F1Metric
from language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder
from navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser
from train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer
from navigation_transformer import NavigationTransformerEncoder
from train_transformer import TransformerTrainer
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self,
dataset_reader: NavigationDatasetReader,
encoder: TransformerEncoder,
optimizer: torch.optim.Optimizer,
scheduler: Scheduler,
num_epochs: int,
num_blocks: int,
device: torch.device,
checkpoint_dir: str,
num_models_to_keep: int,
generate_after_n: int,
resolution: int = 64,
patch_size: int = 8,
block_size: int = 4,
batch_size: int = 16,
output_type: str = "per-pixel",
checkpoint_every: int = 64,
validation_limit: int = 16,
depth: int = 7,
score_type: str = "acc",
best_epoch: int = -1,
seed: int = 12,
zero_weight: float = 0.05,
debug_image_top_k: int = None,
debug_image_threshold: float = None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[],
encoder=encoder,
optimizer=optimizer,
scheduler=scheduler,
num_epochs=num_epochs,
num_blocks=num_blocks,
device=device,
checkpoint_dir=checkpoint_dir,
num_models_to_keep=num_models_to_keep,
generate_after_n=generate_after_n,
score_type=score_type,
patch_size=patch_size,
block_size=block_size,
output_type=output_type,
resolution=resolution,
depth=depth,
best_epoch=best_epoch,
seed=seed,
zero_weight=zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
if debug_image_top_k < 0:
debug_image_top_k = None
if debug_image_threshold < 0:
debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i+self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f"Validating epoch {epoch} step {step}...")
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read("dev", validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, epoch, b, 0)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.read("dev", self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f"Test-time pixel acc {mean_acc * 100}")
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f"Training epoch {epoch}...")
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read("train")):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
# skip bad examples
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step+1)
self.scheduler.step_batch(it)
#print(f"step: {step+1} checkpoint_every: {self.checkpoint_every} {(step +1) % self.checkpoint_every}")
if (step+1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.validation_limit)
print(f"Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}")
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f"{epoch}_{step}", is_best)
step += 1
print(f"skipped {skipped} examples")
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.validation_limit)
print(f"Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}")
if self.score_type == "acc":
return (epoch_acc)/2, -1.0
else:
raise AssertionError(f"invalid score type {self.score_type}")
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight = [1.0, 1.0]):
"""
compute per-patch for each patch
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs["next_position"]
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
# binarize patches
next_sum_image = torch.sum(true_next_image, dim = 2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
# any patch that has a 1 pixel in it gets 1
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
total_loss = next_pixel_loss
print(f"loss {total_loss.item()}")
return total_loss
def generate_debugging_image(self,
true_img,
path_state,
pred_path,
out_path,
caption = None,
top_k = None,
threshold = None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2,2, figsize=(16,16))
# gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1])
text_ax = ax[0,1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize = 12)
text_ax.axis("off")
props = dict(boxstyle='round',
facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
# img_ax = plt.subplot(gs[2])
img_ax = ax[1,0]
#w = int(40 * (self.resolution / 224))
true_img = true_img.detach().cpu().numpy().astype(float)[:,:,0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1,1,3)).astype(float)
true_ax = ax[0,0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1,:,:]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape = (512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path<1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1,1,3)).astype(float)
pred_ax = ax[1,1]
pred_ax.imshow(pred_path)
file_path = f"{out_path}.png"
print(f"saving to {file_path}")
plt.savefig(file_path)
plt.close()
def validate(self, batch_instance, epoch_num, batch_num, instance_num, top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size, output_type="per-patch", upsample=True)
# f1 metric
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance["path_state"].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs["next_position"].shape[0]):
output_path = self.checkpoint_dir.joinpath(f"batch_{batch_num}").joinpath(f"instance_{i}")
output_path.mkdir(parents = True, exist_ok=True)
command = batch_instance["command"][i]
command = [x for x in command if x != "<PAD>"]
command = " ".join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance["path_state"][i]
pred_path = next_position[i]
self.generate_debugging_image(image,
path_state,
pred_path,
output_path,
caption = command,
top_k = top_k,
threshold = threshold)
return {"next_f1": next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-8
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1-pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1-pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
def main(args):
device = "cpu"
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f"cuda:{free_gpu_id}"
#device = "cuda:0"
device = torch.device(device)
print(f"On device {device}")
#test = torch.ones((1))
#test = test.to(device)
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir = args.data_dir,
out_path = args.out_path,
path_width = args.path_width,
read_limit = args.read_limit,
batch_size = args.batch_size,
max_len = args.max_len,
tokenizer = tokenizer,
shuffle = args.shuffle,
overfit = args.overfit,
is_bert = "bert" in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath("vocab.json"), "w") as f1:
json.dump(list(train_vocab), f1)
else:
print(f"Reading vocab from {checkpoint_dir}")
with open(checkpoint_dir.joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
print(f"got data")
# construct the vocab and tokenizer
print(f"constructing model...")
# get the embedder from args
if args.embedder == "random":
embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)
elif args.embedder == "glove":
embedder = GloveEmbedder(tokenizer, train_vocab, args.embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith("bert"):
embedder = BERTEmbedder(model_name = args.embedder, max_seq_len = args.max_len)
else:
raise NotImplementedError(f"No embedder {args.embedder}")
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size = args.resolution,
patch_size = args.patch_size,
language_embedder = embedder,
n_layers = args.n_layers,
channels = args.channels,
n_heads = args.n_heads,
hidden_dim = args.hidden_dim,
ff_dim = args.ff_dim,
dropout = args.dropout,
embed_dropout = args.embed_dropout,
output_type = args.output_type,
positional_encoding_type = args.pos_encoding_type,
device = device,
log_weights = args.test,
locality_mask = args.locality_mask,
locality_neighborhood = args.locality_neighborhood,
init_scale = args.init_scale)
# Initialize encoder
encoder = encoder_cls(**encoder_kwargs)
if args.cuda is not None:
encoder = encoder.cuda(device)
print(encoder)
# construct optimizer
optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
# scheduler
scheduler = NoamLR(optimizer, model_size = args.hidden_dim, warmup_steps = args.warmup, factor = args.lr_factor)
best_epoch = -1
block_size = int((args.resolution * 4)/64)
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
# file exists
try:
assert(len(glob.glob(os.path.join(args.checkpoint_dir, "*.th"))) == 0)
except AssertionError:
raise AssertionError(f"Output directory {args.checkpoint_dir} non-empty, will not overwrite!")
else:
# resume from pre-trained
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
# get training info
best_checkpoint_data = json.load(open(pathlib.Path(args.checkpoint_dir).joinpath("best_training_state.json")))
print(f"best_checkpoint_data {best_checkpoint_data}")
best_epoch = best_checkpoint_data["epoch"]
| # drop stuff we can't serialize
del(dump_args.__dict__["cfg"])
del(dump_args.__dict__["__cwd__"])
del(dump_args.__dict__["__path__"])
to_dump = dump_args.__dict__
# dump
yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
else:
# test-time, load best model
print(f"loading model weights from {args.checkpoint_dir}")
#state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"))
#encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
num_blocks = 1
# construct trainer
trainer = NavigationTransformerTrainer(dataset_reader = dataset_reader,
encoder = encoder,
optimizer = optimizer,
scheduler = scheduler,
num_epochs = args.num_epochs,
num_blocks = num_blocks,
device = device,
checkpoint_dir = args.checkpoint_dir,
checkpoint_every = args.checkpoint_every,
validation_limit = args.validation_limit,
num_models_to_keep = args.num_models_to_keep,
generate_after_n = args.generate_after_n,
score_type=args.score_type,
depth = depth,
resolution = args.resolution,
output_type = args.output_type,
patch_size = args.patch_size,
block_size = block_size,
best_epoch = best_epoch,
seed = args.seed,
zero_weight = args.zero_weight,
debug_image_top_k = args.debug_image_top_k,
debug_image_threshold = args.debug_image_threshold)
if not args.test:
trainer.train()
else:
print(f"evaluating")
acc = trainer.evaluate()
print(f"accuracy: {acc}")
if __name__ == "__main__":
np.random.seed(12)
torch.manual_seed(12)
parser = configure_parser()
args = parser.parse_args()
main(args) | # save arg config to checkpoint_dir
with open(pathlib.Path(args.checkpoint_dir).joinpath("config.yaml"), "w") as f1:
dump_args = copy.deepcopy(args) | random_line_split |
train_transformer_navigation.py | import json
from jsonargparse import ArgumentParser, ActionConfigFile
import yaml
from typing import List, Dict
import glob
import os
import pathlib
import pdb
import subprocess
import copy
from io import StringIO
from collections import defaultdict
import torch
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
from einops import rearrange
import logging
from tqdm import tqdm
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib import gridspec
import numpy as np
import torch.autograd.profiler as profiler
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from allennlp.training.scheduler import Scheduler
from allennlp.training.learning_rate_schedulers import NoamLR
import pandas as pd
from transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image
from metrics import MSEMetric, AccuracyMetric, F1Metric
from language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder
from navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser
from train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer
from navigation_transformer import NavigationTransformerEncoder
from train_transformer import TransformerTrainer
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self,
dataset_reader: NavigationDatasetReader,
encoder: TransformerEncoder,
optimizer: torch.optim.Optimizer,
scheduler: Scheduler,
num_epochs: int,
num_blocks: int,
device: torch.device,
checkpoint_dir: str,
num_models_to_keep: int,
generate_after_n: int,
resolution: int = 64,
patch_size: int = 8,
block_size: int = 4,
batch_size: int = 16,
output_type: str = "per-pixel",
checkpoint_every: int = 64,
validation_limit: int = 16,
depth: int = 7,
score_type: str = "acc",
best_epoch: int = -1,
seed: int = 12,
zero_weight: float = 0.05,
debug_image_top_k: int = None,
debug_image_threshold: float = None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[],
encoder=encoder,
optimizer=optimizer,
scheduler=scheduler,
num_epochs=num_epochs,
num_blocks=num_blocks,
device=device,
checkpoint_dir=checkpoint_dir,
num_models_to_keep=num_models_to_keep,
generate_after_n=generate_after_n,
score_type=score_type,
patch_size=patch_size,
block_size=block_size,
output_type=output_type,
resolution=resolution,
depth=depth,
best_epoch=best_epoch,
seed=seed,
zero_weight=zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
if debug_image_top_k < 0:
debug_image_top_k = None
if debug_image_threshold < 0:
debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i+self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f"Validating epoch {epoch} step {step}...")
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read("dev", validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, epoch, b, 0)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.read("dev", self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f"Test-time pixel acc {mean_acc * 100}")
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f"Training epoch {epoch}...")
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read("train")):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
# skip bad examples
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step+1)
self.scheduler.step_batch(it)
#print(f"step: {step+1} checkpoint_every: {self.checkpoint_every} {(step +1) % self.checkpoint_every}")
if (step+1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.validation_limit)
print(f"Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}")
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f"{epoch}_{step}", is_best)
step += 1
print(f"skipped {skipped} examples")
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.validation_limit)
print(f"Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}")
if self.score_type == "acc":
return (epoch_acc)/2, -1.0
else:
|
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight = [1.0, 1.0]):
"""
compute per-patch for each patch
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs["next_position"]
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
# binarize patches
next_sum_image = torch.sum(true_next_image, dim = 2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
# any patch that has a 1 pixel in it gets 1
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
total_loss = next_pixel_loss
print(f"loss {total_loss.item()}")
return total_loss
def generate_debugging_image(self,
true_img,
path_state,
pred_path,
out_path,
caption = None,
top_k = None,
threshold = None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2,2, figsize=(16,16))
# gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1])
text_ax = ax[0,1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize = 12)
text_ax.axis("off")
props = dict(boxstyle='round',
facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
# img_ax = plt.subplot(gs[2])
img_ax = ax[1,0]
#w = int(40 * (self.resolution / 224))
true_img = true_img.detach().cpu().numpy().astype(float)[:,:,0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1,1,3)).astype(float)
true_ax = ax[0,0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1,:,:]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape = (512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path<1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1,1,3)).astype(float)
pred_ax = ax[1,1]
pred_ax.imshow(pred_path)
file_path = f"{out_path}.png"
print(f"saving to {file_path}")
plt.savefig(file_path)
plt.close()
def validate(self, batch_instance, epoch_num, batch_num, instance_num, top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size, output_type="per-patch", upsample=True)
# f1 metric
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance["path_state"].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs["next_position"].shape[0]):
output_path = self.checkpoint_dir.joinpath(f"batch_{batch_num}").joinpath(f"instance_{i}")
output_path.mkdir(parents = True, exist_ok=True)
command = batch_instance["command"][i]
command = [x for x in command if x != "<PAD>"]
command = " ".join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance["path_state"][i]
pred_path = next_position[i]
self.generate_debugging_image(image,
path_state,
pred_path,
output_path,
caption = command,
top_k = top_k,
threshold = threshold)
return {"next_f1": next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-8
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1-pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1-pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
def main(args):
device = "cpu"
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f"cuda:{free_gpu_id}"
#device = "cuda:0"
device = torch.device(device)
print(f"On device {device}")
#test = torch.ones((1))
#test = test.to(device)
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir = args.data_dir,
out_path = args.out_path,
path_width = args.path_width,
read_limit = args.read_limit,
batch_size = args.batch_size,
max_len = args.max_len,
tokenizer = tokenizer,
shuffle = args.shuffle,
overfit = args.overfit,
is_bert = "bert" in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath("vocab.json"), "w") as f1:
json.dump(list(train_vocab), f1)
else:
print(f"Reading vocab from {checkpoint_dir}")
with open(checkpoint_dir.joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
print(f"got data")
# construct the vocab and tokenizer
print(f"constructing model...")
# get the embedder from args
if args.embedder == "random":
embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)
elif args.embedder == "glove":
embedder = GloveEmbedder(tokenizer, train_vocab, args.embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith("bert"):
embedder = BERTEmbedder(model_name = args.embedder, max_seq_len = args.max_len)
else:
raise NotImplementedError(f"No embedder {args.embedder}")
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size = args.resolution,
patch_size = args.patch_size,
language_embedder = embedder,
n_layers = args.n_layers,
channels = args.channels,
n_heads = args.n_heads,
hidden_dim = args.hidden_dim,
ff_dim = args.ff_dim,
dropout = args.dropout,
embed_dropout = args.embed_dropout,
output_type = args.output_type,
positional_encoding_type = args.pos_encoding_type,
device = device,
log_weights = args.test,
locality_mask = args.locality_mask,
locality_neighborhood = args.locality_neighborhood,
init_scale = args.init_scale)
# Initialize encoder
encoder = encoder_cls(**encoder_kwargs)
if args.cuda is not None:
encoder = encoder.cuda(device)
print(encoder)
# construct optimizer
optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
# scheduler
scheduler = NoamLR(optimizer, model_size = args.hidden_dim, warmup_steps = args.warmup, factor = args.lr_factor)
best_epoch = -1
block_size = int((args.resolution * 4)/64)
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
# file exists
try:
assert(len(glob.glob(os.path.join(args.checkpoint_dir, "*.th"))) == 0)
except AssertionError:
raise AssertionError(f"Output directory {args.checkpoint_dir} non-empty, will not overwrite!")
else:
# resume from pre-trained
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
# get training info
best_checkpoint_data = json.load(open(pathlib.Path(args.checkpoint_dir).joinpath("best_training_state.json")))
print(f"best_checkpoint_data {best_checkpoint_data}")
best_epoch = best_checkpoint_data["epoch"]
# save arg config to checkpoint_dir
with open(pathlib.Path(args.checkpoint_dir).joinpath("config.yaml"), "w") as f1:
dump_args = copy.deepcopy(args)
# drop stuff we can't serialize
del(dump_args.__dict__["cfg"])
del(dump_args.__dict__["__cwd__"])
del(dump_args.__dict__["__path__"])
to_dump = dump_args.__dict__
# dump
yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
else:
# test-time, load best model
print(f"loading model weights from {args.checkpoint_dir}")
#state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"))
#encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
num_blocks = 1
# construct trainer
trainer = NavigationTransformerTrainer(dataset_reader = dataset_reader,
encoder = encoder,
optimizer = optimizer,
scheduler = scheduler,
num_epochs = args.num_epochs,
num_blocks = num_blocks,
device = device,
checkpoint_dir = args.checkpoint_dir,
checkpoint_every = args.checkpoint_every,
validation_limit = args.validation_limit,
num_models_to_keep = args.num_models_to_keep,
generate_after_n = args.generate_after_n,
score_type=args.score_type,
depth = depth,
resolution = args.resolution,
output_type = args.output_type,
patch_size = args.patch_size,
block_size = block_size,
best_epoch = best_epoch,
seed = args.seed,
zero_weight = args.zero_weight,
debug_image_top_k = args.debug_image_top_k,
debug_image_threshold = args.debug_image_threshold)
if not args.test:
trainer.train()
else:
print(f"evaluating")
acc = trainer.evaluate()
print(f"accuracy: {acc}")
if __name__ == "__main__":
np.random.seed(12)
torch.manual_seed(12)
parser = configure_parser()
args = parser.parse_args()
main(args)
| raise AssertionError(f"invalid score type {self.score_type}") | conditional_block |
train_transformer_navigation.py | import json
from jsonargparse import ArgumentParser, ActionConfigFile
import yaml
from typing import List, Dict
import glob
import os
import pathlib
import pdb
import subprocess
import copy
from io import StringIO
from collections import defaultdict
import torch
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
from einops import rearrange
import logging
from tqdm import tqdm
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib import gridspec
import numpy as np
import torch.autograd.profiler as profiler
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from allennlp.training.scheduler import Scheduler
from allennlp.training.learning_rate_schedulers import NoamLR
import pandas as pd
from transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image
from metrics import MSEMetric, AccuracyMetric, F1Metric
from language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder
from navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser
from train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer
from navigation_transformer import NavigationTransformerEncoder
from train_transformer import TransformerTrainer
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self,
dataset_reader: NavigationDatasetReader,
encoder: TransformerEncoder,
optimizer: torch.optim.Optimizer,
scheduler: Scheduler,
num_epochs: int,
num_blocks: int,
device: torch.device,
checkpoint_dir: str,
num_models_to_keep: int,
generate_after_n: int,
resolution: int = 64,
patch_size: int = 8,
block_size: int = 4,
batch_size: int = 16,
output_type: str = "per-pixel",
checkpoint_every: int = 64,
validation_limit: int = 16,
depth: int = 7,
score_type: str = "acc",
best_epoch: int = -1,
seed: int = 12,
zero_weight: float = 0.05,
debug_image_top_k: int = None,
debug_image_threshold: float = None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[],
encoder=encoder,
optimizer=optimizer,
scheduler=scheduler,
num_epochs=num_epochs,
num_blocks=num_blocks,
device=device,
checkpoint_dir=checkpoint_dir,
num_models_to_keep=num_models_to_keep,
generate_after_n=generate_after_n,
score_type=score_type,
patch_size=patch_size,
block_size=block_size,
output_type=output_type,
resolution=resolution,
depth=depth,
best_epoch=best_epoch,
seed=seed,
zero_weight=zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
if debug_image_top_k < 0:
debug_image_top_k = None
if debug_image_threshold < 0:
debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i+self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f"Validating epoch {epoch} step {step}...")
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read("dev", validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, epoch, b, 0)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.read("dev", self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f"Test-time pixel acc {mean_acc * 100}")
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f"Training epoch {epoch}...")
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read("train")):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
# skip bad examples
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step+1)
self.scheduler.step_batch(it)
#print(f"step: {step+1} checkpoint_every: {self.checkpoint_every} {(step +1) % self.checkpoint_every}")
if (step+1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.validation_limit)
print(f"Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}")
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f"{epoch}_{step}", is_best)
step += 1
print(f"skipped {skipped} examples")
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.validation_limit)
print(f"Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}")
if self.score_type == "acc":
return (epoch_acc)/2, -1.0
else:
raise AssertionError(f"invalid score type {self.score_type}")
def | (self, inputs, outputs, next_to_prev_weight = [1.0, 1.0]):
"""
compute per-patch for each patch
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs["next_position"]
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
# binarize patches
next_sum_image = torch.sum(true_next_image, dim = 2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
# any patch that has a 1 pixel in it gets 1
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
total_loss = next_pixel_loss
print(f"loss {total_loss.item()}")
return total_loss
def generate_debugging_image(self,
true_img,
path_state,
pred_path,
out_path,
caption = None,
top_k = None,
threshold = None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2,2, figsize=(16,16))
# gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1])
text_ax = ax[0,1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize = 12)
text_ax.axis("off")
props = dict(boxstyle='round',
facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
# img_ax = plt.subplot(gs[2])
img_ax = ax[1,0]
#w = int(40 * (self.resolution / 224))
true_img = true_img.detach().cpu().numpy().astype(float)[:,:,0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1,1,3)).astype(float)
true_ax = ax[0,0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1,:,:]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape = (512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path<1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1,1,3)).astype(float)
pred_ax = ax[1,1]
pred_ax.imshow(pred_path)
file_path = f"{out_path}.png"
print(f"saving to {file_path}")
plt.savefig(file_path)
plt.close()
def validate(self, batch_instance, epoch_num, batch_num, instance_num, top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size, output_type="per-patch", upsample=True)
# f1 metric
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance["path_state"].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs["next_position"].shape[0]):
output_path = self.checkpoint_dir.joinpath(f"batch_{batch_num}").joinpath(f"instance_{i}")
output_path.mkdir(parents = True, exist_ok=True)
command = batch_instance["command"][i]
command = [x for x in command if x != "<PAD>"]
command = " ".join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance["path_state"][i]
pred_path = next_position[i]
self.generate_debugging_image(image,
path_state,
pred_path,
output_path,
caption = command,
top_k = top_k,
threshold = threshold)
return {"next_f1": next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-8
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1-pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1-pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
def main(args):
device = "cpu"
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f"cuda:{free_gpu_id}"
#device = "cuda:0"
device = torch.device(device)
print(f"On device {device}")
#test = torch.ones((1))
#test = test.to(device)
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir = args.data_dir,
out_path = args.out_path,
path_width = args.path_width,
read_limit = args.read_limit,
batch_size = args.batch_size,
max_len = args.max_len,
tokenizer = tokenizer,
shuffle = args.shuffle,
overfit = args.overfit,
is_bert = "bert" in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath("vocab.json"), "w") as f1:
json.dump(list(train_vocab), f1)
else:
print(f"Reading vocab from {checkpoint_dir}")
with open(checkpoint_dir.joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
print(f"got data")
# construct the vocab and tokenizer
print(f"constructing model...")
# get the embedder from args
if args.embedder == "random":
embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)
elif args.embedder == "glove":
embedder = GloveEmbedder(tokenizer, train_vocab, args.embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith("bert"):
embedder = BERTEmbedder(model_name = args.embedder, max_seq_len = args.max_len)
else:
raise NotImplementedError(f"No embedder {args.embedder}")
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size = args.resolution,
patch_size = args.patch_size,
language_embedder = embedder,
n_layers = args.n_layers,
channels = args.channels,
n_heads = args.n_heads,
hidden_dim = args.hidden_dim,
ff_dim = args.ff_dim,
dropout = args.dropout,
embed_dropout = args.embed_dropout,
output_type = args.output_type,
positional_encoding_type = args.pos_encoding_type,
device = device,
log_weights = args.test,
locality_mask = args.locality_mask,
locality_neighborhood = args.locality_neighborhood,
init_scale = args.init_scale)
# Initialize encoder
encoder = encoder_cls(**encoder_kwargs)
if args.cuda is not None:
encoder = encoder.cuda(device)
print(encoder)
# construct optimizer
optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
# scheduler
scheduler = NoamLR(optimizer, model_size = args.hidden_dim, warmup_steps = args.warmup, factor = args.lr_factor)
best_epoch = -1
block_size = int((args.resolution * 4)/64)
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
# file exists
try:
assert(len(glob.glob(os.path.join(args.checkpoint_dir, "*.th"))) == 0)
except AssertionError:
raise AssertionError(f"Output directory {args.checkpoint_dir} non-empty, will not overwrite!")
else:
# resume from pre-trained
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
# get training info
best_checkpoint_data = json.load(open(pathlib.Path(args.checkpoint_dir).joinpath("best_training_state.json")))
print(f"best_checkpoint_data {best_checkpoint_data}")
best_epoch = best_checkpoint_data["epoch"]
# save arg config to checkpoint_dir
with open(pathlib.Path(args.checkpoint_dir).joinpath("config.yaml"), "w") as f1:
dump_args = copy.deepcopy(args)
# drop stuff we can't serialize
del(dump_args.__dict__["cfg"])
del(dump_args.__dict__["__cwd__"])
del(dump_args.__dict__["__path__"])
to_dump = dump_args.__dict__
# dump
yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
else:
# test-time, load best model
print(f"loading model weights from {args.checkpoint_dir}")
#state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"))
#encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
num_blocks = 1
# construct trainer
trainer = NavigationTransformerTrainer(dataset_reader = dataset_reader,
encoder = encoder,
optimizer = optimizer,
scheduler = scheduler,
num_epochs = args.num_epochs,
num_blocks = num_blocks,
device = device,
checkpoint_dir = args.checkpoint_dir,
checkpoint_every = args.checkpoint_every,
validation_limit = args.validation_limit,
num_models_to_keep = args.num_models_to_keep,
generate_after_n = args.generate_after_n,
score_type=args.score_type,
depth = depth,
resolution = args.resolution,
output_type = args.output_type,
patch_size = args.patch_size,
block_size = block_size,
best_epoch = best_epoch,
seed = args.seed,
zero_weight = args.zero_weight,
debug_image_top_k = args.debug_image_top_k,
debug_image_threshold = args.debug_image_threshold)
if not args.test:
trainer.train()
else:
print(f"evaluating")
acc = trainer.evaluate()
print(f"accuracy: {acc}")
if __name__ == "__main__":
np.random.seed(12)
torch.manual_seed(12)
parser = configure_parser()
args = parser.parse_args()
main(args)
| compute_patch_loss | identifier_name |
train_transformer_navigation.py | import json
from jsonargparse import ArgumentParser, ActionConfigFile
import yaml
from typing import List, Dict
import glob
import os
import pathlib
import pdb
import subprocess
import copy
from io import StringIO
from collections import defaultdict
import torch
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
from einops import rearrange
import logging
from tqdm import tqdm
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib import gridspec
import numpy as np
import torch.autograd.profiler as profiler
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from allennlp.training.scheduler import Scheduler
from allennlp.training.learning_rate_schedulers import NoamLR
import pandas as pd
from transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image
from metrics import MSEMetric, AccuracyMetric, F1Metric
from language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder
from navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser
from train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer
from navigation_transformer import NavigationTransformerEncoder
from train_transformer import TransformerTrainer
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self,
dataset_reader: NavigationDatasetReader,
encoder: TransformerEncoder,
optimizer: torch.optim.Optimizer,
scheduler: Scheduler,
num_epochs: int,
num_blocks: int,
device: torch.device,
checkpoint_dir: str,
num_models_to_keep: int,
generate_after_n: int,
resolution: int = 64,
patch_size: int = 8,
block_size: int = 4,
batch_size: int = 16,
output_type: str = "per-pixel",
checkpoint_every: int = 64,
validation_limit: int = 16,
depth: int = 7,
score_type: str = "acc",
best_epoch: int = -1,
seed: int = 12,
zero_weight: float = 0.05,
debug_image_top_k: int = None,
debug_image_threshold: float = None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[],
encoder=encoder,
optimizer=optimizer,
scheduler=scheduler,
num_epochs=num_epochs,
num_blocks=num_blocks,
device=device,
checkpoint_dir=checkpoint_dir,
num_models_to_keep=num_models_to_keep,
generate_after_n=generate_after_n,
score_type=score_type,
patch_size=patch_size,
block_size=block_size,
output_type=output_type,
resolution=resolution,
depth=depth,
best_epoch=best_epoch,
seed=seed,
zero_weight=zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
if debug_image_top_k < 0:
debug_image_top_k = None
if debug_image_threshold < 0:
debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i+self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f"Validating epoch {epoch} step {step}...")
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read("dev", validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, epoch, b, 0)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.read("dev", self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f"Test-time pixel acc {mean_acc * 100}")
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f"Training epoch {epoch}...")
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read("train")):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
# skip bad examples
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step+1)
self.scheduler.step_batch(it)
#print(f"step: {step+1} checkpoint_every: {self.checkpoint_every} {(step +1) % self.checkpoint_every}")
if (step+1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.validation_limit)
print(f"Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}")
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f"{epoch}_{step}", is_best)
step += 1
print(f"skipped {skipped} examples")
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.validation_limit)
print(f"Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}")
if self.score_type == "acc":
return (epoch_acc)/2, -1.0
else:
raise AssertionError(f"invalid score type {self.score_type}")
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight = [1.0, 1.0]):
"""
compute per-patch for each patch
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs["next_position"]
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
# binarize patches
next_sum_image = torch.sum(true_next_image, dim = 2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
# any patch that has a 1 pixel in it gets 1
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
total_loss = next_pixel_loss
print(f"loss {total_loss.item()}")
return total_loss
def generate_debugging_image(self,
true_img,
path_state,
pred_path,
out_path,
caption = None,
top_k = None,
threshold = None):
|
def validate(self, batch_instance, epoch_num, batch_num, instance_num, top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size, output_type="per-patch", upsample=True)
# f1 metric
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance["path_state"].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs["next_position"].shape[0]):
output_path = self.checkpoint_dir.joinpath(f"batch_{batch_num}").joinpath(f"instance_{i}")
output_path.mkdir(parents = True, exist_ok=True)
command = batch_instance["command"][i]
command = [x for x in command if x != "<PAD>"]
command = " ".join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance["path_state"][i]
pred_path = next_position[i]
self.generate_debugging_image(image,
path_state,
pred_path,
output_path,
caption = command,
top_k = top_k,
threshold = threshold)
return {"next_f1": next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-8
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1-pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1-pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
def main(args):
device = "cpu"
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f"cuda:{free_gpu_id}"
#device = "cuda:0"
device = torch.device(device)
print(f"On device {device}")
#test = torch.ones((1))
#test = test.to(device)
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir = args.data_dir,
out_path = args.out_path,
path_width = args.path_width,
read_limit = args.read_limit,
batch_size = args.batch_size,
max_len = args.max_len,
tokenizer = tokenizer,
shuffle = args.shuffle,
overfit = args.overfit,
is_bert = "bert" in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath("vocab.json"), "w") as f1:
json.dump(list(train_vocab), f1)
else:
print(f"Reading vocab from {checkpoint_dir}")
with open(checkpoint_dir.joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
print(f"got data")
# construct the vocab and tokenizer
print(f"constructing model...")
# get the embedder from args
if args.embedder == "random":
embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)
elif args.embedder == "glove":
embedder = GloveEmbedder(tokenizer, train_vocab, args.embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith("bert"):
embedder = BERTEmbedder(model_name = args.embedder, max_seq_len = args.max_len)
else:
raise NotImplementedError(f"No embedder {args.embedder}")
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size = args.resolution,
patch_size = args.patch_size,
language_embedder = embedder,
n_layers = args.n_layers,
channels = args.channels,
n_heads = args.n_heads,
hidden_dim = args.hidden_dim,
ff_dim = args.ff_dim,
dropout = args.dropout,
embed_dropout = args.embed_dropout,
output_type = args.output_type,
positional_encoding_type = args.pos_encoding_type,
device = device,
log_weights = args.test,
locality_mask = args.locality_mask,
locality_neighborhood = args.locality_neighborhood,
init_scale = args.init_scale)
# Initialize encoder
encoder = encoder_cls(**encoder_kwargs)
if args.cuda is not None:
encoder = encoder.cuda(device)
print(encoder)
# construct optimizer
optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
# scheduler
scheduler = NoamLR(optimizer, model_size = args.hidden_dim, warmup_steps = args.warmup, factor = args.lr_factor)
best_epoch = -1
block_size = int((args.resolution * 4)/64)
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
# file exists
try:
assert(len(glob.glob(os.path.join(args.checkpoint_dir, "*.th"))) == 0)
except AssertionError:
raise AssertionError(f"Output directory {args.checkpoint_dir} non-empty, will not overwrite!")
else:
# resume from pre-trained
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
# get training info
best_checkpoint_data = json.load(open(pathlib.Path(args.checkpoint_dir).joinpath("best_training_state.json")))
print(f"best_checkpoint_data {best_checkpoint_data}")
best_epoch = best_checkpoint_data["epoch"]
# save arg config to checkpoint_dir
with open(pathlib.Path(args.checkpoint_dir).joinpath("config.yaml"), "w") as f1:
dump_args = copy.deepcopy(args)
# drop stuff we can't serialize
del(dump_args.__dict__["cfg"])
del(dump_args.__dict__["__cwd__"])
del(dump_args.__dict__["__path__"])
to_dump = dump_args.__dict__
# dump
yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
else:
# test-time, load best model
print(f"loading model weights from {args.checkpoint_dir}")
#state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"))
#encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
num_blocks = 1
# construct trainer
trainer = NavigationTransformerTrainer(dataset_reader = dataset_reader,
encoder = encoder,
optimizer = optimizer,
scheduler = scheduler,
num_epochs = args.num_epochs,
num_blocks = num_blocks,
device = device,
checkpoint_dir = args.checkpoint_dir,
checkpoint_every = args.checkpoint_every,
validation_limit = args.validation_limit,
num_models_to_keep = args.num_models_to_keep,
generate_after_n = args.generate_after_n,
score_type=args.score_type,
depth = depth,
resolution = args.resolution,
output_type = args.output_type,
patch_size = args.patch_size,
block_size = block_size,
best_epoch = best_epoch,
seed = args.seed,
zero_weight = args.zero_weight,
debug_image_top_k = args.debug_image_top_k,
debug_image_threshold = args.debug_image_threshold)
if not args.test:
trainer.train()
else:
print(f"evaluating")
acc = trainer.evaluate()
print(f"accuracy: {acc}")
if __name__ == "__main__":
np.random.seed(12)
torch.manual_seed(12)
parser = configure_parser()
args = parser.parse_args()
main(args)
| caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2,2, figsize=(16,16))
# gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1])
text_ax = ax[0,1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize = 12)
text_ax.axis("off")
props = dict(boxstyle='round',
facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
# img_ax = plt.subplot(gs[2])
img_ax = ax[1,0]
#w = int(40 * (self.resolution / 224))
true_img = true_img.detach().cpu().numpy().astype(float)[:,:,0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1,1,3)).astype(float)
true_ax = ax[0,0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1,:,:]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape = (512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path<1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1,1,3)).astype(float)
pred_ax = ax[1,1]
pred_ax.imshow(pred_path)
file_path = f"{out_path}.png"
print(f"saving to {file_path}")
plt.savefig(file_path)
plt.close() | identifier_body |
bridge_contract.go | package bridge
import (
"context"
"errors"
"math/big"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/stellar/go/clients/horizonclient"
"github.com/stellar/go/keypair"
hProtocol "github.com/stellar/go/protocols/horizon"
"github.com/stellar/go/protocols/horizon/effects"
"github.com/stellar/go/protocols/horizon/operations"
tfeth "github.com/threefoldtech/eth-bridge/api"
"github.com/threefoldtech/eth-bridge/api/bridge/contract"
)
const ERC20AddressLength = 20
type ERC20Address [ERC20AddressLength]byte
var (
ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
)
const (
// retryDelay is the delay to retry calls when there are no peers
retryDelay = time.Second * 15
)
// BridgeContract exposes a higher lvl api for specific contract bindings. In case of proxy contracts,
// the bridge needs to use the bindings of the implementation contract, but the address of the proxy.
type BridgeContract struct {
networkConfig tfeth.NetworkConfiguration // Ethereum network
networkName string
lc *LightClient
filter *contract.TokenFilterer
transactor *contract.TokenTransactor
caller *contract.TokenCaller
contract *bind.BoundContract
abi abi.ABI
wallet *stellarWallet
// cache some stats in case they might be usefull
head *types.Header // Current head header of the bridge
balance *big.Int // The current balance of the bridge (note: ethers only!)
nonce uint64 // Current pending nonce of the bridge
price *big.Int // Current gas price to issue funds with
lock sync.RWMutex // Lock protecting the bridge's internals
}
// GetContractAdress returns the address of this contract
func (bridge *BridgeContract) GetContractAdress() common.Address {
return bridge.networkConfig.ContractAddress
}
// NewBridgeContract creates a new wrapper for an allready deployed contract
func NewBridgeContract(networkName string, bootnodes []string, contractAddress string, port int, accountJSON, accountPass string, datadir string, cancel <-chan struct{}, stellarNetwork string, stellarSeed string) (*BridgeContract, error) {
// load correct network config
networkConfig, err := tfeth.GetEthNetworkConfiguration(networkName)
if err != nil {
return nil, err
}
// override contract address if it's provided
if contractAddress != "" {
networkConfig.ContractAddress = common.HexToAddress(contractAddress)
// TODO: validate ABI of contract,
// see https://github.com/threefoldtech/rivine-extension-erc20/issues/3
}
bootstrapNodes, err := networkConfig.GetBootnodes(bootnodes)
if err != nil {
return nil, err
}
lc, err := NewLightClient(LightClientConfig{
Port: port,
DataDir: datadir,
BootstrapNodes: bootstrapNodes,
NetworkName: networkConfig.NetworkName,
NetworkID: networkConfig.NetworkID,
GenesisBlock: networkConfig.GenesisBlock,
})
if err != nil {
return nil, err
}
err = lc.LoadAccount(accountJSON, accountPass)
if err != nil {
return nil, err
}
filter, err := contract.NewTokenFilterer(networkConfig.ContractAddress, lc.Client)
if err != nil {
return nil, err
}
transactor, err := contract.NewTokenTransactor(networkConfig.ContractAddress, lc.Client)
if err != nil |
caller, err := contract.NewTokenCaller(networkConfig.ContractAddress, lc.Client)
if err != nil {
return nil, err
}
contract, abi, err := bindTTFT20(networkConfig.ContractAddress, lc.Client, lc.Client, lc.Client)
if err != nil {
return nil, err
}
w := &stellarWallet{
network: stellarNetwork,
}
if stellarSeed != "" {
w.keypair, err = keypair.ParseFull(stellarSeed)
if err != nil {
return nil, err
}
}
return &BridgeContract{
networkName: networkName,
networkConfig: networkConfig,
lc: lc,
filter: filter,
transactor: transactor,
caller: caller,
contract: contract,
abi: abi,
wallet: w,
}, nil
}
// Close terminates the Ethereum connection and tears down the stack.
func (bridge *BridgeContract) Close() error {
return bridge.lc.Close()
}
// AccountAddress returns the account address of the bridge contract
func (bridge *BridgeContract) AccountAddress() (common.Address, error) {
return bridge.lc.AccountAddress()
}
// LightClient returns the LightClient driving this bridge contract
func (bridge *BridgeContract) LightClient() *LightClient {
return bridge.lc
}
// ABI returns the parsed and bound ABI driving this bridge contract
func (bridge *BridgeContract) ABI() abi.ABI {
return bridge.abi
}
// Refresh attempts to retrieve the latest header from the chain and extract the
// associated bridge balance and nonce for connectivity caching.
func (bridge *BridgeContract) Refresh(head *types.Header) error {
// Ensure a state update does not run for too long
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// If no header was specified, use the current chain head
var err error
if head == nil {
if head, err = bridge.lc.HeaderByNumber(ctx, nil); err != nil {
return err
}
}
// Retrieve the balance, nonce and gas price from the current head
var (
nonce uint64
price *big.Int
balance *big.Int
)
if price, err = bridge.lc.SuggestGasPrice(ctx); err != nil {
return err
}
if balance, err = bridge.lc.AccountBalanceAt(ctx, head.Number); err != nil {
return err
}
log.Debug(bridge.lc.account.account.Address.Hex())
// Everything succeeded, update the cached stats
bridge.lock.Lock()
bridge.head, bridge.balance = head, balance
bridge.price, bridge.nonce = price, nonce
bridge.lock.Unlock()
return nil
}
// Loop subscribes to new eth heads. If a new head is received, it is passed on the given channel,
// after which the internal stats are updated if no update is already in progress.
// The call blocks until the head subscription's channel is closed.
func (bridge *BridgeContract) Loop(ch chan<- *types.Header) {
	log.Debug("Subscribing to eth headers")
	// channel to receive head updates from client on
	heads := make(chan *types.Header, 16)
	// subscribe to head upates
	sub, err := bridge.lc.SubscribeNewHead(context.Background(), heads)
	if err != nil {
		// Bail out: the original code fell through here, which dereferenced a
		// nil subscription in sub.Unsubscribe() and then blocked forever on a
		// channel nobody would ever write to.
		log.Error("Failed to subscribe to head events", "err", err)
		return
	}
	defer sub.Unsubscribe()
	// channel so we can update the internal state from the heads
	update := make(chan *types.Header)
	// stop the refresher goroutine when the head stream ends (fixes goroutine leak)
	defer close(update)
	go func() {
		for head := range update {
			// old heads should be ignored during a chain sync after some downtime
			if err := bridge.Refresh(head); err != nil {
				log.Warn("Failed to update state", "block", head.Number, "err", err)
			}
			log.Debug("Internal stats updated", "block", head.Number, "account balance", bridge.balance, "gas price", bridge.price, "nonce", bridge.nonce)
		}
	}()
	for head := range heads {
		ch <- head
		select {
		// only process new head if another isn't being processed yet
		case update <- head:
			log.Debug("Processing new head")
		default:
			log.Debug("Ignoring current head, update already in progress")
		}
	}
	log.Error("Bridge state update loop ended")
}
// SubscribeTransfers subscribes to new Transfer events on the given contract. This call blocks
// and prints out info about any transfer as it happened
func (bridge *BridgeContract) SubscribeTransfers() error {
	// Unbuffered channel: the watcher goroutine blocks until each event is consumed here.
	sink := make(chan *contract.TokenTransfer)
	// Start == nil: watch from the latest block onwards.
	opts := &bind.WatchOpts{Context: context.Background(), Start: nil}
	// nil, nil: do not filter on the indexed from/to addresses.
	sub, err := bridge.filter.WatchTransfer(opts, sink, nil, nil)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()
	// Log every transfer until the subscription itself fails; a subscription
	// error is the only way this loop terminates.
	for {
		select {
		case err = <-sub.Err():
			return err
		case transfer := <-sink:
			log.Debug("Noticed transfer event", "from", transfer.From, "to", transfer.To, "amount", transfer.Tokens)
		}
	}
}
// SubscribeMint subscribes to new Mint events on the given contract. This call blocks
// and prints out info about any mint as it happened
func (bridge *BridgeContract) SubscribeMint() error {
	// Deliver mint events on an unbuffered channel, watching from the latest block.
	mints := make(chan *contract.TokenMint)
	sub, err := bridge.filter.WatchMint(
		&bind.WatchOpts{Context: context.Background(), Start: nil},
		mints, nil, nil,
	)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()
	// Log each mint as it arrives; only a subscription error ends the loop.
	for {
		select {
		case mint := <-mints:
			log.Info("Noticed mint event", "receiver", mint.Receiver, "amount", mint.Tokens, "TFT tx id", mint.Txid)
		case err = <-sub.Err():
			return err
		}
	}
}
// WithdrawEvent holds relevant information about a withdraw event
type WithdrawEvent struct {
	receiver           common.Address // ethereum address that initiated the withdraw
	amount             *big.Int       // amount of tokens withdrawn
	blockchain_address string         // address on the target blockchain to withdraw to
	network            string         // name of the network to withdraw to
	txHash             common.Hash    // hash of the transaction containing the event
	blockHash          common.Hash    // hash of the block containing the transaction
	blockHeight        uint64         // height of the containing block
	raw                []byte         // raw (unparsed) log data of the event
}
// Receiver returns the ethereum address that initiated the withdraw.
func (w WithdrawEvent) Receiver() common.Address {
	return w.receiver
}
// Amount returns the amount of tokens withdrawn.
func (w WithdrawEvent) Amount() *big.Int {
	return w.amount
}
// BlockchainAddress returns the address on the target blockchain to withdraw to.
func (w WithdrawEvent) BlockchainAddress() string {
	return w.blockchain_address
}
// Network returns the name of the network to withdraw to.
func (w WithdrawEvent) Network() string {
	return w.network
}
// TxHash returns the hash of the transaction containing the event.
func (w WithdrawEvent) TxHash() common.Hash {
	return w.txHash
}
// BlockHash returns the hash of the containing block.
func (w WithdrawEvent) BlockHash() common.Hash {
	return w.blockHash
}
// BlockHeight returns the height of the containing block.
func (w WithdrawEvent) BlockHeight() uint64 {
	return w.blockHeight
}
// SubscribeWithdraw subscribes to new Withdraw events on the given contract. This call blocks
// and prints out info about any withdraw as it happened, forwarding each event on wc.
//
// NOTE(review): startHeight is only logged; watchOpts.Start is nil, so watching
// begins at the latest block rather than at startHeight — confirm this is intended.
func (bridge *BridgeContract) SubscribeWithdraw(wc chan<- WithdrawEvent, startHeight uint64) error {
	log.Debug("Subscribing to withdraw events", "start height", startHeight)
	sink := make(chan *contract.TokenWithdraw)
	watchOpts := &bind.WatchOpts{Context: context.Background(), Start: nil}
	// Use the local WatchWithdraw override (backwards-compatible event decoding),
	// not the generated filterer binding.
	sub, err := bridge.WatchWithdraw(watchOpts, sink, nil)
	if err != nil {
		log.Error("Subscribing to withdraw events failed", "err", err)
		return err
	}
	defer sub.Unsubscribe()
	// Forward withdraw events until the subscription fails.
	for {
		select {
		case err = <-sub.Err():
			return err
		case withdraw := <-sink:
			if withdraw.Raw.Removed {
				// ignore removed events (logs reverted by a chain reorg)
				continue
			}
			log.Debug("Noticed withdraw event", "receiver", withdraw.Receiver, "amount", withdraw.Tokens)
			wc <- WithdrawEvent{
				receiver:           withdraw.Receiver,
				amount:             withdraw.Tokens,
				txHash:             withdraw.Raw.TxHash,
				blockHash:          withdraw.Raw.BlockHash,
				blockHeight:        withdraw.Raw.BlockNumber,
				blockchain_address: withdraw.BlockchainAddress,
				network:            withdraw.Network,
				raw:                withdraw.Raw.Data,
			}
		}
	}
}
// WatchWithdraw is a free log subscription operation binding the contract event 0x884edad9ce6fa2440d8a54cc123490eb96d2768479d49ff9c7366125a9424364.
//
// Solidity: e Withdraw(receiver indexed address, tokens uint256)
//
// This method is copied from the generated bindings and slightly modified, so we can add logic to stay backwards compatible with the old withdraw event signature
func (bridge *BridgeContract) WatchWithdraw(opts *bind.WatchOpts, sink chan<- *contract.TokenWithdraw, receiver []common.Address) (event.Subscription, error) {
	// Build the topic filter for the indexed `receiver` argument; an empty
	// slice means "match any receiver".
	var receiverRule []interface{}
	for _, receiverItem := range receiver {
		receiverRule = append(receiverRule, receiverItem)
	}
	logs, sub, err := bridge.contract.WatchLogs(opts, "Withdraw", receiverRule)
	if err != nil {
		return nil, err
	}
	// Wrap the raw log stream in a subscription that decodes each log into a
	// TokenWithdraw and forwards it, honoring caller cancellation via `quit`.
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// New log arrived, parse the event and forward to the user
				event := new(contract.TokenWithdraw)
				if err := bridge.contract.UnpackLog(event, "Withdraw", log); err != nil {
					return err
				}
				event.Raw = log
				// Forward without blocking forever: abort on subscription
				// error or caller shutdown while the sink is full.
				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}
// TransferFunds transfers funds from one address to another, retrying the
// underlying call for as long as the client reports that no peers are available.
func (bridge *BridgeContract) TransferFunds(recipient common.Address, amount *big.Int) error {
	for {
		err := bridge.transferFunds(recipient, amount)
		if !IsNoPeerErr(err) {
			return err
		}
		time.Sleep(retryDelay)
	}
}
// transferFunds performs a single Transfer contract call from the bridge's
// account to the given recipient. A nil amount is rejected up front.
func (bridge *BridgeContract) transferFunds(recipient common.Address, amount *big.Int) error {
	if amount == nil {
		return errors.New("invalid amount")
	}
	accountAddress, err := bridge.lc.AccountAddress()
	if err != nil {
		return err
	}
	// Bound the transaction submission so a stalled client cannot hang the bridge.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	opts := &bind.TransactOpts{
		Context: ctx, From: accountAddress,
		Signer: bridge.getSignerFunc(),
		// nil/zero values defer nonce, gas limit and gas price selection to
		// the bind layer (go-ethereum convention).
		Value: nil, Nonce: nil, GasLimit: 0, GasPrice: nil,
	}
	// The transaction object is discarded; only submission success matters here.
	_, err = bridge.transactor.Transfer(opts, recipient, amount)
	return err
}
// Mint wraps the contract mint call, retrying for as long as the client
// reports that no peers are available.
func (bridge *BridgeContract) Mint(receiver ERC20Address, amount *big.Int, txID string) error {
	for {
		err := bridge.mint(receiver, amount, txID)
		if !IsNoPeerErr(err) {
			return err
		}
		time.Sleep(retryDelay)
	}
}
// mint performs a single MintTokens transaction on the token contract,
// crediting `amount` tokens to `receiver` for the given TFT transaction id.
// It returns an error when the amount is nil, the account or a gas price
// suggestion cannot be obtained, or the transaction fails to submit.
func (bridge *BridgeContract) mint(receiver ERC20Address, amount *big.Int, txID string) error {
	log.Debug("Calling mint function in contract")
	if amount == nil {
		return errors.New("invalid amount")
	}
	accountAddress, err := bridge.lc.AccountAddress()
	if err != nil {
		return err
	}
	// TODO estimate gas more correctly ..
	gas, err := bridge.lc.SuggestGasPrice(context.Background())
	if err != nil {
		return err
	}
	// Offer 10x the suggested gas price. Use big.Int arithmetic: the previous
	// `big.NewInt(10 * gas.Int64())` produced an undefined value when the
	// suggested price did not fit in an int64, and could overflow the multiply.
	newGas := new(big.Int).Mul(gas, big.NewInt(10))
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	opts := &bind.TransactOpts{
		Context: ctx, From: accountAddress,
		Signer: bridge.getSignerFunc(),
		Value: nil, Nonce: nil, GasLimit: 100000, GasPrice: newGas,
	}
	_, err = bridge.transactor.MintTokens(opts, common.Address(receiver), amount, txID)
	return err
}
// IsMintTxID wraps the read-only isMintTxID call, retrying for as long as the
// client reports that no peers are available.
func (bridge *BridgeContract) IsMintTxID(txID string) (bool, error) {
	for {
		res, err := bridge.isMintTxID(txID)
		if !IsNoPeerErr(err) {
			return res, err
		}
		time.Sleep(retryDelay)
	}
}
// isMintTxID performs a single read-only IsMintID contract call, reporting
// whether the contract already knows txID as a used mint transaction id.
func (bridge *BridgeContract) isMintTxID(txID string) (bool, error) {
	log.Debug("Calling isMintID")
	// Bound the read so a stalled client cannot hang the caller.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	opts := &bind.CallOpts{Context: ctx}
	return bridge.caller.IsMintID(opts, txID)
}
// getSignerFunc returns a bind.SignerFn that signs transactions with the light
// client's loaded account, refusing to sign for any other address. The
// `signer` parameter of the returned func is unused; the chain id comes from
// the configured network instead.
func (bridge *BridgeContract) getSignerFunc() bind.SignerFn {
	return func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) {
		accountAddress, err := bridge.lc.AccountAddress()
		if err != nil {
			return nil, err
		}
		// Only the bridge's own account may be signed for.
		if address != accountAddress {
			return nil, errors.New("not authorized to sign this account")
		}
		// Sign with the configured network's chain id.
		networkID := int64(bridge.networkConfig.NetworkID)
		return bridge.lc.SignTx(tx, big.NewInt(networkID))
	}
}
// TokenBalance returns the token balance of the given address via a read-only
// BalanceOf contract call, bounded to 30 seconds.
func (bridge *BridgeContract) TokenBalance(address common.Address) (*big.Int, error) {
	log.Debug("Calling TokenBalance function in contract")
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return bridge.caller.BalanceOf(&bind.CallOpts{Context: ctx}, address)
}
// EthBalance forces a state refresh and returns the cached ether balance of
// the bridge account (alongside any refresh error).
func (bridge *BridgeContract) EthBalance() (*big.Int, error) {
	err := bridge.Refresh(nil) // force a refresh
	// Read the cached balance under the lock: Refresh also runs concurrently
	// from the head-update loop and writes bridge.balance while holding
	// bridge.lock, so an unguarded read here is a data race.
	bridge.lock.RLock()
	defer bridge.lock.RUnlock()
	return bridge.balance, err
}
// bindTTFT20 binds a generic wrapper to an already deployed contract,
// returning both the bound contract and the parsed token ABI.
//
// This method is copied from the generated bindings as a convenient way to get a *bind.Contract, as this is needed to implement the WatchWithdraw function ourselves
func bindTTFT20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, abi.ABI, error) {
	// Parse the generated ABI JSON; on failure the zero-value ABI is returned
	// alongside the error.
	parsed, err := abi.JSON(strings.NewReader(contract.TokenABI))
	if err != nil {
		return nil, parsed, err
	}
	return bind.NewBoundContract(address, parsed, caller, transactor, filterer), parsed, nil
}
// GetHorizonClient gets the horizon client based on the wallet's network.
// Only "smart-chain-testnet" and "main" are recognized; any other network
// name yields an error.
func (b *BridgeContract) GetHorizonClient() (*horizonclient.Client, error) {
	if b.networkName == "smart-chain-testnet" {
		return horizonclient.DefaultTestNetClient, nil
	}
	if b.networkName == "main" {
		return horizonclient.DefaultPublicNetClient, nil
	}
	return nil, errors.New("network is not supported")
}
// StreamStellarAccountPayments streams payment operations for the given
// stellar account through the horizon client, invoking handler for each
// operation. Blocks until ctx is cancelled or the stream fails.
func (b *BridgeContract) StreamStellarAccountPayments(ctx context.Context, accountID string, handler func(op operations.Operation)) error {
	client, err := b.GetHorizonClient()
	if err != nil {
		return err
	}
	opRequest := horizonclient.OperationRequest{
		ForAccount: accountID,
	}
	return client.StreamPayments(ctx, opRequest, handler)
}
// StreamStellarAccountTransactions streams transactions for the given stellar
// account through the horizon client, invoking handler for each transaction.
// Blocks until ctx is cancelled or the stream fails.
func (b *BridgeContract) StreamStellarAccountTransactions(ctx context.Context, accountID string, handler func(op hProtocol.Transaction)) error {
	client, err := b.GetHorizonClient()
	if err != nil {
		return err
	}
	opRequest := horizonclient.TransactionRequest{
		ForAccount: accountID,
	}
	return client.StreamTransactions(ctx, opRequest, handler)
}
// GetTransactionEffects fetches the effects page of the given stellar
// transaction from the horizon network.
func (b *BridgeContract) GetTransactionEffects(txHash string) (effects effects.EffectsPage, err error) {
	client, err := b.GetHorizonClient()
	if err != nil {
		return effects, err
	}
	effectsReq := horizonclient.EffectRequest{
		ForTransaction: txHash,
	}
	// client.Effects already returns (page, err); the previous explicit
	// err-check-then-return-nil re-branch was redundant.
	return client.Effects(effectsReq)
}
| {
return nil, err
} | conditional_block |
bridge_contract.go | package bridge
import (
"context"
"errors"
"math/big"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/stellar/go/clients/horizonclient"
"github.com/stellar/go/keypair"
hProtocol "github.com/stellar/go/protocols/horizon"
"github.com/stellar/go/protocols/horizon/effects"
"github.com/stellar/go/protocols/horizon/operations"
tfeth "github.com/threefoldtech/eth-bridge/api"
"github.com/threefoldtech/eth-bridge/api/bridge/contract"
)
// ERC20AddressLength is the length, in bytes, of a raw ERC20 (ethereum) address.
const ERC20AddressLength = 20
// ERC20Address is a raw 20-byte ERC20 (ethereum) address.
type ERC20Address [ERC20AddressLength]byte
var (
	// ether is the amount of wei in a single ether (10^18), for unit conversions.
	ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
)
const (
	// retryDelay is the delay to retry calls when there are no peers
	retryDelay = time.Second * 15
)
// BridgeContract exposes a higher lvl api for specific contract bindings. In case of proxy contracts,
// the bridge needs to use the bindings of the implementation contract, but the address of the proxy.
type BridgeContract struct {
	networkConfig tfeth.NetworkConfiguration // Ethereum network
	networkName   string                     // name the network was configured with (also selects the horizon client)
	lc            *LightClient               // light ethereum client used for all chain access
	filter     *contract.TokenFilterer   // generated event-filter binding
	transactor *contract.TokenTransactor // generated transaction binding
	caller     *contract.TokenCaller     // generated read-only call binding
	contract *bind.BoundContract // raw bound contract, needed for the custom WatchWithdraw
	abi      abi.ABI             // parsed token ABI
	wallet *stellarWallet // stellar-side wallet for the bridge
	// cache some stats in case they might be useful
	head    *types.Header // Current head header of the bridge
	balance *big.Int      // The current balance of the bridge (note: ethers only!)
	nonce   uint64        // Current pending nonce of the bridge
	price   *big.Int      // Current gas price to issue funds with
	lock sync.RWMutex // Lock protecting the bridge's internals
}
// GetContractAdress returns the address of this contract.
// NOTE(review): the name misspells "Address"; it is exported, so renaming
// would break callers — keep as-is unless all call sites can be updated.
func (bridge *BridgeContract) GetContractAdress() common.Address {
	return bridge.networkConfig.ContractAddress
}
// NewBridgeContract creates a new wrapper for an already deployed contract.
// It resolves the named network configuration, starts a light client with the
// given port/datadir/bootnodes, loads the signing account from accountJSON and
// accountPass, binds the token contract (filterer, transactor, caller and a
// raw bound contract), and sets up the stellar wallet from stellarSeed.
//
// NOTE(review): the `cancel` parameter is not used anywhere in this body —
// confirm whether it should be wired into the light client or dropped.
func NewBridgeContract(networkName string, bootnodes []string, contractAddress string, port int, accountJSON, accountPass string, datadir string, cancel <-chan struct{}, stellarNetwork string, stellarSeed string) (*BridgeContract, error) {
	// load correct network config
	networkConfig, err := tfeth.GetEthNetworkConfiguration(networkName)
	if err != nil {
		return nil, err
	}
	// override contract address if it's provided
	if contractAddress != "" {
		networkConfig.ContractAddress = common.HexToAddress(contractAddress)
		// TODO: validate ABI of contract,
		// see https://github.com/threefoldtech/rivine-extension-erc20/issues/3
	}
	bootstrapNodes, err := networkConfig.GetBootnodes(bootnodes)
	if err != nil {
		return nil, err
	}
	// Spin up the light ethereum client for this network.
	lc, err := NewLightClient(LightClientConfig{
		Port:           port,
		DataDir:        datadir,
		BootstrapNodes: bootstrapNodes,
		NetworkName:    networkConfig.NetworkName,
		NetworkID:      networkConfig.NetworkID,
		GenesisBlock:   networkConfig.GenesisBlock,
	})
	if err != nil {
		return nil, err
	}
	// Load the signing account into the light client.
	err = lc.LoadAccount(accountJSON, accountPass)
	if err != nil {
		return nil, err
	}
	// Bind the generated contract wrappers (events, transactions, calls).
	filter, err := contract.NewTokenFilterer(networkConfig.ContractAddress, lc.Client)
	if err != nil {
		return nil, err
	}
	transactor, err := contract.NewTokenTransactor(networkConfig.ContractAddress, lc.Client)
	if err != nil {
		return nil, err
	}
	caller, err := contract.NewTokenCaller(networkConfig.ContractAddress, lc.Client)
	if err != nil {
		return nil, err
	}
	// Also keep a raw bound contract + parsed ABI for the custom WatchWithdraw.
	contract, abi, err := bindTTFT20(networkConfig.ContractAddress, lc.Client, lc.Client, lc.Client)
	if err != nil {
		return nil, err
	}
	// Stellar wallet; the keypair is only parsed when a seed was provided.
	w := &stellarWallet{
		network: stellarNetwork,
	}
	if stellarSeed != "" {
		w.keypair, err = keypair.ParseFull(stellarSeed)
		if err != nil {
			return nil, err
		}
	}
	return &BridgeContract{
		networkName:   networkName,
		networkConfig: networkConfig,
		lc:            lc,
		filter:        filter,
		transactor:    transactor,
		caller:        caller,
		contract:      contract,
		abi:           abi,
		wallet:        w,
	}, nil
}
// Close terminates the Ethereum connection and tears down the stack.
func (bridge *BridgeContract) Close() error {
	return bridge.lc.Close()
}
// AccountAddress returns the address of the account loaded into the light client.
func (bridge *BridgeContract) AccountAddress() (common.Address, error) {
	return bridge.lc.AccountAddress()
}
// LightClient returns the LightClient driving this bridge contract.
func (bridge *BridgeContract) LightClient() *LightClient {
	return bridge.lc
}
// ABI returns the parsed and bound ABI driving this bridge contract.
func (bridge *BridgeContract) ABI() abi.ABI {
	return bridge.abi
}
// Refresh attempts to retrieve the latest header from the chain and extract the
// associated bridge balance and nonce for connectivity caching.
func (bridge *BridgeContract) Refresh(head *types.Header) error {
// Ensure a state update does not run for too long
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// If no header was specified, use the current chain head
var err error
if head == nil {
if head, err = bridge.lc.HeaderByNumber(ctx, nil); err != nil {
return err
}
}
// Retrieve the balance, nonce and gas price from the current head
var (
nonce uint64
price *big.Int
balance *big.Int
)
if price, err = bridge.lc.SuggestGasPrice(ctx); err != nil {
return err
}
if balance, err = bridge.lc.AccountBalanceAt(ctx, head.Number); err != nil {
return err
}
log.Debug(bridge.lc.account.account.Address.Hex())
// Everything succeeded, update the cached stats
bridge.lock.Lock()
bridge.head, bridge.balance = head, balance
bridge.price, bridge.nonce = price, nonce
bridge.lock.Unlock()
return nil
}
// Loop subscribes to new eth heads. If a new head is received, it is passed on the given channel,
// after which the internal stats are updated if no update is already in progress
func (bridge *BridgeContract) Loop(ch chan<- *types.Header) {
log.Debug("Subscribing to eth headers")
// channel to receive head updates from client on
heads := make(chan *types.Header, 16)
// subscribe to head upates
sub, err := bridge.lc.SubscribeNewHead(context.Background(), heads)
if err != nil {
log.Error("Failed to subscribe to head events", "err", err)
}
defer sub.Unsubscribe()
// channel so we can update the internal state from the heads
update := make(chan *types.Header)
go func() {
for head := range update {
// old heads should be ignored during a chain sync after some downtime
if err := bridge.Refresh(head); err != nil {
log.Warn("Failed to update state", "block", head.Number, "err", err)
}
log.Debug("Internal stats updated", "block", head.Number, "account balance", bridge.balance, "gas price", bridge.price, "nonce", bridge.nonce)
}
}()
for head := range heads {
ch <- head
select {
// only process new head if another isn't being processed yet
case update <- head:
log.Debug("Processing new head")
default:
log.Debug("Ignoring current head, update already in progress")
}
}
log.Error("Bridge state update loop ended")
}
// SubscribeTransfers subscribes to new Transfer events on the given contract. This call blocks
// and prints out info about any transfer as it happened
func (bridge *BridgeContract) SubscribeTransfers() error {
sink := make(chan *contract.TokenTransfer)
opts := &bind.WatchOpts{Context: context.Background(), Start: nil}
sub, err := bridge.filter.WatchTransfer(opts, sink, nil, nil)
if err != nil {
return err
}
defer sub.Unsubscribe()
for {
select {
case err = <-sub.Err():
return err
case transfer := <-sink:
log.Debug("Noticed transfer event", "from", transfer.From, "to", transfer.To, "amount", transfer.Tokens)
}
}
}
// SubscribeMint subscribes to new Mint events on the given contract. This call blocks
// and prints out info about any mint as it happened
func (bridge *BridgeContract) SubscribeMint() error {
sink := make(chan *contract.TokenMint)
opts := &bind.WatchOpts{Context: context.Background(), Start: nil}
sub, err := bridge.filter.WatchMint(opts, sink, nil, nil)
if err != nil {
return err
}
defer sub.Unsubscribe()
for {
select {
case err = <-sub.Err():
return err
case mint := <-sink:
log.Info("Noticed mint event", "receiver", mint.Receiver, "amount", mint.Tokens, "TFT tx id", mint.Txid)
}
}
}
// WithdrawEvent holds relevant information about a withdraw event
type WithdrawEvent struct {
receiver common.Address
amount *big.Int
blockchain_address string
network string
txHash common.Hash
blockHash common.Hash
blockHeight uint64
raw []byte
}
// Receiver of the withdraw
func (w WithdrawEvent) Receiver() common.Address {
return w.receiver
}
// Amount withdrawn
func (w WithdrawEvent) Amount() *big.Int {
return w.amount
}
// Blockchain address to withdraw to
func (w WithdrawEvent) BlockchainAddress() string {
return w.blockchain_address
}
// Network to withdraw to
func (w WithdrawEvent) Network() string {
return w.network
}
// TxHash hash of the transaction
func (w WithdrawEvent) TxHash() common.Hash {
return w.txHash
}
// BlockHash of the containing block
func (w WithdrawEvent) BlockHash() common.Hash {
return w.blockHash
}
// BlockHeight of the containing block
func (w WithdrawEvent) BlockHeight() uint64 {
return w.blockHeight
}
// SubscribeWithdraw subscribes to new Withdraw events on the given contract. This call blocks
// and prints out info about any withdraw as it happened
func (bridge *BridgeContract) SubscribeWithdraw(wc chan<- WithdrawEvent, startHeight uint64) error {
log.Debug("Subscribing to withdraw events", "start height", startHeight)
sink := make(chan *contract.TokenWithdraw)
watchOpts := &bind.WatchOpts{Context: context.Background(), Start: nil}
sub, err := bridge.WatchWithdraw(watchOpts, sink, nil)
if err != nil {
log.Error("Subscribing to withdraw events failed", "err", err)
return err
}
defer sub.Unsubscribe()
for {
select {
case err = <-sub.Err():
return err
case withdraw := <-sink:
if withdraw.Raw.Removed {
// ignore removed events
continue
}
log.Debug("Noticed withdraw event", "receiver", withdraw.Receiver, "amount", withdraw.Tokens)
wc <- WithdrawEvent{
receiver: withdraw.Receiver,
amount: withdraw.Tokens,
txHash: withdraw.Raw.TxHash,
blockHash: withdraw.Raw.BlockHash,
blockHeight: withdraw.Raw.BlockNumber,
blockchain_address: withdraw.BlockchainAddress,
network: withdraw.Network,
raw: withdraw.Raw.Data,
}
}
}
}
// WatchWithdraw is a free log subscription operation binding the contract event 0x884edad9ce6fa2440d8a54cc123490eb96d2768479d49ff9c7366125a9424364.
//
// Solidity: e Withdraw(receiver indexed address, tokens uint256)
//
// This method is copied from the generated bindings and slightly modified, so we can add logic to stay backwards compatible with the old withdraw event signature
func (bridge *BridgeContract) WatchWithdraw(opts *bind.WatchOpts, sink chan<- *contract.TokenWithdraw, receiver []common.Address) (event.Subscription, error) {
var receiverRule []interface{}
for _, receiverItem := range receiver {
receiverRule = append(receiverRule, receiverItem)
}
logs, sub, err := bridge.contract.WatchLogs(opts, "Withdraw", receiverRule)
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(contract.TokenWithdraw)
if err := bridge.contract.UnpackLog(event, "Withdraw", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// TransferFunds transfers funds from one address to another
func (bridge *BridgeContract) TransferFunds(recipient common.Address, amount *big.Int) error {
err := bridge.transferFunds(recipient, amount)
for IsNoPeerErr(err) {
time.Sleep(retryDelay)
err = bridge.transferFunds(recipient, amount)
}
return err
}
func (bridge *BridgeContract) transferFunds(recipient common.Address, amount *big.Int) error {
if amount == nil {
return errors.New("invalid amount")
}
accountAddress, err := bridge.lc.AccountAddress()
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.TransactOpts{
Context: ctx, From: accountAddress,
Signer: bridge.getSignerFunc(),
Value: nil, Nonce: nil, GasLimit: 0, GasPrice: nil,
}
_, err = bridge.transactor.Transfer(opts, recipient, amount)
return err
}
func (bridge *BridgeContract) Mint(receiver ERC20Address, amount *big.Int, txID string) error {
err := bridge.mint(receiver, amount, txID)
for IsNoPeerErr(err) {
time.Sleep(retryDelay)
err = bridge.mint(receiver, amount, txID)
}
return err
}
func (bridge *BridgeContract) mint(receiver ERC20Address, amount *big.Int, txID string) error {
log.Debug("Calling mint function in contract")
if amount == nil {
return errors.New("invalid amount")
}
accountAddress, err := bridge.lc.AccountAddress()
if err != nil {
return err
}
// TODO estimate gas more correctly ..
gas, err := bridge.lc.SuggestGasPrice(context.Background())
if err != nil {
return err
}
newGas := big.NewInt(10 * gas.Int64())
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.TransactOpts{
Context: ctx, From: accountAddress,
Signer: bridge.getSignerFunc(),
Value: nil, Nonce: nil, GasLimit: 100000, GasPrice: newGas,
}
_, err = bridge.transactor.MintTokens(opts, common.Address(receiver), amount, txID)
return err
}
func (bridge *BridgeContract) IsMintTxID(txID string) (bool, error) {
res, err := bridge.isMintTxID(txID)
for IsNoPeerErr(err) {
time.Sleep(retryDelay)
res, err = bridge.isMintTxID(txID)
}
return res, err
}
func (bridge *BridgeContract) isMintTxID(txID string) (bool, error) {
log.Debug("Calling isMintID")
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
return bridge.caller.IsMintID(opts, txID)
}
func (bridge *BridgeContract) getSignerFunc() bind.SignerFn {
return func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) {
accountAddress, err := bridge.lc.AccountAddress()
if err != nil {
return nil, err
}
if address != accountAddress {
return nil, errors.New("not authorized to sign this account")
}
networkID := int64(bridge.networkConfig.NetworkID)
return bridge.lc.SignTx(tx, big.NewInt(networkID))
}
}
func (bridge *BridgeContract) TokenBalance(address common.Address) (*big.Int, error) {
log.Debug("Calling TokenBalance function in contract")
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
return bridge.caller.BalanceOf(opts, common.Address(address))
}
func (bridge *BridgeContract) EthBalance() (*big.Int, error) {
err := bridge.Refresh(nil) // force a refresh
return bridge.balance, err
}
// bindTTFT20 binds a generic wrapper to an already deployed contract.
//
// This method is copied from the generated bindings as a convenient way to get a *bind.Contract, as this is needed to implement the WatchWithdraw function ourselves
func bindTTFT20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, abi.ABI, error) {
parsed, err := abi.JSON(strings.NewReader(contract.TokenABI))
if err != nil {
return nil, parsed, err
}
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), parsed, nil
}
// GetHorizonClient gets the horizon client based on the wallet's network
func (b *BridgeContract) GetHorizonClient() (*horizonclient.Client, error) {
switch b.networkName {
case "smart-chain-testnet":
return horizonclient.DefaultTestNetClient, nil
case "main":
return horizonclient.DefaultPublicNetClient, nil
default:
return nil, errors.New("network is not supported")
}
}
func (b *BridgeContract) StreamStellarAccountPayments(ctx context.Context, accountID string, handler func(op operations.Operation)) error {
client, err := b.GetHorizonClient()
if err != nil {
return err
}
opRequest := horizonclient.OperationRequest{
ForAccount: accountID,
}
return client.StreamPayments(ctx, opRequest, handler)
}
func (b *BridgeContract) StreamStellarAccountTransactions(ctx context.Context, accountID string, handler func(op hProtocol.Transaction)) error {
client, err := b.GetHorizonClient()
if err != nil {
return err
}
opRequest := horizonclient.TransactionRequest{
ForAccount: accountID,
}
return client.StreamTransactions(ctx, opRequest, handler)
}
func (b *BridgeContract) | (txHash string) (effects effects.EffectsPage, err error) {
client, err := b.GetHorizonClient()
if err != nil {
return effects, err
}
effectsReq := horizonclient.EffectRequest{
ForTransaction: txHash,
}
effects, err = client.Effects(effectsReq)
if err != nil {
return effects, err
}
return effects, nil
}
| GetTransactionEffects | identifier_name |
bridge_contract.go | package bridge
import (
"context"
"errors"
"math/big"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/stellar/go/clients/horizonclient"
"github.com/stellar/go/keypair"
hProtocol "github.com/stellar/go/protocols/horizon"
"github.com/stellar/go/protocols/horizon/effects"
"github.com/stellar/go/protocols/horizon/operations"
tfeth "github.com/threefoldtech/eth-bridge/api"
"github.com/threefoldtech/eth-bridge/api/bridge/contract"
)
const ERC20AddressLength = 20
type ERC20Address [ERC20AddressLength]byte
var (
ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
)
const (
// retryDelay is the delay to retry calls when there are no peers
retryDelay = time.Second * 15
)
// BridgeContract exposes a higher lvl api for specific contract bindings. In case of proxy contracts,
// the bridge needs to use the bindings of the implementation contract, but the address of the proxy.
type BridgeContract struct {
networkConfig tfeth.NetworkConfiguration // Ethereum network
networkName string
lc *LightClient
filter *contract.TokenFilterer
transactor *contract.TokenTransactor
caller *contract.TokenCaller
contract *bind.BoundContract
abi abi.ABI
wallet *stellarWallet
// cache some stats in case they might be usefull
head *types.Header // Current head header of the bridge
balance *big.Int // The current balance of the bridge (note: ethers only!)
nonce uint64 // Current pending nonce of the bridge
price *big.Int // Current gas price to issue funds with
lock sync.RWMutex // Lock protecting the bridge's internals
}
// GetContractAdress returns the address of this contract
func (bridge *BridgeContract) GetContractAdress() common.Address {
return bridge.networkConfig.ContractAddress
}
// NewBridgeContract creates a new wrapper for an allready deployed contract
func NewBridgeContract(networkName string, bootnodes []string, contractAddress string, port int, accountJSON, accountPass string, datadir string, cancel <-chan struct{}, stellarNetwork string, stellarSeed string) (*BridgeContract, error) {
// load correct network config
networkConfig, err := tfeth.GetEthNetworkConfiguration(networkName)
if err != nil {
return nil, err
}
// override contract address if it's provided
if contractAddress != "" {
networkConfig.ContractAddress = common.HexToAddress(contractAddress)
// TODO: validate ABI of contract,
// see https://github.com/threefoldtech/rivine-extension-erc20/issues/3
}
bootstrapNodes, err := networkConfig.GetBootnodes(bootnodes)
if err != nil {
return nil, err
}
lc, err := NewLightClient(LightClientConfig{
Port: port,
DataDir: datadir,
BootstrapNodes: bootstrapNodes,
NetworkName: networkConfig.NetworkName,
NetworkID: networkConfig.NetworkID,
GenesisBlock: networkConfig.GenesisBlock,
})
if err != nil {
return nil, err
}
err = lc.LoadAccount(accountJSON, accountPass)
if err != nil {
return nil, err
}
filter, err := contract.NewTokenFilterer(networkConfig.ContractAddress, lc.Client)
if err != nil {
return nil, err
}
transactor, err := contract.NewTokenTransactor(networkConfig.ContractAddress, lc.Client)
if err != nil {
return nil, err
}
caller, err := contract.NewTokenCaller(networkConfig.ContractAddress, lc.Client)
if err != nil {
return nil, err
}
contract, abi, err := bindTTFT20(networkConfig.ContractAddress, lc.Client, lc.Client, lc.Client)
if err != nil {
return nil, err
}
w := &stellarWallet{
network: stellarNetwork,
}
if stellarSeed != "" {
w.keypair, err = keypair.ParseFull(stellarSeed)
if err != nil {
return nil, err
}
}
return &BridgeContract{
networkName: networkName,
networkConfig: networkConfig,
lc: lc,
filter: filter,
transactor: transactor,
caller: caller,
contract: contract,
abi: abi,
wallet: w,
}, nil
}
// Close terminates the Ethereum connection and tears down the stack.
func (bridge *BridgeContract) Close() error {
return bridge.lc.Close()
}
// AccountAddress returns the account address of the bridge contract
func (bridge *BridgeContract) AccountAddress() (common.Address, error) {
return bridge.lc.AccountAddress()
}
// LightClient returns the LightClient driving this bridge contract
func (bridge *BridgeContract) LightClient() *LightClient {
return bridge.lc
}
// ABI returns the parsed and bound ABI driving this bridge contract
func (bridge *BridgeContract) ABI() abi.ABI {
return bridge.abi
}
// Refresh attempts to retrieve the latest header from the chain and extract the
// associated bridge balance and nonce for connectivity caching.
func (bridge *BridgeContract) Refresh(head *types.Header) error {
// Ensure a state update does not run for too long
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// If no header was specified, use the current chain head
var err error
if head == nil {
if head, err = bridge.lc.HeaderByNumber(ctx, nil); err != nil {
return err
}
}
// Retrieve the balance, nonce and gas price from the current head
var (
nonce uint64
price *big.Int
balance *big.Int
)
if price, err = bridge.lc.SuggestGasPrice(ctx); err != nil {
return err
}
if balance, err = bridge.lc.AccountBalanceAt(ctx, head.Number); err != nil {
return err
}
log.Debug(bridge.lc.account.account.Address.Hex())
// Everything succeeded, update the cached stats
bridge.lock.Lock()
bridge.head, bridge.balance = head, balance
bridge.price, bridge.nonce = price, nonce
bridge.lock.Unlock()
return nil
}
// Loop subscribes to new eth heads. If a new head is received, it is passed on the given channel,
// after which the internal stats are updated if no update is already in progress
func (bridge *BridgeContract) Loop(ch chan<- *types.Header) {
log.Debug("Subscribing to eth headers")
// channel to receive head updates from client on
heads := make(chan *types.Header, 16)
// subscribe to head upates
sub, err := bridge.lc.SubscribeNewHead(context.Background(), heads)
if err != nil {
log.Error("Failed to subscribe to head events", "err", err)
}
defer sub.Unsubscribe()
// channel so we can update the internal state from the heads
update := make(chan *types.Header)
go func() {
for head := range update {
// old heads should be ignored during a chain sync after some downtime
if err := bridge.Refresh(head); err != nil {
log.Warn("Failed to update state", "block", head.Number, "err", err)
}
log.Debug("Internal stats updated", "block", head.Number, "account balance", bridge.balance, "gas price", bridge.price, "nonce", bridge.nonce)
}
}()
for head := range heads {
ch <- head
select {
// only process new head if another isn't being processed yet
case update <- head:
log.Debug("Processing new head")
default:
log.Debug("Ignoring current head, update already in progress")
}
}
log.Error("Bridge state update loop ended")
}
// SubscribeTransfers subscribes to new Transfer events on the given contract. This call blocks
// and prints out info about any transfer as it happened
func (bridge *BridgeContract) SubscribeTransfers() error {
sink := make(chan *contract.TokenTransfer)
opts := &bind.WatchOpts{Context: context.Background(), Start: nil}
sub, err := bridge.filter.WatchTransfer(opts, sink, nil, nil)
if err != nil {
return err
}
defer sub.Unsubscribe()
for {
select {
case err = <-sub.Err():
return err
case transfer := <-sink:
log.Debug("Noticed transfer event", "from", transfer.From, "to", transfer.To, "amount", transfer.Tokens)
}
}
}
// SubscribeMint subscribes to new Mint events on the given contract. This call blocks
// and prints out info about any mint as it happened
func (bridge *BridgeContract) SubscribeMint() error {
sink := make(chan *contract.TokenMint)
opts := &bind.WatchOpts{Context: context.Background(), Start: nil}
sub, err := bridge.filter.WatchMint(opts, sink, nil, nil)
if err != nil {
return err
}
defer sub.Unsubscribe()
for {
select {
case err = <-sub.Err():
return err
case mint := <-sink:
log.Info("Noticed mint event", "receiver", mint.Receiver, "amount", mint.Tokens, "TFT tx id", mint.Txid)
}
}
}
// WithdrawEvent holds relevant information about a withdraw event
type WithdrawEvent struct {
receiver common.Address
amount *big.Int
blockchain_address string
network string
txHash common.Hash
blockHash common.Hash
blockHeight uint64
raw []byte
}
// Receiver of the withdraw
func (w WithdrawEvent) Receiver() common.Address {
return w.receiver
}
// Amount withdrawn
func (w WithdrawEvent) Amount() *big.Int {
return w.amount
}
// Blockchain address to withdraw to
func (w WithdrawEvent) BlockchainAddress() string {
return w.blockchain_address
}
// Network to withdraw to
func (w WithdrawEvent) Network() string {
return w.network
}
// TxHash hash of the transaction
func (w WithdrawEvent) TxHash() common.Hash {
return w.txHash
}
// BlockHash of the containing block
func (w WithdrawEvent) BlockHash() common.Hash {
return w.blockHash
}
// BlockHeight of the containing block
func (w WithdrawEvent) BlockHeight() uint64 {
return w.blockHeight
}
// SubscribeWithdraw subscribes to new Withdraw events on the given contract. This call blocks
// and prints out info about any withdraw as it happened
func (bridge *BridgeContract) SubscribeWithdraw(wc chan<- WithdrawEvent, startHeight uint64) error {
log.Debug("Subscribing to withdraw events", "start height", startHeight)
sink := make(chan *contract.TokenWithdraw)
watchOpts := &bind.WatchOpts{Context: context.Background(), Start: nil}
sub, err := bridge.WatchWithdraw(watchOpts, sink, nil)
if err != nil {
log.Error("Subscribing to withdraw events failed", "err", err)
return err
}
defer sub.Unsubscribe()
for {
select {
case err = <-sub.Err():
return err
case withdraw := <-sink:
if withdraw.Raw.Removed {
// ignore removed events
continue
}
log.Debug("Noticed withdraw event", "receiver", withdraw.Receiver, "amount", withdraw.Tokens)
wc <- WithdrawEvent{
receiver: withdraw.Receiver,
amount: withdraw.Tokens,
txHash: withdraw.Raw.TxHash,
blockHash: withdraw.Raw.BlockHash,
blockHeight: withdraw.Raw.BlockNumber,
blockchain_address: withdraw.BlockchainAddress,
network: withdraw.Network,
raw: withdraw.Raw.Data,
}
}
}
}
// WatchWithdraw is a free log subscription operation binding the contract event 0x884edad9ce6fa2440d8a54cc123490eb96d2768479d49ff9c7366125a9424364.
//
// Solidity: e Withdraw(receiver indexed address, tokens uint256)
//
// This method is copied from the generated bindings and slightly modified, so we can add logic to stay backwards compatible with the old withdraw event signature
func (bridge *BridgeContract) WatchWithdraw(opts *bind.WatchOpts, sink chan<- *contract.TokenWithdraw, receiver []common.Address) (event.Subscription, error) {
var receiverRule []interface{}
for _, receiverItem := range receiver {
receiverRule = append(receiverRule, receiverItem)
}
logs, sub, err := bridge.contract.WatchLogs(opts, "Withdraw", receiverRule)
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(contract.TokenWithdraw)
if err := bridge.contract.UnpackLog(event, "Withdraw", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// TransferFunds transfers funds from one address to another
func (bridge *BridgeContract) TransferFunds(recipient common.Address, amount *big.Int) error {
err := bridge.transferFunds(recipient, amount)
for IsNoPeerErr(err) {
time.Sleep(retryDelay)
err = bridge.transferFunds(recipient, amount)
}
return err
}
func (bridge *BridgeContract) transferFunds(recipient common.Address, amount *big.Int) error {
if amount == nil {
return errors.New("invalid amount")
}
accountAddress, err := bridge.lc.AccountAddress()
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.TransactOpts{
Context: ctx, From: accountAddress,
Signer: bridge.getSignerFunc(),
Value: nil, Nonce: nil, GasLimit: 0, GasPrice: nil,
}
_, err = bridge.transactor.Transfer(opts, recipient, amount)
return err
}
func (bridge *BridgeContract) Mint(receiver ERC20Address, amount *big.Int, txID string) error {
err := bridge.mint(receiver, amount, txID)
for IsNoPeerErr(err) {
time.Sleep(retryDelay)
err = bridge.mint(receiver, amount, txID)
}
return err
}
func (bridge *BridgeContract) mint(receiver ERC20Address, amount *big.Int, txID string) error {
log.Debug("Calling mint function in contract")
if amount == nil {
return errors.New("invalid amount")
}
accountAddress, err := bridge.lc.AccountAddress()
if err != nil {
return err
}
// TODO estimate gas more correctly ..
gas, err := bridge.lc.SuggestGasPrice(context.Background())
if err != nil {
return err
}
newGas := big.NewInt(10 * gas.Int64())
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.TransactOpts{
Context: ctx, From: accountAddress,
Signer: bridge.getSignerFunc(),
Value: nil, Nonce: nil, GasLimit: 100000, GasPrice: newGas,
}
_, err = bridge.transactor.MintTokens(opts, common.Address(receiver), amount, txID)
return err
}
func (bridge *BridgeContract) IsMintTxID(txID string) (bool, error) {
res, err := bridge.isMintTxID(txID)
for IsNoPeerErr(err) {
time.Sleep(retryDelay)
res, err = bridge.isMintTxID(txID)
}
return res, err
}
func (bridge *BridgeContract) isMintTxID(txID string) (bool, error) {
log.Debug("Calling isMintID")
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
return bridge.caller.IsMintID(opts, txID)
}
func (bridge *BridgeContract) getSignerFunc() bind.SignerFn {
return func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) {
accountAddress, err := bridge.lc.AccountAddress()
if err != nil {
return nil, err
}
if address != accountAddress {
return nil, errors.New("not authorized to sign this account")
}
networkID := int64(bridge.networkConfig.NetworkID)
return bridge.lc.SignTx(tx, big.NewInt(networkID))
}
}
func (bridge *BridgeContract) TokenBalance(address common.Address) (*big.Int, error) {
log.Debug("Calling TokenBalance function in contract")
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
return bridge.caller.BalanceOf(opts, common.Address(address))
}
func (bridge *BridgeContract) EthBalance() (*big.Int, error) {
err := bridge.Refresh(nil) // force a refresh |
// bindTTFT20 binds a generic wrapper to an already deployed contract.
//
// This method is copied from the generated bindings as a convenient way to get a *bind.Contract, as this is needed to implement the WatchWithdraw function ourselves
func bindTTFT20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, abi.ABI, error) {
parsed, err := abi.JSON(strings.NewReader(contract.TokenABI))
if err != nil {
return nil, parsed, err
}
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), parsed, nil
}
// GetHorizonClient gets the horizon client based on the wallet's network
func (b *BridgeContract) GetHorizonClient() (*horizonclient.Client, error) {
switch b.networkName {
case "smart-chain-testnet":
return horizonclient.DefaultTestNetClient, nil
case "main":
return horizonclient.DefaultPublicNetClient, nil
default:
return nil, errors.New("network is not supported")
}
}
func (b *BridgeContract) StreamStellarAccountPayments(ctx context.Context, accountID string, handler func(op operations.Operation)) error {
client, err := b.GetHorizonClient()
if err != nil {
return err
}
opRequest := horizonclient.OperationRequest{
ForAccount: accountID,
}
return client.StreamPayments(ctx, opRequest, handler)
}
func (b *BridgeContract) StreamStellarAccountTransactions(ctx context.Context, accountID string, handler func(op hProtocol.Transaction)) error {
client, err := b.GetHorizonClient()
if err != nil {
return err
}
opRequest := horizonclient.TransactionRequest{
ForAccount: accountID,
}
return client.StreamTransactions(ctx, opRequest, handler)
}
func (b *BridgeContract) GetTransactionEffects(txHash string) (effects effects.EffectsPage, err error) {
client, err := b.GetHorizonClient()
if err != nil {
return effects, err
}
effectsReq := horizonclient.EffectRequest{
ForTransaction: txHash,
}
effects, err = client.Effects(effectsReq)
if err != nil {
return effects, err
}
return effects, nil
} | return bridge.balance, err
} | random_line_split |
bridge_contract.go | package bridge
import (
"context"
"errors"
"math/big"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/stellar/go/clients/horizonclient"
"github.com/stellar/go/keypair"
hProtocol "github.com/stellar/go/protocols/horizon"
"github.com/stellar/go/protocols/horizon/effects"
"github.com/stellar/go/protocols/horizon/operations"
tfeth "github.com/threefoldtech/eth-bridge/api"
"github.com/threefoldtech/eth-bridge/api/bridge/contract"
)
const ERC20AddressLength = 20
type ERC20Address [ERC20AddressLength]byte
var (
ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
)
const (
// retryDelay is the delay to retry calls when there are no peers
retryDelay = time.Second * 15
)
// BridgeContract exposes a higher lvl api for specific contract bindings. In case of proxy contracts,
// the bridge needs to use the bindings of the implementation contract, but the address of the proxy.
type BridgeContract struct {
networkConfig tfeth.NetworkConfiguration // Ethereum network
networkName string
lc *LightClient
filter *contract.TokenFilterer
transactor *contract.TokenTransactor
caller *contract.TokenCaller
contract *bind.BoundContract
abi abi.ABI
wallet *stellarWallet
// cache some stats in case they might be usefull
head *types.Header // Current head header of the bridge
balance *big.Int // The current balance of the bridge (note: ethers only!)
nonce uint64 // Current pending nonce of the bridge
price *big.Int // Current gas price to issue funds with
lock sync.RWMutex // Lock protecting the bridge's internals
}
// GetContractAdress returns the address of this contract
func (bridge *BridgeContract) GetContractAdress() common.Address {
return bridge.networkConfig.ContractAddress
}
// NewBridgeContract creates a new wrapper for an allready deployed contract
func NewBridgeContract(networkName string, bootnodes []string, contractAddress string, port int, accountJSON, accountPass string, datadir string, cancel <-chan struct{}, stellarNetwork string, stellarSeed string) (*BridgeContract, error) {
// load correct network config
networkConfig, err := tfeth.GetEthNetworkConfiguration(networkName)
if err != nil {
return nil, err
}
// override contract address if it's provided
if contractAddress != "" {
networkConfig.ContractAddress = common.HexToAddress(contractAddress)
// TODO: validate ABI of contract,
// see https://github.com/threefoldtech/rivine-extension-erc20/issues/3
}
bootstrapNodes, err := networkConfig.GetBootnodes(bootnodes)
if err != nil {
return nil, err
}
lc, err := NewLightClient(LightClientConfig{
Port: port,
DataDir: datadir,
BootstrapNodes: bootstrapNodes,
NetworkName: networkConfig.NetworkName,
NetworkID: networkConfig.NetworkID,
GenesisBlock: networkConfig.GenesisBlock,
})
if err != nil {
return nil, err
}
err = lc.LoadAccount(accountJSON, accountPass)
if err != nil {
return nil, err
}
filter, err := contract.NewTokenFilterer(networkConfig.ContractAddress, lc.Client)
if err != nil {
return nil, err
}
transactor, err := contract.NewTokenTransactor(networkConfig.ContractAddress, lc.Client)
if err != nil {
return nil, err
}
caller, err := contract.NewTokenCaller(networkConfig.ContractAddress, lc.Client)
if err != nil {
return nil, err
}
contract, abi, err := bindTTFT20(networkConfig.ContractAddress, lc.Client, lc.Client, lc.Client)
if err != nil {
return nil, err
}
w := &stellarWallet{
network: stellarNetwork,
}
if stellarSeed != "" {
w.keypair, err = keypair.ParseFull(stellarSeed)
if err != nil {
return nil, err
}
}
return &BridgeContract{
networkName: networkName,
networkConfig: networkConfig,
lc: lc,
filter: filter,
transactor: transactor,
caller: caller,
contract: contract,
abi: abi,
wallet: w,
}, nil
}
// Close terminates the Ethereum connection and tears down the stack.
func (bridge *BridgeContract) Close() error {
return bridge.lc.Close()
}
// AccountAddress returns the account address of the bridge contract
func (bridge *BridgeContract) AccountAddress() (common.Address, error) {
return bridge.lc.AccountAddress()
}
// LightClient returns the LightClient driving this bridge contract
func (bridge *BridgeContract) LightClient() *LightClient {
return bridge.lc
}
// ABI returns the parsed and bound ABI driving this bridge contract
func (bridge *BridgeContract) ABI() abi.ABI {
return bridge.abi
}
// Refresh attempts to retrieve the latest header from the chain and extract the
// associated bridge balance and nonce for connectivity caching.
func (bridge *BridgeContract) Refresh(head *types.Header) error {
// Ensure a state update does not run for too long
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// If no header was specified, use the current chain head
var err error
if head == nil {
if head, err = bridge.lc.HeaderByNumber(ctx, nil); err != nil {
return err
}
}
// Retrieve the balance, nonce and gas price from the current head
var (
nonce uint64
price *big.Int
balance *big.Int
)
if price, err = bridge.lc.SuggestGasPrice(ctx); err != nil {
return err
}
if balance, err = bridge.lc.AccountBalanceAt(ctx, head.Number); err != nil {
return err
}
log.Debug(bridge.lc.account.account.Address.Hex())
// Everything succeeded, update the cached stats
bridge.lock.Lock()
bridge.head, bridge.balance = head, balance
bridge.price, bridge.nonce = price, nonce
bridge.lock.Unlock()
return nil
}
// Loop subscribes to new eth heads. If a new head is received, it is passed on the given channel,
// after which the internal stats are updated if no update is already in progress
func (bridge *BridgeContract) Loop(ch chan<- *types.Header) {
log.Debug("Subscribing to eth headers")
// channel to receive head updates from client on
heads := make(chan *types.Header, 16)
// subscribe to head upates
sub, err := bridge.lc.SubscribeNewHead(context.Background(), heads)
if err != nil {
log.Error("Failed to subscribe to head events", "err", err)
}
defer sub.Unsubscribe()
// channel so we can update the internal state from the heads
update := make(chan *types.Header)
go func() {
for head := range update {
// old heads should be ignored during a chain sync after some downtime
if err := bridge.Refresh(head); err != nil {
log.Warn("Failed to update state", "block", head.Number, "err", err)
}
log.Debug("Internal stats updated", "block", head.Number, "account balance", bridge.balance, "gas price", bridge.price, "nonce", bridge.nonce)
}
}()
for head := range heads {
ch <- head
select {
// only process new head if another isn't being processed yet
case update <- head:
log.Debug("Processing new head")
default:
log.Debug("Ignoring current head, update already in progress")
}
}
log.Error("Bridge state update loop ended")
}
// SubscribeTransfers subscribes to new Transfer events on the given contract. This call blocks
// and prints out info about any transfer as it happened
func (bridge *BridgeContract) SubscribeTransfers() error {
sink := make(chan *contract.TokenTransfer)
opts := &bind.WatchOpts{Context: context.Background(), Start: nil}
sub, err := bridge.filter.WatchTransfer(opts, sink, nil, nil)
if err != nil {
return err
}
defer sub.Unsubscribe()
for {
select {
case err = <-sub.Err():
return err
case transfer := <-sink:
log.Debug("Noticed transfer event", "from", transfer.From, "to", transfer.To, "amount", transfer.Tokens)
}
}
}
// SubscribeMint subscribes to new Mint events on the given contract. This call blocks
// and prints out info about any mint as it happened
func (bridge *BridgeContract) SubscribeMint() error {
sink := make(chan *contract.TokenMint)
opts := &bind.WatchOpts{Context: context.Background(), Start: nil}
sub, err := bridge.filter.WatchMint(opts, sink, nil, nil)
if err != nil {
return err
}
defer sub.Unsubscribe()
for {
select {
case err = <-sub.Err():
return err
case mint := <-sink:
log.Info("Noticed mint event", "receiver", mint.Receiver, "amount", mint.Tokens, "TFT tx id", mint.Txid)
}
}
}
// WithdrawEvent holds relevant information about a withdraw event
type WithdrawEvent struct {
receiver common.Address
amount *big.Int
blockchain_address string
network string
txHash common.Hash
blockHash common.Hash
blockHeight uint64
raw []byte
}
// Receiver of the withdraw
func (w WithdrawEvent) Receiver() common.Address {
return w.receiver
}
// Amount withdrawn
func (w WithdrawEvent) Amount() *big.Int {
return w.amount
}
// Blockchain address to withdraw to
func (w WithdrawEvent) BlockchainAddress() string {
return w.blockchain_address
}
// Network to withdraw to
func (w WithdrawEvent) Network() string |
// TxHash hash of the transaction
func (w WithdrawEvent) TxHash() common.Hash {
return w.txHash
}
// BlockHash of the containing block
func (w WithdrawEvent) BlockHash() common.Hash {
return w.blockHash
}
// BlockHeight of the containing block
func (w WithdrawEvent) BlockHeight() uint64 {
return w.blockHeight
}
// SubscribeWithdraw subscribes to new Withdraw events on the given contract. This call blocks
// and prints out info about any withdraw as it happened
func (bridge *BridgeContract) SubscribeWithdraw(wc chan<- WithdrawEvent, startHeight uint64) error {
log.Debug("Subscribing to withdraw events", "start height", startHeight)
sink := make(chan *contract.TokenWithdraw)
watchOpts := &bind.WatchOpts{Context: context.Background(), Start: nil}
sub, err := bridge.WatchWithdraw(watchOpts, sink, nil)
if err != nil {
log.Error("Subscribing to withdraw events failed", "err", err)
return err
}
defer sub.Unsubscribe()
for {
select {
case err = <-sub.Err():
return err
case withdraw := <-sink:
if withdraw.Raw.Removed {
// ignore removed events
continue
}
log.Debug("Noticed withdraw event", "receiver", withdraw.Receiver, "amount", withdraw.Tokens)
wc <- WithdrawEvent{
receiver: withdraw.Receiver,
amount: withdraw.Tokens,
txHash: withdraw.Raw.TxHash,
blockHash: withdraw.Raw.BlockHash,
blockHeight: withdraw.Raw.BlockNumber,
blockchain_address: withdraw.BlockchainAddress,
network: withdraw.Network,
raw: withdraw.Raw.Data,
}
}
}
}
// WatchWithdraw is a free log subscription operation binding the contract event 0x884edad9ce6fa2440d8a54cc123490eb96d2768479d49ff9c7366125a9424364.
//
// Solidity: e Withdraw(receiver indexed address, tokens uint256)
//
// This method is copied from the generated bindings and slightly modified, so we can add logic to stay backwards compatible with the old withdraw event signature
func (bridge *BridgeContract) WatchWithdraw(opts *bind.WatchOpts, sink chan<- *contract.TokenWithdraw, receiver []common.Address) (event.Subscription, error) {
var receiverRule []interface{}
for _, receiverItem := range receiver {
receiverRule = append(receiverRule, receiverItem)
}
logs, sub, err := bridge.contract.WatchLogs(opts, "Withdraw", receiverRule)
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(contract.TokenWithdraw)
if err := bridge.contract.UnpackLog(event, "Withdraw", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// TransferFunds transfers funds from one address to another
func (bridge *BridgeContract) TransferFunds(recipient common.Address, amount *big.Int) error {
err := bridge.transferFunds(recipient, amount)
for IsNoPeerErr(err) {
time.Sleep(retryDelay)
err = bridge.transferFunds(recipient, amount)
}
return err
}
func (bridge *BridgeContract) transferFunds(recipient common.Address, amount *big.Int) error {
if amount == nil {
return errors.New("invalid amount")
}
accountAddress, err := bridge.lc.AccountAddress()
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.TransactOpts{
Context: ctx, From: accountAddress,
Signer: bridge.getSignerFunc(),
Value: nil, Nonce: nil, GasLimit: 0, GasPrice: nil,
}
_, err = bridge.transactor.Transfer(opts, recipient, amount)
return err
}
func (bridge *BridgeContract) Mint(receiver ERC20Address, amount *big.Int, txID string) error {
err := bridge.mint(receiver, amount, txID)
for IsNoPeerErr(err) {
time.Sleep(retryDelay)
err = bridge.mint(receiver, amount, txID)
}
return err
}
func (bridge *BridgeContract) mint(receiver ERC20Address, amount *big.Int, txID string) error {
log.Debug("Calling mint function in contract")
if amount == nil {
return errors.New("invalid amount")
}
accountAddress, err := bridge.lc.AccountAddress()
if err != nil {
return err
}
// TODO estimate gas more correctly ..
gas, err := bridge.lc.SuggestGasPrice(context.Background())
if err != nil {
return err
}
newGas := big.NewInt(10 * gas.Int64())
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.TransactOpts{
Context: ctx, From: accountAddress,
Signer: bridge.getSignerFunc(),
Value: nil, Nonce: nil, GasLimit: 100000, GasPrice: newGas,
}
_, err = bridge.transactor.MintTokens(opts, common.Address(receiver), amount, txID)
return err
}
func (bridge *BridgeContract) IsMintTxID(txID string) (bool, error) {
res, err := bridge.isMintTxID(txID)
for IsNoPeerErr(err) {
time.Sleep(retryDelay)
res, err = bridge.isMintTxID(txID)
}
return res, err
}
func (bridge *BridgeContract) isMintTxID(txID string) (bool, error) {
log.Debug("Calling isMintID")
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
return bridge.caller.IsMintID(opts, txID)
}
func (bridge *BridgeContract) getSignerFunc() bind.SignerFn {
return func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) {
accountAddress, err := bridge.lc.AccountAddress()
if err != nil {
return nil, err
}
if address != accountAddress {
return nil, errors.New("not authorized to sign this account")
}
networkID := int64(bridge.networkConfig.NetworkID)
return bridge.lc.SignTx(tx, big.NewInt(networkID))
}
}
func (bridge *BridgeContract) TokenBalance(address common.Address) (*big.Int, error) {
log.Debug("Calling TokenBalance function in contract")
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
return bridge.caller.BalanceOf(opts, common.Address(address))
}
func (bridge *BridgeContract) EthBalance() (*big.Int, error) {
err := bridge.Refresh(nil) // force a refresh
return bridge.balance, err
}
// bindTTFT20 binds a generic wrapper to an already deployed contract.
//
// This method is copied from the generated bindings as a convenient way to get a *bind.Contract, as this is needed to implement the WatchWithdraw function ourselves
func bindTTFT20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, abi.ABI, error) {
parsed, err := abi.JSON(strings.NewReader(contract.TokenABI))
if err != nil {
return nil, parsed, err
}
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), parsed, nil
}
// GetHorizonClient gets the horizon client based on the wallet's network
func (b *BridgeContract) GetHorizonClient() (*horizonclient.Client, error) {
switch b.networkName {
case "smart-chain-testnet":
return horizonclient.DefaultTestNetClient, nil
case "main":
return horizonclient.DefaultPublicNetClient, nil
default:
return nil, errors.New("network is not supported")
}
}
func (b *BridgeContract) StreamStellarAccountPayments(ctx context.Context, accountID string, handler func(op operations.Operation)) error {
client, err := b.GetHorizonClient()
if err != nil {
return err
}
opRequest := horizonclient.OperationRequest{
ForAccount: accountID,
}
return client.StreamPayments(ctx, opRequest, handler)
}
func (b *BridgeContract) StreamStellarAccountTransactions(ctx context.Context, accountID string, handler func(op hProtocol.Transaction)) error {
client, err := b.GetHorizonClient()
if err != nil {
return err
}
opRequest := horizonclient.TransactionRequest{
ForAccount: accountID,
}
return client.StreamTransactions(ctx, opRequest, handler)
}
func (b *BridgeContract) GetTransactionEffects(txHash string) (effects effects.EffectsPage, err error) {
client, err := b.GetHorizonClient()
if err != nil {
return effects, err
}
effectsReq := horizonclient.EffectRequest{
ForTransaction: txHash,
}
effects, err = client.Effects(effectsReq)
if err != nil {
return effects, err
}
return effects, nil
}
| {
return w.network
} | identifier_body |
train_fcn.py | import math, datetime, os
from FCN import *
from voxnet import VoxNet
from fmri_data import fMRI_data
from config import cfg
import time
from evaluation import *
from sklearn import svm
def | (data_index=None,cut_shape=None,data_type=['MCIc','MCInc'],pre_dir='/home/anzeng/rhb/fmri_data',
num_batches = 256*5,voxnet_point=None,test_size = 6,brain_map=[217]):
# fr = open(cfg.output, 'w')
tf.reset_default_graph()
time_dim = 80 # 挑选时间片个数
batch_size = 8
dataset = fMRI_data(data_type,data_index=data_index,varbass=False,dir=pre_dir)
#SVM index
#########################
svm_index = {}
train_len = 0
test_len = 0
for d_type in data_type:
t_dir = os.path.join(pre_dir,d_type)
t_len = os.listdir(t_dir)
t_len = len(t_len)
train_index = list(range(t_len))
test_index = data_index[d_type]['test']
for x in test_index:
train_index.remove(x)
_index = {'train':train_index,'test':test_index}
train_len += len(train_index)
test_len += len(test_index)
svm_index[d_type] = _index
print(train_len)
print(test_len)
print(svm_index)
svm_dataset = fMRI_data(data_type,data_index = svm_index,varbass=False,dir=pre_dir)
##########################
xyz = 32
input_shape = [None,xyz,xyz,xyz,1]
# for i in range(3):
# input_shape.append(cut_shape[2 * i + 1] + 1 - cut_shape[2 * i])
# input_shape.append(1)
# print(input_shape)
voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')
FCNs = Classifier_FCN(tf.placeholder(tf.float32,[None,time_dim,50]),nb_classes=2)
data_value = [[1], [1]]
# 创建数据
p = dict() # placeholders
p['labels'] = tf.placeholder(tf.float32, [None, 2])
p['data_value'] = tf.placeholder(tf.float32, [2, 1])
p['Weight'] = tf.matmul(p['labels'], p['data_value'])
p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(logits=FCNs[-2], labels=p['labels'])
p['Weight'] = tf.reshape(p['Weight'], [-1])
p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
p['loss'] = tf.reduce_mean(p['x_loss'])
p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in FCNs.kernels])
p['prediction'] = tf.argmax(FCNs[-1],1)
p['y_true'] = tf.argmax(p['labels'],1)
p['correct_prediction'] = tf.equal(p['prediction'], p['y_true'])
p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'], tf.float32))
p['learning_rate'] = tf.placeholder(tf.float32)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
p['train'] = tf.train.AdamOptimizer(p['learning_rate'], epsilon=1e-3).minimize(p['loss'])
p['weights_decay'] = tf.train.GradientDescentOptimizer(p['learning_rate']).minimize(p['l2_loss'])
# p['test_error'] = tf.placeholder(tf.float32)
# 超参数设置
initial_learning_rate = 0.01
min_learning_rate = 0.000001
learning_rate_decay_limit = 0.0001
num_batches_per_epoch = len(dataset.train) / float(batch_size)
learning_decay = 10 * num_batches_per_epoch
weights_decay_after = 5 * num_batches_per_epoch
checkpoint_num = 0
learning_step = 0
min_loss = 1e308
if voxnet_point:
cfg.voxnet_checkpoint = voxnet_point
accuracy_filename = os.path.join(cfg.fcn_checkpoint_dir, 'accuracies.txt')
if not os.path.isdir(cfg.fcn_checkpoint_dir):
os.mkdir(cfg.fcn_checkpoint_dir)
if not os.path.exists(accuracy_filename):
with open(accuracy_filename, 'a') as f:
f.write('')
with open(accuracy_filename,'a') as f:
f.write(str(brain_map)+'\n')
#返回值
test_evaluation = evaluation()
with tf.Session() as session:
session.run(tf.global_variables_initializer())
voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint)
#voxnet赋值
input_shape[0]=1
voxnet_data = np.ones(input_shape,np.float32)
input_shape[0]=-1
for batch_index in range(num_batches):
start = time.time()
# learning_rate = max(min_learning_rate,
# initial_learning_rate * 0.5 ** (learning_step / learning_decay))
learning_rate = 0.0001
learning_step += 1
if batch_index > weights_decay_after and batch_index % 256 == 0:
session.run(p['weights_decay'], feed_dict=feed_dict)
voxs, labels = dataset.train.oversampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:0.7, p['labels']: labels,
p['learning_rate']: learning_rate, FCNs.training: True,p['data_value']:data_value}
session.run(p['train'], feed_dict=feed_dict)
if batch_index and batch_index % 32 == 0:
print("{} batch: {}".format(datetime.datetime.now(), batch_index))
print('learning rate: {}'.format(learning_rate))
# fr.write("{} batch: {}".format(datetime.datetime.now(), batch_index))
# fr.write('learning rate: {}'.format(learning_rate))
feed_dict[FCNs.training] = False
loss = session.run(p['loss'], feed_dict=feed_dict)
print('loss: {}'.format(loss))
if (batch_index and loss > 1.5 * min_loss and
learning_rate > learning_rate_decay_limit):
min_loss = loss
learning_step *= 1.2
print("decreasing learning rate...")
min_loss = min(loss, min_loss)
if batch_index and batch_index % 16 == 0:
num_accuracy_batches = 20
train_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs, voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0,p['labels']: labels, FCNs.training: False}
predictions, y_true = session.run([p['prediction'], p['y_true']], feed_dict=feed_dict)
train_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print('training accuracy \n' + str(train_evaluation))
num_accuracy_batches = test_size
test_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
predictions,y_true = session.run([p['prediction'],p['y_true']], feed_dict=feed_dict)
test_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print(test_evaluation)
print('test accuracy \n'+str(test_evaluation))
with open(accuracy_filename, 'a') as f:
f.write('checkpoint_num:' + str(checkpoint_num) + ':\n')
f.write('train:\n' + str(train_evaluation) + '\n')
f.write('test:\n' + str(test_evaluation) + '\n')
if batch_index % 64 or train_evaluation.ACC >= 0.8 == 0:
######SVM分类器####################
svm_feature = np.zeros((train_len+test_len,128))
svm_label = np.zeros(train_len+test_len)
for x in range(train_len):
voxs, labels = svm_dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1,128])
svm_feature[x] = feature
# print(svm_feature[x])
svm_label[x] = y_true
for x in range(test_len):
voxs, labels = svm_dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1, 128])
svm_feature[train_len + x] = feature
svm_label[train_len + x] = y_true
# print(svm_feature[0:train_len])
# print(svm_label[0:train_len])
clf = svm.SVC(C=1.0,kernel='rbf',gamma='auto')
clf.fit(svm_feature[0:train_len],svm_label[0:train_len])
predictions = clf.predict(svm_feature)
svm_train_evaluation = evaluation(y_true=svm_label[:train_len],y_predict=predictions[:train_len])
svm_test_evaluation = evaluation(y_true=svm_label[train_len:],y_predict=predictions[train_len:])
print('svm_train:\n'+str(svm_train_evaluation))
print('svm_test:\n' + str(svm_test_evaluation))
with open(accuracy_filename,'a') as f:
f.write('svm_train:\n' + str(svm_train_evaluation) + '\n')
f.write('svm_test:\n' + str(svm_test_evaluation) + '\n')
#################################################
# fr.write('test accuracy: {}'.format(test_accuracy))
if batch_index % 128 == 0 or train_evaluation.ACC >= 0.85:
print('saving checkpoint {}...'.format(checkpoint_num))
filename = 'cx-{}.npz'.format(checkpoint_num)
filename = os.path.join(cfg.fcn_checkpoint_dir, filename)
FCNs.npz_saver.save(session, filename)
print('checkpoint saved!')
checkpoint_num += 1
if train_evaluation.ACC >= 0.85:
break
end = time.time()
print('time:',(end-start)/60)
return test_evaluation
if __name__ == '__main__':
tf.app.run()
| main | identifier_name |
train_fcn.py | import math, datetime, os
from FCN import *
from voxnet import VoxNet
from fmri_data import fMRI_data
from config import cfg
import time
from evaluation import *
from sklearn import svm
def main(data_index=None,cut_shape=None,data_type=['MCIc','MCInc'],pre_dir='/home/anzeng/rhb/fmri_data',
num_batches = 256*5,voxnet_point=None,test_size = 6,brain_map=[217]):
# fr = open(cfg.output, 'w')
tf.reset_default_graph()
time_dim = 80 # 挑选时间片个数
batch_size = 8
dataset = fMRI_data(data_type,data_index=data_index,varbass=False,dir=pre_dir)
#SVM index
#########################
svm_index = {}
train_len = 0
test_len = 0
for d_type in data_type:
t_dir = os.path.join(pre_dir,d_type)
t_len = os.listdir(t_dir)
t_len = len(t_len)
train_index = list(range(t_len))
test_index = data_index[d_type]['test']
for x in test_index:
train_index.remove(x)
_index = {'train':train_index,'test':test_index}
train_len += len(train_index)
test_len += len(test_index)
svm_index[d_type] = _index
print(train_len)
print(test_len)
print(svm_index)
svm_dataset = fMRI_data(data_type,data_index = svm_index,varbass=False,dir=pre_dir)
##########################
xyz = 32
input_shape = [None,xyz,xyz,xyz,1]
# for i in range(3):
# input_shape.append(cut_shape[2 * i + 1] + 1 - cut_shape[2 * i])
# input_shape.append(1)
# print(input_shape)
voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')
FCNs = Classifier_FCN(tf.placeholder(tf.float32,[None,time_dim,50]),nb_classes=2)
data_value = [[1], [1]]
# 创建数据
p = dict() # placeholders
p['labels'] = tf.placeholder(tf.float32, [None, 2])
p['data_value'] = tf.placeholder(tf.float32, [2, 1])
p['Weight'] = tf.matmul(p['labels'], p['data_value'])
p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(logits=FCNs[-2], labels=p['labels'])
p['Weight'] = tf.reshape(p['Weight'], [-1])
p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
p['loss'] = tf.reduce_mean(p['x_loss'])
p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in FCNs.kernels])
p['prediction'] = tf.argmax(FCNs[-1],1)
p['y_true'] = tf.argmax(p['labels'],1)
p['correct_prediction'] = tf.equal(p['prediction'], p['y_true'])
p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'], tf.float32))
p['learning_rate'] = tf.placeholder(tf.float32)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
p['train'] = tf.train.AdamOptimizer(p['learning_rate'], epsilon=1e-3).minimize(p['loss'])
p['weights_decay'] = tf.train.GradientDescentOptimizer(p['learning_rate']).minimize(p['l2_loss'])
# p['test_error'] = tf.placeholder(tf.float32)
# 超参数设置
initial_learning_rate = 0.01
min_learning_rate = 0.000001
learning_rate_decay_limit = 0.0001
num_batches_per_epoch = len(dataset.train) / float(batch_size)
learning_decay = 10 * num_batches_per_epoch
weights_decay_after = 5 * num_batches_per_epoch
checkpoint_num = 0
learning_step = 0
min_loss = 1e308
if voxnet_point:
cfg.voxnet_checkpoint = voxnet_point
accuracy_filename = os.path.join(cfg.fcn_checkpoint_dir, 'accuracies.txt')
if not os.path.isdir(cfg.fcn_checkpoint_dir):
os.mkdir(cfg.fcn_checkpoint_dir)
if not os.path.exists(accuracy_filename):
with open(accuracy_filename, 'a') as f:
f.write('')
with open(accuracy_filename,'a') as f:
f.write(str(brain_map)+'\n')
#返回值
test_evaluation = evaluation()
with tf.Session() as session:
session.run(tf.global_variables_initializer())
voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint)
#voxnet赋值
input_shape[0]=1
voxnet_data = np.ones(input_shape,np.float32)
input_shape[0]=-1
for batch_index in range(num_batches):
start = time.time()
# learning_rate = max(min_learning_rate,
# initial_learning_rate * 0.5 ** (learning_step / learning_decay))
learning_rate = 0.0001
learning_step += 1
if batch_index > weights_decay_after and batch_index % 256 == 0:
session.run(p['weights_decay'], feed_dict=feed_dict)
voxs, labels = dataset.train.oversampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:0.7, p['labels']: labels,
p['learning_rate']: learning_rate, FCNs.training: True,p['data_value']:data_value}
session.run(p['train'], feed_dict=feed_dict)
if batch_index and batch_index % 32 == 0:
print("{} batch: {}".format(datetime.datetime.now(), batch_index))
print('learning rate: {}'.format(learning_rate))
# fr.write("{} batch: {}".format(datetime.datetime.now(), batch_index))
# fr.write('learning rate: {}'.format(learning_rate))
feed_dict[FCNs.training] = False
loss = session.run(p['loss'], feed_dict=feed_dict)
print('loss: {}'.format(loss))
if (batch_index and loss > 1.5 * min_loss and
learning_rate > learning_rate_decay_limit):
min_loss = loss
learning_step *= 1.2
print("decreasing learning rate...")
min_loss = min(loss, min_loss)
if batch_index and batch_index % 16 == 0:
num_accuracy_batches = 20
| ('time:',(end-start)/60)
return test_evaluation
if __name__ == '__main__':
tf.app.run()
| train_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs, voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0,p['labels']: labels, FCNs.training: False}
predictions, y_true = session.run([p['prediction'], p['y_true']], feed_dict=feed_dict)
train_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print('training accuracy \n' + str(train_evaluation))
num_accuracy_batches = test_size
test_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
predictions,y_true = session.run([p['prediction'],p['y_true']], feed_dict=feed_dict)
test_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print(test_evaluation)
print('test accuracy \n'+str(test_evaluation))
with open(accuracy_filename, 'a') as f:
f.write('checkpoint_num:' + str(checkpoint_num) + ':\n')
f.write('train:\n' + str(train_evaluation) + '\n')
f.write('test:\n' + str(test_evaluation) + '\n')
if batch_index % 64 or train_evaluation.ACC >= 0.8 == 0:
######SVM分类器####################
svm_feature = np.zeros((train_len+test_len,128))
svm_label = np.zeros(train_len+test_len)
for x in range(train_len):
voxs, labels = svm_dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1,128])
svm_feature[x] = feature
# print(svm_feature[x])
svm_label[x] = y_true
for x in range(test_len):
voxs, labels = svm_dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1, 128])
svm_feature[train_len + x] = feature
svm_label[train_len + x] = y_true
# print(svm_feature[0:train_len])
# print(svm_label[0:train_len])
clf = svm.SVC(C=1.0,kernel='rbf',gamma='auto')
clf.fit(svm_feature[0:train_len],svm_label[0:train_len])
predictions = clf.predict(svm_feature)
svm_train_evaluation = evaluation(y_true=svm_label[:train_len],y_predict=predictions[:train_len])
svm_test_evaluation = evaluation(y_true=svm_label[train_len:],y_predict=predictions[train_len:])
print('svm_train:\n'+str(svm_train_evaluation))
print('svm_test:\n' + str(svm_test_evaluation))
with open(accuracy_filename,'a') as f:
f.write('svm_train:\n' + str(svm_train_evaluation) + '\n')
f.write('svm_test:\n' + str(svm_test_evaluation) + '\n')
#################################################
# fr.write('test accuracy: {}'.format(test_accuracy))
if batch_index % 128 == 0 or train_evaluation.ACC >= 0.85:
print('saving checkpoint {}...'.format(checkpoint_num))
filename = 'cx-{}.npz'.format(checkpoint_num)
filename = os.path.join(cfg.fcn_checkpoint_dir, filename)
FCNs.npz_saver.save(session, filename)
print('checkpoint saved!')
checkpoint_num += 1
if train_evaluation.ACC >= 0.85:
break
end = time.time()
print | conditional_block |
train_fcn.py | import math, datetime, os
from FCN import *
from voxnet import VoxNet
from fmri_data import fMRI_data
from config import cfg
import time
from evaluation import *
from sklearn import svm
def main(data_index=None,cut_shape=None,data_type=['MCIc','MCInc'],pre_dir='/home/anzeng/rhb/fmri_data',
num_batches = 256*5,voxnet_point=None,test_size = 6,brain_map=[217]):
# fr = open(cfg.output, 'w')
| tf.reset_default_graph()
time_dim = 80 # 挑选时间片个数
batch_size = 8
dataset = fMRI_data(data_type,data_index=data_index,varbass=False,dir=pre_dir)
#SVM index
#########################
svm_index = {}
train_len = 0
test_len = 0
for d_type in data_type:
t_dir = os.path.join(pre_dir,d_type)
t_len = os.listdir(t_dir)
t_len = len(t_len)
train_index = list(range(t_len))
test_index = data_index[d_type]['test']
for x in test_index:
train_index.remove(x)
_index = {'train':train_index,'test':test_index}
train_len += len(train_index)
test_len += len(test_index)
svm_index[d_type] = _index
print(train_len)
print(test_len)
print(svm_index)
svm_dataset = fMRI_data(data_type,data_index = svm_index,varbass=False,dir=pre_dir)
##########################
xyz = 32
input_shape = [None,xyz,xyz,xyz,1]
# for i in range(3):
# input_shape.append(cut_shape[2 * i + 1] + 1 - cut_shape[2 * i])
# input_shape.append(1)
# print(input_shape)
voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')
FCNs = Classifier_FCN(tf.placeholder(tf.float32,[None,time_dim,50]),nb_classes=2)
data_value = [[1], [1]]
# 创建数据
p = dict() # placeholders
p['labels'] = tf.placeholder(tf.float32, [None, 2])
p['data_value'] = tf.placeholder(tf.float32, [2, 1])
p['Weight'] = tf.matmul(p['labels'], p['data_value'])
p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(logits=FCNs[-2], labels=p['labels'])
p['Weight'] = tf.reshape(p['Weight'], [-1])
p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
p['loss'] = tf.reduce_mean(p['x_loss'])
p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in FCNs.kernels])
p['prediction'] = tf.argmax(FCNs[-1],1)
p['y_true'] = tf.argmax(p['labels'],1)
p['correct_prediction'] = tf.equal(p['prediction'], p['y_true'])
p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'], tf.float32))
p['learning_rate'] = tf.placeholder(tf.float32)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
p['train'] = tf.train.AdamOptimizer(p['learning_rate'], epsilon=1e-3).minimize(p['loss'])
p['weights_decay'] = tf.train.GradientDescentOptimizer(p['learning_rate']).minimize(p['l2_loss'])
# p['test_error'] = tf.placeholder(tf.float32)
# 超参数设置
initial_learning_rate = 0.01
min_learning_rate = 0.000001
learning_rate_decay_limit = 0.0001
num_batches_per_epoch = len(dataset.train) / float(batch_size)
learning_decay = 10 * num_batches_per_epoch
weights_decay_after = 5 * num_batches_per_epoch
checkpoint_num = 0
learning_step = 0
min_loss = 1e308
if voxnet_point:
cfg.voxnet_checkpoint = voxnet_point
accuracy_filename = os.path.join(cfg.fcn_checkpoint_dir, 'accuracies.txt')
if not os.path.isdir(cfg.fcn_checkpoint_dir):
os.mkdir(cfg.fcn_checkpoint_dir)
if not os.path.exists(accuracy_filename):
with open(accuracy_filename, 'a') as f:
f.write('')
with open(accuracy_filename,'a') as f:
f.write(str(brain_map)+'\n')
#返回值
test_evaluation = evaluation()
with tf.Session() as session:
session.run(tf.global_variables_initializer())
voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint)
#voxnet赋值
input_shape[0]=1
voxnet_data = np.ones(input_shape,np.float32)
input_shape[0]=-1
for batch_index in range(num_batches):
start = time.time()
# learning_rate = max(min_learning_rate,
# initial_learning_rate * 0.5 ** (learning_step / learning_decay))
learning_rate = 0.0001
learning_step += 1
if batch_index > weights_decay_after and batch_index % 256 == 0:
session.run(p['weights_decay'], feed_dict=feed_dict)
voxs, labels = dataset.train.oversampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:0.7, p['labels']: labels,
p['learning_rate']: learning_rate, FCNs.training: True,p['data_value']:data_value}
session.run(p['train'], feed_dict=feed_dict)
if batch_index and batch_index % 32 == 0:
print("{} batch: {}".format(datetime.datetime.now(), batch_index))
print('learning rate: {}'.format(learning_rate))
# fr.write("{} batch: {}".format(datetime.datetime.now(), batch_index))
# fr.write('learning rate: {}'.format(learning_rate))
feed_dict[FCNs.training] = False
loss = session.run(p['loss'], feed_dict=feed_dict)
print('loss: {}'.format(loss))
if (batch_index and loss > 1.5 * min_loss and
learning_rate > learning_rate_decay_limit):
min_loss = loss
learning_step *= 1.2
print("decreasing learning rate...")
min_loss = min(loss, min_loss)
if batch_index and batch_index % 16 == 0:
num_accuracy_batches = 20
train_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs, voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0,p['labels']: labels, FCNs.training: False}
predictions, y_true = session.run([p['prediction'], p['y_true']], feed_dict=feed_dict)
train_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print('training accuracy \n' + str(train_evaluation))
num_accuracy_batches = test_size
test_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
predictions,y_true = session.run([p['prediction'],p['y_true']], feed_dict=feed_dict)
test_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print(test_evaluation)
print('test accuracy \n'+str(test_evaluation))
with open(accuracy_filename, 'a') as f:
f.write('checkpoint_num:' + str(checkpoint_num) + ':\n')
f.write('train:\n' + str(train_evaluation) + '\n')
f.write('test:\n' + str(test_evaluation) + '\n')
if batch_index % 64 or train_evaluation.ACC >= 0.8 == 0:
######SVM分类器####################
svm_feature = np.zeros((train_len+test_len,128))
svm_label = np.zeros(train_len+test_len)
for x in range(train_len):
voxs, labels = svm_dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1,128])
svm_feature[x] = feature
# print(svm_feature[x])
svm_label[x] = y_true
for x in range(test_len):
voxs, labels = svm_dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1, 128])
svm_feature[train_len + x] = feature
svm_label[train_len + x] = y_true
# print(svm_feature[0:train_len])
# print(svm_label[0:train_len])
clf = svm.SVC(C=1.0,kernel='rbf',gamma='auto')
clf.fit(svm_feature[0:train_len],svm_label[0:train_len])
predictions = clf.predict(svm_feature)
svm_train_evaluation = evaluation(y_true=svm_label[:train_len],y_predict=predictions[:train_len])
svm_test_evaluation = evaluation(y_true=svm_label[train_len:],y_predict=predictions[train_len:])
print('svm_train:\n'+str(svm_train_evaluation))
print('svm_test:\n' + str(svm_test_evaluation))
with open(accuracy_filename,'a') as f:
f.write('svm_train:\n' + str(svm_train_evaluation) + '\n')
f.write('svm_test:\n' + str(svm_test_evaluation) + '\n')
#################################################
# fr.write('test accuracy: {}'.format(test_accuracy))
if batch_index % 128 == 0 or train_evaluation.ACC >= 0.85:
print('saving checkpoint {}...'.format(checkpoint_num))
filename = 'cx-{}.npz'.format(checkpoint_num)
filename = os.path.join(cfg.fcn_checkpoint_dir, filename)
FCNs.npz_saver.save(session, filename)
print('checkpoint saved!')
checkpoint_num += 1
if train_evaluation.ACC >= 0.85:
break
end = time.time()
print('time:',(end-start)/60)
return test_evaluation
if __name__ == '__main__':
tf.app.run()
| identifier_body | |
train_fcn.py | import math, datetime, os
from FCN import *
from voxnet import VoxNet
from fmri_data import fMRI_data
from config import cfg
import time
from evaluation import *
from sklearn import svm
def main(data_index=None,cut_shape=None,data_type=['MCIc','MCInc'],pre_dir='/home/anzeng/rhb/fmri_data',
num_batches = 256*5,voxnet_point=None,test_size = 6,brain_map=[217]):
# fr = open(cfg.output, 'w')
tf.reset_default_graph()
time_dim = 80 # 挑选时间片个数
batch_size = 8
dataset = fMRI_data(data_type,data_index=data_index,varbass=False,dir=pre_dir)
#SVM index
#########################
svm_index = {}
train_len = 0
test_len = 0
for d_type in data_type:
t_dir = os.path.join(pre_dir,d_type)
t_len = os.listdir(t_dir)
t_len = len(t_len)
train_index = list(range(t_len))
test_index = data_index[d_type]['test']
for x in test_index:
train_index.remove(x)
_index = {'train':train_index,'test':test_index}
train_len += len(train_index)
test_len += len(test_index)
svm_index[d_type] = _index
print(train_len)
print(test_len)
print(svm_index)
svm_dataset = fMRI_data(data_type,data_index = svm_index,varbass=False,dir=pre_dir)
##########################
xyz = 32
input_shape = [None,xyz,xyz,xyz,1]
# for i in range(3):
# input_shape.append(cut_shape[2 * i + 1] + 1 - cut_shape[2 * i])
# input_shape.append(1)
# print(input_shape)
voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')
FCNs = Classifier_FCN(tf.placeholder(tf.float32,[None,time_dim,50]),nb_classes=2)
data_value = [[1], [1]]
# 创建数据
p = dict() # placeholders
p['labels'] = tf.placeholder(tf.float32, [None, 2])
p['data_value'] = tf.placeholder(tf.float32, [2, 1])
p['Weight'] = tf.matmul(p['labels'], p['data_value'])
p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(logits=FCNs[-2], labels=p['labels'])
p['Weight'] = tf.reshape(p['Weight'], [-1])
p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
p['loss'] = tf.reduce_mean(p['x_loss'])
p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in FCNs.kernels])
p['prediction'] = tf.argmax(FCNs[-1],1)
p['y_true'] = tf.argmax(p['labels'],1)
p['correct_prediction'] = tf.equal(p['prediction'], p['y_true'])
p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'], tf.float32))
p['learning_rate'] = tf.placeholder(tf.float32)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
p['train'] = tf.train.AdamOptimizer(p['learning_rate'], epsilon=1e-3).minimize(p['loss'])
p['weights_decay'] = tf.train.GradientDescentOptimizer(p['learning_rate']).minimize(p['l2_loss'])
# p['test_error'] = tf.placeholder(tf.float32)
# 超参数设置
initial_learning_rate = 0.01
min_learning_rate = 0.000001
learning_rate_decay_limit = 0.0001
num_batches_per_epoch = len(dataset.train) / float(batch_size)
learning_decay = 10 * num_batches_per_epoch
weights_decay_after = 5 * num_batches_per_epoch
checkpoint_num = 0
learning_step = 0
min_loss = 1e308
if voxnet_point:
cfg.voxnet_checkpoint = voxnet_point
accuracy_filename = os.path.join(cfg.fcn_checkpoint_dir, 'accuracies.txt')
if not os.path.isdir(cfg.fcn_checkpoint_dir):
os.mkdir(cfg.fcn_checkpoint_dir)
if not os.path.exists(accuracy_filename):
with open(accuracy_filename, 'a') as f:
f.write('')
with open(accuracy_filename,'a') as f:
f.write(str(brain_map)+'\n')
#返回值
test_evaluation = evaluation()
with tf.Session() as session:
session.run(tf.global_variables_initializer())
voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint)
#voxnet赋值
input_shape[0]=1
voxnet_data = np.ones(input_shape,np.float32)
input_shape[0]=-1
for batch_index in range(num_batches):
start = time.time()
# learning_rate = max(min_learning_rate,
# initial_learning_rate * 0.5 ** (learning_step / learning_decay))
learning_rate = 0.0001
learning_step += 1
| p['learning_rate']: learning_rate, FCNs.training: True,p['data_value']:data_value}
session.run(p['train'], feed_dict=feed_dict)
if batch_index and batch_index % 32 == 0:
print("{} batch: {}".format(datetime.datetime.now(), batch_index))
print('learning rate: {}'.format(learning_rate))
# fr.write("{} batch: {}".format(datetime.datetime.now(), batch_index))
# fr.write('learning rate: {}'.format(learning_rate))
feed_dict[FCNs.training] = False
loss = session.run(p['loss'], feed_dict=feed_dict)
print('loss: {}'.format(loss))
if (batch_index and loss > 1.5 * min_loss and
learning_rate > learning_rate_decay_limit):
min_loss = loss
learning_step *= 1.2
print("decreasing learning rate...")
min_loss = min(loss, min_loss)
if batch_index and batch_index % 16 == 0:
num_accuracy_batches = 20
train_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs, voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0,p['labels']: labels, FCNs.training: False}
predictions, y_true = session.run([p['prediction'], p['y_true']], feed_dict=feed_dict)
train_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print('training accuracy \n' + str(train_evaluation))
num_accuracy_batches = test_size
test_evaluation = evaluation()
for x in range(num_accuracy_batches):
voxs, labels = dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
predictions,y_true = session.run([p['prediction'],p['y_true']], feed_dict=feed_dict)
test_evaluation += evaluation(y_true=y_true, y_predict=predictions)
print(test_evaluation)
print('test accuracy \n'+str(test_evaluation))
with open(accuracy_filename, 'a') as f:
f.write('checkpoint_num:' + str(checkpoint_num) + ':\n')
f.write('train:\n' + str(train_evaluation) + '\n')
f.write('test:\n' + str(test_evaluation) + '\n')
if batch_index % 64 or train_evaluation.ACC >= 0.8 == 0:
######SVM分类器####################
svm_feature = np.zeros((train_len+test_len,128))
svm_label = np.zeros(train_len+test_len)
for x in range(train_len):
voxs, labels = svm_dataset.train.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1,128])
svm_feature[x] = feature
# print(svm_feature[x])
svm_label[x] = y_true
for x in range(test_len):
voxs, labels = svm_dataset.test.random_sampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size = 1)
feed_dict = {FCNs[0]: voxs, voxnet[0]: voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:1.0, p['labels']: labels, FCNs.training: False}
feature,y_true = session.run([FCNs['gap'],p['y_true']],feed_dict = feed_dict)
feature = np.reshape(feature,[1, 128])
svm_feature[train_len + x] = feature
svm_label[train_len + x] = y_true
# print(svm_feature[0:train_len])
# print(svm_label[0:train_len])
clf = svm.SVC(C=1.0,kernel='rbf',gamma='auto')
clf.fit(svm_feature[0:train_len],svm_label[0:train_len])
predictions = clf.predict(svm_feature)
svm_train_evaluation = evaluation(y_true=svm_label[:train_len],y_predict=predictions[:train_len])
svm_test_evaluation = evaluation(y_true=svm_label[train_len:],y_predict=predictions[train_len:])
print('svm_train:\n'+str(svm_train_evaluation))
print('svm_test:\n' + str(svm_test_evaluation))
with open(accuracy_filename,'a') as f:
f.write('svm_train:\n' + str(svm_train_evaluation) + '\n')
f.write('svm_test:\n' + str(svm_test_evaluation) + '\n')
#################################################
# fr.write('test accuracy: {}'.format(test_accuracy))
if batch_index % 128 == 0 or train_evaluation.ACC >= 0.85:
print('saving checkpoint {}...'.format(checkpoint_num))
filename = 'cx-{}.npz'.format(checkpoint_num)
filename = os.path.join(cfg.fcn_checkpoint_dir, filename)
FCNs.npz_saver.save(session, filename)
print('checkpoint saved!')
checkpoint_num += 1
if train_evaluation.ACC >= 0.85:
break
end = time.time()
print('time:',(end-start)/60)
return test_evaluation
if __name__ == '__main__':
tf.app.run() | if batch_index > weights_decay_after and batch_index % 256 == 0:
session.run(p['weights_decay'], feed_dict=feed_dict)
voxs, labels = dataset.train.oversampling.get_time_batch(session,voxnet,cut_shape,time_dim=time_dim,batch_size=batch_size)
feed_dict = {FCNs[0]: voxs,voxnet[0]:voxnet_data,voxnet.keep_prob:1.0,FCNs.keep_prob:0.7, p['labels']: labels, | random_line_split |
emulatorlauncher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import time
import sys
from sys import exit
from Emulator import Emulator
from Evmapy import Evmapy
import generators
from generators.kodi.kodiGenerator import KodiGenerator
from generators.linapple.linappleGenerator import LinappleGenerator
from generators.libretro.libretroGenerator import LibretroGenerator
from generators.moonlight.moonlightGenerator import MoonlightGenerator
from generators.mupen.mupenGenerator import MupenGenerator
from generators.ppsspp.ppssppGenerator import PPSSPPGenerator
from generators.reicast.reicastGenerator import ReicastGenerator
from generators.flycast.flycastGenerator import FlycastGenerator
from generators.dolphin.dolphinGenerator import DolphinGenerator
from generators.pcsx2.pcsx2Generator import Pcsx2Generator
from generators.scummvm.scummvmGenerator import ScummVMGenerator
from generators.dosbox.dosboxGenerator import DosBoxGenerator
from generators.dosboxx.dosboxxGenerator import DosBoxxGenerator
from generators.vice.viceGenerator import ViceGenerator
from generators.fsuae.fsuaeGenerator import FsuaeGenerator
from generators.amiberry.amiberryGenerator import AmiberryGenerator
from generators.citra.citraGenerator import CitraGenerator
from generators.daphne.daphneGenerator import DaphneGenerator
from generators.cannonball.cannonballGenerator import CannonballGenerator
from generators.openbor.openborGenerator import OpenborGenerator
from generators.wine.wineGenerator import WineGenerator
from generators.cemu.cemuGenerator import CemuGenerator
from generators.melonds.melondsGenerator import MelonDSGenerator
from generators.rpcs3.rpcs3Generator import Rpcs3Generator
import controllersConfig as controllers
import signal
import batoceraFiles
import os
import subprocess
import json
import utils.videoMode as videoMode
from utils.logger import eslog
generators = {
'kodi': KodiGenerator(),
'linapple': LinappleGenerator(),
'libretro': LibretroGenerator(),
'moonlight': MoonlightGenerator(),
'scummvm': ScummVMGenerator(),
'dosbox': DosBoxGenerator(),
'dosboxx': DosBoxxGenerator(),
'mupen64plus': MupenGenerator(),
'vice': ViceGenerator(),
'fsuae': FsuaeGenerator(),
'amiberry': AmiberryGenerator(),
'reicast': ReicastGenerator(),
'flycast': FlycastGenerator(),
'dolphin': DolphinGenerator(),
'pcsx2': Pcsx2Generator(),
'ppsspp': PPSSPPGenerator(),
'citra' : CitraGenerator(),
'daphne' : DaphneGenerator(),
'cannonball' : CannonballGenerator(),
'openbor' : OpenborGenerator(),
'wine' : WineGenerator(),
'cemu' : CemuGenerator(),
'melonds' : MelonDSGenerator(),
'rpcs3' : Rpcs3Generator()
}
def main(args, maxnbplayers):
playersControllers = dict()
controllersInput = []
for p in range(1, maxnbplayers+1):
ci = {}
ci["index"] = getattr(args, "p{}index" .format(p))
ci["guid"] = getattr(args, "p{}guid" .format(p))
ci["name"] = str(getattr(args, "p{}name" .format(p))).replace("®", "®")
ci["devicepath"] = getattr(args, "p{}devicepath".format(p))
ci["nbbuttons"] = getattr(args, "p{}nbbuttons" .format(p))
ci["nbhats"] = getattr(args, "p{}nbhats" .format(p))
ci["nbaxes"] = getattr(args, "p{}nbaxes" .format(p))
controllersInput.append(ci)
# Read the controller configuration
playersControllers = controllers.loadControllerConfig(controllersInput)
# find the system to run
systemName = args.system
eslog.log("Running system: {}".format(systemName))
system = Emulator(systemName, args.rom)
system.config["emulator-forced"] = False
system.config["core-forced"] = False
if args.emulator is not None:
system.config["emulator"] = args.emulator
system.config["emulator-forced"] = True # tip to indicated that the emulator was forced
if args.core is not None:
system.config["core"] = args.core
system.config["core-forced"] = True
eslog.debug("Settings: {}".format(system.config))
if "emulator" in system.config and "core" in system.config:
eslog.log("emulator: {}, core: {}".format(system.config["emulator"], system.config["core"]))
else:
if "emulator" in system.config:
e |
# the resolution must be changed before configuration while the configuration may depend on it (ie bezels)
wantedGameMode = generators[system.config['emulator']].getResolutionMode(system.config)
systemMode = videoMode.getCurrentMode()
resolutionChanged = False
exitCode = -1
try:
eslog.log("current video mode: {}".format(systemMode))
eslog.log("wanted video mode: {}".format(wantedGameMode))
if wantedGameMode != 'default' and wantedGameMode != systemMode:
videoMode.changeMode(wantedGameMode)
resolutionChanged = True
gameResolution = videoMode.getCurrentResolution()
eslog.log("resolution: {}x{}".format(str(gameResolution["width"]), str(gameResolution["height"])))
# savedir: create the save directory if not already done
dirname = os.path.join(batoceraFiles.savesDir, system.name)
if not os.path.exists(dirname):
os.makedirs(dirname)
# core
effectiveCore = ""
if "core" in system.config and system.config["core"] is not None:
effectiveCore = system.config["core"]
effectiveRom = ""
if args.rom is not None:
effectiveRom = args.rom
# network options
if args.netplaymode is not None:
system.config["netplay.mode"] = args.netplaymode
if args.netplayspectator is not None:
system.config["netplay.spectator"] = args.netplayspectator
if args.netplayip is not None:
system.config["netplay.server.ip"] = args.netplayip
if args.netplayport is not None:
system.config["netplay.server.port"] = args.netplayport
# run a script before emulator starts
callExternalScripts("/usr/share/batocera/configgen/scripts", "gameStart", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
callExternalScripts("/userdata/system/scripts", "gameStart", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
# run the emulator
try:
Evmapy.start(systemName, system.config['emulator'], effectiveCore, effectiveRom, playersControllers)
exitCode = runCommand(generators[system.config['emulator']].generate(system, args.rom, playersControllers, gameResolution))
finally:
Evmapy.stop()
# run a script after emulator shuts down
callExternalScripts("/userdata/system/scripts", "gameStop", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
callExternalScripts("/usr/share/batocera/configgen/scripts", "gameStop", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
finally:
# always restore the resolution
if resolutionChanged:
try:
videoMode.changeMode(systemMode)
except Exception:
pass # don't fail
# exit
return exitCode
def callExternalScripts(folder, event, args):
if not os.path.isdir(folder):
return
for file in os.listdir(folder):
if os.path.isdir(os.path.join(folder, file)):
callExternalScripts(os.path.join(folder, file), event, args)
else:
if os.access(os.path.join(folder, file), os.X_OK):
eslog.log("calling external script: " + str([os.path.join(folder, file), event] + args))
subprocess.call([os.path.join(folder, file), event] + args)
def runCommand(command):
global proc
command.env.update(os.environ)
eslog.log("command: {}".format(str(command)))
eslog.log("command: {}".format(str(command.array)))
eslog.log("env: {}".format(str(command.env)))
proc = subprocess.Popen(command.array, env=command.env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
exitcode = -1
try:
out, err = proc.communicate()
exitcode = proc.returncode
sys.stdout.write(out)
sys.stderr.write(err)
except:
eslog("emulator exited")
return exitcode
def signal_handler(signal, frame):
global proc
print('Exiting')
if proc:
print('killing proc')
proc.kill()
if __name__ == '__main__':
proc = None
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser(description='emulator-launcher script')
maxnbplayers = 8
for p in range(1, maxnbplayers+1):
parser.add_argument("-p{}index" .format(p), help="player{} controller index" .format(p), type=int, required=False)
parser.add_argument("-p{}guid" .format(p), help="player{} controller SDL2 guid" .format(p), type=str, required=False)
parser.add_argument("-p{}name" .format(p), help="player{} controller name" .format(p), type=str, required=False)
parser.add_argument("-p{}devicepath".format(p), help="player{} controller device" .format(p), type=str, required=False)
parser.add_argument("-p{}nbbuttons" .format(p), help="player{} controller number of buttons".format(p), type=str, required=False)
parser.add_argument("-p{}nbhats" .format(p), help="player{} controller number of hats" .format(p), type=str, required=False)
parser.add_argument("-p{}nbaxes" .format(p), help="player{} controller number of axes" .format(p), type=str, required=False)
parser.add_argument("-system", help="select the system to launch", type=str, required=True)
parser.add_argument("-rom", help="rom absolute path", type=str, required=True)
parser.add_argument("-emulator", help="force emulator", type=str, required=False)
parser.add_argument("-core", help="force emulator core", type=str, required=False)
parser.add_argument("-netplaymode", help="host/client", type=str, required=False)
parser.add_argument("-netplayspectator", help="enable spectator mode", default=False, action='store_true', required=False)
parser.add_argument("-netplayip", help="remote ip", type=str, required=False)
parser.add_argument("-netplayport", help="remote port", type=str, required=False)
args = parser.parse_args()
try:
exitcode = -1
exitcode = main(args, maxnbplayers)
except Exception as e:
eslog.error("configgen exception: ", exc_info=True)
time.sleep(1) # this seems to be required so that the gpu memory is restituated and available for es
eslog.log("Exiting configgen with status {}".format(str(exitcode)))
exit(exitcode)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| slog.log("emulator: {}".format(system.config["emulator"]))
| conditional_block |
emulatorlauncher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import time
import sys
from sys import exit
from Emulator import Emulator
from Evmapy import Evmapy
import generators
from generators.kodi.kodiGenerator import KodiGenerator
from generators.linapple.linappleGenerator import LinappleGenerator
from generators.libretro.libretroGenerator import LibretroGenerator
from generators.moonlight.moonlightGenerator import MoonlightGenerator
from generators.mupen.mupenGenerator import MupenGenerator
from generators.ppsspp.ppssppGenerator import PPSSPPGenerator
from generators.reicast.reicastGenerator import ReicastGenerator
from generators.flycast.flycastGenerator import FlycastGenerator
from generators.dolphin.dolphinGenerator import DolphinGenerator
from generators.pcsx2.pcsx2Generator import Pcsx2Generator
from generators.scummvm.scummvmGenerator import ScummVMGenerator
from generators.dosbox.dosboxGenerator import DosBoxGenerator
from generators.dosboxx.dosboxxGenerator import DosBoxxGenerator
from generators.vice.viceGenerator import ViceGenerator
from generators.fsuae.fsuaeGenerator import FsuaeGenerator
from generators.amiberry.amiberryGenerator import AmiberryGenerator
from generators.citra.citraGenerator import CitraGenerator
from generators.daphne.daphneGenerator import DaphneGenerator
from generators.cannonball.cannonballGenerator import CannonballGenerator
from generators.openbor.openborGenerator import OpenborGenerator
from generators.wine.wineGenerator import WineGenerator
from generators.cemu.cemuGenerator import CemuGenerator
from generators.melonds.melondsGenerator import MelonDSGenerator
from generators.rpcs3.rpcs3Generator import Rpcs3Generator
import controllersConfig as controllers
import signal
import batoceraFiles
import os
import subprocess
import json
import utils.videoMode as videoMode
from utils.logger import eslog
generators = {
'kodi': KodiGenerator(),
'linapple': LinappleGenerator(),
'libretro': LibretroGenerator(),
'moonlight': MoonlightGenerator(),
'scummvm': ScummVMGenerator(),
'dosbox': DosBoxGenerator(),
'dosboxx': DosBoxxGenerator(),
'mupen64plus': MupenGenerator(),
'vice': ViceGenerator(),
'fsuae': FsuaeGenerator(),
'amiberry': AmiberryGenerator(),
'reicast': ReicastGenerator(),
'flycast': FlycastGenerator(),
'dolphin': DolphinGenerator(),
'pcsx2': Pcsx2Generator(),
'ppsspp': PPSSPPGenerator(),
'citra' : CitraGenerator(),
'daphne' : DaphneGenerator(),
'cannonball' : CannonballGenerator(),
'openbor' : OpenborGenerator(),
'wine' : WineGenerator(),
'cemu' : CemuGenerator(),
'melonds' : MelonDSGenerator(),
'rpcs3' : Rpcs3Generator()
}
def main(args, maxnbplayers):
playersControllers = dict()
controllersInput = []
for p in range(1, maxnbplayers+1):
ci = {}
ci["index"] = getattr(args, "p{}index" .format(p))
ci["guid"] = getattr(args, "p{}guid" .format(p))
ci["name"] = str(getattr(args, "p{}name" .format(p))).replace("®", "®")
ci["devicepath"] = getattr(args, "p{}devicepath".format(p))
ci["nbbuttons"] = getattr(args, "p{}nbbuttons" .format(p))
ci["nbhats"] = getattr(args, "p{}nbhats" .format(p))
ci["nbaxes"] = getattr(args, "p{}nbaxes" .format(p))
controllersInput.append(ci)
# Read the controller configuration
playersControllers = controllers.loadControllerConfig(controllersInput)
# find the system to run
systemName = args.system
eslog.log("Running system: {}".format(systemName))
system = Emulator(systemName, args.rom)
system.config["emulator-forced"] = False
system.config["core-forced"] = False
if args.emulator is not None:
system.config["emulator"] = args.emulator
system.config["emulator-forced"] = True # tip to indicated that the emulator was forced
if args.core is not None:
system.config["core"] = args.core
system.config["core-forced"] = True
eslog.debug("Settings: {}".format(system.config))
if "emulator" in system.config and "core" in system.config:
eslog.log("emulator: {}, core: {}".format(system.config["emulator"], system.config["core"]))
else:
if "emulator" in system.config:
eslog.log("emulator: {}".format(system.config["emulator"]))
# the resolution must be changed before configuration while the configuration may depend on it (ie bezels)
wantedGameMode = generators[system.config['emulator']].getResolutionMode(system.config)
systemMode = videoMode.getCurrentMode()
resolutionChanged = False
exitCode = -1
try:
eslog.log("current video mode: {}".format(systemMode))
eslog.log("wanted video mode: {}".format(wantedGameMode))
if wantedGameMode != 'default' and wantedGameMode != systemMode:
videoMode.changeMode(wantedGameMode)
resolutionChanged = True
gameResolution = videoMode.getCurrentResolution()
eslog.log("resolution: {}x{}".format(str(gameResolution["width"]), str(gameResolution["height"])))
# savedir: create the save directory if not already done
dirname = os.path.join(batoceraFiles.savesDir, system.name)
if not os.path.exists(dirname):
os.makedirs(dirname)
# core
effectiveCore = ""
if "core" in system.config and system.config["core"] is not None:
effectiveCore = system.config["core"]
effectiveRom = ""
if args.rom is not None:
effectiveRom = args.rom
# network options
if args.netplaymode is not None:
system.config["netplay.mode"] = args.netplaymode
if args.netplayspectator is not None:
system.config["netplay.spectator"] = args.netplayspectator
if args.netplayip is not None:
system.config["netplay.server.ip"] = args.netplayip
if args.netplayport is not None:
system.config["netplay.server.port"] = args.netplayport
# run a script before emulator starts
callExternalScripts("/usr/share/batocera/configgen/scripts", "gameStart", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
callExternalScripts("/userdata/system/scripts", "gameStart", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
# run the emulator
try:
Evmapy.start(systemName, system.config['emulator'], effectiveCore, effectiveRom, playersControllers)
exitCode = runCommand(generators[system.config['emulator']].generate(system, args.rom, playersControllers, gameResolution))
finally:
Evmapy.stop()
# run a script after emulator shuts down
callExternalScripts("/userdata/system/scripts", "gameStop", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
callExternalScripts("/usr/share/batocera/configgen/scripts", "gameStop", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
finally:
# always restore the resolution
if resolutionChanged:
try:
videoMode.changeMode(systemMode)
except Exception:
pass # don't fail
# exit
return exitCode
def c | folder, event, args):
if not os.path.isdir(folder):
return
for file in os.listdir(folder):
if os.path.isdir(os.path.join(folder, file)):
callExternalScripts(os.path.join(folder, file), event, args)
else:
if os.access(os.path.join(folder, file), os.X_OK):
eslog.log("calling external script: " + str([os.path.join(folder, file), event] + args))
subprocess.call([os.path.join(folder, file), event] + args)
def runCommand(command):
global proc
command.env.update(os.environ)
eslog.log("command: {}".format(str(command)))
eslog.log("command: {}".format(str(command.array)))
eslog.log("env: {}".format(str(command.env)))
proc = subprocess.Popen(command.array, env=command.env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
exitcode = -1
try:
out, err = proc.communicate()
exitcode = proc.returncode
sys.stdout.write(out)
sys.stderr.write(err)
except:
eslog("emulator exited")
return exitcode
def signal_handler(signal, frame):
global proc
print('Exiting')
if proc:
print('killing proc')
proc.kill()
if __name__ == '__main__':
proc = None
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser(description='emulator-launcher script')
maxnbplayers = 8
for p in range(1, maxnbplayers+1):
parser.add_argument("-p{}index" .format(p), help="player{} controller index" .format(p), type=int, required=False)
parser.add_argument("-p{}guid" .format(p), help="player{} controller SDL2 guid" .format(p), type=str, required=False)
parser.add_argument("-p{}name" .format(p), help="player{} controller name" .format(p), type=str, required=False)
parser.add_argument("-p{}devicepath".format(p), help="player{} controller device" .format(p), type=str, required=False)
parser.add_argument("-p{}nbbuttons" .format(p), help="player{} controller number of buttons".format(p), type=str, required=False)
parser.add_argument("-p{}nbhats" .format(p), help="player{} controller number of hats" .format(p), type=str, required=False)
parser.add_argument("-p{}nbaxes" .format(p), help="player{} controller number of axes" .format(p), type=str, required=False)
parser.add_argument("-system", help="select the system to launch", type=str, required=True)
parser.add_argument("-rom", help="rom absolute path", type=str, required=True)
parser.add_argument("-emulator", help="force emulator", type=str, required=False)
parser.add_argument("-core", help="force emulator core", type=str, required=False)
parser.add_argument("-netplaymode", help="host/client", type=str, required=False)
parser.add_argument("-netplayspectator", help="enable spectator mode", default=False, action='store_true', required=False)
parser.add_argument("-netplayip", help="remote ip", type=str, required=False)
parser.add_argument("-netplayport", help="remote port", type=str, required=False)
args = parser.parse_args()
try:
exitcode = -1
exitcode = main(args, maxnbplayers)
except Exception as e:
eslog.error("configgen exception: ", exc_info=True)
time.sleep(1) # this seems to be required so that the gpu memory is restituated and available for es
eslog.log("Exiting configgen with status {}".format(str(exitcode)))
exit(exitcode)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| allExternalScripts( | identifier_name |
emulatorlauncher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import time
import sys
from sys import exit
from Emulator import Emulator
from Evmapy import Evmapy
import generators
from generators.kodi.kodiGenerator import KodiGenerator
from generators.linapple.linappleGenerator import LinappleGenerator
from generators.libretro.libretroGenerator import LibretroGenerator
from generators.moonlight.moonlightGenerator import MoonlightGenerator
from generators.mupen.mupenGenerator import MupenGenerator
from generators.ppsspp.ppssppGenerator import PPSSPPGenerator
from generators.reicast.reicastGenerator import ReicastGenerator
from generators.flycast.flycastGenerator import FlycastGenerator
from generators.dolphin.dolphinGenerator import DolphinGenerator
from generators.pcsx2.pcsx2Generator import Pcsx2Generator
from generators.scummvm.scummvmGenerator import ScummVMGenerator
from generators.dosbox.dosboxGenerator import DosBoxGenerator
from generators.dosboxx.dosboxxGenerator import DosBoxxGenerator
from generators.vice.viceGenerator import ViceGenerator
from generators.fsuae.fsuaeGenerator import FsuaeGenerator
from generators.amiberry.amiberryGenerator import AmiberryGenerator
from generators.citra.citraGenerator import CitraGenerator
from generators.daphne.daphneGenerator import DaphneGenerator
from generators.cannonball.cannonballGenerator import CannonballGenerator
from generators.openbor.openborGenerator import OpenborGenerator
from generators.wine.wineGenerator import WineGenerator
from generators.cemu.cemuGenerator import CemuGenerator
from generators.melonds.melondsGenerator import MelonDSGenerator
from generators.rpcs3.rpcs3Generator import Rpcs3Generator
import controllersConfig as controllers
import signal
import batoceraFiles
import os
import subprocess
import json
import utils.videoMode as videoMode
from utils.logger import eslog
generators = {
'kodi': KodiGenerator(),
'linapple': LinappleGenerator(),
'libretro': LibretroGenerator(),
'moonlight': MoonlightGenerator(),
'scummvm': ScummVMGenerator(),
'dosbox': DosBoxGenerator(),
'dosboxx': DosBoxxGenerator(),
'mupen64plus': MupenGenerator(),
'vice': ViceGenerator(),
'fsuae': FsuaeGenerator(),
'amiberry': AmiberryGenerator(),
'reicast': ReicastGenerator(),
'flycast': FlycastGenerator(),
'dolphin': DolphinGenerator(),
'pcsx2': Pcsx2Generator(),
'ppsspp': PPSSPPGenerator(),
'citra' : CitraGenerator(),
'daphne' : DaphneGenerator(),
'cannonball' : CannonballGenerator(),
'openbor' : OpenborGenerator(),
'wine' : WineGenerator(),
'cemu' : CemuGenerator(),
'melonds' : MelonDSGenerator(),
'rpcs3' : Rpcs3Generator()
}
def main(args, maxnbplayers):
playersControllers = dict()
controllersInput = []
for p in range(1, maxnbplayers+1):
ci = {}
ci["index"] = getattr(args, "p{}index" .format(p))
ci["guid"] = getattr(args, "p{}guid" .format(p))
ci["name"] = str(getattr(args, "p{}name" .format(p))).replace("®", "®")
ci["devicepath"] = getattr(args, "p{}devicepath".format(p))
ci["nbbuttons"] = getattr(args, "p{}nbbuttons" .format(p))
ci["nbhats"] = getattr(args, "p{}nbhats" .format(p))
ci["nbaxes"] = getattr(args, "p{}nbaxes" .format(p))
controllersInput.append(ci)
# Read the controller configuration
playersControllers = controllers.loadControllerConfig(controllersInput)
# find the system to run
systemName = args.system
eslog.log("Running system: {}".format(systemName))
system = Emulator(systemName, args.rom)
system.config["emulator-forced"] = False
system.config["core-forced"] = False
if args.emulator is not None:
system.config["emulator"] = args.emulator
system.config["emulator-forced"] = True # tip to indicated that the emulator was forced
if args.core is not None:
system.config["core"] = args.core
system.config["core-forced"] = True
eslog.debug("Settings: {}".format(system.config))
if "emulator" in system.config and "core" in system.config:
eslog.log("emulator: {}, core: {}".format(system.config["emulator"], system.config["core"]))
else:
if "emulator" in system.config:
eslog.log("emulator: {}".format(system.config["emulator"]))
# the resolution must be changed before configuration while the configuration may depend on it (ie bezels)
wantedGameMode = generators[system.config['emulator']].getResolutionMode(system.config)
systemMode = videoMode.getCurrentMode()
resolutionChanged = False
exitCode = -1
try:
eslog.log("current video mode: {}".format(systemMode))
eslog.log("wanted video mode: {}".format(wantedGameMode))
if wantedGameMode != 'default' and wantedGameMode != systemMode:
videoMode.changeMode(wantedGameMode)
resolutionChanged = True
gameResolution = videoMode.getCurrentResolution()
eslog.log("resolution: {}x{}".format(str(gameResolution["width"]), str(gameResolution["height"])))
# savedir: create the save directory if not already done
dirname = os.path.join(batoceraFiles.savesDir, system.name)
if not os.path.exists(dirname):
os.makedirs(dirname)
# core
effectiveCore = ""
if "core" in system.config and system.config["core"] is not None:
effectiveCore = system.config["core"]
effectiveRom = ""
if args.rom is not None:
effectiveRom = args.rom
# network options
if args.netplaymode is not None:
system.config["netplay.mode"] = args.netplaymode
if args.netplayspectator is not None:
system.config["netplay.spectator"] = args.netplayspectator
if args.netplayip is not None:
system.config["netplay.server.ip"] = args.netplayip
if args.netplayport is not None:
system.config["netplay.server.port"] = args.netplayport
# run a script before emulator starts
callExternalScripts("/usr/share/batocera/configgen/scripts", "gameStart", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
callExternalScripts("/userdata/system/scripts", "gameStart", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
# run the emulator
try:
Evmapy.start(systemName, system.config['emulator'], effectiveCore, effectiveRom, playersControllers)
exitCode = runCommand(generators[system.config['emulator']].generate(system, args.rom, playersControllers, gameResolution))
finally:
Evmapy.stop()
# run a script after emulator shuts down
callExternalScripts("/userdata/system/scripts", "gameStop", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
callExternalScripts("/usr/share/batocera/configgen/scripts", "gameStop", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
finally:
# always restore the resolution
if resolutionChanged:
try:
videoMode.changeMode(systemMode)
except Exception:
pass # don't fail
# exit
return exitCode
def callExternalScripts(folder, event, args):
if not os.path.isdir(folder):
return
for file in os.listdir(folder):
if os.path.isdir(os.path.join(folder, file)):
callExternalScripts(os.path.join(folder, file), event, args)
else:
if os.access(os.path.join(folder, file), os.X_OK):
eslog.log("calling external script: " + str([os.path.join(folder, file), event] + args))
subprocess.call([os.path.join(folder, file), event] + args)
def runCommand(command):
g |
def signal_handler(signal, frame):
global proc
print('Exiting')
if proc:
print('killing proc')
proc.kill()
if __name__ == '__main__':
proc = None
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser(description='emulator-launcher script')
maxnbplayers = 8
for p in range(1, maxnbplayers+1):
parser.add_argument("-p{}index" .format(p), help="player{} controller index" .format(p), type=int, required=False)
parser.add_argument("-p{}guid" .format(p), help="player{} controller SDL2 guid" .format(p), type=str, required=False)
parser.add_argument("-p{}name" .format(p), help="player{} controller name" .format(p), type=str, required=False)
parser.add_argument("-p{}devicepath".format(p), help="player{} controller device" .format(p), type=str, required=False)
parser.add_argument("-p{}nbbuttons" .format(p), help="player{} controller number of buttons".format(p), type=str, required=False)
parser.add_argument("-p{}nbhats" .format(p), help="player{} controller number of hats" .format(p), type=str, required=False)
parser.add_argument("-p{}nbaxes" .format(p), help="player{} controller number of axes" .format(p), type=str, required=False)
parser.add_argument("-system", help="select the system to launch", type=str, required=True)
parser.add_argument("-rom", help="rom absolute path", type=str, required=True)
parser.add_argument("-emulator", help="force emulator", type=str, required=False)
parser.add_argument("-core", help="force emulator core", type=str, required=False)
parser.add_argument("-netplaymode", help="host/client", type=str, required=False)
parser.add_argument("-netplayspectator", help="enable spectator mode", default=False, action='store_true', required=False)
parser.add_argument("-netplayip", help="remote ip", type=str, required=False)
parser.add_argument("-netplayport", help="remote port", type=str, required=False)
args = parser.parse_args()
try:
exitcode = -1
exitcode = main(args, maxnbplayers)
except Exception as e:
eslog.error("configgen exception: ", exc_info=True)
time.sleep(1) # this seems to be required so that the gpu memory is restituated and available for es
eslog.log("Exiting configgen with status {}".format(str(exitcode)))
exit(exitcode)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lobal proc
command.env.update(os.environ)
eslog.log("command: {}".format(str(command)))
eslog.log("command: {}".format(str(command.array)))
eslog.log("env: {}".format(str(command.env)))
proc = subprocess.Popen(command.array, env=command.env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
exitcode = -1
try:
out, err = proc.communicate()
exitcode = proc.returncode
sys.stdout.write(out)
sys.stderr.write(err)
except:
eslog("emulator exited")
return exitcode
| identifier_body |
emulatorlauncher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import time
import sys
from sys import exit
from Emulator import Emulator
from Evmapy import Evmapy
import generators
from generators.kodi.kodiGenerator import KodiGenerator
from generators.linapple.linappleGenerator import LinappleGenerator
from generators.libretro.libretroGenerator import LibretroGenerator
from generators.moonlight.moonlightGenerator import MoonlightGenerator
from generators.mupen.mupenGenerator import MupenGenerator
from generators.ppsspp.ppssppGenerator import PPSSPPGenerator
from generators.reicast.reicastGenerator import ReicastGenerator
from generators.flycast.flycastGenerator import FlycastGenerator
from generators.dolphin.dolphinGenerator import DolphinGenerator
from generators.pcsx2.pcsx2Generator import Pcsx2Generator
from generators.scummvm.scummvmGenerator import ScummVMGenerator
from generators.dosbox.dosboxGenerator import DosBoxGenerator
from generators.dosboxx.dosboxxGenerator import DosBoxxGenerator
from generators.vice.viceGenerator import ViceGenerator
from generators.fsuae.fsuaeGenerator import FsuaeGenerator
from generators.amiberry.amiberryGenerator import AmiberryGenerator
from generators.citra.citraGenerator import CitraGenerator
from generators.daphne.daphneGenerator import DaphneGenerator
from generators.cannonball.cannonballGenerator import CannonballGenerator
from generators.openbor.openborGenerator import OpenborGenerator
from generators.wine.wineGenerator import WineGenerator
from generators.cemu.cemuGenerator import CemuGenerator
from generators.melonds.melondsGenerator import MelonDSGenerator
from generators.rpcs3.rpcs3Generator import Rpcs3Generator
import controllersConfig as controllers
import signal
import batoceraFiles
import os
import subprocess
import json
import utils.videoMode as videoMode
from utils.logger import eslog
generators = {
'kodi': KodiGenerator(),
'linapple': LinappleGenerator(),
'libretro': LibretroGenerator(),
'moonlight': MoonlightGenerator(),
'scummvm': ScummVMGenerator(),
'dosbox': DosBoxGenerator(),
'dosboxx': DosBoxxGenerator(),
'mupen64plus': MupenGenerator(),
'vice': ViceGenerator(),
'fsuae': FsuaeGenerator(),
'amiberry': AmiberryGenerator(),
'reicast': ReicastGenerator(),
'flycast': FlycastGenerator(),
'dolphin': DolphinGenerator(),
'pcsx2': Pcsx2Generator(),
'ppsspp': PPSSPPGenerator(),
'citra' : CitraGenerator(),
'daphne' : DaphneGenerator(),
'cannonball' : CannonballGenerator(),
'openbor' : OpenborGenerator(),
'wine' : WineGenerator(),
'cemu' : CemuGenerator(),
'melonds' : MelonDSGenerator(),
'rpcs3' : Rpcs3Generator()
}
def main(args, maxnbplayers):
playersControllers = dict()
controllersInput = []
for p in range(1, maxnbplayers+1):
ci = {}
ci["index"] = getattr(args, "p{}index" .format(p))
ci["guid"] = getattr(args, "p{}guid" .format(p))
ci["name"] = str(getattr(args, "p{}name" .format(p))).replace("®", "®")
ci["devicepath"] = getattr(args, "p{}devicepath".format(p))
ci["nbbuttons"] = getattr(args, "p{}nbbuttons" .format(p))
ci["nbhats"] = getattr(args, "p{}nbhats" .format(p))
ci["nbaxes"] = getattr(args, "p{}nbaxes" .format(p))
controllersInput.append(ci)
# Read the controller configuration
playersControllers = controllers.loadControllerConfig(controllersInput)
# find the system to run
systemName = args.system
eslog.log("Running system: {}".format(systemName))
system = Emulator(systemName, args.rom)
system.config["emulator-forced"] = False
system.config["core-forced"] = False | system.config["emulator-forced"] = True # tip to indicated that the emulator was forced
if args.core is not None:
system.config["core"] = args.core
system.config["core-forced"] = True
eslog.debug("Settings: {}".format(system.config))
if "emulator" in system.config and "core" in system.config:
eslog.log("emulator: {}, core: {}".format(system.config["emulator"], system.config["core"]))
else:
if "emulator" in system.config:
eslog.log("emulator: {}".format(system.config["emulator"]))
# the resolution must be changed before configuration while the configuration may depend on it (ie bezels)
wantedGameMode = generators[system.config['emulator']].getResolutionMode(system.config)
systemMode = videoMode.getCurrentMode()
resolutionChanged = False
exitCode = -1
try:
eslog.log("current video mode: {}".format(systemMode))
eslog.log("wanted video mode: {}".format(wantedGameMode))
if wantedGameMode != 'default' and wantedGameMode != systemMode:
videoMode.changeMode(wantedGameMode)
resolutionChanged = True
gameResolution = videoMode.getCurrentResolution()
eslog.log("resolution: {}x{}".format(str(gameResolution["width"]), str(gameResolution["height"])))
# savedir: create the save directory if not already done
dirname = os.path.join(batoceraFiles.savesDir, system.name)
if not os.path.exists(dirname):
os.makedirs(dirname)
# core
effectiveCore = ""
if "core" in system.config and system.config["core"] is not None:
effectiveCore = system.config["core"]
effectiveRom = ""
if args.rom is not None:
effectiveRom = args.rom
# network options
if args.netplaymode is not None:
system.config["netplay.mode"] = args.netplaymode
if args.netplayspectator is not None:
system.config["netplay.spectator"] = args.netplayspectator
if args.netplayip is not None:
system.config["netplay.server.ip"] = args.netplayip
if args.netplayport is not None:
system.config["netplay.server.port"] = args.netplayport
# run a script before emulator starts
callExternalScripts("/usr/share/batocera/configgen/scripts", "gameStart", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
callExternalScripts("/userdata/system/scripts", "gameStart", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
# run the emulator
try:
Evmapy.start(systemName, system.config['emulator'], effectiveCore, effectiveRom, playersControllers)
exitCode = runCommand(generators[system.config['emulator']].generate(system, args.rom, playersControllers, gameResolution))
finally:
Evmapy.stop()
# run a script after emulator shuts down
callExternalScripts("/userdata/system/scripts", "gameStop", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
callExternalScripts("/usr/share/batocera/configgen/scripts", "gameStop", [systemName, system.config['emulator'], effectiveCore, effectiveRom])
finally:
# always restore the resolution
if resolutionChanged:
try:
videoMode.changeMode(systemMode)
except Exception:
pass # don't fail
# exit
return exitCode
def callExternalScripts(folder, event, args):
if not os.path.isdir(folder):
return
for file in os.listdir(folder):
if os.path.isdir(os.path.join(folder, file)):
callExternalScripts(os.path.join(folder, file), event, args)
else:
if os.access(os.path.join(folder, file), os.X_OK):
eslog.log("calling external script: " + str([os.path.join(folder, file), event] + args))
subprocess.call([os.path.join(folder, file), event] + args)
def runCommand(command):
global proc
command.env.update(os.environ)
eslog.log("command: {}".format(str(command)))
eslog.log("command: {}".format(str(command.array)))
eslog.log("env: {}".format(str(command.env)))
proc = subprocess.Popen(command.array, env=command.env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
exitcode = -1
try:
out, err = proc.communicate()
exitcode = proc.returncode
sys.stdout.write(out)
sys.stderr.write(err)
except:
eslog("emulator exited")
return exitcode
def signal_handler(signal, frame):
global proc
print('Exiting')
if proc:
print('killing proc')
proc.kill()
if __name__ == '__main__':
proc = None
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser(description='emulator-launcher script')
maxnbplayers = 8
for p in range(1, maxnbplayers+1):
parser.add_argument("-p{}index" .format(p), help="player{} controller index" .format(p), type=int, required=False)
parser.add_argument("-p{}guid" .format(p), help="player{} controller SDL2 guid" .format(p), type=str, required=False)
parser.add_argument("-p{}name" .format(p), help="player{} controller name" .format(p), type=str, required=False)
parser.add_argument("-p{}devicepath".format(p), help="player{} controller device" .format(p), type=str, required=False)
parser.add_argument("-p{}nbbuttons" .format(p), help="player{} controller number of buttons".format(p), type=str, required=False)
parser.add_argument("-p{}nbhats" .format(p), help="player{} controller number of hats" .format(p), type=str, required=False)
parser.add_argument("-p{}nbaxes" .format(p), help="player{} controller number of axes" .format(p), type=str, required=False)
parser.add_argument("-system", help="select the system to launch", type=str, required=True)
parser.add_argument("-rom", help="rom absolute path", type=str, required=True)
parser.add_argument("-emulator", help="force emulator", type=str, required=False)
parser.add_argument("-core", help="force emulator core", type=str, required=False)
parser.add_argument("-netplaymode", help="host/client", type=str, required=False)
parser.add_argument("-netplayspectator", help="enable spectator mode", default=False, action='store_true', required=False)
parser.add_argument("-netplayip", help="remote ip", type=str, required=False)
parser.add_argument("-netplayport", help="remote port", type=str, required=False)
args = parser.parse_args()
try:
exitcode = -1
exitcode = main(args, maxnbplayers)
except Exception as e:
eslog.error("configgen exception: ", exc_info=True)
time.sleep(1) # this seems to be required so that the gpu memory is restituated and available for es
eslog.log("Exiting configgen with status {}".format(str(exitcode)))
exit(exitcode)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | if args.emulator is not None:
system.config["emulator"] = args.emulator | random_line_split |
BattleState.ts | /// <reference path="../lib/Phaser/phaser.d.ts"/>
/// <reference path="../lib/Phaser/phaser-tiled.d.ts"/>
module RpgGame {
declare function refreshInventory(): void;
export class BattleState extends Phaser.State {
//Turn based queue systeem
private queue: AttackTurnQueue;
private queueGroup: any;
//ENEMY
private enemies: Unit[];
private attackingUnit: Unit;
//Input handelen
private canAttack: boolean;
//Damage van gekozen skill van speler opslaan
private playerDamage: number;
//DOM ELEMENTEN
private ActionsDiv: HTMLElement;
private SkillsDiv: HTMLElement;
private itemsDiv: HTMLElement;
private BattleMenu: HTMLElement;
private AttackBtn: HTMLElement;
private ItemBtn: HTMLElement;
private SkipBtn: HTMLElement;
private FleeBtn: HTMLElement;
preload() {
this.game.load.image('bg', '../sprites/backgrounds/battle_background.png');
this.game.load.audio("attack", "../Media/Battle/attack.mp3");
this.game.load.audio("hurt", "../Media/Battle/hit.mp3");
this.BattleMenu = document.getElementById("battlemenu");
this.ActionsDiv = document.getElementById("actions");
this.SkillsDiv = document.getElementById("skillslist");
this.itemsDiv = document.getElementById("itemslist");
this.AttackBtn = document.getElementById("attackbtn");
this.ItemBtn = document.getElementById("itembtn");
this.SkipBtn = document.getElementById("skipbtn");
this.FleeBtn = document.getElementById("fleebtn");
this.enemies = new Array<Unit>();
this.scale.fullScreenScaleMode = Phaser.ScaleManager.SHOW_ALL;
this.scale.scaleMode = Phaser.ScaleManager.SHOW_ALL;
this.scale.minWidth = 320;
this.scale.minHeight = 260;
this.scale.maxWidth = 1920;
this.scale.maxHeight = 1080;
this.scale.pageAlignVertically = true;
this.scale.pageAlignHorizontally = true;
}
create() {
//speler niet laten bewegen
speler.setCanPlayerMove(false);
//scale verhogen
speler.scale.set(4);
this.CreateBackground();
this.CreateEventListeners();
this.GetEnemies();
this.ShowPlayer();
//Battlemenu zichtbaar maken
this.BattleMenu.classList.remove("hidden");
var BackToOpenWorldTest = this.input.keyboard.addKey(Phaser.Keyboard.A);
BackToOpenWorldTest.onDown.add(this.CheckForBackToOpenWorld, this);
//Speler mag pas aanvallen zodra een skill geselecteerd is.
this.canAttack = false;
}
update() {
this.enemies.forEach((monster) => {
monster.tint = 0xffffff;
});
if (this.canAttack) {
//For loop zodat we er makkelijk uit kunnen breken zodra er een gehovert is.
for (var sprite of this.enemies) {
if (sprite.input.pointerOver()) {
sprite.tint = 0xff3333;
break;
}
}
}
}
shutdown() {
this.game.stage.backgroundColor = '#000';
this.BattleMenu.classList.add("hidden");
this.HideSkills();
this.HideItems();
//speler mag weer bewegen
speler.setCanPlayerMove(true);
//scaling weer goedzetten van speler. |
this.world.remove(speler);
this.world.remove(speler.getPortrait());
for (var i = 0; i < this.enemies.length; i++) {
//Element van wereld en enemies array verwijderen.
this.enemies.splice(i, 1);
}
//Hele state opschonen
this.world.removeAll();
}
GetEnemies() {
$.ajax({
url: "../Game/GetRandomMonsters",
type: "POST",
data: {},
success: this.LoadEnemyAssets.bind(this)
});
}
LoadEnemyAssets(data) {
this.game.load.onLoadComplete.addOnce(this.CreateEnemies.bind(this, data), this);
for (var i = 0; i < data.length; i++) {
//Asset inladen
this.game.load.image("enemy" + data[i].id, "../images/exiles/" + data[i].image_url);
}
//Laden starten
this.game.load.start();
//Callback voor als alle images geladen zijn
}
CreateEnemies(data) {
//Variabelen die per loop veranderen
var nextPriority = speler.GetPriority()
var startX = 1000;
var yPosition = 1080 - 150;
for (var i = 0; i < data.length; i++) {
//Nextpriority met een omhoog zetten
nextPriority++;
//Monster creeren, monsters hebben 50hp elk
var monster = new Unit(this.game, nextPriority, 1, 50 ,startX, yPosition, "enemy" + data[i].id);
monster.anchor.set(1, 1);
//Indien het monster niet de goede kant op kijkt, de sprite flippen, tevens scaled dit de sprite omlaag naar 40% van de normale lengte/breedte.
monster.scale.setTo(-0.6, 0.6);
startX += 250;
//Javascript is een goede taal btw haHAA
var MinimumMultiplier = 5;
var RandomMultiplier = Math.floor(Math.random() * (speler.getLevel() - MinimumMultiplier + 1)) + MinimumMultiplier;
//Random stats meegeven
monster.setLevel(1 * RandomMultiplier);
monster.setIntelligence(1 * RandomMultiplier);
monster.setStrength(1 * RandomMultiplier);
this.enemies.push(monster);
this.game.add.existing(monster);
}
//Queue updaten
this.createQueue();
this.updateVisualQueue();
//Listeners creeren
this.CreateMonsterListeners();
}
CreateEventListeners() {
this.AttackBtn.addEventListener('click', this.ShowSkills.bind(this));
this.ItemBtn.addEventListener('click', this.ShowItems.bind(this));
this.SkillsDiv.addEventListener('click', this.ChooseSkill.bind(this));
this.itemsDiv.addEventListener('click', this.ChooseItem.bind(this));
this.SkipBtn.addEventListener('click', this.HandleSkip.bind(this));
this.FleeBtn.addEventListener('click', this.HandleFlee.bind(this));
}
CreateMonsterListeners() {
for (var monster of this.enemies) {
monster.events.onInputDown.add(this.HandlePlayerAttack, this, 0, {target: monster});
}
}
CheckForBackToOpenWorld() {
this.BackToOpenWorld();
}
ShowPlayer() {
var playerX = 600;
speler.x = playerX;
speler.y = 1080 - 300;
console.log(speler.y);
this.game.physics.enable(speler, Phaser.Physics.ARCADE);
this.game.add.existing(speler);
}
CreateBackground() {
this.game.stage.backgroundColor = "#50695A";
var bg = this.game.add.sprite(0, 0, 'bg');
//Height verminderen zodat de image niet onder het battle menu komt te vallen.
bg.height = this.game.height - 66;
bg.anchor.set(0);
bg.scale.set(1);
}
ShowSkills() {
this.ActionsDiv.classList.add("hidden");
this.SkillsDiv.classList.remove("hidden");
}
HideSkills() {
this.SkillsDiv.classList.add("hidden");
this.ActionsDiv.classList.remove("hidden");
}
ShowItems() {
this.ActionsDiv.classList.add("hidden");
this.itemsDiv.classList.remove("hidden");
}
HideItems() {
this.itemsDiv.classList.add("hidden");
this.ActionsDiv.classList.remove("hidden");
}
ChooseItem(e) {
var naam = e.srcElement.innerText;
var item: Item;
item = new Item();
item.SetName(naam);
speler.GetInventory().UseItem(item);
this.NextTurn();
}
ChooseSkill(e) {
this.playerDamage = e.srcElement.childNodes[1].innerText;
this.canAttack = true;
}
HandlePlayerAttack(target: Unit) {
if (this.canAttack) {
this.canAttack = false;
this.attackingUnit = this.queue.poll();
this.attackingUnit.attack();
this.game.sound.play("attack");
target.SetCurrentHealth(target.GetCurrentHealth() - this.playerDamage);
console.log("Target Health : " + target.GetCurrentHealth());
this.game.sound.play("hurt");
target.UpdateHealthBar();
this.CheckForDeath(target);
}
}
HandleAiAttack(attacker: Unit, target: Unit) {
this.attackingUnit = attacker;
this.game.sound.play("attack");
//Random damage genereren voor AI. ai kunnen geen skills gebruiken.
var damage: number;
damage = this.game.rnd.integerInRange(0, 20);
target.SetCurrentHealth(target.GetCurrentHealth() - (attacker.getStrength() + damage));
console.log("Talk Shit, Get Hit' : Health = " + target.GetCurrentHealth().toString());
this.game.sound.play("hurt");
target.UpdateHealthBar();
this.CheckForDeath(target);
}
CheckForDeath(target: Unit) {
if (target.getDead()) {
if (target.name == "Player") {
this.LostBattle();
} else {
//Monster deleten uit lijst
var index: number;
index = this.enemies.indexOf(target);
this.enemies.splice(index, 1);
//Prioriteit verversen van enemies
this.enemies.forEach((monster) => {
if (monster.GetPriority() > target.GetPriority()) {
monster.SetPriority(monster.GetPriority() - 1);
}
});
//Controleren of er nog enemies leven;
if (this.enemies.length <= 0) {
this.WonBattle();
} else {
this.NextTurn();
}
}
//Uit queue verwijderen voor aanvallen
this.queue.remove(target);
this.queueGroup.remove(target.getPortrait());
//Vernietigen.
target.getPortrait().destroy();
target.destroy();
} else {
this.NextTurn();
}
}
HandleSkip() {
this.attackingUnit = speler;
this.NextTurn();
}
HandleFlee() {
this.attackingUnit = speler;
var randomNumber: number;
randomNumber = this.game.rnd.integerInRange(0, 100);
if (randomNumber > 75) {
//Fleeing successvol, terug naar open wereld.
this.BackToOpenWorld();
} else {
//turn skippen
this.NextTurn();
}
}
NextTurn() {
//prioriteit verhogen
this.attackingUnit.SetPriority(this.attackingUnit.GetPriority() + this.queue.getSize());
//Divs verbergen
this.HideSkills();
this.HideItems();
this.game.time.events.add(1000 * ANIM_SPEED, () => {
//na een delay de queue updaten
this.updateQueue()
})
}
WonBattle() {
var charactername: string;
charactername = $("#inventory").find(".gameh1").text().split(" - ")[1];
$.ajax({
context: this,
url: "../Character/generateVictoryItems",
type: "POST",
data: { charactername: charactername },
success: function (data) {
if (data === true) {
//Nieuwe items gekregen, melding geven aan speler
console.log("ITEMS GEKREGEN WOO");
//Inventory refreshen
refreshInventory();
}
//Terug naar andere state
this.BackToOpenWorld();
}
});
}
LostBattle() {
this.game.state.start('MainMenu');
}
BackToOpenWorld() {
this.game.state.start('GameState');
}
createQueue() {
this.queue = new AttackTurnQueue();
//Portraits en healthbars creeren
speler.createPortrait(this.game);
speler.CreateHealthBar(this.game);
this.enemies.forEach((enemy) => enemy.createPortrait(this.game));
this.enemies.forEach((enemy) => enemy.CreateHealthBar(this.game));
//speler en enemies toevoegen aan queue
this.queue.add(speler);
this.enemies.forEach((enemy) => this.queue.add(enemy));
//Queue verversen met nieuwe waardes
this.queue.updateQueue();
//alle portraits in een queue zetten, zodat we ze gezamelijk kunnen verschuiven
this.queueGroup = this.add.group()
this.queueGroup.add(speler.getPortrait());
this.enemies.forEach((enemy) => this.queueGroup.add(enemy.getPortrait()))
//Boven in het scherm plaatsen
this.queueGroup.y = 50
var portraitWidth = this.queueGroup.children[0].width + 5
var totalWidth = portraitWidth * this.queueGroup.children.length
//centreren op het scherm op basis van de width
this.queueGroup.x = (this.game.camera.x + (this.game.width / 2)) - totalWidth / 2
}
updateQueue() {
//Prioriteit van monsters en spelers updaten.
this.enemies.forEach((monster) => {
monster.SetPriority(Phaser.Math.max(1, monster.GetPriority() - monster.getSpeed()));
})
speler.SetPriority(Phaser.Math.max(1, speler.GetPriority() - speler.getSpeed()));
//oude attacking unit weer toevoegen
this.queue.add(this.attackingUnit)
//queue updaten
this.queue.updateQueue()
//visuele queue updaten op scherm
this.updateVisualQueue()
//kijken of speler de volgende is
if (!this.queue.peek().name.includes('Player')) {
this.BattleMenu.classList.add("hidden");
this.HandleAiAttack(this.queue.poll(), speler);
} else {
//spelermag aanvallen
this.BattleMenu.classList.remove("hidden");
}
}
updateVisualQueue() {
//Queue width etc updaten
var portraitWidth = this.queueGroup.children[0].width + 5
var totalWidth = portraitWidth * this.queueGroup.children.length
//centreren op het scherm op basis van de width
this.queueGroup.x = (this.game.camera.x + (this.game.width / 2)) - totalWidth / 2
for (var i = 0; i < this.queue.getArray().length; i++) {
//unit opvragen en portret updaten
var unit = this.queue.getArray()[i].value;
var portrait = unit.getPortrait();
//margin toevoegen om ze uit elkaar te houden
var posx = i * (portrait.width + 5)
//animatie toevoegen voor aanpassing
if (portrait.alpha == 0) {
portrait.x = posx
this.game.add.tween(portrait).to({ alpha: 1 }, 500 * ANIM_SPEED, Phaser.Easing.Linear.None, true)
} else {
this.game.add.tween(portrait).to({ x: posx }, 300 * ANIM_SPEED, Phaser.Easing.Linear.None, true)
}
//Prioriteit nummer weergeven
portrait.text.setText(' ' + unit.Priority + ' ')
}
}
}
} | speler.scale.set(2); | random_line_split |
BattleState.ts | /// <reference path="../lib/Phaser/phaser.d.ts"/>
/// <reference path="../lib/Phaser/phaser-tiled.d.ts"/>
module RpgGame {
declare function refreshInventory(): void;
export class BattleState extends Phaser.State {
//Turn based queue systeem
private queue: AttackTurnQueue;
private queueGroup: any;
//ENEMY
private enemies: Unit[];
private attackingUnit: Unit;
//Input handelen
private canAttack: boolean;
//Damage van gekozen skill van speler opslaan
private playerDamage: number;
//DOM ELEMENTEN
private ActionsDiv: HTMLElement;
private SkillsDiv: HTMLElement;
private itemsDiv: HTMLElement;
private BattleMenu: HTMLElement;
private AttackBtn: HTMLElement;
private ItemBtn: HTMLElement;
private SkipBtn: HTMLElement;
private FleeBtn: HTMLElement;
preload() {
this.game.load.image('bg', '../sprites/backgrounds/battle_background.png');
this.game.load.audio("attack", "../Media/Battle/attack.mp3");
this.game.load.audio("hurt", "../Media/Battle/hit.mp3");
this.BattleMenu = document.getElementById("battlemenu");
this.ActionsDiv = document.getElementById("actions");
this.SkillsDiv = document.getElementById("skillslist");
this.itemsDiv = document.getElementById("itemslist");
this.AttackBtn = document.getElementById("attackbtn");
this.ItemBtn = document.getElementById("itembtn");
this.SkipBtn = document.getElementById("skipbtn");
this.FleeBtn = document.getElementById("fleebtn");
this.enemies = new Array<Unit>();
this.scale.fullScreenScaleMode = Phaser.ScaleManager.SHOW_ALL;
this.scale.scaleMode = Phaser.ScaleManager.SHOW_ALL;
this.scale.minWidth = 320;
this.scale.minHeight = 260;
this.scale.maxWidth = 1920;
this.scale.maxHeight = 1080;
this.scale.pageAlignVertically = true;
this.scale.pageAlignHorizontally = true;
}
create() {
//speler niet laten bewegen
speler.setCanPlayerMove(false);
//scale verhogen
speler.scale.set(4);
this.CreateBackground();
this.CreateEventListeners();
this.GetEnemies();
this.ShowPlayer();
//Battlemenu zichtbaar maken
this.BattleMenu.classList.remove("hidden");
var BackToOpenWorldTest = this.input.keyboard.addKey(Phaser.Keyboard.A);
BackToOpenWorldTest.onDown.add(this.CheckForBackToOpenWorld, this);
//Speler mag pas aanvallen zodra een skill geselecteerd is.
this.canAttack = false;
}
update() {
this.enemies.forEach((monster) => {
monster.tint = 0xffffff;
});
if (this.canAttack) {
//For loop zodat we er makkelijk uit kunnen breken zodra er een gehovert is.
for (var sprite of this.enemies) {
if (sprite.input.pointerOver()) {
sprite.tint = 0xff3333;
break;
}
}
}
}
shutdown() {
this.game.stage.backgroundColor = '#000';
this.BattleMenu.classList.add("hidden");
this.HideSkills();
this.HideItems();
//speler mag weer bewegen
speler.setCanPlayerMove(true);
//scaling weer goedzetten van speler.
speler.scale.set(2);
this.world.remove(speler);
this.world.remove(speler.getPortrait());
for (var i = 0; i < this.enemies.length; i++) {
//Element van wereld en enemies array verwijderen.
this.enemies.splice(i, 1);
}
//Hele state opschonen
this.world.removeAll();
}
GetEnemies() {
$.ajax({
url: "../Game/GetRandomMonsters",
type: "POST",
data: {},
success: this.LoadEnemyAssets.bind(this)
});
}
LoadEnemyAssets(data) {
this.game.load.onLoadComplete.addOnce(this.CreateEnemies.bind(this, data), this);
for (var i = 0; i < data.length; i++) {
//Asset inladen
this.game.load.image("enemy" + data[i].id, "../images/exiles/" + data[i].image_url);
}
//Laden starten
this.game.load.start();
//Callback voor als alle images geladen zijn
}
CreateEnemies(data) {
//Variabelen die per loop veranderen
var nextPriority = speler.GetPriority()
var startX = 1000;
var yPosition = 1080 - 150;
for (var i = 0; i < data.length; i++) {
//Nextpriority met een omhoog zetten
nextPriority++;
//Monster creeren, monsters hebben 50hp elk
var monster = new Unit(this.game, nextPriority, 1, 50 ,startX, yPosition, "enemy" + data[i].id);
monster.anchor.set(1, 1);
//Indien het monster niet de goede kant op kijkt, de sprite flippen, tevens scaled dit de sprite omlaag naar 40% van de normale lengte/breedte.
monster.scale.setTo(-0.6, 0.6);
startX += 250;
//Javascript is een goede taal btw haHAA
var MinimumMultiplier = 5;
var RandomMultiplier = Math.floor(Math.random() * (speler.getLevel() - MinimumMultiplier + 1)) + MinimumMultiplier;
//Random stats meegeven
monster.setLevel(1 * RandomMultiplier);
monster.setIntelligence(1 * RandomMultiplier);
monster.setStrength(1 * RandomMultiplier);
this.enemies.push(monster);
this.game.add.existing(monster);
}
//Queue updaten
this.createQueue();
this.updateVisualQueue();
//Listeners creeren
this.CreateMonsterListeners();
}
CreateEventListeners() {
this.AttackBtn.addEventListener('click', this.ShowSkills.bind(this));
this.ItemBtn.addEventListener('click', this.ShowItems.bind(this));
this.SkillsDiv.addEventListener('click', this.ChooseSkill.bind(this));
this.itemsDiv.addEventListener('click', this.ChooseItem.bind(this));
this.SkipBtn.addEventListener('click', this.HandleSkip.bind(this));
this.FleeBtn.addEventListener('click', this.HandleFlee.bind(this));
}
CreateMonsterListeners() {
for (var monster of this.enemies) {
monster.events.onInputDown.add(this.HandlePlayerAttack, this, 0, {target: monster});
}
}
CheckForBackToOpenWorld() {
this.BackToOpenWorld();
}
ShowPlayer() {
var playerX = 600;
speler.x = playerX;
speler.y = 1080 - 300;
console.log(speler.y);
this.game.physics.enable(speler, Phaser.Physics.ARCADE);
this.game.add.existing(speler);
}
CreateBackground() {
this.game.stage.backgroundColor = "#50695A";
var bg = this.game.add.sprite(0, 0, 'bg');
//Height verminderen zodat de image niet onder het battle menu komt te vallen.
bg.height = this.game.height - 66;
bg.anchor.set(0);
bg.scale.set(1);
}
ShowSkills() {
this.ActionsDiv.classList.add("hidden");
this.SkillsDiv.classList.remove("hidden");
}
HideSkills() {
this.SkillsDiv.classList.add("hidden");
this.ActionsDiv.classList.remove("hidden");
}
ShowItems() {
this.ActionsDiv.classList.add("hidden");
this.itemsDiv.classList.remove("hidden");
}
HideItems() {
this.itemsDiv.classList.add("hidden");
this.ActionsDiv.classList.remove("hidden");
}
ChooseItem(e) {
var naam = e.srcElement.innerText;
var item: Item;
item = new Item();
item.SetName(naam);
speler.GetInventory().UseItem(item);
this.NextTurn();
}
ChooseSkill(e) {
this.playerDamage = e.srcElement.childNodes[1].innerText;
this.canAttack = true;
}
HandlePlayerAttack(target: Unit) {
if (this.canAttack) {
this.canAttack = false;
this.attackingUnit = this.queue.poll();
this.attackingUnit.attack();
this.game.sound.play("attack");
target.SetCurrentHealth(target.GetCurrentHealth() - this.playerDamage);
console.log("Target Health : " + target.GetCurrentHealth());
this.game.sound.play("hurt");
target.UpdateHealthBar();
this.CheckForDeath(target);
}
}
HandleAiAttack(attacker: Unit, target: Unit) {
this.attackingUnit = attacker;
this.game.sound.play("attack");
//Random damage genereren voor AI. ai kunnen geen skills gebruiken.
var damage: number;
damage = this.game.rnd.integerInRange(0, 20);
target.SetCurrentHealth(target.GetCurrentHealth() - (attacker.getStrength() + damage));
console.log("Talk Shit, Get Hit' : Health = " + target.GetCurrentHealth().toString());
this.game.sound.play("hurt");
target.UpdateHealthBar();
this.CheckForDeath(target);
}
CheckForDeath(target: Unit) {
if (target.getDead()) {
if (target.name == "Player") {
this.LostBattle();
} else {
//Monster deleten uit lijst
var index: number;
index = this.enemies.indexOf(target);
this.enemies.splice(index, 1);
//Prioriteit verversen van enemies
this.enemies.forEach((monster) => {
if (monster.GetPriority() > target.GetPriority()) {
monster.SetPriority(monster.GetPriority() - 1);
}
});
//Controleren of er nog enemies leven;
if (this.enemies.length <= 0) {
this.WonBattle();
} else {
this.NextTurn();
}
}
//Uit queue verwijderen voor aanvallen
this.queue.remove(target);
this.queueGroup.remove(target.getPortrait());
//Vernietigen.
target.getPortrait().destroy();
target.destroy();
} else {
| }
HandleSkip() {
this.attackingUnit = speler;
this.NextTurn();
}
HandleFlee() {
this.attackingUnit = speler;
var randomNumber: number;
randomNumber = this.game.rnd.integerInRange(0, 100);
if (randomNumber > 75) {
//Fleeing successvol, terug naar open wereld.
this.BackToOpenWorld();
} else {
//turn skippen
this.NextTurn();
}
}
NextTurn() {
//prioriteit verhogen
this.attackingUnit.SetPriority(this.attackingUnit.GetPriority() + this.queue.getSize());
//Divs verbergen
this.HideSkills();
this.HideItems();
this.game.time.events.add(1000 * ANIM_SPEED, () => {
//na een delay de queue updaten
this.updateQueue()
})
}
WonBattle() {
var charactername: string;
charactername = $("#inventory").find(".gameh1").text().split(" - ")[1];
$.ajax({
context: this,
url: "../Character/generateVictoryItems",
type: "POST",
data: { charactername: charactername },
success: function (data) {
if (data === true) {
//Nieuwe items gekregen, melding geven aan speler
console.log("ITEMS GEKREGEN WOO");
//Inventory refreshen
refreshInventory();
}
//Terug naar andere state
this.BackToOpenWorld();
}
});
}
LostBattle() {
this.game.state.start('MainMenu');
}
BackToOpenWorld() {
this.game.state.start('GameState');
}
createQueue() {
this.queue = new AttackTurnQueue();
//Portraits en healthbars creeren
speler.createPortrait(this.game);
speler.CreateHealthBar(this.game);
this.enemies.forEach((enemy) => enemy.createPortrait(this.game));
this.enemies.forEach((enemy) => enemy.CreateHealthBar(this.game));
//speler en enemies toevoegen aan queue
this.queue.add(speler);
this.enemies.forEach((enemy) => this.queue.add(enemy));
//Queue verversen met nieuwe waardes
this.queue.updateQueue();
//alle portraits in een queue zetten, zodat we ze gezamelijk kunnen verschuiven
this.queueGroup = this.add.group()
this.queueGroup.add(speler.getPortrait());
this.enemies.forEach((enemy) => this.queueGroup.add(enemy.getPortrait()))
//Boven in het scherm plaatsen
this.queueGroup.y = 50
var portraitWidth = this.queueGroup.children[0].width + 5
var totalWidth = portraitWidth * this.queueGroup.children.length
//centreren op het scherm op basis van de width
this.queueGroup.x = (this.game.camera.x + (this.game.width / 2)) - totalWidth / 2
}
updateQueue() {
//Prioriteit van monsters en spelers updaten.
this.enemies.forEach((monster) => {
monster.SetPriority(Phaser.Math.max(1, monster.GetPriority() - monster.getSpeed()));
})
speler.SetPriority(Phaser.Math.max(1, speler.GetPriority() - speler.getSpeed()));
//oude attacking unit weer toevoegen
this.queue.add(this.attackingUnit)
//queue updaten
this.queue.updateQueue()
//visuele queue updaten op scherm
this.updateVisualQueue()
//kijken of speler de volgende is
if (!this.queue.peek().name.includes('Player')) {
this.BattleMenu.classList.add("hidden");
this.HandleAiAttack(this.queue.poll(), speler);
} else {
//spelermag aanvallen
this.BattleMenu.classList.remove("hidden");
}
}
updateVisualQueue() {
//Queue width etc updaten
var portraitWidth = this.queueGroup.children[0].width + 5
var totalWidth = portraitWidth * this.queueGroup.children.length
//centreren op het scherm op basis van de width
this.queueGroup.x = (this.game.camera.x + (this.game.width / 2)) - totalWidth / 2
for (var i = 0; i < this.queue.getArray().length; i++) {
//unit opvragen en portret updaten
var unit = this.queue.getArray()[i].value;
var portrait = unit.getPortrait();
//margin toevoegen om ze uit elkaar te houden
var posx = i * (portrait.width + 5)
//animatie toevoegen voor aanpassing
if (portrait.alpha == 0) {
portrait.x = posx
this.game.add.tween(portrait).to({ alpha: 1 }, 500 * ANIM_SPEED, Phaser.Easing.Linear.None, true)
} else {
this.game.add.tween(portrait).to({ x: posx }, 300 * ANIM_SPEED, Phaser.Easing.Linear.None, true)
}
//Prioriteit nummer weergeven
portrait.text.setText(' ' + unit.Priority + ' ')
}
}
}
} | this.NextTurn();
}
| conditional_block |
BattleState.ts | /// <reference path="../lib/Phaser/phaser.d.ts"/>
/// <reference path="../lib/Phaser/phaser-tiled.d.ts"/>
module RpgGame {
declare function refreshInventory(): void;
export class BattleState extends Phaser.State {
//Turn based queue systeem
private queue: AttackTurnQueue;
private queueGroup: any;
//ENEMY
private enemies: Unit[];
private attackingUnit: Unit;
//Input handelen
private canAttack: boolean;
//Damage van gekozen skill van speler opslaan
private playerDamage: number;
//DOM ELEMENTEN
private ActionsDiv: HTMLElement;
private SkillsDiv: HTMLElement;
private itemsDiv: HTMLElement;
private BattleMenu: HTMLElement;
private AttackBtn: HTMLElement;
private ItemBtn: HTMLElement;
private SkipBtn: HTMLElement;
private FleeBtn: HTMLElement;
preload() {
this.game.load.image('bg', '../sprites/backgrounds/battle_background.png');
this.game.load.audio("attack", "../Media/Battle/attack.mp3");
this.game.load.audio("hurt", "../Media/Battle/hit.mp3");
this.BattleMenu = document.getElementById("battlemenu");
this.ActionsDiv = document.getElementById("actions");
this.SkillsDiv = document.getElementById("skillslist");
this.itemsDiv = document.getElementById("itemslist");
this.AttackBtn = document.getElementById("attackbtn");
this.ItemBtn = document.getElementById("itembtn");
this.SkipBtn = document.getElementById("skipbtn");
this.FleeBtn = document.getElementById("fleebtn");
this.enemies = new Array<Unit>();
this.scale.fullScreenScaleMode = Phaser.ScaleManager.SHOW_ALL;
this.scale.scaleMode = Phaser.ScaleManager.SHOW_ALL;
this.scale.minWidth = 320;
this.scale.minHeight = 260;
this.scale.maxWidth = 1920;
this.scale.maxHeight = 1080;
this.scale.pageAlignVertically = true;
this.scale.pageAlignHorizontally = true;
}
create() {
//speler niet laten bewegen
speler.setCanPlayerMove(false);
//scale verhogen
speler.scale.set(4);
this.CreateBackground();
this.CreateEventListeners();
this.GetEnemies();
this.ShowPlayer();
//Battlemenu zichtbaar maken
this.BattleMenu.classList.remove("hidden");
var BackToOpenWorldTest = this.input.keyboard.addKey(Phaser.Keyboard.A);
BackToOpenWorldTest.onDown.add(this.CheckForBackToOpenWorld, this);
//Speler mag pas aanvallen zodra een skill geselecteerd is.
this.canAttack = false;
}
update() {
this.enemies.forEach((monster) => {
monster.tint = 0xffffff;
});
if (this.canAttack) {
//For loop zodat we er makkelijk uit kunnen breken zodra er een gehovert is.
for (var sprite of this.enemies) {
if (sprite.input.pointerOver()) {
sprite.tint = 0xff3333;
break;
}
}
}
}
shutdown() {
this.game.stage.backgroundColor = '#000';
this.BattleMenu.classList.add("hidden");
this.HideSkills();
this.HideItems();
//speler mag weer bewegen
speler.setCanPlayerMove(true);
//scaling weer goedzetten van speler.
speler.scale.set(2);
this.world.remove(speler);
this.world.remove(speler.getPortrait());
for (var i = 0; i < this.enemies.length; i++) {
//Element van wereld en enemies array verwijderen.
this.enemies.splice(i, 1);
}
//Hele state opschonen
this.world.removeAll();
}
GetEnemies() {
$.ajax({
url: "../Game/GetRandomMonsters",
type: "POST",
data: {},
success: this.LoadEnemyAssets.bind(this)
});
}
LoadEnemyAssets(data) {
this.game.load.onLoadComplete.addOnce(this.CreateEnemies.bind(this, data), this);
for (var i = 0; i < data.length; i++) {
//Asset inladen
this.game.load.image("enemy" + data[i].id, "../images/exiles/" + data[i].image_url);
}
//Laden starten
this.game.load.start();
//Callback voor als alle images geladen zijn
}
CreateEnemies(data) {
//Variabelen die per loop veranderen
var nextPriority = speler.GetPriority()
var startX = 1000;
var yPosition = 1080 - 150;
for (var i = 0; i < data.length; i++) {
//Nextpriority met een omhoog zetten
nextPriority++;
//Monster creeren, monsters hebben 50hp elk
var monster = new Unit(this.game, nextPriority, 1, 50 ,startX, yPosition, "enemy" + data[i].id);
monster.anchor.set(1, 1);
//Indien het monster niet de goede kant op kijkt, de sprite flippen, tevens scaled dit de sprite omlaag naar 40% van de normale lengte/breedte.
monster.scale.setTo(-0.6, 0.6);
startX += 250;
//Javascript is een goede taal btw haHAA
var MinimumMultiplier = 5;
var RandomMultiplier = Math.floor(Math.random() * (speler.getLevel() - MinimumMultiplier + 1)) + MinimumMultiplier;
//Random stats meegeven
monster.setLevel(1 * RandomMultiplier);
monster.setIntelligence(1 * RandomMultiplier);
monster.setStrength(1 * RandomMultiplier);
this.enemies.push(monster);
this.game.add.existing(monster);
}
//Queue updaten
this.createQueue();
this.updateVisualQueue();
//Listeners creeren
this.CreateMonsterListeners();
}
CreateEventListeners() {
this.AttackBtn.addEventListener('click', this.ShowSkills.bind(this));
this.ItemBtn.addEventListener('click', this.ShowItems.bind(this));
this.SkillsDiv.addEventListener('click', this.ChooseSkill.bind(this));
this.itemsDiv.addEventListener('click', this.ChooseItem.bind(this));
this.SkipBtn.addEventListener('click', this.HandleSkip.bind(this));
this.FleeBtn.addEventListener('click', this.HandleFlee.bind(this));
}
CreateMonsterListeners() {
for (var monster of this.enemies) {
monster.events.onInputDown.add(this.HandlePlayerAttack, this, 0, {target: monster});
}
}
CheckForBackToOpenWorld() {
this.BackToOpenWorld();
}
ShowPlayer() {
var playerX = 600;
speler.x = playerX;
speler.y = 1080 - 300;
console.log(speler.y);
this.game.physics.enable(speler, Phaser.Physics.ARCADE);
this.game.add.existing(speler);
}
CreateBackground() {
this.game.stage.backgroundColor = "#50695A";
var bg = this.game.add.sprite(0, 0, 'bg');
//Height verminderen zodat de image niet onder het battle menu komt te vallen.
bg.height = this.game.height - 66;
bg.anchor.set(0);
bg.scale.set(1);
}
ShowSkills() {
this.ActionsDiv.classList.add("hidden");
this.SkillsDiv.classList.remove("hidden");
}
HideSkills() {
this.SkillsDiv.classList.add("hidden");
this.ActionsDiv.classList.remove("hidden");
}
ShowItems() {
this.ActionsDiv.classList.add("hidden");
this.itemsDiv.classList.remove("hidden");
}
HideItems() {
this.itemsDiv.classList.add("hidden");
this.ActionsDiv.classList.remove("hidden");
}
ChooseItem(e) {
var naam = e.srcElement.innerText;
var item: Item;
item = new Item();
item.SetName(naam);
speler.GetInventory().UseItem(item);
this.NextTurn();
}
ChooseSkill(e) {
this.playerDamage = e.srcElement.childNodes[1].innerText;
this.canAttack = true;
}
HandlePlayerAttack(target: Unit) {
| HandleAiAttack(attacker: Unit, target: Unit) {
this.attackingUnit = attacker;
this.game.sound.play("attack");
//Random damage genereren voor AI. ai kunnen geen skills gebruiken.
var damage: number;
damage = this.game.rnd.integerInRange(0, 20);
target.SetCurrentHealth(target.GetCurrentHealth() - (attacker.getStrength() + damage));
console.log("Talk Shit, Get Hit' : Health = " + target.GetCurrentHealth().toString());
this.game.sound.play("hurt");
target.UpdateHealthBar();
this.CheckForDeath(target);
}
CheckForDeath(target: Unit) {
if (target.getDead()) {
if (target.name == "Player") {
this.LostBattle();
} else {
//Monster deleten uit lijst
var index: number;
index = this.enemies.indexOf(target);
this.enemies.splice(index, 1);
//Prioriteit verversen van enemies
this.enemies.forEach((monster) => {
if (monster.GetPriority() > target.GetPriority()) {
monster.SetPriority(monster.GetPriority() - 1);
}
});
//Controleren of er nog enemies leven;
if (this.enemies.length <= 0) {
this.WonBattle();
} else {
this.NextTurn();
}
}
//Uit queue verwijderen voor aanvallen
this.queue.remove(target);
this.queueGroup.remove(target.getPortrait());
//Vernietigen.
target.getPortrait().destroy();
target.destroy();
} else {
this.NextTurn();
}
}
HandleSkip() {
this.attackingUnit = speler;
this.NextTurn();
}
HandleFlee() {
this.attackingUnit = speler;
var randomNumber: number;
randomNumber = this.game.rnd.integerInRange(0, 100);
if (randomNumber > 75) {
//Fleeing successvol, terug naar open wereld.
this.BackToOpenWorld();
} else {
//turn skippen
this.NextTurn();
}
}
NextTurn() {
//prioriteit verhogen
this.attackingUnit.SetPriority(this.attackingUnit.GetPriority() + this.queue.getSize());
//Divs verbergen
this.HideSkills();
this.HideItems();
this.game.time.events.add(1000 * ANIM_SPEED, () => {
//na een delay de queue updaten
this.updateQueue()
})
}
WonBattle() {
var charactername: string;
charactername = $("#inventory").find(".gameh1").text().split(" - ")[1];
$.ajax({
context: this,
url: "../Character/generateVictoryItems",
type: "POST",
data: { charactername: charactername },
success: function (data) {
if (data === true) {
//Nieuwe items gekregen, melding geven aan speler
console.log("ITEMS GEKREGEN WOO");
//Inventory refreshen
refreshInventory();
}
//Terug naar andere state
this.BackToOpenWorld();
}
});
}
LostBattle() {
this.game.state.start('MainMenu');
}
BackToOpenWorld() {
this.game.state.start('GameState');
}
createQueue() {
this.queue = new AttackTurnQueue();
//Portraits en healthbars creeren
speler.createPortrait(this.game);
speler.CreateHealthBar(this.game);
this.enemies.forEach((enemy) => enemy.createPortrait(this.game));
this.enemies.forEach((enemy) => enemy.CreateHealthBar(this.game));
//speler en enemies toevoegen aan queue
this.queue.add(speler);
this.enemies.forEach((enemy) => this.queue.add(enemy));
//Queue verversen met nieuwe waardes
this.queue.updateQueue();
//alle portraits in een queue zetten, zodat we ze gezamelijk kunnen verschuiven
this.queueGroup = this.add.group()
this.queueGroup.add(speler.getPortrait());
this.enemies.forEach((enemy) => this.queueGroup.add(enemy.getPortrait()))
//Boven in het scherm plaatsen
this.queueGroup.y = 50
var portraitWidth = this.queueGroup.children[0].width + 5
var totalWidth = portraitWidth * this.queueGroup.children.length
//centreren op het scherm op basis van de width
this.queueGroup.x = (this.game.camera.x + (this.game.width / 2)) - totalWidth / 2
}
updateQueue() {
//Prioriteit van monsters en spelers updaten.
this.enemies.forEach((monster) => {
monster.SetPriority(Phaser.Math.max(1, monster.GetPriority() - monster.getSpeed()));
})
speler.SetPriority(Phaser.Math.max(1, speler.GetPriority() - speler.getSpeed()));
//oude attacking unit weer toevoegen
this.queue.add(this.attackingUnit)
//queue updaten
this.queue.updateQueue()
//visuele queue updaten op scherm
this.updateVisualQueue()
//kijken of speler de volgende is
if (!this.queue.peek().name.includes('Player')) {
this.BattleMenu.classList.add("hidden");
this.HandleAiAttack(this.queue.poll(), speler);
} else {
//spelermag aanvallen
this.BattleMenu.classList.remove("hidden");
}
}
updateVisualQueue() {
//Queue width etc updaten
var portraitWidth = this.queueGroup.children[0].width + 5
var totalWidth = portraitWidth * this.queueGroup.children.length
//centreren op het scherm op basis van de width
this.queueGroup.x = (this.game.camera.x + (this.game.width / 2)) - totalWidth / 2
for (var i = 0; i < this.queue.getArray().length; i++) {
//unit opvragen en portret updaten
var unit = this.queue.getArray()[i].value;
var portrait = unit.getPortrait();
//margin toevoegen om ze uit elkaar te houden
var posx = i * (portrait.width + 5)
//animatie toevoegen voor aanpassing
if (portrait.alpha == 0) {
portrait.x = posx
this.game.add.tween(portrait).to({ alpha: 1 }, 500 * ANIM_SPEED, Phaser.Easing.Linear.None, true)
} else {
this.game.add.tween(portrait).to({ x: posx }, 300 * ANIM_SPEED, Phaser.Easing.Linear.None, true)
}
//Prioriteit nummer weergeven
portrait.text.setText(' ' + unit.Priority + ' ')
}
}
}
} | if (this.canAttack) {
this.canAttack = false;
this.attackingUnit = this.queue.poll();
this.attackingUnit.attack();
this.game.sound.play("attack");
target.SetCurrentHealth(target.GetCurrentHealth() - this.playerDamage);
console.log("Target Health : " + target.GetCurrentHealth());
this.game.sound.play("hurt");
target.UpdateHealthBar();
this.CheckForDeath(target);
}
}
| identifier_body |
BattleState.ts | /// <reference path="../lib/Phaser/phaser.d.ts"/>
/// <reference path="../lib/Phaser/phaser-tiled.d.ts"/>
module RpgGame {
declare function refreshInventory(): void;
export class BattleState extends Phaser.State {
//Turn based queue systeem
private queue: AttackTurnQueue;
private queueGroup: any;
//ENEMY
private enemies: Unit[];
private attackingUnit: Unit;
//Input handelen
private canAttack: boolean;
//Damage van gekozen skill van speler opslaan
private playerDamage: number;
//DOM ELEMENTEN
private ActionsDiv: HTMLElement;
private SkillsDiv: HTMLElement;
private itemsDiv: HTMLElement;
private BattleMenu: HTMLElement;
private AttackBtn: HTMLElement;
private ItemBtn: HTMLElement;
private SkipBtn: HTMLElement;
private FleeBtn: HTMLElement;
preload() {
this.game.load.image('bg', '../sprites/backgrounds/battle_background.png');
this.game.load.audio("attack", "../Media/Battle/attack.mp3");
this.game.load.audio("hurt", "../Media/Battle/hit.mp3");
this.BattleMenu = document.getElementById("battlemenu");
this.ActionsDiv = document.getElementById("actions");
this.SkillsDiv = document.getElementById("skillslist");
this.itemsDiv = document.getElementById("itemslist");
this.AttackBtn = document.getElementById("attackbtn");
this.ItemBtn = document.getElementById("itembtn");
this.SkipBtn = document.getElementById("skipbtn");
this.FleeBtn = document.getElementById("fleebtn");
this.enemies = new Array<Unit>();
this.scale.fullScreenScaleMode = Phaser.ScaleManager.SHOW_ALL;
this.scale.scaleMode = Phaser.ScaleManager.SHOW_ALL;
this.scale.minWidth = 320;
this.scale.minHeight = 260;
this.scale.maxWidth = 1920;
this.scale.maxHeight = 1080;
this.scale.pageAlignVertically = true;
this.scale.pageAlignHorizontally = true;
}
create() {
//speler niet laten bewegen
speler.setCanPlayerMove(false);
//scale verhogen
speler.scale.set(4);
this.CreateBackground();
this.CreateEventListeners();
this.GetEnemies();
this.ShowPlayer();
//Battlemenu zichtbaar maken
this.BattleMenu.classList.remove("hidden");
var BackToOpenWorldTest = this.input.keyboard.addKey(Phaser.Keyboard.A);
BackToOpenWorldTest.onDown.add(this.CheckForBackToOpenWorld, this);
//Speler mag pas aanvallen zodra een skill geselecteerd is.
this.canAttack = false;
}
update() {
this.enemies.forEach((monster) => {
monster.tint = 0xffffff;
});
if (this.canAttack) {
//For loop zodat we er makkelijk uit kunnen breken zodra er een gehovert is.
for (var sprite of this.enemies) {
if (sprite.input.pointerOver()) {
sprite.tint = 0xff3333;
break;
}
}
}
}
shutdown() {
this.game.stage.backgroundColor = '#000';
this.BattleMenu.classList.add("hidden");
this.HideSkills();
this.HideItems();
//speler mag weer bewegen
speler.setCanPlayerMove(true);
//scaling weer goedzetten van speler.
speler.scale.set(2);
this.world.remove(speler);
this.world.remove(speler.getPortrait());
for (var i = 0; i < this.enemies.length; i++) {
//Element van wereld en enemies array verwijderen.
this.enemies.splice(i, 1);
}
//Hele state opschonen
this.world.removeAll();
}
GetEnemies() {
$.ajax({
url: "../Game/GetRandomMonsters",
type: "POST",
data: {},
success: this.LoadEnemyAssets.bind(this)
});
}
LoadEnemyAssets(data) {
this.game.load.onLoadComplete.addOnce(this.CreateEnemies.bind(this, data), this);
for (var i = 0; i < data.length; i++) {
//Asset inladen
this.game.load.image("enemy" + data[i].id, "../images/exiles/" + data[i].image_url);
}
//Laden starten
this.game.load.start();
//Callback voor als alle images geladen zijn
}
CreateEnemies(data) {
//Variabelen die per loop veranderen
var nextPriority = speler.GetPriority()
var startX = 1000;
var yPosition = 1080 - 150;
for (var i = 0; i < data.length; i++) {
//Nextpriority met een omhoog zetten
nextPriority++;
//Monster creeren, monsters hebben 50hp elk
var monster = new Unit(this.game, nextPriority, 1, 50 ,startX, yPosition, "enemy" + data[i].id);
monster.anchor.set(1, 1);
//Indien het monster niet de goede kant op kijkt, de sprite flippen, tevens scaled dit de sprite omlaag naar 40% van de normale lengte/breedte.
monster.scale.setTo(-0.6, 0.6);
startX += 250;
//Javascript is een goede taal btw haHAA
var MinimumMultiplier = 5;
var RandomMultiplier = Math.floor(Math.random() * (speler.getLevel() - MinimumMultiplier + 1)) + MinimumMultiplier;
//Random stats meegeven
monster.setLevel(1 * RandomMultiplier);
monster.setIntelligence(1 * RandomMultiplier);
monster.setStrength(1 * RandomMultiplier);
this.enemies.push(monster);
this.game.add.existing(monster);
}
//Queue updaten
this.createQueue();
this.updateVisualQueue();
//Listeners creeren
this.CreateMonsterListeners();
}
CreateEventListeners() {
this.AttackBtn.addEventListener('click', this.ShowSkills.bind(this));
this.ItemBtn.addEventListener('click', this.ShowItems.bind(this));
this.SkillsDiv.addEventListener('click', this.ChooseSkill.bind(this));
this.itemsDiv.addEventListener('click', this.ChooseItem.bind(this));
this.SkipBtn.addEventListener('click', this.HandleSkip.bind(this));
this.FleeBtn.addEventListener('click', this.HandleFlee.bind(this));
}
CreateMonsterListeners() {
for (var monster of this.enemies) {
monster.events.onInputDown.add(this.HandlePlayerAttack, this, 0, {target: monster});
}
}
CheckForBackToOpenWorld() {
this.BackToOpenWorld();
}
ShowPlayer() {
var playerX = 600;
speler.x = playerX;
speler.y = 1080 - 300;
console.log(speler.y);
this.game.physics.enable(speler, Phaser.Physics.ARCADE);
this.game.add.existing(speler);
}
CreateBackground() {
this.game.stage.backgroundColor = "#50695A";
var bg = this.game.add.sprite(0, 0, 'bg');
//Height verminderen zodat de image niet onder het battle menu komt te vallen.
bg.height = this.game.height - 66;
bg.anchor.set(0);
bg.scale.set(1);
}
ShowSkills() {
this.ActionsDiv.classList.add("hidden");
this.SkillsDiv.classList.remove("hidden");
}
HideSkills() {
this.SkillsDiv.classList.add("hidden");
this.ActionsDiv.classList.remove("hidden");
}
ShowItems() {
this.ActionsDiv.classList.add("hidden");
this.itemsDiv.classList.remove("hidden");
}
Hi | {
this.itemsDiv.classList.add("hidden");
this.ActionsDiv.classList.remove("hidden");
}
ChooseItem(e) {
var naam = e.srcElement.innerText;
var item: Item;
item = new Item();
item.SetName(naam);
speler.GetInventory().UseItem(item);
this.NextTurn();
}
ChooseSkill(e) {
this.playerDamage = e.srcElement.childNodes[1].innerText;
this.canAttack = true;
}
HandlePlayerAttack(target: Unit) {
if (this.canAttack) {
this.canAttack = false;
this.attackingUnit = this.queue.poll();
this.attackingUnit.attack();
this.game.sound.play("attack");
target.SetCurrentHealth(target.GetCurrentHealth() - this.playerDamage);
console.log("Target Health : " + target.GetCurrentHealth());
this.game.sound.play("hurt");
target.UpdateHealthBar();
this.CheckForDeath(target);
}
}
HandleAiAttack(attacker: Unit, target: Unit) {
this.attackingUnit = attacker;
this.game.sound.play("attack");
//Random damage genereren voor AI. ai kunnen geen skills gebruiken.
var damage: number;
damage = this.game.rnd.integerInRange(0, 20);
target.SetCurrentHealth(target.GetCurrentHealth() - (attacker.getStrength() + damage));
console.log("Talk Shit, Get Hit' : Health = " + target.GetCurrentHealth().toString());
this.game.sound.play("hurt");
target.UpdateHealthBar();
this.CheckForDeath(target);
}
CheckForDeath(target: Unit) {
if (target.getDead()) {
if (target.name == "Player") {
this.LostBattle();
} else {
//Monster deleten uit lijst
var index: number;
index = this.enemies.indexOf(target);
this.enemies.splice(index, 1);
//Prioriteit verversen van enemies
this.enemies.forEach((monster) => {
if (monster.GetPriority() > target.GetPriority()) {
monster.SetPriority(monster.GetPriority() - 1);
}
});
//Controleren of er nog enemies leven;
if (this.enemies.length <= 0) {
this.WonBattle();
} else {
this.NextTurn();
}
}
//Uit queue verwijderen voor aanvallen
this.queue.remove(target);
this.queueGroup.remove(target.getPortrait());
//Vernietigen.
target.getPortrait().destroy();
target.destroy();
} else {
this.NextTurn();
}
}
HandleSkip() {
this.attackingUnit = speler;
this.NextTurn();
}
HandleFlee() {
this.attackingUnit = speler;
var randomNumber: number;
randomNumber = this.game.rnd.integerInRange(0, 100);
if (randomNumber > 75) {
//Fleeing successvol, terug naar open wereld.
this.BackToOpenWorld();
} else {
//turn skippen
this.NextTurn();
}
}
NextTurn() {
//prioriteit verhogen
this.attackingUnit.SetPriority(this.attackingUnit.GetPriority() + this.queue.getSize());
//Divs verbergen
this.HideSkills();
this.HideItems();
this.game.time.events.add(1000 * ANIM_SPEED, () => {
//na een delay de queue updaten
this.updateQueue()
})
}
WonBattle() {
var charactername: string;
charactername = $("#inventory").find(".gameh1").text().split(" - ")[1];
$.ajax({
context: this,
url: "../Character/generateVictoryItems",
type: "POST",
data: { charactername: charactername },
success: function (data) {
if (data === true) {
//Nieuwe items gekregen, melding geven aan speler
console.log("ITEMS GEKREGEN WOO");
//Inventory refreshen
refreshInventory();
}
//Terug naar andere state
this.BackToOpenWorld();
}
});
}
LostBattle() {
this.game.state.start('MainMenu');
}
BackToOpenWorld() {
this.game.state.start('GameState');
}
createQueue() {
this.queue = new AttackTurnQueue();
//Portraits en healthbars creeren
speler.createPortrait(this.game);
speler.CreateHealthBar(this.game);
this.enemies.forEach((enemy) => enemy.createPortrait(this.game));
this.enemies.forEach((enemy) => enemy.CreateHealthBar(this.game));
//speler en enemies toevoegen aan queue
this.queue.add(speler);
this.enemies.forEach((enemy) => this.queue.add(enemy));
//Queue verversen met nieuwe waardes
this.queue.updateQueue();
//alle portraits in een queue zetten, zodat we ze gezamelijk kunnen verschuiven
this.queueGroup = this.add.group()
this.queueGroup.add(speler.getPortrait());
this.enemies.forEach((enemy) => this.queueGroup.add(enemy.getPortrait()))
//Boven in het scherm plaatsen
this.queueGroup.y = 50
var portraitWidth = this.queueGroup.children[0].width + 5
var totalWidth = portraitWidth * this.queueGroup.children.length
//centreren op het scherm op basis van de width
this.queueGroup.x = (this.game.camera.x + (this.game.width / 2)) - totalWidth / 2
}
updateQueue() {
//Prioriteit van monsters en spelers updaten.
this.enemies.forEach((monster) => {
monster.SetPriority(Phaser.Math.max(1, monster.GetPriority() - monster.getSpeed()));
})
speler.SetPriority(Phaser.Math.max(1, speler.GetPriority() - speler.getSpeed()));
//oude attacking unit weer toevoegen
this.queue.add(this.attackingUnit)
//queue updaten
this.queue.updateQueue()
//visuele queue updaten op scherm
this.updateVisualQueue()
//kijken of speler de volgende is
if (!this.queue.peek().name.includes('Player')) {
this.BattleMenu.classList.add("hidden");
this.HandleAiAttack(this.queue.poll(), speler);
} else {
//spelermag aanvallen
this.BattleMenu.classList.remove("hidden");
}
}
updateVisualQueue() {
//Queue width etc updaten
var portraitWidth = this.queueGroup.children[0].width + 5
var totalWidth = portraitWidth * this.queueGroup.children.length
//centreren op het scherm op basis van de width
this.queueGroup.x = (this.game.camera.x + (this.game.width / 2)) - totalWidth / 2
for (var i = 0; i < this.queue.getArray().length; i++) {
//unit opvragen en portret updaten
var unit = this.queue.getArray()[i].value;
var portrait = unit.getPortrait();
//margin toevoegen om ze uit elkaar te houden
var posx = i * (portrait.width + 5)
//animatie toevoegen voor aanpassing
if (portrait.alpha == 0) {
portrait.x = posx
this.game.add.tween(portrait).to({ alpha: 1 }, 500 * ANIM_SPEED, Phaser.Easing.Linear.None, true)
} else {
this.game.add.tween(portrait).to({ x: posx }, 300 * ANIM_SPEED, Phaser.Easing.Linear.None, true)
}
//Prioriteit nummer weergeven
portrait.text.setText(' ' + unit.Priority + ' ')
}
}
}
} | deItems() | identifier_name |
trans_norm.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/abs/1412.2007
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import time
import itertools
from datetime import datetime
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import seq2seq_model
from utilities import deleteFiles, writeToFile
tf.app.flags.DEFINE_float("learning_rate", 0.00005, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("en_vocab_size", 43, "English vocabulary size.")
tf.app.flags.DEFINE_integer("fr_vocab_size", 44, "French vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "./data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./train", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
"How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("decode", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
"Run a self-test if this is set to True.")
tf.app.flags.DEFINE_integer("patience", 20, "Patience")
tf.app.flags.DEFINE_boolean("reuse", True, "Reuse prepared data")
FLAGS = tf.app.flags.FLAGS
# Limit (hard) the amount of GPU memory to be used by a process during a
# session
config_all = tf.ConfigProto()
# config_all.gpu_options.per_process_gpu_memory_fraction=0.5
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
# _buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
_buckets = [(10, 15), (20, 25), (40, 50)]
def set_vocab_size(vocab_path, lang='en'):
with open(vocab_path) as ifi:
vocab_size = len(ifi.readlines())
FLAGS.__setattr__(lang + '_vocab_size', vocab_size)
def read_data(filename_queue):
"""
This function reads the TFRecords file and returns a single sample tensor.
The returned tensor will generally be added to a queue.
"""
reader = tf.RecordReader()
_, serialized_sample = reader.read(filename_queue)
context_features, sequences = tf.parse_single_sequence_example(
serialized_sample,
sequence_features={
'inp_seq': tf.FixedLenSequenceFeature((1,), tf.int64,
allow_missing=False),
'out_seq': tf.FixedLenSequenceFeature((1,), tf.int64,
allow_missing=False),
}
)
# since the SequenceExample is stored in at least a 2D array, the first
# dimension being the length of the sequence and the second being the feature
# size for each item in the sequence, we need to flatten it. This is because
# in our case, the items are merely token ids.
inp_seq = tf.reshape(sequences['inp_seq'], [-1])
out_seq = tf.reshape(sequences['out_seq'], [-1])
return inp_seq, out_seq
def prepare_data_queues(datasource, set_type):
filenames = [getattr(datasource, set_type + '_path')]
filename_queue = tf.train.string_input_producer(filenames)
inp_seq, out_seq = read_data(filename_queue)
# next is to generate the batch using tf.train.shuffle_batch
inp_seq_batch, out_seq_batch = tf.train.shuffle_batch([inp_seq, out_seq],
FLAGS.batch_size,
dynamic_pad=True,
name="sequence_batch")
def create_model(session, forward_only):
"""Create translation model and initialize or load parameters in session."""
print(FLAGS.en_vocab_size, FLAGS.fr_vocab_size)
model = seq2seq_model.Seq2SeqModel(
FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,
FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
FLAGS.learning_rate, FLAGS.learning_rate_decay_factor, use_lstm=True,
forward_only=forward_only)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
print("{} : Reading model parameters from {}".format(
datetime.now().ctime(), ckpt.model_checkpoint_path))
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("{} : Created model with fresh parameters.".format(
datetime.now().ctime()))
session.run(tf.initialize_all_variables())
return model
def train():
print("Preparing data in %s" % FLAGS.data_dir)
# change the reuse parameter if you want to build the data again
en_train, fr_train, en_dev, fr_dev, en_test, fr_test, \
en_vocab_path, fr_vocab_path = \
data_utils.prepare_data(FLAGS.data_dir, reuse=FLAGS.reuse)
set_vocab_size(en_vocab_path, 'en')
set_vocab_size(fr_vocab_path, 'fr')
with tf.Session(config=config_all) as sess:
# Create model.
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
model = create_model(sess, False)
# Read data into buckets and compute their sizes.
print ("Reading development and training data (limit: %d)."
% FLAGS.max_train_data_size)
dev_set = read_data(en_dev, fr_dev)
train_set = read_data(en_train, fr_train, FLAGS.max_train_data_size)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
print('Bucket Sizes : {}'.format(train_bucket_sizes))
train_total_size = float(sum(train_bucket_sizes))
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
# the size if i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
if not os.path.exists(FLAGS.train_dir):
os.makedirs(FLAGS.train_dir)
# Creating summaries for the parameters
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Creating a summary writer
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
summary_op = tf.merge_all_summaries()
# This is the training loop.
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
history_ppxs = []
bad_counter = 0
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, False)
step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
loss += step_loss / FLAGS.steps_per_checkpoint
current_step += 1
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % FLAGS.steps_per_checkpoint == 0:
# Print statistics for the previous epoch.
perplexity = math.expm1(loss) if loss < 300 else float('inf')
print ("%s : global step %d learning rate %.7f step-time %.2f "
"perplexity %.9f" % (datetime.now().ctime(),
model.global_step.eval(),
model.learning_rate.eval(),
step_time, perplexity))
# Decrease learning rate if no improvement was seen over last 3 times.
if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(loss)
# Save checkpoint and zero timer and loss.
checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
step_time, loss = 0.0, 0.0
# Run evals on development set and print their perplexity.
bucket_ppx = []
for bucket_id in xrange(len(_buckets)):
dev_batches = [[u for u in k if u is not None] for k in
itertools.izip_longest(
*[dev_set[bucket_id][i::FLAGS.batch_size]
for i in range(FLAGS.batch_size)])]
for batch in dev_batches[:-1]:
encoder_inputs, decoder_inputs, target_weights = model.prepare_batch(
batch, bucket_id)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
bucket_ppx.append(eval_ppx)
dev_ppx = np.mean(bucket_ppx)
print(" dev eval: perplexity %.5f" % (dev_ppx))
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, model.global_step.eval())
history_ppxs.append(dev_ppx)
if (len(history_ppxs) > FLAGS.patience and
dev_ppx >= np.array(history_ppxs)[:-FLAGS.patience].min()):
bad_counter += 1
# if bad_counter > FLAGS.patience:
# print("Patience reached")
# break
sys.stdout.flush()
def update_error_counts(in_seqs, out_seqs):
# During the test_eval, send in buckets
# During decoding, dissolve the buckets and send the entire input
test_out = os.path.join(FLAGS.data_dir, 'test_errors.out')
error_str = '{}\nIn: {}\nNorm: {}\nOut: {}\n\n'
_, rev_en_vocab = data_utils.initialize_vocabulary(os.path.join(
FLAGS.data_dir, 'vocab.en'))
_, rev_fr_vocab = data_utils.initialize_vocabulary(os.path.join(
FLAGS.data_dir, 'vocab.fr'))
stats = {'R2W': 0, 'W2R': 0, 'W2W_C': 0, 'W2W_NC': 0}
for in_seq, out_seq in zip(in_seqs, out_seqs):
# inp = ''.join([rev_en_vocab[i] for i in in_seq[0]]).replace('_', ' ')
inp = ''.join([rev_en_vocab[i] for i in in_seq[0]])
inp = inp.split('_S_')[1]
norm = ''.join([rev_fr_vocab[i] for i in in_seq[1][:-1]]).replace('_', ' ')
out = ''.join([rev_fr_vocab[i] for i in out_seq]).replace('_', ' ')
if inp == norm:
if out != norm:
stats['R2W'] += 1
writeToFile(test_out, error_str.format('R2W', inp, norm, out))
else:
if out == norm:
stats['W2R'] += 1
# writeToFile(test_out, error_str.format('W2R', inp, norm, out))
elif out == inp:
stats['W2W_NC'] += 1
writeToFile(test_out, error_str.format('W2W_NC', inp, norm, out))
else:
stats['W2W_C'] += 1
writeToFile(test_out, error_str.format('W2W_C', inp, norm, out))
return stats
def eval_test():
tf.reset_default_graph()
test_out = os.path.join(FLAGS.data_dir, 'test_errors.out')
deleteFiles([test_out])
stats = {'R2W': 0, 'W2R': 0, 'W2W_C': 0, 'W2W_NC': 0}
# change the reuse parameter if you want to build the data again
_, _, _, _, en_test, fr_test, _, _ = data_utils.prepare_data(FLAGS.data_dir,
reuse=FLAGS.reuse)
with tf.Session(config=config_all) as sess:
model = create_model(sess, True)
test_set = read_data(en_test, fr_test)
test_bucket_sizes = [len(test_set[b]) for b in range(len(_buckets))]
print('Bucket Sizes : {}'.format(test_bucket_sizes))
total_loss, num_batches = 0, 0
for bucket_id in range(len(_buckets)):
all_batches = ([u for u in k if u is not None] for k in
itertools.izip_longest(
*[test_set[bucket_id][i::FLAGS.batch_size]
for i in range(FLAGS.batch_size)]))
for batch in all_batches:
encoder_inputs, decoder_inputs, target_weights = model.prepare_batch(
batch, bucket_id)
# setting the model batch size in case it is smaller (would be for the
# last batch in the bucket)
model.batch_size = len(batch)
_, eval_loss, logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
outputs = np.argmax(logits, axis=2).transpose()
outseq = [out[:list(out).index(data_utils.EOS_ID)] for out in outputs
if data_utils.EOS_ID in out]
stat_updates = update_error_counts(batch, outseq)
stats = {k: stats[k] + v for k, v in stat_updates.items()}
total_loss += math.exp(eval_loss)
num_batches += 1
# resetting the madel batch size
model.batch_size = FLAGS.batch_size
print("Loss over the test set : {}".format(total_loss / num_batches))
print(stats)
precision = stats['W2R'] / sum([stats['W2R'], stats['R2W'],
stats['W2W_C']])
recall = stats['W2R'] / sum([stats['W2R'], stats['W2W_NC'],
stats['W2W_C']])
f_m = (2 * precision * recall) / (precision + recall)
print('P: {}\nR: {}\nF: {}'.format(precision, recall, f_m))
def decode(in_file, with_labels=True):
with tf.Session(config=config_all) as sess:
# Create model and load parameters.
model = create_model(sess, True)
model.batch_size = 1 # We decode one sentence at a time.
# Load vocabularies.
en_vocab_path = os.path.join(FLAGS.data_dir,
"vocab.en")
fr_vocab_path = os.path.join(FLAGS.data_dir,
"vocab.fr")
en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
_, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)
# Decode from standard input.
sys.stdout.write("> ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
# Get token-ids for the input sentence.
token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)
# Which bucket does it belong to?
bucket_id = min([b for b in xrange(len(_buckets))
if _buckets[b][0] > len(token_ids)])
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)]
# Print out French sentence corresponding to outputs.
print(" ".join([tf.compat.as_str(rev_fr_vocab[output]) for output in outputs]))
print("> ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
def | (_):
if FLAGS.decode:
# decode()
eval_test()
else:
train()
# eval_test()
if __name__ == "__main__":
tf.app.run()
| main | identifier_name |
trans_norm.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/abs/1412.2007
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import time
import itertools
from datetime import datetime
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import seq2seq_model
from utilities import deleteFiles, writeToFile
tf.app.flags.DEFINE_float("learning_rate", 0.00005, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("en_vocab_size", 43, "English vocabulary size.")
tf.app.flags.DEFINE_integer("fr_vocab_size", 44, "French vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "./data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./train", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
"How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("decode", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
"Run a self-test if this is set to True.")
tf.app.flags.DEFINE_integer("patience", 20, "Patience")
tf.app.flags.DEFINE_boolean("reuse", True, "Reuse prepared data")
FLAGS = tf.app.flags.FLAGS
# Limit (hard) the amount of GPU memory to be used by a process during a
# session
config_all = tf.ConfigProto()
# config_all.gpu_options.per_process_gpu_memory_fraction=0.5
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
# _buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
_buckets = [(10, 15), (20, 25), (40, 50)]
def set_vocab_size(vocab_path, lang='en'):
with open(vocab_path) as ifi:
vocab_size = len(ifi.readlines())
FLAGS.__setattr__(lang + '_vocab_size', vocab_size)
def read_data(filename_queue):
"""
This function reads the TFRecords file and returns a single sample tensor.
The returned tensor will generally be added to a queue.
"""
reader = tf.RecordReader()
_, serialized_sample = reader.read(filename_queue)
context_features, sequences = tf.parse_single_sequence_example(
serialized_sample,
sequence_features={
'inp_seq': tf.FixedLenSequenceFeature((1,), tf.int64,
allow_missing=False),
'out_seq': tf.FixedLenSequenceFeature((1,), tf.int64,
allow_missing=False),
}
)
# since the SequenceExample is stored in at least a 2D array, the first
# dimension being the length of the sequence and the second being the feature
# size for each item in the sequence, we need to flatten it. This is because
# in our case, the items are merely token ids.
inp_seq = tf.reshape(sequences['inp_seq'], [-1])
out_seq = tf.reshape(sequences['out_seq'], [-1])
return inp_seq, out_seq
def prepare_data_queues(datasource, set_type):
filenames = [getattr(datasource, set_type + '_path')]
filename_queue = tf.train.string_input_producer(filenames)
inp_seq, out_seq = read_data(filename_queue)
# next is to generate the batch using tf.train.shuffle_batch
inp_seq_batch, out_seq_batch = tf.train.shuffle_batch([inp_seq, out_seq],
FLAGS.batch_size,
dynamic_pad=True,
name="sequence_batch")
def create_model(session, forward_only):
"""Create translation model and initialize or load parameters in session."""
print(FLAGS.en_vocab_size, FLAGS.fr_vocab_size)
model = seq2seq_model.Seq2SeqModel(
FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,
FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
FLAGS.learning_rate, FLAGS.learning_rate_decay_factor, use_lstm=True,
forward_only=forward_only)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
print("{} : Reading model parameters from {}".format(
datetime.now().ctime(), ckpt.model_checkpoint_path))
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("{} : Created model with fresh parameters.".format(
datetime.now().ctime()))
session.run(tf.initialize_all_variables())
return model
def train():
print("Preparing data in %s" % FLAGS.data_dir)
# change the reuse parameter if you want to build the data again
en_train, fr_train, en_dev, fr_dev, en_test, fr_test, \
en_vocab_path, fr_vocab_path = \
data_utils.prepare_data(FLAGS.data_dir, reuse=FLAGS.reuse)
set_vocab_size(en_vocab_path, 'en')
set_vocab_size(fr_vocab_path, 'fr')
with tf.Session(config=config_all) as sess:
# Create model.
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
model = create_model(sess, False)
# Read data into buckets and compute their sizes.
print ("Reading development and training data (limit: %d)."
% FLAGS.max_train_data_size)
dev_set = read_data(en_dev, fr_dev)
train_set = read_data(en_train, fr_train, FLAGS.max_train_data_size)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
print('Bucket Sizes : {}'.format(train_bucket_sizes))
train_total_size = float(sum(train_bucket_sizes))
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
# the size if i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
if not os.path.exists(FLAGS.train_dir):
os.makedirs(FLAGS.train_dir)
# Creating summaries for the parameters
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Creating a summary writer
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
summary_op = tf.merge_all_summaries()
# This is the training loop.
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
history_ppxs = []
bad_counter = 0
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, False)
step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
loss += step_loss / FLAGS.steps_per_checkpoint
current_step += 1
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % FLAGS.steps_per_checkpoint == 0:
# Print statistics for the previous epoch.
perplexity = math.expm1(loss) if loss < 300 else float('inf')
print ("%s : global step %d learning rate %.7f step-time %.2f "
"perplexity %.9f" % (datetime.now().ctime(),
model.global_step.eval(),
model.learning_rate.eval(),
step_time, perplexity))
# Decrease learning rate if no improvement was seen over last 3 times.
if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(loss)
# Save checkpoint and zero timer and loss.
checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
step_time, loss = 0.0, 0.0
# Run evals on development set and print their perplexity.
bucket_ppx = []
for bucket_id in xrange(len(_buckets)):
dev_batches = [[u for u in k if u is not None] for k in
itertools.izip_longest(
*[dev_set[bucket_id][i::FLAGS.batch_size]
for i in range(FLAGS.batch_size)])]
for batch in dev_batches[:-1]:
encoder_inputs, decoder_inputs, target_weights = model.prepare_batch(
batch, bucket_id)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
bucket_ppx.append(eval_ppx)
dev_ppx = np.mean(bucket_ppx)
print(" dev eval: perplexity %.5f" % (dev_ppx))
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, model.global_step.eval())
history_ppxs.append(dev_ppx)
if (len(history_ppxs) > FLAGS.patience and
dev_ppx >= np.array(history_ppxs)[:-FLAGS.patience].min()):
bad_counter += 1
# if bad_counter > FLAGS.patience:
# print("Patience reached")
# break
sys.stdout.flush()
def update_error_counts(in_seqs, out_seqs):
# During the test_eval, send in buckets
# During decoding, dissolve the buckets and send the entire input
test_out = os.path.join(FLAGS.data_dir, 'test_errors.out')
error_str = '{}\nIn: {}\nNorm: {}\nOut: {}\n\n'
_, rev_en_vocab = data_utils.initialize_vocabulary(os.path.join(
FLAGS.data_dir, 'vocab.en'))
_, rev_fr_vocab = data_utils.initialize_vocabulary(os.path.join(
FLAGS.data_dir, 'vocab.fr'))
stats = {'R2W': 0, 'W2R': 0, 'W2W_C': 0, 'W2W_NC': 0}
for in_seq, out_seq in zip(in_seqs, out_seqs):
# inp = ''.join([rev_en_vocab[i] for i in in_seq[0]]).replace('_', ' ')
inp = ''.join([rev_en_vocab[i] for i in in_seq[0]])
inp = inp.split('_S_')[1]
norm = ''.join([rev_fr_vocab[i] for i in in_seq[1][:-1]]).replace('_', ' ')
out = ''.join([rev_fr_vocab[i] for i in out_seq]).replace('_', ' ')
if inp == norm:
if out != norm:
stats['R2W'] += 1
writeToFile(test_out, error_str.format('R2W', inp, norm, out))
else:
if out == norm:
stats['W2R'] += 1
# writeToFile(test_out, error_str.format('W2R', inp, norm, out))
elif out == inp:
stats['W2W_NC'] += 1
writeToFile(test_out, error_str.format('W2W_NC', inp, norm, out))
else:
stats['W2W_C'] += 1
writeToFile(test_out, error_str.format('W2W_C', inp, norm, out))
return stats
def eval_test():
tf.reset_default_graph()
test_out = os.path.join(FLAGS.data_dir, 'test_errors.out')
deleteFiles([test_out])
stats = {'R2W': 0, 'W2R': 0, 'W2W_C': 0, 'W2W_NC': 0}
# change the reuse parameter if you want to build the data again
_, _, _, _, en_test, fr_test, _, _ = data_utils.prepare_data(FLAGS.data_dir,
reuse=FLAGS.reuse)
with tf.Session(config=config_all) as sess:
model = create_model(sess, True)
test_set = read_data(en_test, fr_test)
test_bucket_sizes = [len(test_set[b]) for b in range(len(_buckets))]
print('Bucket Sizes : {}'.format(test_bucket_sizes))
total_loss, num_batches = 0, 0
for bucket_id in range(len(_buckets)):
all_batches = ([u for u in k if u is not None] for k in
itertools.izip_longest(
*[test_set[bucket_id][i::FLAGS.batch_size]
for i in range(FLAGS.batch_size)]))
for batch in all_batches:
encoder_inputs, decoder_inputs, target_weights = model.prepare_batch(
batch, bucket_id)
# setting the model batch size in case it is smaller (would be for the
# last batch in the bucket)
model.batch_size = len(batch)
_, eval_loss, logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
outputs = np.argmax(logits, axis=2).transpose()
outseq = [out[:list(out).index(data_utils.EOS_ID)] for out in outputs
if data_utils.EOS_ID in out]
stat_updates = update_error_counts(batch, outseq)
stats = {k: stats[k] + v for k, v in stat_updates.items()}
total_loss += math.exp(eval_loss)
num_batches += 1
# resetting the madel batch size
model.batch_size = FLAGS.batch_size
print("Loss over the test set : {}".format(total_loss / num_batches))
print(stats)
precision = stats['W2R'] / sum([stats['W2R'], stats['R2W'],
stats['W2W_C']])
recall = stats['W2R'] / sum([stats['W2R'], stats['W2W_NC'],
stats['W2W_C']])
f_m = (2 * precision * recall) / (precision + recall)
print('P: {}\nR: {}\nF: {}'.format(precision, recall, f_m))
def decode(in_file, with_labels=True):
with tf.Session(config=config_all) as sess:
# Create model and load parameters.
model = create_model(sess, True)
model.batch_size = 1 # We decode one sentence at a time.
# Load vocabularies.
en_vocab_path = os.path.join(FLAGS.data_dir,
"vocab.en")
fr_vocab_path = os.path.join(FLAGS.data_dir,
"vocab.fr")
en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
_, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)
# Decode from standard input.
sys.stdout.write("> ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
# Get token-ids for the input sentence.
token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)
# Which bucket does it belong to?
bucket_id = min([b for b in xrange(len(_buckets))
if _buckets[b][0] > len(token_ids)])
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)]
# Print out French sentence corresponding to outputs.
print(" ".join([tf.compat.as_str(rev_fr_vocab[output]) for output in outputs]))
print("> ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
def main(_):
if FLAGS.decode:
# decode()
eval_test()
else:
|
if __name__ == "__main__":
tf.app.run()
| train()
# eval_test() | conditional_block |
trans_norm.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/abs/1412.2007
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import time
import itertools
from datetime import datetime
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import seq2seq_model
from utilities import deleteFiles, writeToFile
tf.app.flags.DEFINE_float("learning_rate", 0.00005, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("en_vocab_size", 43, "English vocabulary size.")
tf.app.flags.DEFINE_integer("fr_vocab_size", 44, "French vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "./data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./train", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
"How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("decode", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
"Run a self-test if this is set to True.")
tf.app.flags.DEFINE_integer("patience", 20, "Patience")
tf.app.flags.DEFINE_boolean("reuse", True, "Reuse prepared data")
FLAGS = tf.app.flags.FLAGS
# Limit (hard) the amount of GPU memory to be used by a process during a
# session
config_all = tf.ConfigProto()
# config_all.gpu_options.per_process_gpu_memory_fraction=0.5
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
# _buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
_buckets = [(10, 15), (20, 25), (40, 50)]
def set_vocab_size(vocab_path, lang='en'):
|
def read_data(filename_queue):
"""
This function reads the TFRecords file and returns a single sample tensor.
The returned tensor will generally be added to a queue.
"""
reader = tf.RecordReader()
_, serialized_sample = reader.read(filename_queue)
context_features, sequences = tf.parse_single_sequence_example(
serialized_sample,
sequence_features={
'inp_seq': tf.FixedLenSequenceFeature((1,), tf.int64,
allow_missing=False),
'out_seq': tf.FixedLenSequenceFeature((1,), tf.int64,
allow_missing=False),
}
)
# since the SequenceExample is stored in at least a 2D array, the first
# dimension being the length of the sequence and the second being the feature
# size for each item in the sequence, we need to flatten it. This is because
# in our case, the items are merely token ids.
inp_seq = tf.reshape(sequences['inp_seq'], [-1])
out_seq = tf.reshape(sequences['out_seq'], [-1])
return inp_seq, out_seq
def prepare_data_queues(datasource, set_type):
filenames = [getattr(datasource, set_type + '_path')]
filename_queue = tf.train.string_input_producer(filenames)
inp_seq, out_seq = read_data(filename_queue)
# next is to generate the batch using tf.train.shuffle_batch
inp_seq_batch, out_seq_batch = tf.train.shuffle_batch([inp_seq, out_seq],
FLAGS.batch_size,
dynamic_pad=True,
name="sequence_batch")
def create_model(session, forward_only):
"""Create translation model and initialize or load parameters in session."""
print(FLAGS.en_vocab_size, FLAGS.fr_vocab_size)
model = seq2seq_model.Seq2SeqModel(
FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,
FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
FLAGS.learning_rate, FLAGS.learning_rate_decay_factor, use_lstm=True,
forward_only=forward_only)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
print("{} : Reading model parameters from {}".format(
datetime.now().ctime(), ckpt.model_checkpoint_path))
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("{} : Created model with fresh parameters.".format(
datetime.now().ctime()))
session.run(tf.initialize_all_variables())
return model
def train():
print("Preparing data in %s" % FLAGS.data_dir)
# change the reuse parameter if you want to build the data again
en_train, fr_train, en_dev, fr_dev, en_test, fr_test, \
en_vocab_path, fr_vocab_path = \
data_utils.prepare_data(FLAGS.data_dir, reuse=FLAGS.reuse)
set_vocab_size(en_vocab_path, 'en')
set_vocab_size(fr_vocab_path, 'fr')
with tf.Session(config=config_all) as sess:
# Create model.
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
model = create_model(sess, False)
# Read data into buckets and compute their sizes.
print ("Reading development and training data (limit: %d)."
% FLAGS.max_train_data_size)
dev_set = read_data(en_dev, fr_dev)
train_set = read_data(en_train, fr_train, FLAGS.max_train_data_size)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
print('Bucket Sizes : {}'.format(train_bucket_sizes))
train_total_size = float(sum(train_bucket_sizes))
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
# the size if i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
if not os.path.exists(FLAGS.train_dir):
os.makedirs(FLAGS.train_dir)
# Creating summaries for the parameters
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Creating a summary writer
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
summary_op = tf.merge_all_summaries()
# This is the training loop.
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
history_ppxs = []
bad_counter = 0
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, False)
step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
loss += step_loss / FLAGS.steps_per_checkpoint
current_step += 1
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % FLAGS.steps_per_checkpoint == 0:
# Print statistics for the previous epoch.
perplexity = math.expm1(loss) if loss < 300 else float('inf')
print ("%s : global step %d learning rate %.7f step-time %.2f "
"perplexity %.9f" % (datetime.now().ctime(),
model.global_step.eval(),
model.learning_rate.eval(),
step_time, perplexity))
# Decrease learning rate if no improvement was seen over last 3 times.
if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(loss)
# Save checkpoint and zero timer and loss.
checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
step_time, loss = 0.0, 0.0
# Run evals on development set and print their perplexity.
bucket_ppx = []
for bucket_id in xrange(len(_buckets)):
dev_batches = [[u for u in k if u is not None] for k in
itertools.izip_longest(
*[dev_set[bucket_id][i::FLAGS.batch_size]
for i in range(FLAGS.batch_size)])]
for batch in dev_batches[:-1]:
encoder_inputs, decoder_inputs, target_weights = model.prepare_batch(
batch, bucket_id)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
bucket_ppx.append(eval_ppx)
dev_ppx = np.mean(bucket_ppx)
print(" dev eval: perplexity %.5f" % (dev_ppx))
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, model.global_step.eval())
history_ppxs.append(dev_ppx)
if (len(history_ppxs) > FLAGS.patience and
dev_ppx >= np.array(history_ppxs)[:-FLAGS.patience].min()):
bad_counter += 1
# if bad_counter > FLAGS.patience:
# print("Patience reached")
# break
sys.stdout.flush()
def update_error_counts(in_seqs, out_seqs):
# During the test_eval, send in buckets
# During decoding, dissolve the buckets and send the entire input
test_out = os.path.join(FLAGS.data_dir, 'test_errors.out')
error_str = '{}\nIn: {}\nNorm: {}\nOut: {}\n\n'
_, rev_en_vocab = data_utils.initialize_vocabulary(os.path.join(
FLAGS.data_dir, 'vocab.en'))
_, rev_fr_vocab = data_utils.initialize_vocabulary(os.path.join(
FLAGS.data_dir, 'vocab.fr'))
stats = {'R2W': 0, 'W2R': 0, 'W2W_C': 0, 'W2W_NC': 0}
for in_seq, out_seq in zip(in_seqs, out_seqs):
# inp = ''.join([rev_en_vocab[i] for i in in_seq[0]]).replace('_', ' ')
inp = ''.join([rev_en_vocab[i] for i in in_seq[0]])
inp = inp.split('_S_')[1]
norm = ''.join([rev_fr_vocab[i] for i in in_seq[1][:-1]]).replace('_', ' ')
out = ''.join([rev_fr_vocab[i] for i in out_seq]).replace('_', ' ')
if inp == norm:
if out != norm:
stats['R2W'] += 1
writeToFile(test_out, error_str.format('R2W', inp, norm, out))
else:
if out == norm:
stats['W2R'] += 1
# writeToFile(test_out, error_str.format('W2R', inp, norm, out))
elif out == inp:
stats['W2W_NC'] += 1
writeToFile(test_out, error_str.format('W2W_NC', inp, norm, out))
else:
stats['W2W_C'] += 1
writeToFile(test_out, error_str.format('W2W_C', inp, norm, out))
return stats
def eval_test():
tf.reset_default_graph()
test_out = os.path.join(FLAGS.data_dir, 'test_errors.out')
deleteFiles([test_out])
stats = {'R2W': 0, 'W2R': 0, 'W2W_C': 0, 'W2W_NC': 0}
# change the reuse parameter if you want to build the data again
_, _, _, _, en_test, fr_test, _, _ = data_utils.prepare_data(FLAGS.data_dir,
reuse=FLAGS.reuse)
with tf.Session(config=config_all) as sess:
model = create_model(sess, True)
test_set = read_data(en_test, fr_test)
test_bucket_sizes = [len(test_set[b]) for b in range(len(_buckets))]
print('Bucket Sizes : {}'.format(test_bucket_sizes))
total_loss, num_batches = 0, 0
for bucket_id in range(len(_buckets)):
all_batches = ([u for u in k if u is not None] for k in
itertools.izip_longest(
*[test_set[bucket_id][i::FLAGS.batch_size]
for i in range(FLAGS.batch_size)]))
for batch in all_batches:
encoder_inputs, decoder_inputs, target_weights = model.prepare_batch(
batch, bucket_id)
# setting the model batch size in case it is smaller (would be for the
# last batch in the bucket)
model.batch_size = len(batch)
_, eval_loss, logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
outputs = np.argmax(logits, axis=2).transpose()
outseq = [out[:list(out).index(data_utils.EOS_ID)] for out in outputs
if data_utils.EOS_ID in out]
stat_updates = update_error_counts(batch, outseq)
stats = {k: stats[k] + v for k, v in stat_updates.items()}
total_loss += math.exp(eval_loss)
num_batches += 1
# resetting the madel batch size
model.batch_size = FLAGS.batch_size
print("Loss over the test set : {}".format(total_loss / num_batches))
print(stats)
precision = stats['W2R'] / sum([stats['W2R'], stats['R2W'],
stats['W2W_C']])
recall = stats['W2R'] / sum([stats['W2R'], stats['W2W_NC'],
stats['W2W_C']])
f_m = (2 * precision * recall) / (precision + recall)
print('P: {}\nR: {}\nF: {}'.format(precision, recall, f_m))
def decode(in_file, with_labels=True):
with tf.Session(config=config_all) as sess:
# Create model and load parameters.
model = create_model(sess, True)
model.batch_size = 1 # We decode one sentence at a time.
# Load vocabularies.
en_vocab_path = os.path.join(FLAGS.data_dir,
"vocab.en")
fr_vocab_path = os.path.join(FLAGS.data_dir,
"vocab.fr")
en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
_, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)
# Decode from standard input.
sys.stdout.write("> ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
# Get token-ids for the input sentence.
token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)
# Which bucket does it belong to?
bucket_id = min([b for b in xrange(len(_buckets))
if _buckets[b][0] > len(token_ids)])
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)]
# Print out French sentence corresponding to outputs.
print(" ".join([tf.compat.as_str(rev_fr_vocab[output]) for output in outputs]))
print("> ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
def main(_):
if FLAGS.decode:
# decode()
eval_test()
else:
train()
# eval_test()
if __name__ == "__main__":
tf.app.run()
| with open(vocab_path) as ifi:
vocab_size = len(ifi.readlines())
FLAGS.__setattr__(lang + '_vocab_size', vocab_size) | identifier_body |
trans_norm.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/abs/1412.2007
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import time
import itertools
from datetime import datetime
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import seq2seq_model
from utilities import deleteFiles, writeToFile
tf.app.flags.DEFINE_float("learning_rate", 0.00005, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("en_vocab_size", 43, "English vocabulary size.")
tf.app.flags.DEFINE_integer("fr_vocab_size", 44, "French vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "./data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./train", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
"How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("decode", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
"Run a self-test if this is set to True.")
tf.app.flags.DEFINE_integer("patience", 20, "Patience")
tf.app.flags.DEFINE_boolean("reuse", True, "Reuse prepared data")
FLAGS = tf.app.flags.FLAGS
# Limit (hard) the amount of GPU memory to be used by a process during a
# session
config_all = tf.ConfigProto()
# config_all.gpu_options.per_process_gpu_memory_fraction=0.5
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
# _buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
_buckets = [(10, 15), (20, 25), (40, 50)]
def set_vocab_size(vocab_path, lang='en'):
  """Set FLAGS.<lang>_vocab_size to the number of lines in a vocab file.

  Args:
    vocab_path: path to a vocabulary file with one token per line.
    lang: language prefix ('en' or 'fr') selecting the FLAGS attribute.
  """
  with open(vocab_path) as ifi:
    # Count lines lazily instead of materialising the whole file in memory.
    vocab_size = sum(1 for _ in ifi)
  # setattr() is the idiomatic spelling of FLAGS.__setattr__(...).
  setattr(FLAGS, lang + '_vocab_size', vocab_size)
def read_data(filename_queue):
  """Read one serialized SequenceExample from a TFRecords filename queue.

  Args:
    filename_queue: queue of TFRecords filenames, e.g. from
      tf.train.string_input_producer.

  Returns:
    A pair (inp_seq, out_seq) of 1-D int64 tensors holding the token ids of
    a single input/output sequence. The returned tensors will generally be
    added to a queue.

  NOTE(review): train()/eval_test() call read_data(en_file, fr_file, ...),
  which does not match this signature — a bucket-based reader appears to be
  intended there; confirm which read_data should be in use.
  """
  # Bug fix: tf.TFRecordReader is the reader for TFRecords files;
  # tf.RecordReader does not exist in the TensorFlow API.
  reader = tf.TFRecordReader()
  _, serialized_sample = reader.read(filename_queue)
  # Context features are unused here, only the sequence features matter.
  _, sequences = tf.parse_single_sequence_example(
      serialized_sample,
      sequence_features={
          'inp_seq': tf.FixedLenSequenceFeature((1,), tf.int64,
                                                allow_missing=False),
          'out_seq': tf.FixedLenSequenceFeature((1,), tf.int64,
                                                allow_missing=False),
      }
  )
  # since the SequenceExample is stored in at least a 2D array, the first
  # dimension being the length of the sequence and the second being the feature
  # size for each item in the sequence, we need to flatten it. This is because
  # in our case, the items are merely token ids.
  inp_seq = tf.reshape(sequences['inp_seq'], [-1])
  out_seq = tf.reshape(sequences['out_seq'], [-1])
  return inp_seq, out_seq
def prepare_data_queues(datasource, set_type):
  """Build a shuffled, dynamically-padded batch queue for one data split.

  Args:
    datasource: object exposing '<set_type>_path' attributes holding
      TFRecords file paths.
    set_type: split name used to pick the path attribute ('train', 'dev', ...).

  Returns:
    A pair (inp_seq_batch, out_seq_batch) of batched sequence tensors.
  """
  filenames = [getattr(datasource, set_type + '_path')]
  filename_queue = tf.train.string_input_producer(filenames)
  inp_seq, out_seq = read_data(filename_queue)
  # next is to generate the batch using tf.train.shuffle_batch
  # NOTE(review): tf.train.shuffle_batch also requires capacity and
  # min_after_dequeue arguments — confirm against the TF version in use.
  inp_seq_batch, out_seq_batch = tf.train.shuffle_batch([inp_seq, out_seq],
                                                        FLAGS.batch_size,
                                                        dynamic_pad=True,
                                                        name="sequence_batch")
  # Bug fix: the batch tensors were computed but never returned, so every
  # caller received None.
  return inp_seq_batch, out_seq_batch
def create_model(session, forward_only):
  """Create translation model and initialize or load parameters in session.

  Args:
    session: tf.Session used to restore or initialize the model variables.
    forward_only: if True, build the model without training ops (used for
      evaluation and decoding).

  Returns:
    A seq2seq_model.Seq2SeqModel, restored from the latest checkpoint in
    FLAGS.train_dir when one exists, otherwise freshly initialized.
  """
  print(FLAGS.en_vocab_size, FLAGS.fr_vocab_size)
  model = seq2seq_model.Seq2SeqModel(
      FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,
      FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
      FLAGS.learning_rate, FLAGS.learning_rate_decay_factor, use_lstm=True,
      forward_only=forward_only)
  ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
  if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
    print("{} : Reading model parameters from {}".format(
        datetime.now().ctime(), ckpt.model_checkpoint_path))
    model.saver.restore(session, ckpt.model_checkpoint_path)
  else:
    print("{} : Created model with fresh parameters.".format(
        datetime.now().ctime()))
    # initialize_all_variables is the pre-1.0 TensorFlow initializer.
    session.run(tf.initialize_all_variables())
  return model
def train():
  """Train the normalization model until interrupted.

  Prepares the data, builds or restores the model, then loops forever:
  samples a bucket proportionally to its size, runs one training step, and
  every FLAGS.steps_per_checkpoint steps prints statistics, decays the
  learning rate when loss stagnates, saves a checkpoint, and evaluates
  perplexity on the dev set.
  """
  print("Preparing data in %s" % FLAGS.data_dir)
  # change the reuse parameter if you want to build the data again
  en_train, fr_train, en_dev, fr_dev, en_test, fr_test, \
      en_vocab_path, fr_vocab_path = \
      data_utils.prepare_data(FLAGS.data_dir, reuse=FLAGS.reuse)
  set_vocab_size(en_vocab_path, 'en')
  set_vocab_size(fr_vocab_path, 'fr')
  with tf.Session(config=config_all) as sess:
    # Create model.
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
    model = create_model(sess, False)
    # Read data into buckets and compute their sizes.
    print ("Reading development and training data (limit: %d)."
           % FLAGS.max_train_data_size)
    # NOTE(review): these calls pass file paths (plus a size limit), which
    # does not match the read_data(filename_queue) defined above — a
    # bucket-based reader appears to be intended; confirm.
    dev_set = read_data(en_dev, fr_dev)
    train_set = read_data(en_train, fr_train, FLAGS.max_train_data_size)
    train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
    print('Bucket Sizes : {}'.format(train_bucket_sizes))
    train_total_size = float(sum(train_bucket_sizes))
    # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
    # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
    # the size if i-th training bucket, as used later.
    train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                           for i in xrange(len(train_bucket_sizes))]
    if not os.path.exists(FLAGS.train_dir):
      os.makedirs(FLAGS.train_dir)
    # Creating summaries for the parameters
    summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
    for var in tf.trainable_variables():
      summaries.append(tf.histogram_summary(var.op.name, var))
    # Creating a summary writer
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
    summary_op = tf.merge_all_summaries()
    # This is the training loop.
    step_time, loss = 0.0, 0.0
    current_step = 0
    previous_losses = []
    history_ppxs = []
    bad_counter = 0
    while True:
      # Choose a bucket according to data distribution. We pick a random number
      # in [0, 1] and use the corresponding interval in train_buckets_scale.
      random_number_01 = np.random.random_sample()
      bucket_id = min([i for i in xrange(len(train_buckets_scale))
                       if train_buckets_scale[i] > random_number_01])
      # Get a batch and make a step.
      start_time = time.time()
      encoder_inputs, decoder_inputs, target_weights = model.get_batch(
          train_set, bucket_id)
      _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                   target_weights, bucket_id, False)
      step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
      loss += step_loss / FLAGS.steps_per_checkpoint
      current_step += 1
      # Once in a while, we save checkpoint, print statistics, and run evals.
      if current_step % FLAGS.steps_per_checkpoint == 0:
        # Print statistics for the previous epoch.
        # NOTE(review): math.expm1(loss) computes exp(loss) - 1, whereas the
        # dev perplexity below uses math.exp(eval_loss) — confirm which is
        # intended here.
        perplexity = math.expm1(loss) if loss < 300 else float('inf')
        print ("%s : global step %d learning rate %.7f step-time %.2f "
               "perplexity %.9f" % (datetime.now().ctime(),
                                    model.global_step.eval(),
                                    model.learning_rate.eval(),
                                    step_time, perplexity))
        # Decrease learning rate if no improvement was seen over last 3 times.
        if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
          sess.run(model.learning_rate_decay_op)
        previous_losses.append(loss)
        # Save checkpoint and zero timer and loss.
        checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
        model.saver.save(sess, checkpoint_path, global_step=model.global_step)
        step_time, loss = 0.0, 0.0
        # Run evals on development set and print their perplexity.
        bucket_ppx = []
        for bucket_id in xrange(len(_buckets)):
          # Chunk the bucket into batch-size groups; izip_longest (Python 2
          # itertools) pads the last group with None, stripped below.
          dev_batches = [[u for u in k if u is not None] for k in
                         itertools.izip_longest(
                             *[dev_set[bucket_id][i::FLAGS.batch_size]
                               for i in range(FLAGS.batch_size)])]
          for batch in dev_batches[:-1]:
            encoder_inputs, decoder_inputs, target_weights = model.prepare_batch(
                batch, bucket_id)
            _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                         target_weights, bucket_id, True)
            eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
            bucket_ppx.append(eval_ppx)
        dev_ppx = np.mean(bucket_ppx)
        print(" dev eval: perplexity %.5f" % (dev_ppx))
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, model.global_step.eval())
        history_ppxs.append(dev_ppx)
        # Count a "bad" checkpoint when dev perplexity fails to beat the best
        # value seen more than FLAGS.patience checkpoints ago (early-stopping
        # bookkeeping; the actual break is commented out).
        if (len(history_ppxs) > FLAGS.patience and
            dev_ppx >= np.array(history_ppxs)[:-FLAGS.patience].min()):
          bad_counter += 1
          # if bad_counter > FLAGS.patience:
          #   print("Patience reached")
          #   break
        sys.stdout.flush()
def update_error_counts(in_seqs, out_seqs):
  """Categorize each (input, gold, predicted) triple into error classes.

  Categories:
    R2W:    input was already correct, model changed it (right -> wrong).
    W2R:    input was wrong, model produced the gold form (wrong -> right).
    W2W_NC: input was wrong, model left it unchanged (no correction).
    W2W_C:  input was wrong, model changed it, but not to the gold form.
  Mis-handled cases are appended to data_dir/test_errors.out.

  Args:
    in_seqs: iterable of (input_token_ids, gold_token_ids) pairs.
    out_seqs: iterable of predicted token-id sequences (EOS already stripped).

  Returns:
    Dict mapping each category name to its count for this batch.
  """
  # During the test_eval, send in buckets
  # During decoding, dissolve the buckets and send the entire input
  test_out = os.path.join(FLAGS.data_dir, 'test_errors.out')
  error_str = '{}\nIn: {}\nNorm: {}\nOut: {}\n\n'
  _, rev_en_vocab = data_utils.initialize_vocabulary(os.path.join(
      FLAGS.data_dir, 'vocab.en'))
  _, rev_fr_vocab = data_utils.initialize_vocabulary(os.path.join(
      FLAGS.data_dir, 'vocab.fr'))
  stats = {'R2W': 0, 'W2R': 0, 'W2W_C': 0, 'W2W_NC': 0}
  for in_seq, out_seq in zip(in_seqs, out_seqs):
    # inp = ''.join([rev_en_vocab[i] for i in in_seq[0]]).replace('_', ' ')
    inp = ''.join([rev_en_vocab[i] for i in in_seq[0]])
    # NOTE(review): assumes the input contains an _S_ marker and takes the
    # segment after it — confirm against the data-preparation format.
    inp = inp.split('_S_')[1]
    # Gold sequence drops its final token (presumably EOS) before joining.
    norm = ''.join([rev_fr_vocab[i] for i in in_seq[1][:-1]]).replace('_', ' ')
    out = ''.join([rev_fr_vocab[i] for i in out_seq]).replace('_', ' ')
    if inp == norm:
      if out != norm:
        stats['R2W'] += 1
        writeToFile(test_out, error_str.format('R2W', inp, norm, out))
    else:
      if out == norm:
        stats['W2R'] += 1
        # writeToFile(test_out, error_str.format('W2R', inp, norm, out))
      elif out == inp:
        stats['W2W_NC'] += 1
        writeToFile(test_out, error_str.format('W2W_NC', inp, norm, out))
      else:
        stats['W2W_C'] += 1
        writeToFile(test_out, error_str.format('W2W_C', inp, norm, out))
  return stats
def eval_test():
  """Evaluate the latest checkpoint on the test set.

  Computes per-batch loss over every bucket, accumulates the error-category
  counts from update_error_counts, and prints precision, recall and
  F-measure over the normalization decisions (W2R = true positives).
  """
  tf.reset_default_graph()
  test_out = os.path.join(FLAGS.data_dir, 'test_errors.out')
  deleteFiles([test_out])
  stats = {'R2W': 0, 'W2R': 0, 'W2W_C': 0, 'W2W_NC': 0}
  # change the reuse parameter if you want to build the data again
  _, _, _, _, en_test, fr_test, _, _ = data_utils.prepare_data(FLAGS.data_dir,
                                                               reuse=FLAGS.reuse)
  with tf.Session(config=config_all) as sess:
    model = create_model(sess, True)
    # NOTE(review): this call does not match the read_data(filename_queue)
    # defined above — confirm which read_data implementation is intended.
    test_set = read_data(en_test, fr_test)
    test_bucket_sizes = [len(test_set[b]) for b in range(len(_buckets))]
    print('Bucket Sizes : {}'.format(test_bucket_sizes))
    total_loss, num_batches = 0, 0
    for bucket_id in range(len(_buckets)):
      # Chunk the bucket into batches; izip_longest (Python 2 itertools) pads
      # the last chunk with None, which the comprehension strips out.
      all_batches = ([u for u in k if u is not None] for k in
                     itertools.izip_longest(
                         *[test_set[bucket_id][i::FLAGS.batch_size]
                           for i in range(FLAGS.batch_size)]))
      for batch in all_batches:
        encoder_inputs, decoder_inputs, target_weights = model.prepare_batch(
            batch, bucket_id)
        # setting the model batch size in case it is smaller (would be for the
        # last batch in the bucket)
        model.batch_size = len(batch)
        _, eval_loss, logits = model.step(sess, encoder_inputs, decoder_inputs,
                                          target_weights, bucket_id, True)
        # Greedy decode: argmax over the vocab axis, then cut at EOS.
        outputs = np.argmax(logits, axis=2).transpose()
        outseq = [out[:list(out).index(data_utils.EOS_ID)] for out in outputs
                  if data_utils.EOS_ID in out]
        stat_updates = update_error_counts(batch, outseq)
        stats = {k: stats[k] + v for k, v in stat_updates.items()}
        total_loss += math.exp(eval_loss)
        num_batches += 1
        # resetting the model batch size
        model.batch_size = FLAGS.batch_size
    print("Loss over the test set : {}".format(total_loss / num_batches))
    print(stats)
    # W2R are corrections the model got right; precision penalizes spurious
    # changes (R2W, W2W_C), recall penalizes missed/incorrect ones.
    precision = stats['W2R'] / sum([stats['W2R'], stats['R2W'],
                                    stats['W2W_C']])
    recall = stats['W2R'] / sum([stats['W2R'], stats['W2W_NC'],
                                 stats['W2W_C']])
    f_m = (2 * precision * recall) / (precision + recall)
    print('P: {}\nR: {}\nF: {}'.format(precision, recall, f_m))
def decode(in_file, with_labels=True):
  """Interactively normalize sentences typed on standard input.

  Reads lines from stdin until EOF; for each line, tokenizes it, picks the
  smallest bucket that fits, runs one greedy decoding step and prints the
  resulting output sentence.

  Args:
    in_file: currently unused — input comes from stdin. NOTE(review): the
      name suggests file-based decoding was planned; confirm.
    with_labels: currently unused.
  """
  with tf.Session(config=config_all) as sess:
    # Create model and load parameters.
    model = create_model(sess, True)
    model.batch_size = 1  # We decode one sentence at a time.
    # Load vocabularies.
    en_vocab_path = os.path.join(FLAGS.data_dir,
                                 "vocab.en")
    fr_vocab_path = os.path.join(FLAGS.data_dir,
                                 "vocab.fr")
    en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
    _, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)
    # Decode from standard input.
    sys.stdout.write("> ")
    sys.stdout.flush()
    sentence = sys.stdin.readline()
    while sentence:
      # Get token-ids for the input sentence.
      token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)
      # Which bucket does it belong to?
      bucket_id = min([b for b in xrange(len(_buckets))
                       if _buckets[b][0] > len(token_ids)])
      # Get a 1-element batch to feed the sentence to the model.
      encoder_inputs, decoder_inputs, target_weights = model.get_batch(
          {bucket_id: [(token_ids, [])]}, bucket_id)
      # Get output logits for the sentence.
      _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
                                       target_weights, bucket_id, True)
      # This is a greedy decoder - outputs are just argmaxes of output_logits.
      outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
      # If there is an EOS symbol in outputs, cut them at that point.
      if data_utils.EOS_ID in outputs:
        outputs = outputs[:outputs.index(data_utils.EOS_ID)]
      # Print out French sentence corresponding to outputs.
      print(" ".join([tf.compat.as_str(rev_fr_vocab[output]) for output in outputs]))
      print("> ", end="")
      sys.stdout.flush()
      sentence = sys.stdin.readline()
def main(_):
  """Entry point: run test-set evaluation when --decode is set, else train."""
  if not FLAGS.decode:
    train()
    # eval_test()
  else:
    # decode() is the interactive alternative to test-set evaluation.
    eval_test()


if __name__ == "__main__":
  tf.app.run()
program.go | package main
import (
"bufio"
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"github.com/PuerkitoBio/goquery"
"gopkg.in/yaml.v2"
"github.com/Loyalsoldier/cn-blocked-domain/crawler"
"github.com/Loyalsoldier/cn-blocked-domain/utils"
)
var (
	// Sentinel errors returned by config parsing and generation. Error
	// strings are lower-case, matching Go convention (staticcheck ST1005)
	// and the other messages in this set.
	ErrConfigFormatNotSupported = errors.New("config format not supported")
	ErrConfigIsEmpty            = errors.New("config is empty")
	ErrCrawlConfigIsEmpty       = errors.New("crawl config is empty")
	ErrFilterConfigIsEmpty      = errors.New("filter config is empty")
	ErrCustomizeConfigIsEmpty   = errors.New("customize config is empty")
	ErrInvalidPageNumber        = errors.New("invalid page number")
)
// URL holds the building blocks of a GreatFire URL: the base, the initial
// suffix used to discover the last page number, and the per-page suffix.
type URL struct {
	BaseURL       string `yaml:"base_url,omitempty" json:"base_url,omitempty"`
	InitSuffixURL string `yaml:"init_suffix_url,omitempty" json:"init_suffix_url,omitempty"`
	SuffixURL     string `yaml:"suffix_url,omitempty" json:"suffix_url,omitempty"`
}

// Type describes one crawlable category: its URL path, HTTP referer,
// whether it is enabled, and the page range [From, To] to crawl.
type Type struct {
	Name    string `yaml:"name,omitempty" json:"name,omitempty"`
	TypeURL string `yaml:"type_url,omitempty" json:"type_url,omitempty"`
	Referer string `yaml:"referer,omitempty" json:"referer,omitempty"`
	IsCrawl bool   `yaml:"is_crawl,omitempty" json:"is_crawl,omitempty"`
	From    int    `yaml:"from,omitempty" json:"from,omitempty"`
	To      int    `yaml:"to,omitempty" json:"to,omitempty"`
}

// Elem describes how to locate a value in a crawled HTML page: a container
// selector, a content selector, a condition selector, the attribute to
// read, and a splitter string for post-processing the attribute value.
type Elem struct {
	Container string `yaml:"container,omitempty" json:"container,omitempty"`
	Content   string `yaml:"content,omitempty" json:"content,omitempty"`
	Condition string `yaml:"condition,omitempty" json:"condition,omitempty"`
	Attr      string `yaml:"attr,omitempty" json:"attr,omitempty"`
	Splitter  string `yaml:"splitter,omitempty" json:"splitter,omitempty"`
}

// Crawl groups the URL template, the crawl categories, and the HTML element
// descriptors used during the initial (max-page) and per-page crawls.
type Crawl struct {
	*URL
	Types        []*Type `yaml:"types,omitempty" json:"types,omitempty"`
	InitElement  *Elem   `yaml:"init_element,omitempty" json:"init_element,omitempty"`
	CrawlElement *Elem   `yaml:"crawl_element,omitempty" json:"crawl_element,omitempty"`
}

// FilterType holds the regular expressions used to classify results into
// domains and IP addresses.
type FilterType struct {
	Domain string `yaml:"domain,omitempty" json:"domain,omitempty"`
	IP     string `yaml:"ip,omitempty" json:"ip,omitempty"`
}

// Filter holds result-filtering settings: the classification regexps and
// the minimum blocked percentage for a result to be kept.
type Filter struct {
	Regexp  *FilterType `yaml:"regexp,omitempty" json:"regexp,omitempty"`
	Percent int         `yaml:"percent,omitempty" json:"percent,omitempty"`
}

// Customize holds runtime and output tuning options.
type Customize struct {
	CPUCores       int    `yaml:"cpu_cores,omitempty" json:"cpu_cores,omitempty"`
	MaxCapacity    int    `yaml:"max_capacity,omitempty" json:"max_capacity,omitempty"`
	OutputDir      string `yaml:"output_dir,omitempty" json:"output_dir,omitempty"`
	RawFilename    string `yaml:"raw_filename,omitempty" json:"raw_filename,omitempty"`
	DomainFilename string `yaml:"domain_filename,omitempty" json:"domain_filename,omitempty"`
	IPFilename     string `yaml:"ip_filename,omitempty" json:"ip_filename,omitempty"`
}

// RawConfig defines configuration as read directly from config files.
type RawConfig struct {
	*Crawl
	*Filter
	*Customize
}
// ParseRawConfig loads configuration from a YAML (.yaml/.yml) or JSON
// (.json) file into r. Any other extension yields
// ErrConfigFormatNotSupported.
func (r *RawConfig) ParseRawConfig(configFile string) error {
	switch {
	case strings.HasSuffix(configFile, ".yaml"), strings.HasSuffix(configFile, ".yml"):
		configBytes, err := os.ReadFile(configFile)
		if err != nil {
			return err
		}
		if err := yaml.Unmarshal(configBytes, &r); err != nil {
			return err
		}
	case strings.HasSuffix(configFile, ".json"):
		// Bug fix: the file contents must be read from disk;
		// json.Marshal(configFile) only serialized the file *name*.
		configBytes, err := os.ReadFile(configFile)
		if err != nil {
			return err
		}
		if err := json.Unmarshal(configBytes, &r); err != nil {
			return err
		}
	default:
		return ErrConfigFormatNotSupported
	}
	return nil
}
// GreatFireURL holds the concrete URL components assembled for one crawl
// category (base + category + page suffixes).
type GreatFireURL struct {
	BaseURL       string
	TypeURL       string
	SuffixURL     string
	InitSuffixURL string
}

// CrawlType defines one crawl category (e.g. AlexaTop1000) at runtime: its
// URL parts, enabled flag, discovered page count, configured page range,
// element descriptors, referer, and the generated list of page URLs.
type CrawlType struct {
	*GreatFireURL
	Name         string
	IsCrawl      bool
	MaxPage      int
	From, To     int
	InitElement  *Elem
	CrawlElement *Elem
	CrawlReferer string
	CrawlList    []string
}

// Config defines the real configuration used in the program, generated from
// a RawConfig by GenerateConfig.
type Config struct {
	*Filter
	*Customize
	Types []*CrawlType
}
// GenerateConfig fills c from the raw on-disk configuration r.
//
// Filter and Customize are copied over as-is; every raw crawl type is
// expanded into a CrawlType carrying the shared URL template and element
// descriptors. A missing section yields the matching sentinel error.
func (c *Config) GenerateConfig(r *RawConfig) error {
	if r == nil {
		return ErrConfigIsEmpty
	}
	if r.Filter == nil {
		return ErrFilterConfigIsEmpty
	}
	c.Filter = r.Filter
	if r.Customize == nil {
		return ErrCustomizeConfigIsEmpty
	}
	c.Customize = r.Customize
	if r.Crawl == nil || r.Crawl.Types == nil {
		return ErrCrawlConfigIsEmpty
	}
	c.Types = make([]*CrawlType, len(r.Crawl.Types))
	for i, rawType := range r.Crawl.Types {
		c.Types[i] = &CrawlType{
			GreatFireURL: &GreatFireURL{
				BaseURL:       r.Crawl.URL.BaseURL,
				TypeURL:       rawType.TypeURL,
				SuffixURL:     r.Crawl.URL.SuffixURL,
				InitSuffixURL: r.Crawl.URL.InitSuffixURL,
			},
			Name:         rawType.Name,
			IsCrawl:      rawType.IsCrawl,
			From:         rawType.From,
			To:           rawType.To,
			InitElement:  r.Crawl.InitElement,
			CrawlElement: r.Crawl.CrawlElement,
			CrawlReferer: rawType.Referer,
		}
	}
	return nil
}
// SetNumCPU sets the maximum number of Goroutines (GOMAXPROCS).
//
// If the configured cpu_cores exceeds the machine's core count, that value
// is used directly. Otherwise the detected core count is scaled up
// heuristically (1 -> 3, 2 -> 6, 3 -> 6, 4 -> 10, else +50%) to allow more
// concurrent crawler goroutines than physical cores, and the result is
// written back into c.Customize.CPUCores.
func (c *Config) SetNumCPU() error {
	if c.Customize != nil {
		setNum := c.Customize.CPUCores
		originalNumCPU := runtime.NumCPU()
		log.Println("Original CPU cores:", originalNumCPU)
		if setNum > originalNumCPU {
			runtime.GOMAXPROCS(setNum)
			log.Println("Now CPU cores:", setNum)
			return nil
		}
		// Scale the detected core count; the crawl is I/O-bound, so more
		// goroutines than cores is acceptable here.
		switch {
		case originalNumCPU == 1:
			originalNumCPU = 3
		case originalNumCPU == 2:
			originalNumCPU *= 3
		case originalNumCPU == 3:
			originalNumCPU *= 2
		case originalNumCPU == 4:
			originalNumCPU = 10
		default:
			originalNumCPU += int(0.5 * float64(originalNumCPU))
		}
		runtime.GOMAXPROCS(originalNumCPU)
		c.Customize.CPUCores = originalNumCPU
		log.Println("Now CPU cores:", originalNumCPU)
		return nil
	} else {
		return ErrCustomizeConfigIsEmpty
	}
}
// CrawlMaxPage gets the max page of crawl type
func (c *Config) CrawlMaxPage() chan error |
// GenerateCrawlList generates the list of page URLs for each enabled crawl
// type, honouring the configured [From, To] page range. A negative To means
// "through the last page". Returns ErrInvalidPageNumber when the range lies
// outside [0, MaxPage] or is inverted.
func (c *Config) GenerateCrawlList() error {
	for idx, crawlType := range c.Types {
		if !crawlType.IsCrawl {
			continue
		}
		maxpage := crawlType.MaxPage
		from := crawlType.From
		to := crawlType.To
		// Negative To: crawl through the last discovered page.
		if to < 0 {
			to = maxpage
		}
		if from < 0 || from > maxpage || to > maxpage || from > to {
			return ErrInvalidPageNumber
		}
		log.Printf("Type %s will be crawled from page %d to %d", crawlType.Name, from, to)
		list := make([]string, 0, maxpage)
		for i := from; i <= to; i++ {
			url := crawlType.BaseURL + crawlType.TypeURL + crawlType.SuffixURL + strconv.Itoa(i)
			list = append(list, url)
		}
		c.Types[idx].CrawlList = list
	}
	return nil
}
// Crawl fetches every page URL of every crawl type concurrently and pushes
// each (domain, blocked-percent) pair onto rawResultChan. Concurrency is
// capped by a worker pool of c.Customize.CPUCores slots; the channel is
// closed once all pages are done.
func (c *Config) Crawl(rawResultChan chan map[*string]int) {
	var wg sync.WaitGroup
	workerPool := make(chan struct{}, c.Customize.CPUCores)
	for _, crawlType := range c.Types {
		for _, url := range crawlType.CrawlList {
			workerPool <- struct{}{}
			wg.Add(1)
			go func(url string, crawlType *CrawlType) {
				// Bug fix: wg.Done and the pool-slot release now run inside
				// the deferred function. Previously a recovered panic (e.g.
				// raised via utils.Must) returned before reaching
				// wg.Done()/<-workerPool, deadlocking wg.Wait() and leaking
				// a worker slot.
				defer func() {
					if err := recover(); err != nil {
						log.Printf("Goroutine panic: fetching %v : %v\n", url, err)
					}
					wg.Done()
					<-workerPool
				}()
				container := crawlType.CrawlElement.Container
				content := crawlType.CrawlElement.Content
				attr := crawlType.CrawlElement.Attr
				condition := crawlType.CrawlElement.Condition
				log.Println("Crawling:", url)
				resp, err := crawler.Crawl(url, crawlType.CrawlReferer)
				utils.Must(err)
				defer resp.Body.Close()
				// Responses are gzip-compressed.
				gzipReader, err := gzip.NewReader(resp.Body)
				utils.Must(err)
				defer gzipReader.Close()
				// Load the HTML document
				doc, err := goquery.NewDocumentFromReader(gzipReader)
				utils.Must(err)
				// Find items
				doc.Find(container).Each(func(i int, s *goquery.Selection) {
					percent := 0
					// For each item found, get contents
					rawDomain, _ := s.Find(content).Attr(attr)
					// Strip the trailing '%' before parsing the number.
					if blockedPercentage := strings.TrimSpace(s.Find(condition).Text()); blockedPercentage != "" {
						percent, _ = strconv.Atoi(blockedPercentage[:len(blockedPercentage)-1])
					}
					rawResult := make(map[*string]int)
					rawResult[&rawDomain] = percent
					rawResultChan <- rawResult
				})
			}(url, crawlType)
		}
	}
	wg.Wait()
	close(rawResultChan)
}
// FilterAndWrite filters crawled HTML content and writes results to files.
//
// It drains rawResultChan, logging every "<url> | <percent>" pair to the
// raw file, keeps URLs whose blocked percentage reaches c.Filter.Percent,
// splits matches into domains and IPs, dedupes and sorts both sets, and
// writes them to the domain and IP output files.
func (c *Config) FilterAndWrite(rawResultChan chan map[*string]int) {
	defer func() {
		if err := recover(); err != nil {
			log.Printf("Runtime panic: %v\n", err)
		}
	}()
	// Make output dir
	utils.Must(os.MkdirAll(filepath.Join("./", c.Customize.OutputDir), 0755))
	// Bug fix: O_TRUNC added to all three opens. Without it, rewriting an
	// existing file with shorter content leaves stale trailing bytes from
	// the previous run.
	rawDomainFile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.RawFilename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	utils.Must(err)
	defer rawDomainFile.Close()
	finalDomainFile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.DomainFilename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	utils.Must(err)
	defer finalDomainFile.Close()
	finalIPfile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.IPFilename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	utils.Must(err)
	defer finalIPfile.Close()
	resultMap := make(map[string]struct{})
	domainReg := regexp.MustCompile(c.Filter.Regexp.Domain)
	// Renamed from rawReader: this is a buffered *writer*.
	rawWriter := bufio.NewWriter(rawDomainFile)
	for result := range rawResultChan {
		for url, percent := range result {
			url := strings.ToLower(*url)
			// Write raw results to raw.txt file
			rawWriter.WriteString(fmt.Sprintf("%s | %d\n", url, percent))
			if percent >= c.Filter.Percent {
				matchList := domainReg.FindStringSubmatch(url)
				if len(matchList) > 0 {
					// NOTE(review): takes the second-to-last capture group —
					// confirm against the configured domain regexp.
					domain := matchList[len(matchList)-2]
					// Write filtered results to console
					fmt.Printf("%s | %d\n", domain, percent)
					// Write filtered results to map to make them unique
					resultMap[domain] = struct{}{}
				}
			}
		}
	}
	rawWriter.Flush()
	resultSlice := make([]string, 0, len(resultMap))
	ipSlice := make([]string, 0, len(resultMap))
	ipReg := regexp.MustCompile(c.Filter.Regexp.IP)
	for domainOrIP := range resultMap {
		ipElem := ipReg.FindStringSubmatch(domainOrIP)
		if len(ipElem) > 0 {
			ipSlice = append(ipSlice, ipElem[0])
			continue
		}
		resultSlice = append(resultSlice, domainOrIP)
	}
	// Sort by label count so parent domains precede subdomains before the
	// tree-based dedupe.
	sort.SliceStable(resultSlice, func(i, j int) bool {
		return len(strings.Split(resultSlice[i], ".")) < len(strings.Split(resultSlice[j], "."))
	})
	resultSlice = buildTreeAndUnique(resultSlice)
	sort.Strings(resultSlice)
	// Write filtered result to domains.txt file
	domainWriter := bufio.NewWriter(finalDomainFile)
	for _, domain := range resultSlice {
		domainWriter.WriteString(fmt.Sprintf("%s\n", domain))
	}
	domainWriter.Flush()
	// Sort IP slice
	sort.Strings(ipSlice)
	// Write IP results to ip.txt file
	ipWriter := bufio.NewWriter(finalIPfile)
	for _, ip := range ipSlice {
		ipWriter.WriteString(fmt.Sprintf("%s\n", ip))
	}
	ipWriter.Flush()
}
| {
var wg sync.WaitGroup
wg.Add(len(c.Types))
e := make(chan error, len(c.Types))
for idx, crawlType := range c.Types {
go func(idx int, crawlType *CrawlType) {
crawlInitURL := crawlType.BaseURL + crawlType.TypeURL + crawlType.InitSuffixURL
crawlName := crawlType.Name
crawlContent := crawlType.InitElement.Content
switch crawlType.IsCrawl {
case false:
log.Printf("Type %s has been disabled to crawl.\n", crawlName)
default:
resp, err := crawler.Crawl(crawlInitURL, crawlType.CrawlReferer)
if err != nil {
e <- err
return
}
defer resp.Body.Close()
gzipReader, err := gzip.NewReader(resp.Body)
if err != nil {
e <- err
return
}
defer gzipReader.Close()
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(gzipReader)
if err != nil {
e <- err
return
}
// Find items
doc.Find(crawlType.InitElement.Container).Each(func(i int, s *goquery.Selection) {
// For each item found, get contents
if lastPageHref, exists := s.Find(crawlContent).Attr(crawlType.InitElement.Attr); !exists {
log.Printf("Cannot find HTML element `%s`\n", crawlContent)
} else {
matchedSlice := strings.Split(lastPageHref, crawlType.InitElement.Splitter)
if len(matchedSlice) == 2 {
maxPageString := matchedSlice[1]
if maxpage, err := strconv.Atoi(maxPageString); err != nil {
log.Printf("Failed to get max page of type %s.\n", crawlName)
} else {
c.Types[idx].MaxPage = maxpage
log.Printf("Type %s has pages: %d\n", crawlName, maxpage+1)
}
}
}
})
}
wg.Done()
}(idx, crawlType)
}
wg.Wait()
defer close(e)
return e
} | identifier_body |
program.go | package main
import (
"bufio"
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"github.com/PuerkitoBio/goquery"
"gopkg.in/yaml.v2"
"github.com/Loyalsoldier/cn-blocked-domain/crawler"
"github.com/Loyalsoldier/cn-blocked-domain/utils"
)
var (
	// Sentinel errors returned by config parsing and generation. Error
	// strings are lower-case, matching Go convention (staticcheck ST1005)
	// and the other messages in this set.
	ErrConfigFormatNotSupported = errors.New("config format not supported")
	ErrConfigIsEmpty            = errors.New("config is empty")
	ErrCrawlConfigIsEmpty       = errors.New("crawl config is empty")
	ErrFilterConfigIsEmpty      = errors.New("filter config is empty")
	ErrCustomizeConfigIsEmpty   = errors.New("customize config is empty")
	ErrInvalidPageNumber        = errors.New("invalid page number")
)
type URL struct {
BaseURL string `yaml:"base_url,omitempty" json:"base_url,omitempty"`
InitSuffixURL string `yaml:"init_suffix_url,omitempty" json:"init_suffix_url,omitempty"`
SuffixURL string `yaml:"suffix_url,omitempty" json:"suffix_url,omitempty"`
}
type Type struct {
Name string `yaml:"name,omitempty" json:"name,omitempty"`
TypeURL string `yaml:"type_url,omitempty" json:"type_url,omitempty"`
Referer string `yaml:"referer,omitempty" json:"referer,omitempty"`
IsCrawl bool `yaml:"is_crawl,omitempty" json:"is_crawl,omitempty"`
From int `yaml:"from,omitempty" json:"from,omitempty"`
To int `yaml:"to,omitempty" json:"to,omitempty"`
}
type Elem struct {
Container string `yaml:"container,omitempty" json:"container,omitempty"`
Content string `yaml:"content,omitempty" json:"content,omitempty"`
Condition string `yaml:"condition,omitempty" json:"condition,omitempty"`
Attr string `yaml:"attr,omitempty" json:"attr,omitempty"`
Splitter string `yaml:"splitter,omitempty" json:"splitter,omitempty"`
}
type Crawl struct {
*URL
Types []*Type `yaml:"types,omitempty" json:"types,omitempty"`
InitElement *Elem `yaml:"init_element,omitempty" json:"init_element,omitempty"`
CrawlElement *Elem `yaml:"crawl_element,omitempty" json:"crawl_element,omitempty"`
}
type FilterType struct {
Domain string `yaml:"domain,omitempty" json:"domain,omitempty"`
IP string `yaml:"ip,omitempty" json:"ip,omitempty"`
}
type Filter struct {
Regexp *FilterType `yaml:"regexp,omitempty" json:"regexp,omitempty"`
Percent int `yaml:"percent,omitempty" json:"percent,omitempty"`
}
type Customize struct {
CPUCores int `yaml:"cpu_cores,omitempty" json:"cpu_cores,omitempty"`
MaxCapacity int `yaml:"max_capacity,omitempty" json:"max_capacity,omitempty"`
OutputDir string `yaml:"output_dir,omitempty" json:"output_dir,omitempty"`
RawFilename string `yaml:"raw_filename,omitempty" json:"raw_filename,omitempty"`
DomainFilename string `yaml:"domain_filename,omitempty" json:"domain_filename,omitempty"`
IPFilename string `yaml:"ip_filename,omitempty" json:"ip_filename,omitempty"`
}
// RawConfig defines configuration from config files
type RawConfig struct {
*Crawl
*Filter
*Customize
}
// ParseRawConfig loads configuration from a YAML (.yaml/.yml) or JSON
// (.json) file into r. Any other extension yields
// ErrConfigFormatNotSupported.
func (r *RawConfig) ParseRawConfig(configFile string) error {
	switch {
	case strings.HasSuffix(configFile, ".yaml"), strings.HasSuffix(configFile, ".yml"):
		configBytes, err := os.ReadFile(configFile)
		if err != nil {
			return err
		}
		if err := yaml.Unmarshal(configBytes, &r); err != nil {
			return err
		}
	case strings.HasSuffix(configFile, ".json"):
		// Bug fix: the file contents must be read from disk;
		// json.Marshal(configFile) only serialized the file *name*.
		configBytes, err := os.ReadFile(configFile)
		if err != nil {
			return err
		}
		if err := json.Unmarshal(configBytes, &r); err != nil {
			return err
		}
	default:
		return ErrConfigFormatNotSupported
	}
	return nil
}
// GreatFireURL defines the structure of the format of URL
type GreatFireURL struct {
BaseURL string
TypeURL string
SuffixURL string
InitSuffixURL string
}
// CrawlType defines the structure of AlexaTop1000 type of URLs and list
type CrawlType struct {
*GreatFireURL
Name string
IsCrawl bool
MaxPage int
From, To int
InitElement *Elem
CrawlElement *Elem
CrawlReferer string
CrawlList []string
}
// Config defines the real configuration used in the program
type Config struct {
*Filter
*Customize
Types []*CrawlType
}
// GenerateConfig generates raw config to config that can be used in the program
func (c *Config) GenerateConfig(r *RawConfig) error {
if r != nil {
if r.Filter != nil {
c.Filter = r.Filter
} else {
return ErrFilterConfigIsEmpty
}
if r.Customize != nil {
c.Customize = r.Customize
} else {
return ErrCustomizeConfigIsEmpty
}
if r.Crawl != nil && r.Crawl.Types != nil {
c.Types = make([]*CrawlType, len(r.Crawl.Types))
for i := 0; i < len(r.Crawl.Types); i++ {
rawType := r.Crawl.Types[i]
c.Types[i] = &CrawlType{
GreatFireURL: &GreatFireURL{
BaseURL: r.Crawl.URL.BaseURL,
TypeURL: rawType.TypeURL,
SuffixURL: r.Crawl.URL.SuffixURL,
InitSuffixURL: r.Crawl.URL.InitSuffixURL,
},
Name: rawType.Name,
IsCrawl: rawType.IsCrawl,
From: rawType.From,
To: rawType.To,
InitElement: r.Crawl.InitElement,
CrawlElement: r.Crawl.CrawlElement,
CrawlReferer: rawType.Referer,
}
}
return nil
} else {
return ErrCrawlConfigIsEmpty
}
}
return ErrConfigIsEmpty
}
// SetNumCPU sets the maximum number of Goroutines
func (c *Config) SetNumCPU() error {
if c.Customize != nil {
setNum := c.Customize.CPUCores
originalNumCPU := runtime.NumCPU()
log.Println("Original CPU cores:", originalNumCPU)
if setNum > originalNumCPU {
runtime.GOMAXPROCS(setNum)
log.Println("Now CPU cores:", setNum)
return nil
}
switch {
case originalNumCPU == 1:
originalNumCPU = 3
case originalNumCPU == 2:
originalNumCPU *= 3
case originalNumCPU == 3:
originalNumCPU *= 2
case originalNumCPU == 4:
originalNumCPU = 10
default:
originalNumCPU += int(0.5 * float64(originalNumCPU))
}
runtime.GOMAXPROCS(originalNumCPU)
c.Customize.CPUCores = originalNumCPU
log.Println("Now CPU cores:", originalNumCPU)
return nil
} else {
return ErrCustomizeConfigIsEmpty
}
}
// CrawlMaxPage gets the max page of crawl type
func (c *Config) CrawlMaxPage() chan error {
var wg sync.WaitGroup
wg.Add(len(c.Types))
e := make(chan error, len(c.Types))
for idx, crawlType := range c.Types {
go func(idx int, crawlType *CrawlType) {
crawlInitURL := crawlType.BaseURL + crawlType.TypeURL + crawlType.InitSuffixURL
crawlName := crawlType.Name
crawlContent := crawlType.InitElement.Content
switch crawlType.IsCrawl {
case false:
log.Printf("Type %s has been disabled to crawl.\n", crawlName)
default:
resp, err := crawler.Crawl(crawlInitURL, crawlType.CrawlReferer)
if err != nil {
e <- err
return
}
defer resp.Body.Close()
gzipReader, err := gzip.NewReader(resp.Body)
if err != nil {
e <- err
return
}
defer gzipReader.Close()
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(gzipReader)
if err != nil {
e <- err
return
}
// Find items
doc.Find(crawlType.InitElement.Container).Each(func(i int, s *goquery.Selection) {
// For each item found, get contents
if lastPageHref, exists := s.Find(crawlContent).Attr(crawlType.InitElement.Attr); !exists {
log.Printf("Cannot find HTML element `%s`\n", crawlContent)
} else {
matchedSlice := strings.Split(lastPageHref, crawlType.InitElement.Splitter)
if len(matchedSlice) == 2 {
maxPageString := matchedSlice[1]
if maxpage, err := strconv.Atoi(maxPageString); err != nil {
log.Printf("Failed to get max page of type %s.\n", crawlName)
} else {
c.Types[idx].MaxPage = maxpage
log.Printf("Type %s has pages: %d\n", crawlName, maxpage+1)
}
}
}
})
}
wg.Done()
}(idx, crawlType)
}
wg.Wait()
defer close(e)
return e
}
// GenerateCrawlList generates lists for each crawl type to be crawled latter
func (c *Config) GenerateCrawlList() error {
for idx, crawlType := range c.Types {
if !crawlType.IsCrawl {
continue
}
maxpage := crawlType.MaxPage
from := crawlType.From
to := crawlType.To
if to < 0 {
to = maxpage
}
if from < 0 || from > maxpage || to > maxpage || from > to {
return ErrInvalidPageNumber
}
log.Printf("Type %s will be crawled from page %d to %d", crawlType.Name, from, to)
list := make([]string, 0, maxpage)
for i := from; i <= to; i++ {
url := crawlType.BaseURL + crawlType.TypeURL + crawlType.SuffixURL + strconv.Itoa(i)
list = append(list, url)
}
c.Types[idx].CrawlList = list
}
return nil
}
// Crawl gets HTML content for crawl types
func (c *Config) Crawl(rawResultChan chan map[*string]int) {
var wg sync.WaitGroup
workerPool := make(chan struct{}, c.Customize.CPUCores)
for _, crawlType := range c.Types {
for _, url := range crawlType.CrawlList {
workerPool <- struct{}{}
wg.Add(1)
go func(url string, crawlType *CrawlType) {
defer func() {
if err := recover(); err != nil {
log.Printf("Goroutine panic: fetching %v : %v\n", url, err)
}
}()
container := crawlType.CrawlElement.Container
content := crawlType.CrawlElement.Content
attr := crawlType.CrawlElement.Attr
condition := crawlType.CrawlElement.Condition
log.Println("Crawling:", url)
resp, err := crawler.Crawl(url, crawlType.CrawlReferer)
utils.Must(err)
defer resp.Body.Close()
gzipReader, err := gzip.NewReader(resp.Body)
utils.Must(err)
defer gzipReader.Close()
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(gzipReader)
utils.Must(err)
// Find items
doc.Find(container).Each(func(i int, s *goquery.Selection) {
percent := 0
// For each item found, get contents
rawDomain, _ := s.Find(content).Attr(attr)
if blockedPercentage := strings.TrimSpace(s.Find(condition).Text()); blockedPercentage != "" {
percent, _ = strconv.Atoi(blockedPercentage[:len(blockedPercentage)-1])
}
rawResult := make(map[*string]int)
rawResult[&rawDomain] = percent
rawResultChan <- rawResult
})
wg.Done()
<-workerPool
}(url, crawlType)
}
}
wg.Wait()
close(rawResultChan)
}
// FilterAndWrite filters HTML conent and write results to files
func (c *Config) FilterAndWrite(rawResultChan chan map[*string]int) {
defer func() {
if err := recover(); err != nil {
log.Printf("Runtime panic: %v\n", err)
}
}()
// Make output dir
utils.Must(os.MkdirAll(filepath.Join("./", c.Customize.OutputDir), 0755))
rawDomainFile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.RawFilename), os.O_WRONLY|os.O_CREATE, 0644)
utils.Must(err)
defer rawDomainFile.Close()
finalDomainFile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.DomainFilename), os.O_WRONLY|os.O_CREATE, 0644)
utils.Must(err)
defer finalDomainFile.Close()
finalIPfile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.IPFilename), os.O_WRONLY|os.O_CREATE, 0644)
utils.Must(err)
defer finalIPfile.Close()
resultMap := make(map[string]struct{})
domainReg := regexp.MustCompile(c.Filter.Regexp.Domain)
rawReader := bufio.NewWriter(rawDomainFile)
for result := range rawResultChan {
for url, percent := range result {
url := strings.ToLower(*url)
// Write raw results to raw.txt file
rawReader.WriteString(fmt.Sprintf("%s | %d\n", url, percent))
if percent >= c.Filter.Percent {
matchList := domainReg.FindStringSubmatch(url)
if len(matchList) > 0 {
domain := matchList[len(matchList)-2]
// Write filtered results to console
fmt.Printf("%s | %d\n", domain, percent)
// Write filtered results to map to make them unique
resultMap[domain] = struct{}{}
}
}
}
}
rawReader.Flush()
| for domainOrIP := range resultMap {
ipElem := ipReg.FindStringSubmatch(domainOrIP)
if len(ipElem) > 0 {
ipSlice = append(ipSlice, ipElem[0])
continue
}
resultSlice = append(resultSlice, domainOrIP)
}
// Unique and sort domain slice
sort.SliceStable(resultSlice, func(i, j int) bool {
return len(strings.Split(resultSlice[i], ".")) < len(strings.Split(resultSlice[j], "."))
})
resultSlice = buildTreeAndUnique(resultSlice)
sort.Strings(resultSlice)
// Write filtered result to domains.txt file
domainReader := bufio.NewWriter(finalDomainFile)
for _, domain := range resultSlice {
domainReader.WriteString(fmt.Sprintf("%s\n", domain))
}
domainReader.Flush()
// Sort IP slice
sort.Strings(ipSlice)
// Write IP results to ip.txt file
ipReader := bufio.NewWriter(finalIPfile)
for _, ip := range ipSlice {
ipReader.WriteString(fmt.Sprintf("%s\n", ip))
}
ipReader.Flush()
} | resultSlice := make([]string, 0, len(resultMap))
ipSlice := make([]string, 0, len(resultMap))
ipReg := regexp.MustCompile(c.Filter.Regexp.IP) | random_line_split |
program.go | package main
import (
"bufio"
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"github.com/PuerkitoBio/goquery"
"gopkg.in/yaml.v2"
"github.com/Loyalsoldier/cn-blocked-domain/crawler"
"github.com/Loyalsoldier/cn-blocked-domain/utils"
)
var (
ErrConfigFormatNotSupported = errors.New("config format not supported")
ErrConfigIsEmpty = errors.New("config is empty")
ErrCrawlConfigIsEmpty = errors.New("crawl config is empty")
ErrFilterConfigIsEmpty = errors.New("filter config is empty")
ErrCustomizeConfigIsEmpty = errors.New("Customize config is empty")
ErrInvalidPageNumber = errors.New("invalid page number")
)
type URL struct {
BaseURL string `yaml:"base_url,omitempty" json:"base_url,omitempty"`
InitSuffixURL string `yaml:"init_suffix_url,omitempty" json:"init_suffix_url,omitempty"`
SuffixURL string `yaml:"suffix_url,omitempty" json:"suffix_url,omitempty"`
}
type Type struct {
Name string `yaml:"name,omitempty" json:"name,omitempty"`
TypeURL string `yaml:"type_url,omitempty" json:"type_url,omitempty"`
Referer string `yaml:"referer,omitempty" json:"referer,omitempty"`
IsCrawl bool `yaml:"is_crawl,omitempty" json:"is_crawl,omitempty"`
From int `yaml:"from,omitempty" json:"from,omitempty"`
To int `yaml:"to,omitempty" json:"to,omitempty"`
}
type Elem struct {
Container string `yaml:"container,omitempty" json:"container,omitempty"`
Content string `yaml:"content,omitempty" json:"content,omitempty"`
Condition string `yaml:"condition,omitempty" json:"condition,omitempty"`
Attr string `yaml:"attr,omitempty" json:"attr,omitempty"`
Splitter string `yaml:"splitter,omitempty" json:"splitter,omitempty"`
}
type Crawl struct {
*URL
Types []*Type `yaml:"types,omitempty" json:"types,omitempty"`
InitElement *Elem `yaml:"init_element,omitempty" json:"init_element,omitempty"`
CrawlElement *Elem `yaml:"crawl_element,omitempty" json:"crawl_element,omitempty"`
}
type FilterType struct {
Domain string `yaml:"domain,omitempty" json:"domain,omitempty"`
IP string `yaml:"ip,omitempty" json:"ip,omitempty"`
}
type Filter struct {
Regexp *FilterType `yaml:"regexp,omitempty" json:"regexp,omitempty"`
Percent int `yaml:"percent,omitempty" json:"percent,omitempty"`
}
type Customize struct {
CPUCores int `yaml:"cpu_cores,omitempty" json:"cpu_cores,omitempty"`
MaxCapacity int `yaml:"max_capacity,omitempty" json:"max_capacity,omitempty"`
OutputDir string `yaml:"output_dir,omitempty" json:"output_dir,omitempty"`
RawFilename string `yaml:"raw_filename,omitempty" json:"raw_filename,omitempty"`
DomainFilename string `yaml:"domain_filename,omitempty" json:"domain_filename,omitempty"`
IPFilename string `yaml:"ip_filename,omitempty" json:"ip_filename,omitempty"`
}
// RawConfig defines configuration from config files
type RawConfig struct {
*Crawl
*Filter
*Customize
}
func (r *RawConfig) ParseRawConfig(configFile string) error {
switch {
case strings.HasSuffix(configFile, ".yaml"), strings.HasSuffix(configFile, ".yml"):
configBytes, err := os.ReadFile(configFile)
if err != nil {
return err
}
if err := yaml.Unmarshal(configBytes, &r); err != nil {
return err
}
case strings.HasSuffix(configFile, ".json"):
configBytes, err := json.Marshal(configFile)
if err != nil {
return err
}
if err := json.Unmarshal(configBytes, &r); err != nil {
return err
}
default:
return ErrConfigFormatNotSupported
}
return nil
}
// GreatFireURL defines the structure of the format of URL
type GreatFireURL struct {
BaseURL string
TypeURL string
SuffixURL string
InitSuffixURL string
}
// CrawlType defines the structure of AlexaTop1000 type of URLs and list
type CrawlType struct {
*GreatFireURL
Name string
IsCrawl bool
MaxPage int
From, To int
InitElement *Elem
CrawlElement *Elem
CrawlReferer string
CrawlList []string
}
// Config defines the real configuration used in the program
type Config struct {
*Filter
*Customize
Types []*CrawlType
}
// GenerateConfig generates raw config to config that can be used in the program
func (c *Config) GenerateConfig(r *RawConfig) error {
if r != nil {
if r.Filter != nil {
c.Filter = r.Filter
} else {
return ErrFilterConfigIsEmpty
}
if r.Customize != nil {
c.Customize = r.Customize
} else {
return ErrCustomizeConfigIsEmpty
}
if r.Crawl != nil && r.Crawl.Types != nil {
c.Types = make([]*CrawlType, len(r.Crawl.Types))
for i := 0; i < len(r.Crawl.Types); i++ {
rawType := r.Crawl.Types[i]
c.Types[i] = &CrawlType{
GreatFireURL: &GreatFireURL{
BaseURL: r.Crawl.URL.BaseURL,
TypeURL: rawType.TypeURL,
SuffixURL: r.Crawl.URL.SuffixURL,
InitSuffixURL: r.Crawl.URL.InitSuffixURL,
},
Name: rawType.Name,
IsCrawl: rawType.IsCrawl,
From: rawType.From,
To: rawType.To,
InitElement: r.Crawl.InitElement,
CrawlElement: r.Crawl.CrawlElement,
CrawlReferer: rawType.Referer,
}
}
return nil
} else {
return ErrCrawlConfigIsEmpty
}
}
return ErrConfigIsEmpty
}
// SetNumCPU sets the maximum number of Goroutines
func (c *Config) SetNumCPU() error {
if c.Customize != nil {
setNum := c.Customize.CPUCores
originalNumCPU := runtime.NumCPU()
log.Println("Original CPU cores:", originalNumCPU)
if setNum > originalNumCPU {
runtime.GOMAXPROCS(setNum)
log.Println("Now CPU cores:", setNum)
return nil
}
switch {
case originalNumCPU == 1:
originalNumCPU = 3
case originalNumCPU == 2:
originalNumCPU *= 3
case originalNumCPU == 3:
originalNumCPU *= 2
case originalNumCPU == 4:
originalNumCPU = 10
default:
originalNumCPU += int(0.5 * float64(originalNumCPU))
}
runtime.GOMAXPROCS(originalNumCPU)
c.Customize.CPUCores = originalNumCPU
log.Println("Now CPU cores:", originalNumCPU)
return nil
} else {
return ErrCustomizeConfigIsEmpty
}
}
// CrawlMaxPage gets the max page of crawl type
func (c *Config) CrawlMaxPage() chan error {
var wg sync.WaitGroup
wg.Add(len(c.Types))
e := make(chan error, len(c.Types))
for idx, crawlType := range c.Types {
go func(idx int, crawlType *CrawlType) {
crawlInitURL := crawlType.BaseURL + crawlType.TypeURL + crawlType.InitSuffixURL
crawlName := crawlType.Name
crawlContent := crawlType.InitElement.Content
switch crawlType.IsCrawl {
case false:
log.Printf("Type %s has been disabled to crawl.\n", crawlName)
default:
resp, err := crawler.Crawl(crawlInitURL, crawlType.CrawlReferer)
if err != nil {
e <- err
return
}
defer resp.Body.Close()
gzipReader, err := gzip.NewReader(resp.Body)
if err != nil {
e <- err
return
}
defer gzipReader.Close()
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(gzipReader)
if err != nil {
e <- err
return
}
// Find items
doc.Find(crawlType.InitElement.Container).Each(func(i int, s *goquery.Selection) {
// For each item found, get contents
if lastPageHref, exists := s.Find(crawlContent).Attr(crawlType.InitElement.Attr); !exists {
log.Printf("Cannot find HTML element `%s`\n", crawlContent)
} else {
matchedSlice := strings.Split(lastPageHref, crawlType.InitElement.Splitter)
if len(matchedSlice) == 2 {
maxPageString := matchedSlice[1]
if maxpage, err := strconv.Atoi(maxPageString); err != nil {
log.Printf("Failed to get max page of type %s.\n", crawlName)
} else {
c.Types[idx].MaxPage = maxpage
log.Printf("Type %s has pages: %d\n", crawlName, maxpage+1)
}
}
}
})
}
wg.Done()
}(idx, crawlType)
}
wg.Wait()
defer close(e)
return e
}
// GenerateCrawlList generates lists for each crawl type to be crawled latter
func (c *Config) GenerateCrawlList() error {
for idx, crawlType := range c.Types {
if !crawlType.IsCrawl {
continue
}
maxpage := crawlType.MaxPage
from := crawlType.From
to := crawlType.To
if to < 0 {
to = maxpage
}
if from < 0 || from > maxpage || to > maxpage || from > to {
return ErrInvalidPageNumber
}
log.Printf("Type %s will be crawled from page %d to %d", crawlType.Name, from, to)
list := make([]string, 0, maxpage)
for i := from; i <= to; i++ {
url := crawlType.BaseURL + crawlType.TypeURL + crawlType.SuffixURL + strconv.Itoa(i)
list = append(list, url)
}
c.Types[idx].CrawlList = list
}
return nil
}
// Crawl gets HTML content for crawl types
func (c *Config) | (rawResultChan chan map[*string]int) {
var wg sync.WaitGroup
workerPool := make(chan struct{}, c.Customize.CPUCores)
for _, crawlType := range c.Types {
for _, url := range crawlType.CrawlList {
workerPool <- struct{}{}
wg.Add(1)
go func(url string, crawlType *CrawlType) {
defer func() {
if err := recover(); err != nil {
log.Printf("Goroutine panic: fetching %v : %v\n", url, err)
}
}()
container := crawlType.CrawlElement.Container
content := crawlType.CrawlElement.Content
attr := crawlType.CrawlElement.Attr
condition := crawlType.CrawlElement.Condition
log.Println("Crawling:", url)
resp, err := crawler.Crawl(url, crawlType.CrawlReferer)
utils.Must(err)
defer resp.Body.Close()
gzipReader, err := gzip.NewReader(resp.Body)
utils.Must(err)
defer gzipReader.Close()
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(gzipReader)
utils.Must(err)
// Find items
doc.Find(container).Each(func(i int, s *goquery.Selection) {
percent := 0
// For each item found, get contents
rawDomain, _ := s.Find(content).Attr(attr)
if blockedPercentage := strings.TrimSpace(s.Find(condition).Text()); blockedPercentage != "" {
percent, _ = strconv.Atoi(blockedPercentage[:len(blockedPercentage)-1])
}
rawResult := make(map[*string]int)
rawResult[&rawDomain] = percent
rawResultChan <- rawResult
})
wg.Done()
<-workerPool
}(url, crawlType)
}
}
wg.Wait()
close(rawResultChan)
}
// FilterAndWrite filters HTML conent and write results to files
func (c *Config) FilterAndWrite(rawResultChan chan map[*string]int) {
defer func() {
if err := recover(); err != nil {
log.Printf("Runtime panic: %v\n", err)
}
}()
// Make output dir
utils.Must(os.MkdirAll(filepath.Join("./", c.Customize.OutputDir), 0755))
rawDomainFile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.RawFilename), os.O_WRONLY|os.O_CREATE, 0644)
utils.Must(err)
defer rawDomainFile.Close()
finalDomainFile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.DomainFilename), os.O_WRONLY|os.O_CREATE, 0644)
utils.Must(err)
defer finalDomainFile.Close()
finalIPfile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.IPFilename), os.O_WRONLY|os.O_CREATE, 0644)
utils.Must(err)
defer finalIPfile.Close()
resultMap := make(map[string]struct{})
domainReg := regexp.MustCompile(c.Filter.Regexp.Domain)
rawReader := bufio.NewWriter(rawDomainFile)
for result := range rawResultChan {
for url, percent := range result {
url := strings.ToLower(*url)
// Write raw results to raw.txt file
rawReader.WriteString(fmt.Sprintf("%s | %d\n", url, percent))
if percent >= c.Filter.Percent {
matchList := domainReg.FindStringSubmatch(url)
if len(matchList) > 0 {
domain := matchList[len(matchList)-2]
// Write filtered results to console
fmt.Printf("%s | %d\n", domain, percent)
// Write filtered results to map to make them unique
resultMap[domain] = struct{}{}
}
}
}
}
rawReader.Flush()
resultSlice := make([]string, 0, len(resultMap))
ipSlice := make([]string, 0, len(resultMap))
ipReg := regexp.MustCompile(c.Filter.Regexp.IP)
for domainOrIP := range resultMap {
ipElem := ipReg.FindStringSubmatch(domainOrIP)
if len(ipElem) > 0 {
ipSlice = append(ipSlice, ipElem[0])
continue
}
resultSlice = append(resultSlice, domainOrIP)
}
// Unique and sort domain slice
sort.SliceStable(resultSlice, func(i, j int) bool {
return len(strings.Split(resultSlice[i], ".")) < len(strings.Split(resultSlice[j], "."))
})
resultSlice = buildTreeAndUnique(resultSlice)
sort.Strings(resultSlice)
// Write filtered result to domains.txt file
domainReader := bufio.NewWriter(finalDomainFile)
for _, domain := range resultSlice {
domainReader.WriteString(fmt.Sprintf("%s\n", domain))
}
domainReader.Flush()
// Sort IP slice
sort.Strings(ipSlice)
// Write IP results to ip.txt file
ipReader := bufio.NewWriter(finalIPfile)
for _, ip := range ipSlice {
ipReader.WriteString(fmt.Sprintf("%s\n", ip))
}
ipReader.Flush()
}
| Crawl | identifier_name |
program.go | package main
import (
"bufio"
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"github.com/PuerkitoBio/goquery"
"gopkg.in/yaml.v2"
"github.com/Loyalsoldier/cn-blocked-domain/crawler"
"github.com/Loyalsoldier/cn-blocked-domain/utils"
)
var (
ErrConfigFormatNotSupported = errors.New("config format not supported")
ErrConfigIsEmpty = errors.New("config is empty")
ErrCrawlConfigIsEmpty = errors.New("crawl config is empty")
ErrFilterConfigIsEmpty = errors.New("filter config is empty")
ErrCustomizeConfigIsEmpty = errors.New("Customize config is empty")
ErrInvalidPageNumber = errors.New("invalid page number")
)
type URL struct {
BaseURL string `yaml:"base_url,omitempty" json:"base_url,omitempty"`
InitSuffixURL string `yaml:"init_suffix_url,omitempty" json:"init_suffix_url,omitempty"`
SuffixURL string `yaml:"suffix_url,omitempty" json:"suffix_url,omitempty"`
}
type Type struct {
Name string `yaml:"name,omitempty" json:"name,omitempty"`
TypeURL string `yaml:"type_url,omitempty" json:"type_url,omitempty"`
Referer string `yaml:"referer,omitempty" json:"referer,omitempty"`
IsCrawl bool `yaml:"is_crawl,omitempty" json:"is_crawl,omitempty"`
From int `yaml:"from,omitempty" json:"from,omitempty"`
To int `yaml:"to,omitempty" json:"to,omitempty"`
}
type Elem struct {
Container string `yaml:"container,omitempty" json:"container,omitempty"`
Content string `yaml:"content,omitempty" json:"content,omitempty"`
Condition string `yaml:"condition,omitempty" json:"condition,omitempty"`
Attr string `yaml:"attr,omitempty" json:"attr,omitempty"`
Splitter string `yaml:"splitter,omitempty" json:"splitter,omitempty"`
}
type Crawl struct {
*URL
Types []*Type `yaml:"types,omitempty" json:"types,omitempty"`
InitElement *Elem `yaml:"init_element,omitempty" json:"init_element,omitempty"`
CrawlElement *Elem `yaml:"crawl_element,omitempty" json:"crawl_element,omitempty"`
}
type FilterType struct {
Domain string `yaml:"domain,omitempty" json:"domain,omitempty"`
IP string `yaml:"ip,omitempty" json:"ip,omitempty"`
}
type Filter struct {
Regexp *FilterType `yaml:"regexp,omitempty" json:"regexp,omitempty"`
Percent int `yaml:"percent,omitempty" json:"percent,omitempty"`
}
type Customize struct {
CPUCores int `yaml:"cpu_cores,omitempty" json:"cpu_cores,omitempty"`
MaxCapacity int `yaml:"max_capacity,omitempty" json:"max_capacity,omitempty"`
OutputDir string `yaml:"output_dir,omitempty" json:"output_dir,omitempty"`
RawFilename string `yaml:"raw_filename,omitempty" json:"raw_filename,omitempty"`
DomainFilename string `yaml:"domain_filename,omitempty" json:"domain_filename,omitempty"`
IPFilename string `yaml:"ip_filename,omitempty" json:"ip_filename,omitempty"`
}
// RawConfig defines configuration from config files
type RawConfig struct {
*Crawl
*Filter
*Customize
}
func (r *RawConfig) ParseRawConfig(configFile string) error {
switch {
case strings.HasSuffix(configFile, ".yaml"), strings.HasSuffix(configFile, ".yml"):
configBytes, err := os.ReadFile(configFile)
if err != nil {
return err
}
if err := yaml.Unmarshal(configBytes, &r); err != nil {
return err
}
case strings.HasSuffix(configFile, ".json"):
configBytes, err := json.Marshal(configFile)
if err != nil {
return err
}
if err := json.Unmarshal(configBytes, &r); err != nil {
return err
}
default:
return ErrConfigFormatNotSupported
}
return nil
}
// GreatFireURL defines the structure of the format of URL
type GreatFireURL struct {
BaseURL string
TypeURL string
SuffixURL string
InitSuffixURL string
}
// CrawlType defines the structure of AlexaTop1000 type of URLs and list
type CrawlType struct {
*GreatFireURL
Name string
IsCrawl bool
MaxPage int
From, To int
InitElement *Elem
CrawlElement *Elem
CrawlReferer string
CrawlList []string
}
// Config defines the real configuration used in the program
type Config struct {
*Filter
*Customize
Types []*CrawlType
}
// GenerateConfig generates raw config to config that can be used in the program
func (c *Config) GenerateConfig(r *RawConfig) error {
if r != nil {
if r.Filter != nil {
c.Filter = r.Filter
} else {
return ErrFilterConfigIsEmpty
}
if r.Customize != nil {
c.Customize = r.Customize
} else {
return ErrCustomizeConfigIsEmpty
}
if r.Crawl != nil && r.Crawl.Types != nil {
c.Types = make([]*CrawlType, len(r.Crawl.Types))
for i := 0; i < len(r.Crawl.Types); i++ {
rawType := r.Crawl.Types[i]
c.Types[i] = &CrawlType{
GreatFireURL: &GreatFireURL{
BaseURL: r.Crawl.URL.BaseURL,
TypeURL: rawType.TypeURL,
SuffixURL: r.Crawl.URL.SuffixURL,
InitSuffixURL: r.Crawl.URL.InitSuffixURL,
},
Name: rawType.Name,
IsCrawl: rawType.IsCrawl,
From: rawType.From,
To: rawType.To,
InitElement: r.Crawl.InitElement,
CrawlElement: r.Crawl.CrawlElement,
CrawlReferer: rawType.Referer,
}
}
return nil
} else {
return ErrCrawlConfigIsEmpty
}
}
return ErrConfigIsEmpty
}
// SetNumCPU sets the maximum number of Goroutines
func (c *Config) SetNumCPU() error {
if c.Customize != nil {
setNum := c.Customize.CPUCores
originalNumCPU := runtime.NumCPU()
log.Println("Original CPU cores:", originalNumCPU)
if setNum > originalNumCPU {
runtime.GOMAXPROCS(setNum)
log.Println("Now CPU cores:", setNum)
return nil
}
switch {
case originalNumCPU == 1:
originalNumCPU = 3
case originalNumCPU == 2:
originalNumCPU *= 3
case originalNumCPU == 3:
originalNumCPU *= 2
case originalNumCPU == 4:
originalNumCPU = 10
default:
originalNumCPU += int(0.5 * float64(originalNumCPU))
}
runtime.GOMAXPROCS(originalNumCPU)
c.Customize.CPUCores = originalNumCPU
log.Println("Now CPU cores:", originalNumCPU)
return nil
} else {
return ErrCustomizeConfigIsEmpty
}
}
// CrawlMaxPage gets the max page of crawl type
func (c *Config) CrawlMaxPage() chan error {
var wg sync.WaitGroup
wg.Add(len(c.Types))
e := make(chan error, len(c.Types))
for idx, crawlType := range c.Types {
go func(idx int, crawlType *CrawlType) {
crawlInitURL := crawlType.BaseURL + crawlType.TypeURL + crawlType.InitSuffixURL
crawlName := crawlType.Name
crawlContent := crawlType.InitElement.Content
switch crawlType.IsCrawl {
case false:
log.Printf("Type %s has been disabled to crawl.\n", crawlName)
default:
resp, err := crawler.Crawl(crawlInitURL, crawlType.CrawlReferer)
if err != nil {
e <- err
return
}
defer resp.Body.Close()
gzipReader, err := gzip.NewReader(resp.Body)
if err != nil {
e <- err
return
}
defer gzipReader.Close()
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(gzipReader)
if err != nil {
e <- err
return
}
// Find items
doc.Find(crawlType.InitElement.Container).Each(func(i int, s *goquery.Selection) {
// For each item found, get contents
if lastPageHref, exists := s.Find(crawlContent).Attr(crawlType.InitElement.Attr); !exists {
log.Printf("Cannot find HTML element `%s`\n", crawlContent)
} else {
matchedSlice := strings.Split(lastPageHref, crawlType.InitElement.Splitter)
if len(matchedSlice) == 2 {
maxPageString := matchedSlice[1]
if maxpage, err := strconv.Atoi(maxPageString); err != nil {
log.Printf("Failed to get max page of type %s.\n", crawlName)
} else {
c.Types[idx].MaxPage = maxpage
log.Printf("Type %s has pages: %d\n", crawlName, maxpage+1)
}
}
}
})
}
wg.Done()
}(idx, crawlType)
}
wg.Wait()
defer close(e)
return e
}
// GenerateCrawlList generates lists for each crawl type to be crawled latter
func (c *Config) GenerateCrawlList() error {
for idx, crawlType := range c.Types {
if !crawlType.IsCrawl {
continue
}
maxpage := crawlType.MaxPage
from := crawlType.From
to := crawlType.To
if to < 0 {
to = maxpage
}
if from < 0 || from > maxpage || to > maxpage || from > to {
return ErrInvalidPageNumber
}
log.Printf("Type %s will be crawled from page %d to %d", crawlType.Name, from, to)
list := make([]string, 0, maxpage)
for i := from; i <= to; i++ {
url := crawlType.BaseURL + crawlType.TypeURL + crawlType.SuffixURL + strconv.Itoa(i)
list = append(list, url)
}
c.Types[idx].CrawlList = list
}
return nil
}
// Crawl gets HTML content for crawl types
func (c *Config) Crawl(rawResultChan chan map[*string]int) {
var wg sync.WaitGroup
workerPool := make(chan struct{}, c.Customize.CPUCores)
for _, crawlType := range c.Types {
for _, url := range crawlType.CrawlList |
}
wg.Wait()
close(rawResultChan)
}
// FilterAndWrite filters HTML conent and write results to files
func (c *Config) FilterAndWrite(rawResultChan chan map[*string]int) {
defer func() {
if err := recover(); err != nil {
log.Printf("Runtime panic: %v\n", err)
}
}()
// Make output dir
utils.Must(os.MkdirAll(filepath.Join("./", c.Customize.OutputDir), 0755))
rawDomainFile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.RawFilename), os.O_WRONLY|os.O_CREATE, 0644)
utils.Must(err)
defer rawDomainFile.Close()
finalDomainFile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.DomainFilename), os.O_WRONLY|os.O_CREATE, 0644)
utils.Must(err)
defer finalDomainFile.Close()
finalIPfile, err := os.OpenFile(filepath.Join(c.Customize.OutputDir, c.Customize.IPFilename), os.O_WRONLY|os.O_CREATE, 0644)
utils.Must(err)
defer finalIPfile.Close()
resultMap := make(map[string]struct{})
domainReg := regexp.MustCompile(c.Filter.Regexp.Domain)
rawReader := bufio.NewWriter(rawDomainFile)
for result := range rawResultChan {
for url, percent := range result {
url := strings.ToLower(*url)
// Write raw results to raw.txt file
rawReader.WriteString(fmt.Sprintf("%s | %d\n", url, percent))
if percent >= c.Filter.Percent {
matchList := domainReg.FindStringSubmatch(url)
if len(matchList) > 0 {
domain := matchList[len(matchList)-2]
// Write filtered results to console
fmt.Printf("%s | %d\n", domain, percent)
// Write filtered results to map to make them unique
resultMap[domain] = struct{}{}
}
}
}
}
rawReader.Flush()
resultSlice := make([]string, 0, len(resultMap))
ipSlice := make([]string, 0, len(resultMap))
ipReg := regexp.MustCompile(c.Filter.Regexp.IP)
for domainOrIP := range resultMap {
ipElem := ipReg.FindStringSubmatch(domainOrIP)
if len(ipElem) > 0 {
ipSlice = append(ipSlice, ipElem[0])
continue
}
resultSlice = append(resultSlice, domainOrIP)
}
// Unique and sort domain slice
sort.SliceStable(resultSlice, func(i, j int) bool {
return len(strings.Split(resultSlice[i], ".")) < len(strings.Split(resultSlice[j], "."))
})
resultSlice = buildTreeAndUnique(resultSlice)
sort.Strings(resultSlice)
// Write filtered result to domains.txt file
domainReader := bufio.NewWriter(finalDomainFile)
for _, domain := range resultSlice {
domainReader.WriteString(fmt.Sprintf("%s\n", domain))
}
domainReader.Flush()
// Sort IP slice
sort.Strings(ipSlice)
// Write IP results to ip.txt file
ipReader := bufio.NewWriter(finalIPfile)
for _, ip := range ipSlice {
ipReader.WriteString(fmt.Sprintf("%s\n", ip))
}
ipReader.Flush()
}
| {
workerPool <- struct{}{}
wg.Add(1)
go func(url string, crawlType *CrawlType) {
defer func() {
if err := recover(); err != nil {
log.Printf("Goroutine panic: fetching %v : %v\n", url, err)
}
}()
container := crawlType.CrawlElement.Container
content := crawlType.CrawlElement.Content
attr := crawlType.CrawlElement.Attr
condition := crawlType.CrawlElement.Condition
log.Println("Crawling:", url)
resp, err := crawler.Crawl(url, crawlType.CrawlReferer)
utils.Must(err)
defer resp.Body.Close()
gzipReader, err := gzip.NewReader(resp.Body)
utils.Must(err)
defer gzipReader.Close()
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(gzipReader)
utils.Must(err)
// Find items
doc.Find(container).Each(func(i int, s *goquery.Selection) {
percent := 0
// For each item found, get contents
rawDomain, _ := s.Find(content).Attr(attr)
if blockedPercentage := strings.TrimSpace(s.Find(condition).Text()); blockedPercentage != "" {
percent, _ = strconv.Atoi(blockedPercentage[:len(blockedPercentage)-1])
}
rawResult := make(map[*string]int)
rawResult[&rawDomain] = percent
rawResultChan <- rawResult
})
wg.Done()
<-workerPool
}(url, crawlType)
} | conditional_block |
Querybox.ts | import { Initialization } from '../Base/Initialization';
import { Component } from '../Base/Component';
import { IComponentBindings } from '../Base/ComponentBindings';
import { ComponentOptions } from '../Base/ComponentOptions';
import { QueryEvents, IBuildingQueryEventArgs } from '../../events/QueryEvents';
import { MODEL_EVENTS, IAttributeChangedEventArg } from '../../models/Model';
import { QUERY_STATE_ATTRIBUTES, QueryStateModel } from '../../models/QueryStateModel';
import { StandaloneSearchInterfaceEvents } from '../../events/StandaloneSearchInterfaceEvents';
import { IAnalyticsNoMeta, analyticsActionCauseList } from '../Analytics/AnalyticsActionListMeta';
import { $$ } from '../../utils/Dom';
import { Assert } from '../../misc/Assert';
import { QueryboxQueryParameters } from './QueryboxQueryParameters';
export interface IQueryboxOptions {
enableSearchAsYouType?: boolean;
searchAsYouTypeDelay?: number;
enableQuerySyntax?: boolean;
enableWildcards?: boolean;
enableQuestionMarks?: boolean;
enableLowercaseOperators?: boolean;
enablePartialMatch?: boolean;
partialMatchKeywords?: number;
partialMatchThreshold?: string;
autoFocus?: boolean;
placeholder?: string;
triggerQueryOnClear?: boolean;
}
/**
* The Querybox component renders an input that the end user can interact with to enter and submit a query.
*
* When the user submits a query by hitting the **Enter** key, the Querybox component triggers a query and logs the
* corresponding usage analytics data.
*
* For technical reasons, it is necessary to instantiate this component on a `div` element rather than directly on an
* `input` element (i.e., `<div class='CoveoQuerybox'></div>` will work, but `<input class='CoveoQuerybox'></input>`
* will not).
*
* See also the {@link Searchbox} component, which can automatically instantiate a Querybox component along with an
* optional {@link SearchButton} component.
*/
export class Querybox extends Component {
static ID = 'Querybox';
/**
* The options for the Querybox.
* @componentOptions
*/
public static options: IQueryboxOptions = {
/**
* Specifies whether to enable the search-as-you-type feature.
*
* Default value is `false`.
*/
enableSearchAsYouType: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* If {@link Querybox.options.enableSearchAsYouType} is `true`, specifies the delay (in milliseconds) between a
* key press and a query being triggered.
*
* Default value is `500`. Minimum value is `0`
*/
searchAsYouTypeDelay: ComponentOptions.buildNumberOption({ defaultValue: 500, min: 0 }),
/**
* Specifies whether the Coveo Platform should try to interpret special query syntax such as field references in the
* query that the user enters in the Querybox (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Setting this option to `true` also causes the query syntax in the Querybox to highlight.
*
* Default value is `true`.
*/
enableQuerySyntax: ComponentOptions.buildBooleanOption({ defaultValue: true }),
/**
* Specifies whether the Coveo Platform should expand keywords containing wildcard characters (`*`) to the possible
* matching keywords in order to broaden the query (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Default value is `false`.
*/
enableWildcards: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* Specifies whether the Coveo Platform should expand keywords containing question mark characters (`?`) to the
* possible matching keywords in order to broaden the query (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Default value is `false`.
*/
enableQuestionMarks: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* If {@link Querybox.options.enableQuerySyntax} is `true`, specifies whether to treat the `AND`, `NOT`, `OR` and
* `NEAR` keywords in the Querybox as query operators in the query even when the end user types them in lowercase.
* This option applies to all query operators (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Default value is `false`.
*
* **Example:**
* > If this option and the enableQuerySyntax option are both `true`, then the Coveo Platform interprets the `near`
* > keyword in a query such as `service center near me` as a query operator (not as a query term).
*
* > Otherwise, if the enableQuerySyntax option is `true` and this option is `false`, the end user has to type the
* > `NEAR` keyword in uppercase in order for the Coveo Platform to interpret it as a query operator.
*/
enableLowercaseOperators: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* Specifies whether to automatically convert a basic expression containing at least a certain number of keywords
* (see {@link Querybox.options.partialMatchKeywords}) to a partial match expression, so that documents containing
* at least a certain subset of those keywords (see {@link Querybox.options.partialMatchThreshold}) will match the
* query.
*
* Default value is `false`.
*
* **Example:**
*
* With the following markup configuration, if a basic expression contains at least 4 keywords, then documents
* containing at least 75% of those keywords (round up) will match the query.
*
* For instance, if the basic expression is `Coveo custom component configuration help`, then documents containing
* all 5 of those keywords, or 4 of them (75% of 5 rounded up) will match the query.
*
* ```html
* <div class='CoveoQuerybox' data-enable-partial-match='true' data-partial-match-keywords='4' data-partial-match-threshold='75%'></div>
* ```
*/
enablePartialMatch: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* When {@link Querybox.options.enablePartialMatch} is `true`, specifies the minimum number of keywords that need to
* be present in the basic expression to convert this expression to a partial match expression.
*
* See also {@link Querybox.options.partialMatchThreshold}.
*
* Default value is `5`.
*
* **Note:**
* > Only the basic expression of the query (see {@link q}) can be converted to a partial match expression.
*
* **Example:**
* > If the partialMatchKeywords option is `7`, the basic expression will have to contain at least 7 keywords
* > to be converted to a partial match expression.
*/
partialMatchKeywords: ComponentOptions.buildNumberOption({ defaultValue: 5, min: 1 }),
/**
* When {@link Querybox.options.enablePartialMatch} is `true`, specifies an absolute or relative (percentage) value
* indicating the minimum number of partial match expression keywords a document must contain in order to match the
* query.
*
* See also {@link Querybox.options.partialMatchKeywords}.
*
* Default value is `50%`.
*
* **Note:**
* > The relative threshold is always rounded up to the nearest integer.
*
* **Examples:**
* > If the partialMatchThreshold option is `50%` and the partial match expression contains exactly 9 keywords, then
* > documents will have to contain at least 5 of those keywords to match the query (50% of 9, rounded up).
*
* > With the same configuration, if the partial match expression contains exactly 12 keywords, then documents will
* > have to contain at least 6 of those keywords to match the query (50% of 12).
*
* > If the partialMatchThreshold option is `2`, then documents will always have to contain at least 2 of the
* > partial match expression keywords to match the query, no matter how many keywords the partial match expression
* > actually contains.
*/
partialMatchThreshold: ComponentOptions.buildStringOption({ defaultValue: '50%' }),
/**
* Specifies whether to trigger a query when the Querybox is cleared.
*
* Default value is `true`.
*/
triggerQueryOnClear: ComponentOptions.buildBooleanOption({ defaultValue: true }),
/**
* Specifies whether the Querybox should get auto focus and selection upon initialization.
*
* Default value is `true`.
*/
autoFocus: ComponentOptions.buildBooleanOption({ defaultValue: true })
};
public magicBox: Coveo.MagicBox.Instance;
private lastQuery: string;
private searchAsYouTypeTimeout: number;
/**
* Creates a new Querybox. Creates a new `Coveo.Magicbox` instance and wraps the Magicbox methods (`onblur`,
* `onsubmit` etc.). Binds event on `buildingQuery` and on redirection (for standalone box).
* @param element The HTMLElement on which to instantiate the component. This cannot be an HTMLInputElement for
* technical reasons.
* @param options The options for the ResultLayout component.
* @param bindings The bindings that the component requires to function normally. If not set, these will be
* automatically resolved (with a slower execution time).
*/
constructor(public element: HTMLElement, public options?: IQueryboxOptions, public bindings?: IComponentBindings) {
super(element, Querybox.ID, bindings);
if (element instanceof HTMLInputElement) {
this.logger.error('Querybox cannot be used on an HTMLInputElement');
}
this.options = ComponentOptions.initComponentOptions(element, Querybox, options);
this.magicBox = Coveo.MagicBox.create(element, new Coveo.MagicBox.Grammar('Query', {
Query: '[Term*][Spaces?]',
Term: '[Spaces?][Word]',
Spaces: / +/,
Word: /[^ ]+/
}), {
inline: true
});
this.bind.onRootElement(QueryEvents.buildingQuery, (args: IBuildingQueryEventArgs) => this.handleBuildingQuery(args));
this.bind.onRootElement(StandaloneSearchInterfaceEvents.beforeRedirect, () => this.updateQueryState());
this.bind.onQueryState(MODEL_EVENTS.CHANGE_ONE, QUERY_STATE_ATTRIBUTES.Q, (args: IAttributeChangedEventArg) => this.handleQueryStateChanged(args));
if (this.options.enableSearchAsYouType) {
$$(this.element).addClass('coveo-search-as-you-type');
this.magicBox.onchange = () => {
this.searchAsYouType();
};
}
this.magicBox.onsubmit = () => {
this.submit();
};
this.magicBox.onblur = () => {
this.updateQueryState();
};
this.magicBox.onclear = () => {
this.updateQueryState();
if (this.options.triggerQueryOnClear) {
this.usageAnalytics.logSearchEvent<IAnalyticsNoMeta>(analyticsActionCauseList.searchboxClear, {});
this.triggerNewQuery(false);
}
};
if (this.options.autoFocus) {
this.magicBox.focus();
}
}
/**
* Adds the current content of the input to the query and triggers a query if the current content of the input has
* changed since last submit.
*
* Also logs the `serachboxSubmit` event in the usage analytics.
*/
public submit(): void {
this.magicBox.clearSuggestion();
this.updateQueryState();
this.usageAnalytics.logSearchEvent<IAnalyticsNoMeta>(analyticsActionCauseList.searchboxSubmit, {});
this.triggerNewQuery(false);
}
/**
* Sets the content of the input.
*
* @param text The string to set in the input.
*/
public setText(text: string): void {
this.magicBox.setText(text);
this.updateQueryState();
}
/**
* Clears the content of the input.
*/
public clear(): void {
this.magicBox.clear();
}
/**
* Gets the content of the input.
*
* @returns {string} The content of the input.
*/
public getText(): string {
return this.magicBox.getText();
}
/**
* Gets the result from the input.
*
* @returns {Result} The result.
*/
public getResult() {
return this.magicBox.getResult();
}
/**
* Gets the displayed result from the input.
*
* @returns {Result} The displayed result.
*/
public getDisplayedResult() {
return this.magicBox.getDisplayedResult();
}
/**
* Gets the current cursor position in the input.
*
* @returns {number} The cursor position (index starts at 0).
*/
public | (): number {
return this.magicBox.getCursor();
}
/**
* Gets the result at cursor position.
*
* @param match {string | { (result): boolean }} The match condition.
*
* @returns {Result[]} The result.
*/
public resultAtCursor(match?: string | { (result): boolean; }) {
return this.magicBox.resultAtCursor(match);
}
private handleBuildingQuery(args: IBuildingQueryEventArgs): void {
Assert.exists(args);
Assert.exists(args.queryBuilder);
this.updateQueryState();
this.lastQuery = this.magicBox.getText();
new QueryboxQueryParameters(this.options).addParameters(args.queryBuilder, this.lastQuery);
}
private triggerNewQuery(searchAsYouType: boolean): void {
clearTimeout(this.searchAsYouTypeTimeout);
let text = this.magicBox.getText();
if (this.lastQuery != text && text != null) {
this.lastQuery = text;
this.queryController.executeQuery({
searchAsYouType: searchAsYouType,
logInActionsHistory: true
});
}
}
private updateQueryState(): void {
this.queryStateModel.set(QueryStateModel.attributesEnum.q, this.magicBox.getText());
}
private handleQueryStateChanged(args: IAttributeChangedEventArg): void {
Assert.exists(args);
let q = <string>args.value;
if (q != this.magicBox.getText()) {
this.magicBox.setText(q);
}
}
private searchAsYouType(): void {
clearTimeout(this.searchAsYouTypeTimeout);
this.searchAsYouTypeTimeout = setTimeout(() => {
this.usageAnalytics.logSearchAsYouType<IAnalyticsNoMeta>(analyticsActionCauseList.searchboxAsYouType, {});
this.triggerNewQuery(true);
}, this.options.searchAsYouTypeDelay);
}
}
Initialization.registerAutoCreateComponent(Querybox);
| getCursor | identifier_name |
Querybox.ts | import { Initialization } from '../Base/Initialization';
import { Component } from '../Base/Component';
import { IComponentBindings } from '../Base/ComponentBindings';
import { ComponentOptions } from '../Base/ComponentOptions';
import { QueryEvents, IBuildingQueryEventArgs } from '../../events/QueryEvents';
import { MODEL_EVENTS, IAttributeChangedEventArg } from '../../models/Model';
import { QUERY_STATE_ATTRIBUTES, QueryStateModel } from '../../models/QueryStateModel';
import { StandaloneSearchInterfaceEvents } from '../../events/StandaloneSearchInterfaceEvents';
import { IAnalyticsNoMeta, analyticsActionCauseList } from '../Analytics/AnalyticsActionListMeta';
import { $$ } from '../../utils/Dom';
import { Assert } from '../../misc/Assert';
import { QueryboxQueryParameters } from './QueryboxQueryParameters';
export interface IQueryboxOptions {
enableSearchAsYouType?: boolean;
searchAsYouTypeDelay?: number;
enableQuerySyntax?: boolean;
enableWildcards?: boolean;
enableQuestionMarks?: boolean;
enableLowercaseOperators?: boolean;
enablePartialMatch?: boolean;
partialMatchKeywords?: number;
partialMatchThreshold?: string;
autoFocus?: boolean;
placeholder?: string;
triggerQueryOnClear?: boolean;
}
/**
* The Querybox component renders an input that the end user can interact with to enter and submit a query.
*
* When the user submits a query by hitting the **Enter** key, the Querybox component triggers a query and logs the
* corresponding usage analytics data.
*
* For technical reasons, it is necessary to instantiate this component on a `div` element rather than directly on an
* `input` element (i.e., `<div class='CoveoQuerybox'></div>` will work, but `<input class='CoveoQuerybox'></input>`
* will not).
*
* See also the {@link Searchbox} component, which can automatically instantiate a Querybox component along with an
* optional {@link SearchButton} component.
*/
export class Querybox extends Component {
static ID = 'Querybox';
/**
* The options for the Querybox.
* @componentOptions
*/
public static options: IQueryboxOptions = {
/**
* Specifies whether to enable the search-as-you-type feature.
*
* Default value is `false`.
*/
enableSearchAsYouType: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* If {@link Querybox.options.enableSearchAsYouType} is `true`, specifies the delay (in milliseconds) between a
* key press and a query being triggered.
*
* Default value is `500`. Minimum value is `0`
*/
searchAsYouTypeDelay: ComponentOptions.buildNumberOption({ defaultValue: 500, min: 0 }),
/**
* Specifies whether the Coveo Platform should try to interpret special query syntax such as field references in the
* query that the user enters in the Querybox (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Setting this option to `true` also causes the query syntax in the Querybox to highlight.
*
* Default value is `true`.
*/
enableQuerySyntax: ComponentOptions.buildBooleanOption({ defaultValue: true }),
/**
* Specifies whether the Coveo Platform should expand keywords containing wildcard characters (`*`) to the possible
* matching keywords in order to broaden the query (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Default value is `false`.
*/
enableWildcards: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* Specifies whether the Coveo Platform should expand keywords containing question mark characters (`?`) to the
* possible matching keywords in order to broaden the query (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Default value is `false`.
*/
enableQuestionMarks: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* If {@link Querybox.options.enableQuerySyntax} is `true`, specifies whether to treat the `AND`, `NOT`, `OR` and
* `NEAR` keywords in the Querybox as query operators in the query even when the end user types them in lowercase.
* This option applies to all query operators (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Default value is `false`.
*
* **Example:**
* > If this option and the enableQuerySyntax option are both `true`, then the Coveo Platform interprets the `near`
* > keyword in a query such as `service center near me` as a query operator (not as a query term).
*
* > Otherwise, if the enableQuerySyntax option is `true` and this option is `false`, the end user has to type the
* > `NEAR` keyword in uppercase in order for the Coveo Platform to interpret it as a query operator.
*/
enableLowercaseOperators: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* Specifies whether to automatically convert a basic expression containing at least a certain number of keywords
* (see {@link Querybox.options.partialMatchKeywords}) to a partial match expression, so that documents containing
* at least a certain subset of those keywords (see {@link Querybox.options.partialMatchThreshold}) will match the
* query.
*
* Default value is `false`.
*
* **Example:**
*
* With the following markup configuration, if a basic expression contains at least 4 keywords, then documents
* containing at least 75% of those keywords (round up) will match the query.
*
* For instance, if the basic expression is `Coveo custom component configuration help`, then documents containing
* all 5 of those keywords, or 4 of them (75% of 5 rounded up) will match the query.
*
* ```html
* <div class='CoveoQuerybox' data-enable-partial-match='true' data-partial-match-keywords='4' data-partial-match-threshold='75%'></div>
* ```
*/
enablePartialMatch: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* When {@link Querybox.options.enablePartialMatch} is `true`, specifies the minimum number of keywords that need to
* be present in the basic expression to convert this expression to a partial match expression. | * **Note:**
* > Only the basic expression of the query (see {@link q}) can be converted to a partial match expression.
*
* **Example:**
* > If the partialMatchKeywords option is `7`, the basic expression will have to contain at least 7 keywords
* > to be converted to a partial match expression.
*/
partialMatchKeywords: ComponentOptions.buildNumberOption({ defaultValue: 5, min: 1 }),
/**
* When {@link Querybox.options.enablePartialMatch} is `true`, specifies an absolute or relative (percentage) value
* indicating the minimum number of partial match expression keywords a document must contain in order to match the
* query.
*
* See also {@link Querybox.options.partialMatchKeywords}.
*
* Default value is `50%`.
*
* **Note:**
* > The relative threshold is always rounded up to the nearest integer.
*
* **Examples:**
* > If the partialMatchThreshold option is `50%` and the partial match expression contains exactly 9 keywords, then
* > documents will have to contain at least 5 of those keywords to match the query (50% of 9, rounded up).
*
* > With the same configuration, if the partial match expression contains exactly 12 keywords, then documents will
* > have to contain at least 6 of those keywords to match the query (50% of 12).
*
* > If the partialMatchThreshold option is `2`, then documents will always have to contain at least 2 of the
* > partial match expression keywords to match the query, no matter how many keywords the partial match expression
* > actually contains.
*/
partialMatchThreshold: ComponentOptions.buildStringOption({ defaultValue: '50%' }),
/**
* Specifies whether to trigger a query when the Querybox is cleared.
*
* Default value is `true`.
*/
triggerQueryOnClear: ComponentOptions.buildBooleanOption({ defaultValue: true }),
/**
* Specifies whether the Querybox should get auto focus and selection upon initialization.
*
* Default value is `true`.
*/
autoFocus: ComponentOptions.buildBooleanOption({ defaultValue: true })
};
public magicBox: Coveo.MagicBox.Instance;
private lastQuery: string;
private searchAsYouTypeTimeout: number;
/**
* Creates a new Querybox. Creates a new `Coveo.Magicbox` instance and wraps the Magicbox methods (`onblur`,
* `onsubmit` etc.). Binds event on `buildingQuery` and on redirection (for standalone box).
* @param element The HTMLElement on which to instantiate the component. This cannot be an HTMLInputElement for
* technical reasons.
* @param options The options for the ResultLayout component.
* @param bindings The bindings that the component requires to function normally. If not set, these will be
* automatically resolved (with a slower execution time).
*/
constructor(public element: HTMLElement, public options?: IQueryboxOptions, public bindings?: IComponentBindings) {
super(element, Querybox.ID, bindings);
if (element instanceof HTMLInputElement) {
this.logger.error('Querybox cannot be used on an HTMLInputElement');
}
this.options = ComponentOptions.initComponentOptions(element, Querybox, options);
this.magicBox = Coveo.MagicBox.create(element, new Coveo.MagicBox.Grammar('Query', {
Query: '[Term*][Spaces?]',
Term: '[Spaces?][Word]',
Spaces: / +/,
Word: /[^ ]+/
}), {
inline: true
});
this.bind.onRootElement(QueryEvents.buildingQuery, (args: IBuildingQueryEventArgs) => this.handleBuildingQuery(args));
this.bind.onRootElement(StandaloneSearchInterfaceEvents.beforeRedirect, () => this.updateQueryState());
this.bind.onQueryState(MODEL_EVENTS.CHANGE_ONE, QUERY_STATE_ATTRIBUTES.Q, (args: IAttributeChangedEventArg) => this.handleQueryStateChanged(args));
if (this.options.enableSearchAsYouType) {
$$(this.element).addClass('coveo-search-as-you-type');
this.magicBox.onchange = () => {
this.searchAsYouType();
};
}
this.magicBox.onsubmit = () => {
this.submit();
};
this.magicBox.onblur = () => {
this.updateQueryState();
};
this.magicBox.onclear = () => {
this.updateQueryState();
if (this.options.triggerQueryOnClear) {
this.usageAnalytics.logSearchEvent<IAnalyticsNoMeta>(analyticsActionCauseList.searchboxClear, {});
this.triggerNewQuery(false);
}
};
if (this.options.autoFocus) {
this.magicBox.focus();
}
}
/**
* Adds the current content of the input to the query and triggers a query if the current content of the input has
* changed since last submit.
*
* Also logs the `serachboxSubmit` event in the usage analytics.
*/
public submit(): void {
this.magicBox.clearSuggestion();
this.updateQueryState();
this.usageAnalytics.logSearchEvent<IAnalyticsNoMeta>(analyticsActionCauseList.searchboxSubmit, {});
this.triggerNewQuery(false);
}
/**
* Sets the content of the input.
*
* @param text The string to set in the input.
*/
public setText(text: string): void {
this.magicBox.setText(text);
this.updateQueryState();
}
/**
* Clears the content of the input.
*/
public clear(): void {
this.magicBox.clear();
}
/**
* Gets the content of the input.
*
* @returns {string} The content of the input.
*/
public getText(): string {
return this.magicBox.getText();
}
/**
* Gets the result from the input.
*
* @returns {Result} The result.
*/
public getResult() {
return this.magicBox.getResult();
}
/**
* Gets the displayed result from the input.
*
* @returns {Result} The displayed result.
*/
public getDisplayedResult() {
return this.magicBox.getDisplayedResult();
}
/**
* Gets the current cursor position in the input.
*
* @returns {number} The cursor position (index starts at 0).
*/
public getCursor(): number {
return this.magicBox.getCursor();
}
/**
* Gets the result at cursor position.
*
* @param match {string | { (result): boolean }} The match condition.
*
* @returns {Result[]} The result.
*/
public resultAtCursor(match?: string | { (result): boolean; }) {
return this.magicBox.resultAtCursor(match);
}
private handleBuildingQuery(args: IBuildingQueryEventArgs): void {
Assert.exists(args);
Assert.exists(args.queryBuilder);
this.updateQueryState();
this.lastQuery = this.magicBox.getText();
new QueryboxQueryParameters(this.options).addParameters(args.queryBuilder, this.lastQuery);
}
private triggerNewQuery(searchAsYouType: boolean): void {
clearTimeout(this.searchAsYouTypeTimeout);
let text = this.magicBox.getText();
if (this.lastQuery != text && text != null) {
this.lastQuery = text;
this.queryController.executeQuery({
searchAsYouType: searchAsYouType,
logInActionsHistory: true
});
}
}
private updateQueryState(): void {
this.queryStateModel.set(QueryStateModel.attributesEnum.q, this.magicBox.getText());
}
private handleQueryStateChanged(args: IAttributeChangedEventArg): void {
Assert.exists(args);
let q = <string>args.value;
if (q != this.magicBox.getText()) {
this.magicBox.setText(q);
}
}
private searchAsYouType(): void {
clearTimeout(this.searchAsYouTypeTimeout);
this.searchAsYouTypeTimeout = setTimeout(() => {
this.usageAnalytics.logSearchAsYouType<IAnalyticsNoMeta>(analyticsActionCauseList.searchboxAsYouType, {});
this.triggerNewQuery(true);
}, this.options.searchAsYouTypeDelay);
}
}
Initialization.registerAutoCreateComponent(Querybox); | *
* See also {@link Querybox.options.partialMatchThreshold}.
*
* Default value is `5`.
* | random_line_split |
Querybox.ts | import { Initialization } from '../Base/Initialization';
import { Component } from '../Base/Component';
import { IComponentBindings } from '../Base/ComponentBindings';
import { ComponentOptions } from '../Base/ComponentOptions';
import { QueryEvents, IBuildingQueryEventArgs } from '../../events/QueryEvents';
import { MODEL_EVENTS, IAttributeChangedEventArg } from '../../models/Model';
import { QUERY_STATE_ATTRIBUTES, QueryStateModel } from '../../models/QueryStateModel';
import { StandaloneSearchInterfaceEvents } from '../../events/StandaloneSearchInterfaceEvents';
import { IAnalyticsNoMeta, analyticsActionCauseList } from '../Analytics/AnalyticsActionListMeta';
import { $$ } from '../../utils/Dom';
import { Assert } from '../../misc/Assert';
import { QueryboxQueryParameters } from './QueryboxQueryParameters';
export interface IQueryboxOptions {
enableSearchAsYouType?: boolean;
searchAsYouTypeDelay?: number;
enableQuerySyntax?: boolean;
enableWildcards?: boolean;
enableQuestionMarks?: boolean;
enableLowercaseOperators?: boolean;
enablePartialMatch?: boolean;
partialMatchKeywords?: number;
partialMatchThreshold?: string;
autoFocus?: boolean;
placeholder?: string;
triggerQueryOnClear?: boolean;
}
/**
* The Querybox component renders an input that the end user can interact with to enter and submit a query.
*
* When the user submits a query by hitting the **Enter** key, the Querybox component triggers a query and logs the
* corresponding usage analytics data.
*
* For technical reasons, it is necessary to instantiate this component on a `div` element rather than directly on an
* `input` element (i.e., `<div class='CoveoQuerybox'></div>` will work, but `<input class='CoveoQuerybox'></input>`
* will not).
*
* See also the {@link Searchbox} component, which can automatically instantiate a Querybox component along with an
* optional {@link SearchButton} component.
*/
export class Querybox extends Component {
static ID = 'Querybox';
/**
* The options for the Querybox.
* @componentOptions
*/
public static options: IQueryboxOptions = {
/**
* Specifies whether to enable the search-as-you-type feature.
*
* Default value is `false`.
*/
enableSearchAsYouType: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* If {@link Querybox.options.enableSearchAsYouType} is `true`, specifies the delay (in milliseconds) between a
* key press and a query being triggered.
*
* Default value is `500`. Minimum value is `0`
*/
searchAsYouTypeDelay: ComponentOptions.buildNumberOption({ defaultValue: 500, min: 0 }),
/**
* Specifies whether the Coveo Platform should try to interpret special query syntax such as field references in the
* query that the user enters in the Querybox (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Setting this option to `true` also causes the query syntax in the Querybox to highlight.
*
* Default value is `true`.
*/
enableQuerySyntax: ComponentOptions.buildBooleanOption({ defaultValue: true }),
/**
* Specifies whether the Coveo Platform should expand keywords containing wildcard characters (`*`) to the possible
* matching keywords in order to broaden the query (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Default value is `false`.
*/
enableWildcards: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* Specifies whether the Coveo Platform should expand keywords containing question mark characters (`?`) to the
* possible matching keywords in order to broaden the query (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Default value is `false`.
*/
enableQuestionMarks: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* If {@link Querybox.options.enableQuerySyntax} is `true`, specifies whether to treat the `AND`, `NOT`, `OR` and
* `NEAR` keywords in the Querybox as query operators in the query even when the end user types them in lowercase.
* This option applies to all query operators (see
* [Coveo Query Syntax Reference](http://www.coveo.com/go?dest=adminhelp70&lcid=9&context=10005)).
*
* Default value is `false`.
*
* **Example:**
* > If this option and the enableQuerySyntax option are both `true`, then the Coveo Platform interprets the `near`
* > keyword in a query such as `service center near me` as a query operator (not as a query term).
*
* > Otherwise, if the enableQuerySyntax option is `true` and this option is `false`, the end user has to type the
* > `NEAR` keyword in uppercase in order for the Coveo Platform to interpret it as a query operator.
*/
enableLowercaseOperators: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* Specifies whether to automatically convert a basic expression containing at least a certain number of keywords
* (see {@link Querybox.options.partialMatchKeywords}) to a partial match expression, so that documents containing
* at least a certain subset of those keywords (see {@link Querybox.options.partialMatchThreshold}) will match the
* query.
*
* Default value is `false`.
*
* **Example:**
*
* With the following markup configuration, if a basic expression contains at least 4 keywords, then documents
* containing at least 75% of those keywords (round up) will match the query.
*
* For instance, if the basic expression is `Coveo custom component configuration help`, then documents containing
* all 5 of those keywords, or 4 of them (75% of 5 rounded up) will match the query.
*
* ```html
* <div class='CoveoQuerybox' data-enable-partial-match='true' data-partial-match-keywords='4' data-partial-match-threshold='75%'></div>
* ```
*/
enablePartialMatch: ComponentOptions.buildBooleanOption({ defaultValue: false }),
/**
* When {@link Querybox.options.enablePartialMatch} is `true`, specifies the minimum number of keywords that need to
* be present in the basic expression to convert this expression to a partial match expression.
*
* See also {@link Querybox.options.partialMatchThreshold}.
*
* Default value is `5`.
*
* **Note:**
* > Only the basic expression of the query (see {@link q}) can be converted to a partial match expression.
*
* **Example:**
* > If the partialMatchKeywords option is `7`, the basic expression will have to contain at least 7 keywords
* > to be converted to a partial match expression.
*/
partialMatchKeywords: ComponentOptions.buildNumberOption({ defaultValue: 5, min: 1 }),
/**
* When {@link Querybox.options.enablePartialMatch} is `true`, specifies an absolute or relative (percentage) value
* indicating the minimum number of partial match expression keywords a document must contain in order to match the
* query.
*
* See also {@link Querybox.options.partialMatchKeywords}.
*
* Default value is `50%`.
*
* **Note:**
* > The relative threshold is always rounded up to the nearest integer.
*
* **Examples:**
* > If the partialMatchThreshold option is `50%` and the partial match expression contains exactly 9 keywords, then
* > documents will have to contain at least 5 of those keywords to match the query (50% of 9, rounded up).
*
* > With the same configuration, if the partial match expression contains exactly 12 keywords, then documents will
* > have to contain at least 6 of those keywords to match the query (50% of 12).
*
* > If the partialMatchThreshold option is `2`, then documents will always have to contain at least 2 of the
* > partial match expression keywords to match the query, no matter how many keywords the partial match expression
* > actually contains.
*/
partialMatchThreshold: ComponentOptions.buildStringOption({ defaultValue: '50%' }),
/**
* Specifies whether to trigger a query when the Querybox is cleared.
*
* Default value is `true`.
*/
triggerQueryOnClear: ComponentOptions.buildBooleanOption({ defaultValue: true }),
/**
* Specifies whether the Querybox should get auto focus and selection upon initialization.
*
* Default value is `true`.
*/
autoFocus: ComponentOptions.buildBooleanOption({ defaultValue: true })
};
public magicBox: Coveo.MagicBox.Instance;
private lastQuery: string;
private searchAsYouTypeTimeout: number;
/**
* Creates a new Querybox. Creates a new `Coveo.Magicbox` instance and wraps the Magicbox methods (`onblur`,
* `onsubmit` etc.). Binds event on `buildingQuery` and on redirection (for standalone box).
* @param element The HTMLElement on which to instantiate the component. This cannot be an HTMLInputElement for
* technical reasons.
* @param options The options for the ResultLayout component.
* @param bindings The bindings that the component requires to function normally. If not set, these will be
* automatically resolved (with a slower execution time).
*/
constructor(public element: HTMLElement, public options?: IQueryboxOptions, public bindings?: IComponentBindings) {
super(element, Querybox.ID, bindings);
if (element instanceof HTMLInputElement) {
this.logger.error('Querybox cannot be used on an HTMLInputElement');
}
this.options = ComponentOptions.initComponentOptions(element, Querybox, options);
this.magicBox = Coveo.MagicBox.create(element, new Coveo.MagicBox.Grammar('Query', {
Query: '[Term*][Spaces?]',
Term: '[Spaces?][Word]',
Spaces: / +/,
Word: /[^ ]+/
}), {
inline: true
});
this.bind.onRootElement(QueryEvents.buildingQuery, (args: IBuildingQueryEventArgs) => this.handleBuildingQuery(args));
this.bind.onRootElement(StandaloneSearchInterfaceEvents.beforeRedirect, () => this.updateQueryState());
this.bind.onQueryState(MODEL_EVENTS.CHANGE_ONE, QUERY_STATE_ATTRIBUTES.Q, (args: IAttributeChangedEventArg) => this.handleQueryStateChanged(args));
if (this.options.enableSearchAsYouType) {
$$(this.element).addClass('coveo-search-as-you-type');
this.magicBox.onchange = () => {
this.searchAsYouType();
};
}
this.magicBox.onsubmit = () => {
this.submit();
};
this.magicBox.onblur = () => {
this.updateQueryState();
};
this.magicBox.onclear = () => {
this.updateQueryState();
if (this.options.triggerQueryOnClear) |
};
if (this.options.autoFocus) {
this.magicBox.focus();
}
}
/**
* Adds the current content of the input to the query and triggers a query if the current content of the input has
* changed since last submit.
*
* Also logs the `serachboxSubmit` event in the usage analytics.
*/
public submit(): void {
this.magicBox.clearSuggestion();
this.updateQueryState();
this.usageAnalytics.logSearchEvent<IAnalyticsNoMeta>(analyticsActionCauseList.searchboxSubmit, {});
this.triggerNewQuery(false);
}
/**
* Sets the content of the input.
*
* @param text The string to set in the input.
*/
public setText(text: string): void {
this.magicBox.setText(text);
this.updateQueryState();
}
/**
* Clears the content of the input.
*/
public clear(): void {
this.magicBox.clear();
}
/**
* Gets the content of the input.
*
* @returns {string} The content of the input.
*/
public getText(): string {
return this.magicBox.getText();
}
/**
* Gets the result from the input.
*
* @returns {Result} The result.
*/
public getResult() {
return this.magicBox.getResult();
}
/**
* Gets the displayed result from the input.
*
* @returns {Result} The displayed result.
*/
public getDisplayedResult() {
return this.magicBox.getDisplayedResult();
}
/**
* Gets the current cursor position in the input.
*
* @returns {number} The cursor position (index starts at 0).
*/
public getCursor(): number {
return this.magicBox.getCursor();
}
/**
* Gets the result at cursor position.
*
* @param match {string | { (result): boolean }} The match condition.
*
* @returns {Result[]} The result.
*/
public resultAtCursor(match?: string | { (result): boolean; }) {
return this.magicBox.resultAtCursor(match);
}
private handleBuildingQuery(args: IBuildingQueryEventArgs): void {
Assert.exists(args);
Assert.exists(args.queryBuilder);
this.updateQueryState();
this.lastQuery = this.magicBox.getText();
new QueryboxQueryParameters(this.options).addParameters(args.queryBuilder, this.lastQuery);
}
private triggerNewQuery(searchAsYouType: boolean): void {
clearTimeout(this.searchAsYouTypeTimeout);
let text = this.magicBox.getText();
if (this.lastQuery != text && text != null) {
this.lastQuery = text;
this.queryController.executeQuery({
searchAsYouType: searchAsYouType,
logInActionsHistory: true
});
}
}
private updateQueryState(): void {
this.queryStateModel.set(QueryStateModel.attributesEnum.q, this.magicBox.getText());
}
private handleQueryStateChanged(args: IAttributeChangedEventArg): void {
Assert.exists(args);
let q = <string>args.value;
if (q != this.magicBox.getText()) {
this.magicBox.setText(q);
}
}
private searchAsYouType(): void {
clearTimeout(this.searchAsYouTypeTimeout);
this.searchAsYouTypeTimeout = setTimeout(() => {
this.usageAnalytics.logSearchAsYouType<IAnalyticsNoMeta>(analyticsActionCauseList.searchboxAsYouType, {});
this.triggerNewQuery(true);
}, this.options.searchAsYouTypeDelay);
}
}
Initialization.registerAutoCreateComponent(Querybox);
| {
this.usageAnalytics.logSearchEvent<IAnalyticsNoMeta>(analyticsActionCauseList.searchboxClear, {});
this.triggerNewQuery(false);
} | conditional_block |
battle.py | from __future__ import division
import random
import math
from collections import namedtuple
from idleiss.ship import Ship
from idleiss.ship import ShipAttributes
from idleiss.ship import ShipLibrary
SHIELD_BOUNCE_ZONE = 0.01 # max percentage damage to shield for bounce.
HULL_DANGER_ZONE = 0.70 # percentage remaining.
AttackResult = namedtuple('AttackResult',
['attacker_fleet', 'damaged_fleet', 'shots_taken', 'damage_taken'])
Fleet = namedtuple('Fleet',
['ships', 'ship_count'])
RoundResult = namedtuple('RoundResult',
['ship_count', 'shots_taken', 'damage_taken'])
def hull_breach(hull, max_hull, damage,
hull_danger_zone=HULL_DANGER_ZONE):
"""
Hull has a chance of being breached if less than the dangerzone.
Chance of survival is determined by how much % hull remains.
Returns input hull amount if RNG thinks it should, otherwise 0.
"""
damaged_hull = hull - damage
chance_of_survival = damaged_hull / max_hull
return not (chance_of_survival < hull_danger_zone and
chance_of_survival < random.random()) and damaged_hull or 0
def shield_bounce(shield, max_shield, damage,
shield_bounce_zone=SHIELD_BOUNCE_ZONE):
"""
Check whether the damage has enough power to damage the shield or
just harmlessly bounce off it, only if there is a shield available.
Shield will be returned if the above conditions are not met,
otherwise the current shield less damage taken will be returned.
Returns the new shield value.
"""
# really, shield can't become negative unless some external factors
# hacked it into one.
return ((damage < shield * shield_bounce_zone) and shield > 0 and
shield or shield - damage)
def size_damage_factor(weapon_size, target_size):
"""
Calculates damage factor on size. If weapon size is greater than
the target size, then only the area that falls within the target
will the damage be applied.
"""
if weapon_size <= target_size:
return damage
return (target_size ** 2) / (weapon_size ** 2)
def true_damage(damage, weapon_size, target_size, source_debuff, target_debuff):
"""
Calculates true damage based on parameters.
"""
# source_debuffs: tracking disruption
tracking_disrupt = 1 + source_debuff.get('active', {}).get(
'tracking_disruption', 0)
# target_debuffs: target painter, web
target_painter = 1 + target_debuff.get('active', {}).get(
'target_painter', 0)
web = 1 - target_debuff.get('active', {}).get(
'web', 0)
# painters gives > 1 multiplier to the target_size against target
# reason - painters expand the target to make it easier to hit.
# webbers give < 1 multiplier to the weapon_size against target
# reason - weapons can focus more damage on a webbed target
if web == 0 or weapon_size / web * tracking_disrupt <= \
target_size * target_painter:
return damage
true_weapon_size = (weapon_size / web) * tracking_disrupt
true_target_size = target_size * target_painter
damage_factor = size_damage_factor(true_weapon_size, true_target_size)
return int(math.ceil(damage_factor * damage))
def is_ship_alive(ship):
"""
Simple check to see if ship is alive.
"""
# If and when flag systems become advanced enough **FUN** things can
# be applied to make this check more hilarious.
return ship.attributes.hull > 0 # though it can't be < 0
def grab_debuffs(source, target_in):
"""
Retuns a dict of applied debufs calculated from ship schema
as well as ship attributes.
Source is ShipSchema
target_in is a Ship
"""
inactive = {}
sensor_str = target_in.schema.sensor_strength
target = target_in
# I'm sure there's a list comprehension thing that could be used
# to clean this up but I have no idea what
if source.target_painter:
if target.debuffs.get('inactive', {}).get('target_painter', 0) < \
source.target_painter:
inactive['target_painter'] = source.target_painter
if source.tracking_disruption:
if target.debuffs.get('inactive', {}).get('tracking_disruption', 0) < \
source.tracking_disruption:
inactive['tracking_disruption'] = source.tracking_disruption
if source.ECM:
if not target.debuffs.get('inactive', {}).get('ECM', 0):
if sensor_str == 0 or \
random.random() < (float(source.ECM) / sensor_str):
inactive['ECM'] = source.ECM
if source.web:
if target.debuffs.get('inactive', {}).get('web', 0) < source.web:
inactive['web'] = source.web
result = {}
if inactive:
result['inactive'] = inactive
if target.debuffs.get('active'):
result['active'] = target.debuffs.get('active')
return result
def ship_attack(attacker_ship, victim_ship):
"""
Do a ship attack.
Apply the attacker's schema onto the victim_ship as an attack
and return a new Ship object as the result.
"""
if not is_ship_alive(victim_ship):
# save us some time, it should be the same dead ship.
return victim_ship
if attacker_ship.debuffs.get('active', {}).get('ECM', 0) != 0:
# attacker is jammed can't attack or apply debuffs
return victim_ship
debuffs = grab_debuffs(attacker_ship.schema, victim_ship)
if attacker_ship.schema.firepower <= 0:
# damage doesn't need to be calculated, but debuffs do
return Ship(
victim_ship.schema,
ShipAttributes(
victim_ship.attributes.shield,
victim_ship.attributes.armor,
victim_ship.attributes.hull,
),
debuffs,
)
damage = true_damage(attacker_ship.schema.firepower,
attacker_ship.schema.weapon_size,
victim_ship.schema.size,
attacker_ship.debuffs,
victim_ship.debuffs
)
shield = shield_bounce(victim_ship.attributes.shield,
victim_ship.schema.shield, damage)
if shield == victim_ship.attributes.shield:
# it glanced off, don't need to worry about hull breaches when
# the weapon didn't even hit
return Ship(
victim_ship.schema,
ShipAttributes(
victim_ship.attributes.shield,
victim_ship.attributes.armor,
victim_ship.attributes.hull,
),
debuffs,
)
armor = victim_ship.attributes.armor + min(shield, 0)
hull = hull_breach(victim_ship.attributes.hull,
victim_ship.schema.hull, - min(armor, 0))
return Ship(
victim_ship.schema,
ShipAttributes(max(0, shield), max(0, armor), max(0, hull)),
debuffs,
)
def multishot(attacker_schema, victim_schema):
"""
Calculate multishot result based on the schemas.
"""
multishot = attacker_schema.multishot.get(victim_schema.name, 0)
return multishot > 0 and (multishot - 1.0) / multishot > random.random()
def expand_fleet(ship_count, library):
# for the listing of numbers of ship we need to expand to each ship
# having it's own value for shield, armor, and hull
# TO DO: Make sure fleet when expanded is ordered by size
# From smallest to largest to make explode chance and
# shield bounce effects work out properly.
ships = []
for ship_type in ship_count:
schema = library.get_ship_schemata(ship_type)
ships.extend([Ship(
schema, # it's just a pointer...
ShipAttributes(schema.shield, schema.armor, schema.hull),
) for i in range(ship_count[ship_type])])
return Fleet(ships, ship_count)
def prune_fleet(attack_result):
"""
Prune an AttackResult of dead ships and restore shields/armor.
Returns the pruned fleet and a count of ships.
"""
fleet = []
count = {}
damage_taken = 0
for ship in attack_result.damaged_fleet:
if not ship.attributes.hull > 0:
continue
updated_debuffs = {}
if ship.debuffs.get('inactive'):
updated_debuffs['active'] = ship.debuffs.get('inactive')
# switch the inactive debuffs to active and drop current active ones
fleet.append(Ship(
ship.schema,
ShipAttributes(
min(ship.schema.shield,
(ship.attributes.shield + ship.schema.shield_recharge)
),
min(ship.schema.armor,
(ship.attributes.armor + ship.schema.armor_local_repair)
),
ship.attributes.hull,
),
updated_debuffs,
))
count[ship.schema.name] = count.get(ship.schema.name, 0) + 1
return Fleet(fleet, count)
def logi_subfleet(input_fleet):
"""
returns two sub_fleets of logi ships that can rep
[0]: shield
[1]: armor
ships which are jammed this turn are not entered into the list
"""
logi_shield = []
logi_armor = []
for ship in input_fleet:
if ship.debuffs.get('active', {}).get('ECM', 0) != 0:
# can't target to apply repairs
continue
if ship.schema.remote_shield:
logi_shield.append(ship)
if ship.schema.remote_armor:
|
else:
continue
return [logi_shield, logi_armor]
def repair_fleet(input_fleet):
"""
Have logistics ships do their job and repair other ships in the fleet
"""
logistics = logi_subfleet(input_fleet)
logi_shield = logistics[0]
logi_armor = logistics[1]
if (logi_shield == []) and (logi_armor == []):
return input_fleet
damaged_shield = []
# I have a bad feeling that this function won't last longer
# than a single commit :ohdear:
# also this has a slight bug that ships can rep themselves
# and a ship might get over repped, but that's actually intended
# shield first
for x in xrange(len(input_fleet)):
if input_fleet[x].attributes.shield != input_fleet[x].schema.shield:
damaged_shield.append(x)
if damaged_shield != []:
for ship in logi_shield:
rep_target = random.choice(damaged_shield)
input_fleet[rep_target] = Ship(
input_fleet[rep_target].schema,
ShipAttributes(
min(input_fleet[rep_target].schema.shield,
(input_fleet[rep_target].attributes.shield
+ ship.schema.remote_shield)
),
input_fleet[rep_target].attributes.armor,
input_fleet[rep_target].attributes.hull,
),
input_fleet[rep_target].debuffs,
)
damaged_armor = []
#armor second
for x in xrange(len(input_fleet)):
if input_fleet[x].attributes.armor != input_fleet[x].schema.armor:
damaged_armor.append(int(x))
if damaged_armor != []:
for ship in logi_armor:
rep_target = random.choice(damaged_armor)
input_fleet[rep_target] = Ship(
input_fleet[rep_target].schema,
ShipAttributes(
input_fleet[rep_target].attributes.shield,
min(input_fleet[rep_target].schema.armor,
(input_fleet[rep_target].attributes.armor
+ ship.schema.remote_armor)
),
input_fleet[rep_target].attributes.hull,
),
input_fleet[rep_target].debuffs,
)
return input_fleet
def fleet_attack(fleet_a, fleet_b):
"""
Do a round of fleet attack calculation.
Send an attack from fleet_a to fleet_b.
Appends the hit_by attribute on the victim ship in fleet_b for
each ship in fleet_a.
"""
# if fleet b is empty
if not fleet_b.ships:
return AttackResult(fleet_a, fleet_b.ships, 0, 0)
result = []
result.extend(fleet_b.ships)
shots = 0
damage = 0
for ship in fleet_a.ships:
firing = True
# I kind of wanted to do apply an "attacked_by" attribute to
# the target, but let's wait for that and just mutate this
# into the new ship. Something something hidden running
# complexity when dealing with a list (it's an array).
while firing:
target_id = random.randrange(0, len(result))
result[target_id] = ship_attack(ship, result[target_id])
firing = multishot(ship.schema, result[target_id].schema)
shots += 1
damage = sum((
(fresh.attributes.shield - damaged.attributes.shield) +
(fresh.attributes.armor - damaged.attributes.armor) +
(fresh.attributes.hull - damaged.attributes.hull)
for fresh, damaged in zip(fleet_b.ships, result)))
return AttackResult(fleet_a.ships, result, shots, damage)
class Battle(object):
"""
Battle between two fleets.
To implement joint fleets, simply convert the attackers to a list of
fleets, and create two lists and extend all member fleets into each
one for the two respective sides. Prune them separately for results.
"""
def __init__(self, attacker, defender, rounds, *a, **kw):
self.rounds = rounds
# attacker and defender are dictionaries with "ship_type": number
self.attacker_count = attacker
self.defender_count = defender
self.attacker_fleet = self.defender_fleet = None
self.round_results = []
def prepare(self, library):
# do all the fleet preparation pre-battle using this game
# library. Could be called initialize.
self.attacker_fleet = expand_fleet(self.attacker_count, library)
self.defender_fleet = expand_fleet(self.defender_count, library)
def calculate_round(self):
defender_damaged = fleet_attack(
self.attacker_fleet, self.defender_fleet)
attacker_damaged = fleet_attack(
self.defender_fleet, self.attacker_fleet)
attacker_repaired = repair_fleet(attacker_damaged.damaged_fleet)
defender_repaired = repair_fleet(defender_damaged.damaged_fleet)
defender_results = prune_fleet(defender_damaged)
attacker_results = prune_fleet(attacker_damaged)
# TODO figure out a better way to store round information that
# can accommodate multiple fleets.
self.round_results.append((
RoundResult(attacker_results.ship_count,
attacker_damaged.shots_taken, attacker_damaged.damage_taken),
RoundResult(defender_results.ship_count,
defender_damaged.shots_taken, defender_damaged.damage_taken),
))
self.defender_fleet = defender_results
self.attacker_fleet = attacker_results
def calculate_battle(self):
# avoid using round as variable name as it's a predefined method
# that might be useful when working with numbers.
for r in xrange(self.rounds):
if not (self.defender_fleet.ships and self.attacker_fleet.ships):
break
self.calculate_round()
# when/if we implement more than 1v1 then this will need to change
self.attacker_result = self.round_results[-1][0].ship_count
self.defender_result = self.round_results[-1][1].ship_count
| logi_armor.append(ship) | conditional_block |
battle.py | from __future__ import division
import random
import math
from collections import namedtuple
from idleiss.ship import Ship
from idleiss.ship import ShipAttributes
from idleiss.ship import ShipLibrary
SHIELD_BOUNCE_ZONE = 0.01 # max percentage damage to shield for bounce.
HULL_DANGER_ZONE = 0.70 # percentage remaining.
AttackResult = namedtuple('AttackResult',
['attacker_fleet', 'damaged_fleet', 'shots_taken', 'damage_taken'])
Fleet = namedtuple('Fleet',
['ships', 'ship_count'])
RoundResult = namedtuple('RoundResult',
['ship_count', 'shots_taken', 'damage_taken'])
def hull_breach(hull, max_hull, damage,
hull_danger_zone=HULL_DANGER_ZONE):
"""
Hull has a chance of being breached if less than the dangerzone.
Chance of survival is determined by how much % hull remains.
Returns input hull amount if RNG thinks it should, otherwise 0.
"""
damaged_hull = hull - damage
chance_of_survival = damaged_hull / max_hull
return not (chance_of_survival < hull_danger_zone and
chance_of_survival < random.random()) and damaged_hull or 0
def shield_bounce(shield, max_shield, damage,
shield_bounce_zone=SHIELD_BOUNCE_ZONE):
"""
Check whether the damage has enough power to damage the shield or
just harmlessly bounce off it, only if there is a shield available.
Shield will be returned if the above conditions are not met,
otherwise the current shield less damage taken will be returned.
Returns the new shield value.
"""
# really, shield can't become negative unless some external factors
# hacked it into one.
return ((damage < shield * shield_bounce_zone) and shield > 0 and
shield or shield - damage)
def size_damage_factor(weapon_size, target_size):
"""
Calculates damage factor on size. If weapon size is greater than
the target size, then only the area that falls within the target
will the damage be applied.
"""
if weapon_size <= target_size:
return damage
return (target_size ** 2) / (weapon_size ** 2)
def true_damage(damage, weapon_size, target_size, source_debuff, target_debuff):
"""
Calculates true damage based on parameters.
"""
# source_debuffs: tracking disruption
tracking_disrupt = 1 + source_debuff.get('active', {}).get(
'tracking_disruption', 0)
# target_debuffs: target painter, web
target_painter = 1 + target_debuff.get('active', {}).get(
'target_painter', 0)
web = 1 - target_debuff.get('active', {}).get(
'web', 0)
# painters gives > 1 multiplier to the target_size against target
# reason - painters expand the target to make it easier to hit.
# webbers give < 1 multiplier to the weapon_size against target
# reason - weapons can focus more damage on a webbed target
if web == 0 or weapon_size / web * tracking_disrupt <= \
target_size * target_painter:
return damage
true_weapon_size = (weapon_size / web) * tracking_disrupt
true_target_size = target_size * target_painter
damage_factor = size_damage_factor(true_weapon_size, true_target_size)
return int(math.ceil(damage_factor * damage))
def is_ship_alive(ship):
"""
Simple check to see if ship is alive.
"""
# If and when flag systems become advanced enough **FUN** things can
# be applied to make this check more hilarious.
return ship.attributes.hull > 0 # though it can't be < 0
def grab_debuffs(source, target_in):
"""
Retuns a dict of applied debufs calculated from ship schema
as well as ship attributes.
Source is ShipSchema
target_in is a Ship
"""
inactive = {}
sensor_str = target_in.schema.sensor_strength
target = target_in
# I'm sure there's a list comprehension thing that could be used
# to clean this up but I have no idea what
if source.target_painter:
if target.debuffs.get('inactive', {}).get('target_painter', 0) < \
source.target_painter:
inactive['target_painter'] = source.target_painter
if source.tracking_disruption:
if target.debuffs.get('inactive', {}).get('tracking_disruption', 0) < \
source.tracking_disruption:
inactive['tracking_disruption'] = source.tracking_disruption
if source.ECM:
if not target.debuffs.get('inactive', {}).get('ECM', 0):
if sensor_str == 0 or \
random.random() < (float(source.ECM) / sensor_str):
inactive['ECM'] = source.ECM
if source.web:
if target.debuffs.get('inactive', {}).get('web', 0) < source.web:
inactive['web'] = source.web
result = {}
if inactive:
result['inactive'] = inactive
if target.debuffs.get('active'):
result['active'] = target.debuffs.get('active')
return result
def | (attacker_ship, victim_ship):
"""
Do a ship attack.
Apply the attacker's schema onto the victim_ship as an attack
and return a new Ship object as the result.
"""
if not is_ship_alive(victim_ship):
# save us some time, it should be the same dead ship.
return victim_ship
if attacker_ship.debuffs.get('active', {}).get('ECM', 0) != 0:
# attacker is jammed can't attack or apply debuffs
return victim_ship
debuffs = grab_debuffs(attacker_ship.schema, victim_ship)
if attacker_ship.schema.firepower <= 0:
# damage doesn't need to be calculated, but debuffs do
return Ship(
victim_ship.schema,
ShipAttributes(
victim_ship.attributes.shield,
victim_ship.attributes.armor,
victim_ship.attributes.hull,
),
debuffs,
)
damage = true_damage(attacker_ship.schema.firepower,
attacker_ship.schema.weapon_size,
victim_ship.schema.size,
attacker_ship.debuffs,
victim_ship.debuffs
)
shield = shield_bounce(victim_ship.attributes.shield,
victim_ship.schema.shield, damage)
if shield == victim_ship.attributes.shield:
# it glanced off, don't need to worry about hull breaches when
# the weapon didn't even hit
return Ship(
victim_ship.schema,
ShipAttributes(
victim_ship.attributes.shield,
victim_ship.attributes.armor,
victim_ship.attributes.hull,
),
debuffs,
)
armor = victim_ship.attributes.armor + min(shield, 0)
hull = hull_breach(victim_ship.attributes.hull,
victim_ship.schema.hull, - min(armor, 0))
return Ship(
victim_ship.schema,
ShipAttributes(max(0, shield), max(0, armor), max(0, hull)),
debuffs,
)
def multishot(attacker_schema, victim_schema):
"""
Calculate multishot result based on the schemas.
"""
multishot = attacker_schema.multishot.get(victim_schema.name, 0)
return multishot > 0 and (multishot - 1.0) / multishot > random.random()
def expand_fleet(ship_count, library):
# for the listing of numbers of ship we need to expand to each ship
# having it's own value for shield, armor, and hull
# TO DO: Make sure fleet when expanded is ordered by size
# From smallest to largest to make explode chance and
# shield bounce effects work out properly.
ships = []
for ship_type in ship_count:
schema = library.get_ship_schemata(ship_type)
ships.extend([Ship(
schema, # it's just a pointer...
ShipAttributes(schema.shield, schema.armor, schema.hull),
) for i in range(ship_count[ship_type])])
return Fleet(ships, ship_count)
def prune_fleet(attack_result):
"""
Prune an AttackResult of dead ships and restore shields/armor.
Returns the pruned fleet and a count of ships.
"""
fleet = []
count = {}
damage_taken = 0
for ship in attack_result.damaged_fleet:
if not ship.attributes.hull > 0:
continue
updated_debuffs = {}
if ship.debuffs.get('inactive'):
updated_debuffs['active'] = ship.debuffs.get('inactive')
# switch the inactive debuffs to active and drop current active ones
fleet.append(Ship(
ship.schema,
ShipAttributes(
min(ship.schema.shield,
(ship.attributes.shield + ship.schema.shield_recharge)
),
min(ship.schema.armor,
(ship.attributes.armor + ship.schema.armor_local_repair)
),
ship.attributes.hull,
),
updated_debuffs,
))
count[ship.schema.name] = count.get(ship.schema.name, 0) + 1
return Fleet(fleet, count)
def logi_subfleet(input_fleet):
"""
returns two sub_fleets of logi ships that can rep
[0]: shield
[1]: armor
ships which are jammed this turn are not entered into the list
"""
logi_shield = []
logi_armor = []
for ship in input_fleet:
if ship.debuffs.get('active', {}).get('ECM', 0) != 0:
# can't target to apply repairs
continue
if ship.schema.remote_shield:
logi_shield.append(ship)
if ship.schema.remote_armor:
logi_armor.append(ship)
else:
continue
return [logi_shield, logi_armor]
def repair_fleet(input_fleet):
"""
Have logistics ships do their job and repair other ships in the fleet
"""
logistics = logi_subfleet(input_fleet)
logi_shield = logistics[0]
logi_armor = logistics[1]
if (logi_shield == []) and (logi_armor == []):
return input_fleet
damaged_shield = []
# I have a bad feeling that this function won't last longer
# than a single commit :ohdear:
# also this has a slight bug that ships can rep themselves
# and a ship might get over repped, but that's actually intended
# shield first
for x in xrange(len(input_fleet)):
if input_fleet[x].attributes.shield != input_fleet[x].schema.shield:
damaged_shield.append(x)
if damaged_shield != []:
for ship in logi_shield:
rep_target = random.choice(damaged_shield)
input_fleet[rep_target] = Ship(
input_fleet[rep_target].schema,
ShipAttributes(
min(input_fleet[rep_target].schema.shield,
(input_fleet[rep_target].attributes.shield
+ ship.schema.remote_shield)
),
input_fleet[rep_target].attributes.armor,
input_fleet[rep_target].attributes.hull,
),
input_fleet[rep_target].debuffs,
)
damaged_armor = []
#armor second
for x in xrange(len(input_fleet)):
if input_fleet[x].attributes.armor != input_fleet[x].schema.armor:
damaged_armor.append(int(x))
if damaged_armor != []:
for ship in logi_armor:
rep_target = random.choice(damaged_armor)
input_fleet[rep_target] = Ship(
input_fleet[rep_target].schema,
ShipAttributes(
input_fleet[rep_target].attributes.shield,
min(input_fleet[rep_target].schema.armor,
(input_fleet[rep_target].attributes.armor
+ ship.schema.remote_armor)
),
input_fleet[rep_target].attributes.hull,
),
input_fleet[rep_target].debuffs,
)
return input_fleet
def fleet_attack(fleet_a, fleet_b):
"""
Do a round of fleet attack calculation.
Send an attack from fleet_a to fleet_b.
Appends the hit_by attribute on the victim ship in fleet_b for
each ship in fleet_a.
"""
# if fleet b is empty
if not fleet_b.ships:
return AttackResult(fleet_a, fleet_b.ships, 0, 0)
result = []
result.extend(fleet_b.ships)
shots = 0
damage = 0
for ship in fleet_a.ships:
firing = True
# I kind of wanted to do apply an "attacked_by" attribute to
# the target, but let's wait for that and just mutate this
# into the new ship. Something something hidden running
# complexity when dealing with a list (it's an array).
while firing:
target_id = random.randrange(0, len(result))
result[target_id] = ship_attack(ship, result[target_id])
firing = multishot(ship.schema, result[target_id].schema)
shots += 1
damage = sum((
(fresh.attributes.shield - damaged.attributes.shield) +
(fresh.attributes.armor - damaged.attributes.armor) +
(fresh.attributes.hull - damaged.attributes.hull)
for fresh, damaged in zip(fleet_b.ships, result)))
return AttackResult(fleet_a.ships, result, shots, damage)
class Battle(object):
"""
Battle between two fleets.
To implement joint fleets, simply convert the attackers to a list of
fleets, and create two lists and extend all member fleets into each
one for the two respective sides. Prune them separately for results.
"""
def __init__(self, attacker, defender, rounds, *a, **kw):
self.rounds = rounds
# attacker and defender are dictionaries with "ship_type": number
self.attacker_count = attacker
self.defender_count = defender
self.attacker_fleet = self.defender_fleet = None
self.round_results = []
def prepare(self, library):
# do all the fleet preparation pre-battle using this game
# library. Could be called initialize.
self.attacker_fleet = expand_fleet(self.attacker_count, library)
self.defender_fleet = expand_fleet(self.defender_count, library)
def calculate_round(self):
defender_damaged = fleet_attack(
self.attacker_fleet, self.defender_fleet)
attacker_damaged = fleet_attack(
self.defender_fleet, self.attacker_fleet)
attacker_repaired = repair_fleet(attacker_damaged.damaged_fleet)
defender_repaired = repair_fleet(defender_damaged.damaged_fleet)
defender_results = prune_fleet(defender_damaged)
attacker_results = prune_fleet(attacker_damaged)
# TODO figure out a better way to store round information that
# can accommodate multiple fleets.
self.round_results.append((
RoundResult(attacker_results.ship_count,
attacker_damaged.shots_taken, attacker_damaged.damage_taken),
RoundResult(defender_results.ship_count,
defender_damaged.shots_taken, defender_damaged.damage_taken),
))
self.defender_fleet = defender_results
self.attacker_fleet = attacker_results
def calculate_battle(self):
# avoid using round as variable name as it's a predefined method
# that might be useful when working with numbers.
for r in xrange(self.rounds):
if not (self.defender_fleet.ships and self.attacker_fleet.ships):
break
self.calculate_round()
# when/if we implement more than 1v1 then this will need to change
self.attacker_result = self.round_results[-1][0].ship_count
self.defender_result = self.round_results[-1][1].ship_count
| ship_attack | identifier_name |
battle.py | from __future__ import division
import random
import math
from collections import namedtuple
from idleiss.ship import Ship
from idleiss.ship import ShipAttributes
from idleiss.ship import ShipLibrary
SHIELD_BOUNCE_ZONE = 0.01 # max percentage damage to shield for bounce.
HULL_DANGER_ZONE = 0.70 # percentage remaining.
AttackResult = namedtuple('AttackResult',
['attacker_fleet', 'damaged_fleet', 'shots_taken', 'damage_taken'])
Fleet = namedtuple('Fleet',
['ships', 'ship_count'])
RoundResult = namedtuple('RoundResult',
['ship_count', 'shots_taken', 'damage_taken'])
def hull_breach(hull, max_hull, damage,
hull_danger_zone=HULL_DANGER_ZONE):
"""
Hull has a chance of being breached if less than the dangerzone.
Chance of survival is determined by how much % hull remains.
Returns input hull amount if RNG thinks it should, otherwise 0.
"""
damaged_hull = hull - damage
chance_of_survival = damaged_hull / max_hull
return not (chance_of_survival < hull_danger_zone and
chance_of_survival < random.random()) and damaged_hull or 0
def shield_bounce(shield, max_shield, damage,
shield_bounce_zone=SHIELD_BOUNCE_ZONE):
"""
Check whether the damage has enough power to damage the shield or
just harmlessly bounce off it, only if there is a shield available.
Shield will be returned if the above conditions are not met,
otherwise the current shield less damage taken will be returned.
Returns the new shield value.
"""
# really, shield can't become negative unless some external factors
# hacked it into one.
return ((damage < shield * shield_bounce_zone) and shield > 0 and
shield or shield - damage)
def size_damage_factor(weapon_size, target_size):
"""
Calculates damage factor on size. If weapon size is greater than
the target size, then only the area that falls within the target
will the damage be applied.
"""
if weapon_size <= target_size:
return damage
return (target_size ** 2) / (weapon_size ** 2)
def true_damage(damage, weapon_size, target_size, source_debuff, target_debuff):
"""
Calculates true damage based on parameters.
"""
# source_debuffs: tracking disruption
tracking_disrupt = 1 + source_debuff.get('active', {}).get(
'tracking_disruption', 0)
# target_debuffs: target painter, web
target_painter = 1 + target_debuff.get('active', {}).get(
'target_painter', 0)
web = 1 - target_debuff.get('active', {}).get(
'web', 0)
# painters gives > 1 multiplier to the target_size against target
# reason - painters expand the target to make it easier to hit.
# webbers give < 1 multiplier to the weapon_size against target
# reason - weapons can focus more damage on a webbed target
if web == 0 or weapon_size / web * tracking_disrupt <= \
target_size * target_painter:
return damage
true_weapon_size = (weapon_size / web) * tracking_disrupt
true_target_size = target_size * target_painter
damage_factor = size_damage_factor(true_weapon_size, true_target_size)
return int(math.ceil(damage_factor * damage))
def is_ship_alive(ship):
"""
Simple check to see if ship is alive.
""" | return ship.attributes.hull > 0 # though it can't be < 0
def grab_debuffs(source, target_in):
"""
Retuns a dict of applied debufs calculated from ship schema
as well as ship attributes.
Source is ShipSchema
target_in is a Ship
"""
inactive = {}
sensor_str = target_in.schema.sensor_strength
target = target_in
# I'm sure there's a list comprehension thing that could be used
# to clean this up but I have no idea what
if source.target_painter:
if target.debuffs.get('inactive', {}).get('target_painter', 0) < \
source.target_painter:
inactive['target_painter'] = source.target_painter
if source.tracking_disruption:
if target.debuffs.get('inactive', {}).get('tracking_disruption', 0) < \
source.tracking_disruption:
inactive['tracking_disruption'] = source.tracking_disruption
if source.ECM:
if not target.debuffs.get('inactive', {}).get('ECM', 0):
if sensor_str == 0 or \
random.random() < (float(source.ECM) / sensor_str):
inactive['ECM'] = source.ECM
if source.web:
if target.debuffs.get('inactive', {}).get('web', 0) < source.web:
inactive['web'] = source.web
result = {}
if inactive:
result['inactive'] = inactive
if target.debuffs.get('active'):
result['active'] = target.debuffs.get('active')
return result
def ship_attack(attacker_ship, victim_ship):
"""
Do a ship attack.
Apply the attacker's schema onto the victim_ship as an attack
and return a new Ship object as the result.
"""
if not is_ship_alive(victim_ship):
# save us some time, it should be the same dead ship.
return victim_ship
if attacker_ship.debuffs.get('active', {}).get('ECM', 0) != 0:
# attacker is jammed can't attack or apply debuffs
return victim_ship
debuffs = grab_debuffs(attacker_ship.schema, victim_ship)
if attacker_ship.schema.firepower <= 0:
# damage doesn't need to be calculated, but debuffs do
return Ship(
victim_ship.schema,
ShipAttributes(
victim_ship.attributes.shield,
victim_ship.attributes.armor,
victim_ship.attributes.hull,
),
debuffs,
)
damage = true_damage(attacker_ship.schema.firepower,
attacker_ship.schema.weapon_size,
victim_ship.schema.size,
attacker_ship.debuffs,
victim_ship.debuffs
)
shield = shield_bounce(victim_ship.attributes.shield,
victim_ship.schema.shield, damage)
if shield == victim_ship.attributes.shield:
# it glanced off, don't need to worry about hull breaches when
# the weapon didn't even hit
return Ship(
victim_ship.schema,
ShipAttributes(
victim_ship.attributes.shield,
victim_ship.attributes.armor,
victim_ship.attributes.hull,
),
debuffs,
)
armor = victim_ship.attributes.armor + min(shield, 0)
hull = hull_breach(victim_ship.attributes.hull,
victim_ship.schema.hull, - min(armor, 0))
return Ship(
victim_ship.schema,
ShipAttributes(max(0, shield), max(0, armor), max(0, hull)),
debuffs,
)
def multishot(attacker_schema, victim_schema):
"""
Calculate multishot result based on the schemas.
"""
multishot = attacker_schema.multishot.get(victim_schema.name, 0)
return multishot > 0 and (multishot - 1.0) / multishot > random.random()
def expand_fleet(ship_count, library):
# for the listing of numbers of ship we need to expand to each ship
# having it's own value for shield, armor, and hull
# TO DO: Make sure fleet when expanded is ordered by size
# From smallest to largest to make explode chance and
# shield bounce effects work out properly.
ships = []
for ship_type in ship_count:
schema = library.get_ship_schemata(ship_type)
ships.extend([Ship(
schema, # it's just a pointer...
ShipAttributes(schema.shield, schema.armor, schema.hull),
) for i in range(ship_count[ship_type])])
return Fleet(ships, ship_count)
def prune_fleet(attack_result):
"""
Prune an AttackResult of dead ships and restore shields/armor.
Returns the pruned fleet and a count of ships.
"""
fleet = []
count = {}
damage_taken = 0
for ship in attack_result.damaged_fleet:
if not ship.attributes.hull > 0:
continue
updated_debuffs = {}
if ship.debuffs.get('inactive'):
updated_debuffs['active'] = ship.debuffs.get('inactive')
# switch the inactive debuffs to active and drop current active ones
fleet.append(Ship(
ship.schema,
ShipAttributes(
min(ship.schema.shield,
(ship.attributes.shield + ship.schema.shield_recharge)
),
min(ship.schema.armor,
(ship.attributes.armor + ship.schema.armor_local_repair)
),
ship.attributes.hull,
),
updated_debuffs,
))
count[ship.schema.name] = count.get(ship.schema.name, 0) + 1
return Fleet(fleet, count)
def logi_subfleet(input_fleet):
"""
returns two sub_fleets of logi ships that can rep
[0]: shield
[1]: armor
ships which are jammed this turn are not entered into the list
"""
logi_shield = []
logi_armor = []
for ship in input_fleet:
if ship.debuffs.get('active', {}).get('ECM', 0) != 0:
# can't target to apply repairs
continue
if ship.schema.remote_shield:
logi_shield.append(ship)
if ship.schema.remote_armor:
logi_armor.append(ship)
else:
continue
return [logi_shield, logi_armor]
def repair_fleet(input_fleet):
"""
Have logistics ships do their job and repair other ships in the fleet
"""
logistics = logi_subfleet(input_fleet)
logi_shield = logistics[0]
logi_armor = logistics[1]
if (logi_shield == []) and (logi_armor == []):
return input_fleet
damaged_shield = []
# I have a bad feeling that this function won't last longer
# than a single commit :ohdear:
# also this has a slight bug that ships can rep themselves
# and a ship might get over repped, but that's actually intended
# shield first
for x in xrange(len(input_fleet)):
if input_fleet[x].attributes.shield != input_fleet[x].schema.shield:
damaged_shield.append(x)
if damaged_shield != []:
for ship in logi_shield:
rep_target = random.choice(damaged_shield)
input_fleet[rep_target] = Ship(
input_fleet[rep_target].schema,
ShipAttributes(
min(input_fleet[rep_target].schema.shield,
(input_fleet[rep_target].attributes.shield
+ ship.schema.remote_shield)
),
input_fleet[rep_target].attributes.armor,
input_fleet[rep_target].attributes.hull,
),
input_fleet[rep_target].debuffs,
)
damaged_armor = []
#armor second
for x in xrange(len(input_fleet)):
if input_fleet[x].attributes.armor != input_fleet[x].schema.armor:
damaged_armor.append(int(x))
if damaged_armor != []:
for ship in logi_armor:
rep_target = random.choice(damaged_armor)
input_fleet[rep_target] = Ship(
input_fleet[rep_target].schema,
ShipAttributes(
input_fleet[rep_target].attributes.shield,
min(input_fleet[rep_target].schema.armor,
(input_fleet[rep_target].attributes.armor
+ ship.schema.remote_armor)
),
input_fleet[rep_target].attributes.hull,
),
input_fleet[rep_target].debuffs,
)
return input_fleet
def fleet_attack(fleet_a, fleet_b):
"""
Do a round of fleet attack calculation.
Send an attack from fleet_a to fleet_b.
Appends the hit_by attribute on the victim ship in fleet_b for
each ship in fleet_a.
"""
# if fleet b is empty
if not fleet_b.ships:
return AttackResult(fleet_a, fleet_b.ships, 0, 0)
result = []
result.extend(fleet_b.ships)
shots = 0
damage = 0
for ship in fleet_a.ships:
firing = True
# I kind of wanted to do apply an "attacked_by" attribute to
# the target, but let's wait for that and just mutate this
# into the new ship. Something something hidden running
# complexity when dealing with a list (it's an array).
while firing:
target_id = random.randrange(0, len(result))
result[target_id] = ship_attack(ship, result[target_id])
firing = multishot(ship.schema, result[target_id].schema)
shots += 1
damage = sum((
(fresh.attributes.shield - damaged.attributes.shield) +
(fresh.attributes.armor - damaged.attributes.armor) +
(fresh.attributes.hull - damaged.attributes.hull)
for fresh, damaged in zip(fleet_b.ships, result)))
return AttackResult(fleet_a.ships, result, shots, damage)
class Battle(object):
"""
Battle between two fleets.
To implement joint fleets, simply convert the attackers to a list of
fleets, and create two lists and extend all member fleets into each
one for the two respective sides. Prune them separately for results.
"""
def __init__(self, attacker, defender, rounds, *a, **kw):
self.rounds = rounds
# attacker and defender are dictionaries with "ship_type": number
self.attacker_count = attacker
self.defender_count = defender
self.attacker_fleet = self.defender_fleet = None
self.round_results = []
def prepare(self, library):
# do all the fleet preparation pre-battle using this game
# library. Could be called initialize.
self.attacker_fleet = expand_fleet(self.attacker_count, library)
self.defender_fleet = expand_fleet(self.defender_count, library)
def calculate_round(self):
defender_damaged = fleet_attack(
self.attacker_fleet, self.defender_fleet)
attacker_damaged = fleet_attack(
self.defender_fleet, self.attacker_fleet)
attacker_repaired = repair_fleet(attacker_damaged.damaged_fleet)
defender_repaired = repair_fleet(defender_damaged.damaged_fleet)
defender_results = prune_fleet(defender_damaged)
attacker_results = prune_fleet(attacker_damaged)
# TODO figure out a better way to store round information that
# can accommodate multiple fleets.
self.round_results.append((
RoundResult(attacker_results.ship_count,
attacker_damaged.shots_taken, attacker_damaged.damage_taken),
RoundResult(defender_results.ship_count,
defender_damaged.shots_taken, defender_damaged.damage_taken),
))
self.defender_fleet = defender_results
self.attacker_fleet = attacker_results
def calculate_battle(self):
# avoid using round as variable name as it's a predefined method
# that might be useful when working with numbers.
for r in xrange(self.rounds):
if not (self.defender_fleet.ships and self.attacker_fleet.ships):
break
self.calculate_round()
# when/if we implement more than 1v1 then this will need to change
self.attacker_result = self.round_results[-1][0].ship_count
self.defender_result = self.round_results[-1][1].ship_count |
# If and when flag systems become advanced enough **FUN** things can
# be applied to make this check more hilarious. | random_line_split |
battle.py | from __future__ import division
import random
import math
from collections import namedtuple
from idleiss.ship import Ship
from idleiss.ship import ShipAttributes
from idleiss.ship import ShipLibrary
SHIELD_BOUNCE_ZONE = 0.01 # max percentage damage to shield for bounce.
HULL_DANGER_ZONE = 0.70 # percentage remaining.
AttackResult = namedtuple('AttackResult',
['attacker_fleet', 'damaged_fleet', 'shots_taken', 'damage_taken'])
Fleet = namedtuple('Fleet',
['ships', 'ship_count'])
RoundResult = namedtuple('RoundResult',
['ship_count', 'shots_taken', 'damage_taken'])
def hull_breach(hull, max_hull, damage,
hull_danger_zone=HULL_DANGER_ZONE):
"""
Hull has a chance of being breached if less than the dangerzone.
Chance of survival is determined by how much % hull remains.
Returns input hull amount if RNG thinks it should, otherwise 0.
"""
damaged_hull = hull - damage
chance_of_survival = damaged_hull / max_hull
return not (chance_of_survival < hull_danger_zone and
chance_of_survival < random.random()) and damaged_hull or 0
def shield_bounce(shield, max_shield, damage,
shield_bounce_zone=SHIELD_BOUNCE_ZONE):
"""
Check whether the damage has enough power to damage the shield or
just harmlessly bounce off it, only if there is a shield available.
Shield will be returned if the above conditions are not met,
otherwise the current shield less damage taken will be returned.
Returns the new shield value.
"""
# really, shield can't become negative unless some external factors
# hacked it into one.
return ((damage < shield * shield_bounce_zone) and shield > 0 and
shield or shield - damage)
def size_damage_factor(weapon_size, target_size):
"""
Calculates damage factor on size. If weapon size is greater than
the target size, then only the area that falls within the target
will the damage be applied.
"""
if weapon_size <= target_size:
return damage
return (target_size ** 2) / (weapon_size ** 2)
def true_damage(damage, weapon_size, target_size, source_debuff, target_debuff):
"""
Calculates true damage based on parameters.
"""
# source_debuffs: tracking disruption
tracking_disrupt = 1 + source_debuff.get('active', {}).get(
'tracking_disruption', 0)
# target_debuffs: target painter, web
target_painter = 1 + target_debuff.get('active', {}).get(
'target_painter', 0)
web = 1 - target_debuff.get('active', {}).get(
'web', 0)
# painters gives > 1 multiplier to the target_size against target
# reason - painters expand the target to make it easier to hit.
# webbers give < 1 multiplier to the weapon_size against target
# reason - weapons can focus more damage on a webbed target
if web == 0 or weapon_size / web * tracking_disrupt <= \
target_size * target_painter:
return damage
true_weapon_size = (weapon_size / web) * tracking_disrupt
true_target_size = target_size * target_painter
damage_factor = size_damage_factor(true_weapon_size, true_target_size)
return int(math.ceil(damage_factor * damage))
def is_ship_alive(ship):
"""
Simple check to see if ship is alive.
"""
# If and when flag systems become advanced enough **FUN** things can
# be applied to make this check more hilarious.
return ship.attributes.hull > 0 # though it can't be < 0
def grab_debuffs(source, target_in):
|
def ship_attack(attacker_ship, victim_ship):
"""
Do a ship attack.
Apply the attacker's schema onto the victim_ship as an attack
and return a new Ship object as the result.
"""
if not is_ship_alive(victim_ship):
# save us some time, it should be the same dead ship.
return victim_ship
if attacker_ship.debuffs.get('active', {}).get('ECM', 0) != 0:
# attacker is jammed can't attack or apply debuffs
return victim_ship
debuffs = grab_debuffs(attacker_ship.schema, victim_ship)
if attacker_ship.schema.firepower <= 0:
# damage doesn't need to be calculated, but debuffs do
return Ship(
victim_ship.schema,
ShipAttributes(
victim_ship.attributes.shield,
victim_ship.attributes.armor,
victim_ship.attributes.hull,
),
debuffs,
)
damage = true_damage(attacker_ship.schema.firepower,
attacker_ship.schema.weapon_size,
victim_ship.schema.size,
attacker_ship.debuffs,
victim_ship.debuffs
)
shield = shield_bounce(victim_ship.attributes.shield,
victim_ship.schema.shield, damage)
if shield == victim_ship.attributes.shield:
# it glanced off, don't need to worry about hull breaches when
# the weapon didn't even hit
return Ship(
victim_ship.schema,
ShipAttributes(
victim_ship.attributes.shield,
victim_ship.attributes.armor,
victim_ship.attributes.hull,
),
debuffs,
)
armor = victim_ship.attributes.armor + min(shield, 0)
hull = hull_breach(victim_ship.attributes.hull,
victim_ship.schema.hull, - min(armor, 0))
return Ship(
victim_ship.schema,
ShipAttributes(max(0, shield), max(0, armor), max(0, hull)),
debuffs,
)
def multishot(attacker_schema, victim_schema):
"""
Calculate multishot result based on the schemas.
"""
multishot = attacker_schema.multishot.get(victim_schema.name, 0)
return multishot > 0 and (multishot - 1.0) / multishot > random.random()
def expand_fleet(ship_count, library):
# for the listing of numbers of ship we need to expand to each ship
# having it's own value for shield, armor, and hull
# TO DO: Make sure fleet when expanded is ordered by size
# From smallest to largest to make explode chance and
# shield bounce effects work out properly.
ships = []
for ship_type in ship_count:
schema = library.get_ship_schemata(ship_type)
ships.extend([Ship(
schema, # it's just a pointer...
ShipAttributes(schema.shield, schema.armor, schema.hull),
) for i in range(ship_count[ship_type])])
return Fleet(ships, ship_count)
def prune_fleet(attack_result):
"""
Prune an AttackResult of dead ships and restore shields/armor.
Returns the pruned fleet and a count of ships.
"""
fleet = []
count = {}
damage_taken = 0
for ship in attack_result.damaged_fleet:
if not ship.attributes.hull > 0:
continue
updated_debuffs = {}
if ship.debuffs.get('inactive'):
updated_debuffs['active'] = ship.debuffs.get('inactive')
# switch the inactive debuffs to active and drop current active ones
fleet.append(Ship(
ship.schema,
ShipAttributes(
min(ship.schema.shield,
(ship.attributes.shield + ship.schema.shield_recharge)
),
min(ship.schema.armor,
(ship.attributes.armor + ship.schema.armor_local_repair)
),
ship.attributes.hull,
),
updated_debuffs,
))
count[ship.schema.name] = count.get(ship.schema.name, 0) + 1
return Fleet(fleet, count)
def logi_subfleet(input_fleet):
"""
returns two sub_fleets of logi ships that can rep
[0]: shield
[1]: armor
ships which are jammed this turn are not entered into the list
"""
logi_shield = []
logi_armor = []
for ship in input_fleet:
if ship.debuffs.get('active', {}).get('ECM', 0) != 0:
# can't target to apply repairs
continue
if ship.schema.remote_shield:
logi_shield.append(ship)
if ship.schema.remote_armor:
logi_armor.append(ship)
else:
continue
return [logi_shield, logi_armor]
def repair_fleet(input_fleet):
"""
Have logistics ships do their job and repair other ships in the fleet
"""
logistics = logi_subfleet(input_fleet)
logi_shield = logistics[0]
logi_armor = logistics[1]
if (logi_shield == []) and (logi_armor == []):
return input_fleet
damaged_shield = []
# I have a bad feeling that this function won't last longer
# than a single commit :ohdear:
# also this has a slight bug that ships can rep themselves
# and a ship might get over repped, but that's actually intended
# shield first
for x in xrange(len(input_fleet)):
if input_fleet[x].attributes.shield != input_fleet[x].schema.shield:
damaged_shield.append(x)
if damaged_shield != []:
for ship in logi_shield:
rep_target = random.choice(damaged_shield)
input_fleet[rep_target] = Ship(
input_fleet[rep_target].schema,
ShipAttributes(
min(input_fleet[rep_target].schema.shield,
(input_fleet[rep_target].attributes.shield
+ ship.schema.remote_shield)
),
input_fleet[rep_target].attributes.armor,
input_fleet[rep_target].attributes.hull,
),
input_fleet[rep_target].debuffs,
)
damaged_armor = []
#armor second
for x in xrange(len(input_fleet)):
if input_fleet[x].attributes.armor != input_fleet[x].schema.armor:
damaged_armor.append(int(x))
if damaged_armor != []:
for ship in logi_armor:
rep_target = random.choice(damaged_armor)
input_fleet[rep_target] = Ship(
input_fleet[rep_target].schema,
ShipAttributes(
input_fleet[rep_target].attributes.shield,
min(input_fleet[rep_target].schema.armor,
(input_fleet[rep_target].attributes.armor
+ ship.schema.remote_armor)
),
input_fleet[rep_target].attributes.hull,
),
input_fleet[rep_target].debuffs,
)
return input_fleet
def fleet_attack(fleet_a, fleet_b):
"""
Do a round of fleet attack calculation.
Send an attack from fleet_a to fleet_b.
Appends the hit_by attribute on the victim ship in fleet_b for
each ship in fleet_a.
"""
# if fleet b is empty
if not fleet_b.ships:
return AttackResult(fleet_a, fleet_b.ships, 0, 0)
result = []
result.extend(fleet_b.ships)
shots = 0
damage = 0
for ship in fleet_a.ships:
firing = True
# I kind of wanted to do apply an "attacked_by" attribute to
# the target, but let's wait for that and just mutate this
# into the new ship. Something something hidden running
# complexity when dealing with a list (it's an array).
while firing:
target_id = random.randrange(0, len(result))
result[target_id] = ship_attack(ship, result[target_id])
firing = multishot(ship.schema, result[target_id].schema)
shots += 1
damage = sum((
(fresh.attributes.shield - damaged.attributes.shield) +
(fresh.attributes.armor - damaged.attributes.armor) +
(fresh.attributes.hull - damaged.attributes.hull)
for fresh, damaged in zip(fleet_b.ships, result)))
return AttackResult(fleet_a.ships, result, shots, damage)
class Battle(object):
"""
Battle between two fleets.
To implement joint fleets, simply convert the attackers to a list of
fleets, and create two lists and extend all member fleets into each
one for the two respective sides. Prune them separately for results.
"""
def __init__(self, attacker, defender, rounds, *a, **kw):
self.rounds = rounds
# attacker and defender are dictionaries with "ship_type": number
self.attacker_count = attacker
self.defender_count = defender
self.attacker_fleet = self.defender_fleet = None
self.round_results = []
def prepare(self, library):
# do all the fleet preparation pre-battle using this game
# library. Could be called initialize.
self.attacker_fleet = expand_fleet(self.attacker_count, library)
self.defender_fleet = expand_fleet(self.defender_count, library)
def calculate_round(self):
defender_damaged = fleet_attack(
self.attacker_fleet, self.defender_fleet)
attacker_damaged = fleet_attack(
self.defender_fleet, self.attacker_fleet)
attacker_repaired = repair_fleet(attacker_damaged.damaged_fleet)
defender_repaired = repair_fleet(defender_damaged.damaged_fleet)
defender_results = prune_fleet(defender_damaged)
attacker_results = prune_fleet(attacker_damaged)
# TODO figure out a better way to store round information that
# can accommodate multiple fleets.
self.round_results.append((
RoundResult(attacker_results.ship_count,
attacker_damaged.shots_taken, attacker_damaged.damage_taken),
RoundResult(defender_results.ship_count,
defender_damaged.shots_taken, defender_damaged.damage_taken),
))
self.defender_fleet = defender_results
self.attacker_fleet = attacker_results
def calculate_battle(self):
# avoid using round as variable name as it's a predefined method
# that might be useful when working with numbers.
for r in xrange(self.rounds):
if not (self.defender_fleet.ships and self.attacker_fleet.ships):
break
self.calculate_round()
# when/if we implement more than 1v1 then this will need to change
self.attacker_result = self.round_results[-1][0].ship_count
self.defender_result = self.round_results[-1][1].ship_count
| """
Retuns a dict of applied debufs calculated from ship schema
as well as ship attributes.
Source is ShipSchema
target_in is a Ship
"""
inactive = {}
sensor_str = target_in.schema.sensor_strength
target = target_in
# I'm sure there's a list comprehension thing that could be used
# to clean this up but I have no idea what
if source.target_painter:
if target.debuffs.get('inactive', {}).get('target_painter', 0) < \
source.target_painter:
inactive['target_painter'] = source.target_painter
if source.tracking_disruption:
if target.debuffs.get('inactive', {}).get('tracking_disruption', 0) < \
source.tracking_disruption:
inactive['tracking_disruption'] = source.tracking_disruption
if source.ECM:
if not target.debuffs.get('inactive', {}).get('ECM', 0):
if sensor_str == 0 or \
random.random() < (float(source.ECM) / sensor_str):
inactive['ECM'] = source.ECM
if source.web:
if target.debuffs.get('inactive', {}).get('web', 0) < source.web:
inactive['web'] = source.web
result = {}
if inactive:
result['inactive'] = inactive
if target.debuffs.get('active'):
result['active'] = target.debuffs.get('active')
return result | identifier_body |
lib.rs | //! This is a platform-agnostic Rust driver for the Sensirion STS30, STS31, and STS35
//! high-accuracy, low-power, I2C digital temperature sensors, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! TODO: More information here.
//!
//! The driver borrows liberally from:
//! - eldruin's tmp1x2-rs driver for Texas Instruments TMP102 and TMP112, https://github.com/eldruin/tmp1x2-rs, and
//! - dbrgn's shtcx-rs driver for Sensirion SHTCx temperature/humidity sensors, https://github.com/dbrgn/shtcx-rs.
#![deny(unsafe_code)]
#![no_std]
// TODO: add deny missing docs, and doc root url
mod crc;
use core::marker::PhantomData;
use embedded_hal::blocking::i2c; // TODO: move to using nb if the crate adds a nonblocking I2C.
pub use nb;
/// Possible errors in this crate
#[derive(Debug)]
pub enum Error<E> {
/// I²C bus error
I2C(E),
/// CRC checksum validation failed
Crc,
}
/// Error type for mode changes.
/// This allows us to retrieve the unchanged device in case of an error.
#[derive(Debug)]
pub enum ModeChangeError<E, DEV> {
/// I²C bus error while changing modes
///
/// `E` is the error that happened.
/// `DEV` is the device with the mode unchanged.
I2C(E, DEV),
}
/// Conversion rate for continuous conversion mode.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConversionRate {
/// 0.5 Hz
_0_5Hz,
/// 1 Hz
_1Hz,
/// 2 Hz
_2Hz,
/// 4 Hz
_4Hz,
/// 10 Hz
_10Hz,
}
/// Repeatability condition for both one-shot and continuous modes.
/// From the datasheet: the value is 3 * standard deviation of measurements at constant ambient.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Repeatability {
/// High repeatability 0.04°C
High,
/// Medium repeatability 0.08°C
Medium,
/// Low repeatability 0.15°C
Low,
}
impl Default for Repeatability {
fn default() -> Self {
Repeatability::Low
}
}
/// Possible peripheral addresses
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PeripheralAddr {
/// Default address, with address pin held low
PinLow,
/// Address with the pin held high
PinHigh,
}
impl Default for PeripheralAddr {
fn default() -> Self {
PeripheralAddr::PinLow
}
}
impl PeripheralAddr {
/// Return the 7-bit I2C address corresponding to the enum.
fn as_byte(self) -> u8 {
match self {
PeripheralAddr::PinLow => 0x4A,
PeripheralAddr::PinHigh => 0x4B,
}
}
}
/// I²C commands sent to the sensor
#[derive(Debug)]
enum Command {
/// Initiate a single-shot conversion.
StartSingleShot {repeatability: Repeatability},
/// Change to periodic mode with the given repeatability and conversion rates.
StartPeriodic {
repeatability: Repeatability,
conversion_rate: ConversionRate,
},
/// Fetch data from the sensor when it is in continuous mode.
FetchData,
/// Break out of continuous mode and return to one-shot mdoe.
Break,
/// Issue a software reset.
SoftReset,
/// Turn the heater on for plausibility checking.
HeaterOn,
/// Turn the heater off.
HeaterOff,
/// Read the status register.
ReadStatus,
/// Clear the status register.
ClearStatus,
}
impl Command {
/// Return a slice of two bytes corresponding to the command.
/// These are the bytes the sensor expects.
fn as_bytes(self) -> [u8; 2] {
match self {
// For single shot, listing each variant directly.
Command::StartSingleShot{repeatability: Repeatability::High} => [0x24, 0x00],
Command::StartSingleShot{repeatability: Repeatability::Medium} => [0x24, 0x0B],
Command::StartSingleShot{repeatability: Repeatability::Low} => [0x24, 0x16],
// For periodic, using nested matches, more lines of code, but hopefully more readable.
Command::StartPeriodic{repeatability: r, conversion_rate: c} => {
match c {
ConversionRate::_0_5Hz => {
match r {
Repeatability::High => [0x20, 0x32],
Repeatability::Medium => [0x20, 0x24],
Repeatability::Low => [0x20, 0x2F],
}
},
ConversionRate::_1Hz => {
match r {
Repeatability::High => [0x21, 0x30],
Repeatability::Medium => [0x21, 0x26],
Repeatability::Low => [0x21, 0x2D],
}
},
ConversionRate::_2Hz => {
match r {
Repeatability::High => [0x22, 0x36],
Repeatability::Medium => [0x22, 0x20],
Repeatability::Low => [0x22, 0x2B],
}
},
ConversionRate::_4Hz => {
match r {
Repeatability::High => [0x23, 0x34],
Repeatability::Medium => [0x23, 0x22],
Repeatability::Low => [0x23, 0x29],
}
},
ConversionRate::_10Hz => {
match r {
Repeatability::High => [0x27, 0x37],
Repeatability::Medium => [0x27, 0x21],
Repeatability::Low => [0x27, 0x2A],
}
},
}
},
Command::FetchData => [0xE0, 0x00],
Command::Break => [0x30, 0x93],
Command::SoftReset => [0x30, 0xA2],
Command::HeaterOn => [0x30, 0x6D],
Command::HeaterOff => [0x30, 0x66],
Command::ReadStatus => [0xF3, 0x2D],
Command::ClearStatus => [0x30, 0x41],
}
}
}
// TODO: this is from shtcx, and I need to figure out how/whether to implement
pub trait MeasurementDuration {
/// Return the maximum measurement duration according to repeatability
/// in microseconds
fn measurement_duration_us(repeat: Repeatability) -> u16;
}
#[doc(hidden)]
// Type states for one-shot and continuous modes.
pub mod marker {
pub mod mode {
#[derive(Debug)]
pub struct OneShot(());
#[derive(Debug)]
pub struct Continuous(());
}
}
/// Device Driver
#[derive(Debug, Default)]
pub struct Sts3x<I2C, MODE> {
/// The I2C device implementation
i2c: I2C,
/// The 7-bit I2C device address
address: u8,
/// The present repeatabiliy setting
repeatability: Repeatability,
/// A temperature measurement was started.
temp_measurement_started: bool,
_mode: PhantomData<MODE>,
}
// Implement struct creation for OneShot only so that it is only possible to create a one-shot version.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Write<Error = E>,
{
/// Create new instance of the Sts3x device.
/// Defaults to Low repeatability for power savings.
/// Change repeatability with set_repeatability().
///
/// By default, the device starts in one-shot mode.
pub fn new(i2c: I2C, address: PeripheralAddr) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability: Repeatability::default(),
temp_measurement_started: false,
_mode: PhantomData,
}
}
/// Create new instance of the Sts3x device, choosing a Repeatability.
///
/// By default, the device starts in one-shot mode.
pub fn new_with_repeatability(i2c: I2C, address: PeripheralAddr, repeatability: Repeatability) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability,
temp_measurement_started: false,
_mode: PhantomData,
}
}
}
// Methods shared by both single-shot and continuous modes
impl<I2C, MODE, E> Sts3x<I2C, MODE>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Destroy driver instance and return the I2C bus instance.
pub fn destroy(self) -> I2C {
| // write and read private methods
/// Write an I2C command to the sensor
fn send_command(&mut self, command: Command) -> Result<(), Error<E>> {
self.i2c
.write(self.address, &command.as_bytes())
.map_err(Error::I2C)
}
/// Read and check the CRC.
/// Returns a Result with u16 corresponding to the MSB,LSB of the first
/// two bytes of the buffer.
fn read_with_crc(&mut self) -> Result<u16, Error<E>> {
let mut buf = [0u8; 3];
self.i2c.read(self.address, &mut buf).map_err(Error::I2C)?;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(x)
} else {
Err(Error::Crc)
}
}
fn convert_temp_to_float(temp: u16) -> f32 {
-45.0 + 175.0 * (temp as f32) / 65535.0
}
// method about measurement duration?
}
// Methods for one-shot mode only
// TODO: these are nonblocking, but don't utilize the nb concepts. Also make blocking types.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Start a one-shot temperature measurement using the repeatability
/// that has already been set.
pub fn trigger_temp_meas(&mut self) -> Result<(), Error<E>> {
self.send_command(Command::StartSingleShot{repeatability: self.repeatability})
}
/// Perform a one-shot temperature measurement.
///
/// This allows triggering a single temperature measurement when in
/// one-shot mode. The device returns to the low-power state at the
/// completion of the temperature conversion, reducing power
/// consumption when continuous temperature monitoring is not required.
///
/// If no temperature conversion was started yet, calling this method
/// will start one and return `nb::Error::WouldBlock`. Subsequent calls
/// will continue to return `nb::Error::WouldBlock` until the
/// temperature measurement is finished. Then it will return the
/// measured temperature in °C.
pub fn read_temperature(&mut self) -> nb::Result<f32, Error<E>> {
if !self.temp_measurement_started {
self.trigger_temp_meas()
.map_err(nb::Error::Other)?;
self.temp_measurement_started = true;
return Err(nb::Error::WouldBlock);
}
let mut buf = [0u8; 3];
let completion = self.i2c.read(self.address, &mut buf);
// What I want to do:
// match completion {
// Ok(val) => {
// // Conversion complete.
// let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
// self.temp_measurement_started = false;
// Ok(Self::convert_temp_to_float(x))
// },
// Err(stm32f3xx_hal::i2c::Error::Nack) => { // I want to replace with a generic path in embedded_hal
// // namespace because we shouldn't depend on a specific device HAL.
// Err(nb::Error::WouldBlock)
// },
// Err(e) => {
// Err(nb::Error::Other(Error::I2C(e))) // Not sure this is correct, but compiler doesn't complain.
// }
// }
// What I have to do with embedded_hal 0.2.4/0.2.5:
match completion {
Ok(_) => {
// Conversion complete.
self.temp_measurement_started = false;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(Self::convert_temp_to_float(x))
} else {
Err(nb::Error::Other(Error::Crc))
}
},
_ => {
Err(nb::Error::WouldBlock)
}
}
}
pub fn set_repeatability(&mut self, r: Repeatability) -> Repeatability{
self.repeatability = r;
r
}
// pub fn into_continuous(self, rate: ConversionRate) -> Result<Sts3x<I2C, marker::mode::Continuous>, ModeChangeError<E, Self>>
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// pub fn reset_state(&mut self)
// pub fn soft_reset(&mut self)
// pub fn get_status(&self)
// pub fn clear_status(&mut self)
// pub fn heater_on(&mut self)
// pub fn heater_off(&mut self)
}
// Methods for continuous mode only
impl<I2C, E> Sts3x<I2C, marker::mode::Continuous>
where
I2C: i2c::Write<Error = E>,
{
/// Get latest temperature reading.
/// TODO: fill out
pub fn read_temperature(&self) -> u16 {
25
}
// /// Convert to one-shot mode.
// /// TODO: add the command to change and a failure error.
// pub fn into_one_shot(self) -> Result<Sts3x<I2C, marker::mode::OneShot>, ModeChangeError<E, Self>> {
// Result(Sts3x {
// i2c: self.i2c,
// address: self.address,
// repeatability: self.repeatability,
// temp_measurement_started: false,
// _mode: PhantomData,
// })
// }
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// /// This will convert into a one-shot mode device.
// pub fn reset_state(mut self)
}
// impl MeasurementDuration for Sts3x<I2C, MODE> {
// // TODO: fill out fn measurement_duration
// fn measurement_duration_us(repeat: Repeatability) -> u16 {
// 20
// }
// }
| self.i2c
}
| identifier_body |
lib.rs | //! This is a platform-agnostic Rust driver for the Sensirion STS30, STS31, and STS35
//! high-accuracy, low-power, I2C digital temperature sensors, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! TODO: More information here.
//!
//! The driver borrows liberally from:
//! - eldruin's tmp1x2-rs driver for Texas Instruments TMP102 and TMP112, https://github.com/eldruin/tmp1x2-rs, and
//! - dbrgn's shtcx-rs driver for Sensirion SHTCx temperature/humidity sensors, https://github.com/dbrgn/shtcx-rs.
#![deny(unsafe_code)]
#![no_std]
// TODO: add deny missing docs, and doc root url
mod crc;
use core::marker::PhantomData;
use embedded_hal::blocking::i2c; // TODO: move to using nb if the crate adds a nonblocking I2C.
pub use nb;
/// Possible errors in this crate
#[derive(Debug)]
pub enum Error<E> {
/// I²C bus error
I2C(E),
/// CRC checksum validation failed
Crc,
}
/// Error type for mode changes.
/// This allows us to retrieve the unchanged device in case of an error.
#[derive(Debug)]
pub enum ModeChangeError<E, DEV> {
/// I²C bus error while changing modes
///
/// `E` is the error that happened.
/// `DEV` is the device with the mode unchanged.
I2C(E, DEV),
}
/// Conversion rate for continuous conversion mode.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConversionRate {
/// 0.5 Hz
_0_5Hz,
/// 1 Hz
_1Hz,
/// 2 Hz
_2Hz,
/// 4 Hz
_4Hz,
/// 10 Hz
_10Hz,
}
/// Repeatability condition for both one-shot and continuous modes.
/// From the datasheet: the value is 3 * standard deviation of measurements at constant ambient.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Repeatability {
/// High repeatability 0.04°C
High,
/// Medium repeatability 0.08°C
Medium,
/// Low repeatability 0.15°C
Low,
}
impl Default for Repeatability {
fn default() -> Self {
Repeatability::Low
}
}
/// Possible peripheral addresses
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PeripheralAddr {
/// Default address, with address pin held low
PinLow,
/// Address with the pin held high
PinHigh,
}
impl Default for PeripheralAddr {
fn default() -> Self {
PeripheralAddr::PinLow
}
}
impl PeripheralAddr {
/// Return the 7-bit I2C address corresponding to the enum.
fn as_byte(self) -> u8 {
match self {
PeripheralAddr::PinLow => 0x4A,
PeripheralAddr::PinHigh => 0x4B,
}
}
}
/// I²C commands sent to the sensor
#[derive(Debug)]
enum Command {
/// Initiate a single-shot conversion.
StartSingleShot {repeatability: Repeatability},
/// Change to periodic mode with the given repeatability and conversion rates.
StartPeriodic {
repeatability: Repeatability,
conversion_rate: ConversionRate,
},
/// Fetch data from the sensor when it is in continuous mode.
FetchData,
/// Break out of continuous mode and return to one-shot mdoe.
Break,
/// Issue a software reset.
SoftReset,
/// Turn the heater on for plausibility checking.
HeaterOn,
/// Turn the heater off.
HeaterOff,
/// Read the status register.
ReadStatus,
/// Clear the status register.
ClearStatus,
}
impl Command {
/// Return a slice of two bytes corresponding to the command.
/// These are the bytes the sensor expects.
fn as_bytes(self) -> [u8; 2] {
match self {
// For single shot, listing each variant directly.
Command::StartSingleShot{repeatability: Repeatability::High} => [0x24, 0x00],
Command::StartSingleShot{repeatability: Repeatability::Medium} => [0x24, 0x0B],
Command::StartSingleShot{repeatability: Repeatability::Low} => [0x24, 0x16],
// For periodic, using nested matches, more lines of code, but hopefully more readable.
Command::StartPeriodic{repeatability: r, conversion_rate: c} => {
match c {
ConversionRate::_0_5Hz => {
match r {
Repeatability::High => [0x20, 0x32],
Repeatability::Medium => [0x20, 0x24],
Repeatability::Low => [0x20, 0x2F],
}
},
ConversionRate::_1Hz => {
match r {
Repeatability::High => [0x21, 0x30],
Repeatability::Medium => [0x21, 0x26],
Repeatability::Low => [0x21, 0x2D],
}
},
ConversionRate::_2Hz => {
match r {
Repeatability::High => [0x22, 0x36],
Repeatability::Medium => [0x22, 0x20],
Repeatability::Low => [0x22, 0x2B],
}
},
ConversionRate::_4Hz => {
match r {
Repeatability::High => [0x23, 0x34],
Repeatability::Medium => [0x23, 0x22],
Repeatability::Low => [0x23, 0x29],
}
},
ConversionRate::_10Hz => {
match r {
Repeatability::High => [0x27, 0x37],
Repeatability::Medium => [0x27, 0x21],
Repeatability::Low => [0x27, 0x2A],
}
},
}
},
Command::FetchData => [0xE0, 0x00],
Command::Break => [0x30, 0x93],
Command::SoftReset => [0x30, 0xA2],
Command::HeaterOn => [0x30, 0x6D],
Command::HeaterOff => [0x30, 0x66],
Command::ReadStatus => [0xF3, 0x2D],
Command::ClearStatus => [0x30, 0x41],
}
}
}
// TODO: this is from shtcx, and I need to figure out how/whether to implement
pub trait MeasurementDuration {
/// Return the maximum measurement duration according to repeatability
/// in microseconds
fn measurement_duration_us(repeat: Repeatability) -> u16;
}
#[doc(hidden)]
// Type states for one-shot and continuous modes.
pub mod marker {
pub mod mode {
#[derive(Debug)]
pub struct OneShot(());
#[derive(Debug)]
pub struct Continuous(());
}
}
/// Device Driver
#[derive(Debug, Default)]
pub struct Sts3x<I2C, MODE> {
/// The I2C device implementation
i2c: I2C,
/// The 7-bit I2C device address
address: u8,
/// The present repeatabiliy setting
repeatability: Repeatability,
/// A temperature measurement was started.
temp_measurement_started: bool,
_mode: PhantomData<MODE>,
}
// Implement struct creation for OneShot only so that it is only possible to create a one-shot version.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Write<Error = E>,
{
/// Create new instance of the Sts3x device.
/// Defaults to Low repeatability for power savings.
/// Change repeatability with set_repeatability().
///
/// By default, the device starts in one-shot mode.
pub fn new(i2c: I2C, address: PeripheralAddr) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability: Repeatability::default(),
temp_measurement_started: false,
_mode: PhantomData,
}
}
/// Create new instance of the Sts3x device, choosing a Repeatability.
///
/// By default, the device starts in one-shot mode.
pub fn new_with_repeatability(i2c: I2C, address: PeripheralAddr, repeatability: Repeatability) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability,
temp_measurement_started: false,
_mode: PhantomData,
}
}
}
// Methods shared by both single-shot and continuous modes
impl<I2C, MODE, E> Sts3x<I2C, MODE>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Destroy driver instance and return the I2C bus instance.
pub fn destroy(self) -> I2C {
self.i2c
}
// write and read private methods
/// Write an I2C command to the sensor
fn send_command(&mut self, command: Command) -> Result<(), Error<E>> {
self.i2c
.write(self.address, &command.as_bytes())
.map_err(Error::I2C)
}
/// Read and check the CRC.
/// Returns a Result with u16 corresponding to the MSB,LSB of the first
/// two bytes of the buffer.
fn read_with_crc(&mut self) -> Result<u16, Error<E>> {
let mut buf = [0u8; 3];
self.i2c.read(self.address, &mut buf).map_err(Error::I2C)?;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(x)
} else {
Err(Error::Crc)
}
}
fn convert_temp_to_float(temp: u16) -> f32 {
-45.0 + 175.0 * (temp as f32) / 65535.0
}
// method about measurement duration?
}
// Methods for one-shot mode only
// TODO: these are nonblocking, but don't utilize the nb concepts. Also make blocking types.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Start a one-shot temperature measurement using the repeatability
/// that has already been set.
pub fn trigger_temp_meas(&mut self) -> Result<(), Error<E>> {
self.send_command(Command::StartSingleShot{repeatability: self.repeatability})
}
/// Perform a one-shot temperature measurement.
///
/// This allows triggering a single temperature measurement when in
/// one-shot mode. The device returns to the low-power state at the
/// completion of the temperature conversion, reducing power
/// consumption when continuous temperature monitoring is not required.
///
/// If no temperature conversion was started yet, calling this method
/// will start one and return `nb::Error::WouldBlock`. Subsequent calls
/// will continue to return `nb::Error::WouldBlock` until the
/// temperature measurement is finished. Then it will return the
/// measured temperature in °C.
pub fn read_temperature(&mut self) -> nb::Result<f32, Error<E>> {
if !self.temp_measurement_started {
self.trigger_temp_meas()
.map_err(nb::Error::Other)?;
self.temp_measurement_started = true;
return Err(nb::Error::WouldBlock);
}
let mut buf = [0u8; 3];
let completion = self.i2c.read(self.address, &mut buf);
// What I want to do:
// match completion {
// Ok(val) => {
// // Conversion complete.
// let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
// self.temp_measurement_started = false;
// Ok(Self::convert_temp_to_float(x))
// },
// Err(stm32f3xx_hal::i2c::Error::Nack) => { // I want to replace with a generic path in embedded_hal
// // namespace because we shouldn't depend on a specific device HAL.
// Err(nb::Error::WouldBlock)
// },
// Err(e) => {
// Err(nb::Error::Other(Error::I2C(e))) // Not sure this is correct, but compiler doesn't complain.
// }
// }
// What I have to do with embedded_hal 0.2.4/0.2.5:
match completion {
Ok(_) => {
// Conversion complete.
self.temp_measurement_started = false;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(Self::convert_temp_to_float(x))
} else {
Err(nb::Error::Other(Error::Crc))
}
},
_ => {
Err(nb::Error::WouldBlock)
}
}
}
pub fn set_repeatability(&mut self, r: Repeatability) -> Repeatability{
self.repeatability = r;
r
}
// pub fn into_continuous(self, rate: ConversionRate) -> Result<Sts3x<I2C, marker::mode::Continuous>, ModeChangeError<E, Self>>
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// pub fn reset_state(&mut self)
// pub fn soft_reset(&mut self)
// pub fn get_status(&self)
// pub fn clear_status(&mut self)
// pub fn heater_on(&mut self)
// pub fn heater_off(&mut self)
}
// Methods for continuous mode only
impl<I2C, E> Sts3x<I2C, marker::mode::Continuous>
where
I2C: i2c::Write<Error = E>,
{
/// Get latest temperature reading.
/// TODO: fill out
pub fn read_temperature(&self) -> u16 {
25
}
// /// Convert to one-shot mode.
// /// TODO: add the command to change and a failure error.
// pub fn into_one_shot(self) -> Result<Sts3x<I2C, marker::mode::OneShot>, ModeChangeError<E, Self>> {
// Result(Sts3x {
// i2c: self.i2c,
// address: self.address,
// repeatability: self.repeatability,
// temp_measurement_started: false,
// _mode: PhantomData,
// })
// }
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// /// This will convert into a one-shot mode device.
// pub fn reset_state(mut self)
}
// impl MeasurementDuration for Sts3x<I2C, MODE> {
// // TODO: fill out fn measurement_duration
// fn measurement_duration_us(repeat: Repeatability) -> u16 { | // } | // 20
// } | random_line_split |
lib.rs | //! This is a platform-agnostic Rust driver for the Sensirion STS30, STS31, and STS35
//! high-accuracy, low-power, I2C digital temperature sensors, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! TODO: More information here.
//!
//! The driver borrows liberally from:
//! - eldruin's tmp1x2-rs driver for Texas Instruments TMP102 and TMP112, https://github.com/eldruin/tmp1x2-rs, and
//! - dbrgn's shtcx-rs driver for Sensirion SHTCx temperature/humidity sensors, https://github.com/dbrgn/shtcx-rs.
#![deny(unsafe_code)]
#![no_std]
// TODO: add deny missing docs, and doc root url
mod crc;
use core::marker::PhantomData;
use embedded_hal::blocking::i2c; // TODO: move to using nb if the crate adds a nonblocking I2C.
pub use nb;
/// Possible errors in this crate
#[derive(Debug)]
pub enum Error<E> {
/// I²C bus error
I2C(E),
/// CRC checksum validation failed
Crc,
}
/// Error type for mode changes.
/// This allows us to retrieve the unchanged device in case of an error.
#[derive(Debug)]
pub enum ModeChangeError<E, DEV> {
/// I²C bus error while changing modes
///
/// `E` is the error that happened.
/// `DEV` is the device with the mode unchanged.
I2C(E, DEV),
}
/// Conversion rate for continuous conversion mode.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConversionRate {
/// 0.5 Hz
_0_5Hz,
/// 1 Hz
_1Hz,
/// 2 Hz
_2Hz,
/// 4 Hz
_4Hz,
/// 10 Hz
_10Hz,
}
/// Repeatability condition for both one-shot and continuous modes.
/// From the datasheet: the value is 3 * standard deviation of measurements at constant ambient.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Repeatability {
/// High repeatability 0.04°C
High,
/// Medium repeatability 0.08°C
Medium,
/// Low repeatability 0.15°C
Low,
}
impl Default for Repeatability {
fn default() -> Self {
Repeatability::Low
}
}
/// Possible peripheral addresses
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PeripheralAddr {
/// Default address, with address pin held low
PinLow,
/// Address with the pin held high
PinHigh,
}
impl Default for PeripheralAddr {
fn default() -> Self {
PeripheralAddr::PinLow
}
}
impl PeripheralAddr {
/// Return the 7-bit I2C address corresponding to the enum.
fn as_byte(self) -> u8 {
match self {
PeripheralAddr::PinLow => 0x4A,
PeripheralAddr::PinHigh => 0x4B,
}
}
}
/// I²C commands sent to the sensor
#[derive(Debug)]
enum Command {
/// Initiate a single-shot conversion.
StartSingleShot {repeatability: Repeatability},
/// Change to periodic mode with the given repeatability and conversion rates.
StartPeriodic {
repeatability: Repeatability,
conversion_rate: ConversionRate,
},
/// Fetch data from the sensor when it is in continuous mode.
FetchData,
/// Break out of continuous mode and return to one-shot mdoe.
Break,
/// Issue a software reset.
SoftReset,
/// Turn the heater on for plausibility checking.
HeaterOn,
/// Turn the heater off.
HeaterOff,
/// Read the status register.
ReadStatus,
/// Clear the status register.
ClearStatus,
}
impl Command {
/// Return a slice of two bytes corresponding to the command.
/// These are the bytes the sensor expects.
fn as_bytes(self) -> [u8; 2] {
match self {
// For single shot, listing each variant directly.
Command::StartSingleShot{repeatability: Repeatability::High} => [0x24, 0x00],
Command::StartSingleShot{repeatability: Repeatability::Medium} => [0x24, 0x0B],
Command::StartSingleShot{repeatability: Repeatability::Low} => [0x24, 0x16],
// For periodic, using nested matches, more lines of code, but hopefully more readable.
Command::StartPeriodic{repeatability: r, conversion_rate: c} => {
match c {
ConversionRate::_0_5Hz => {
match r {
Repeatability::High => [0x20, 0x32],
Repeatability::Medium => [0x20, 0x24],
Repeatability::Low => [0x20, 0x2F],
}
},
ConversionRate::_1Hz => {
match r {
Repeatability::High => [0x21, 0x30],
Repeatability::Medium => [0x21, 0x26],
Repeatability::Low => [0x21, 0x2D],
}
},
ConversionRate::_2Hz => {
match r {
Repeatability::High => [0x22, 0x36],
Repeatability::Medium => [0x22, 0x20],
Repeatability::Low => [0x22, 0x2B],
}
},
ConversionRate::_4Hz => {
| ConversionRate::_10Hz => {
match r {
Repeatability::High => [0x27, 0x37],
Repeatability::Medium => [0x27, 0x21],
Repeatability::Low => [0x27, 0x2A],
}
},
}
},
Command::FetchData => [0xE0, 0x00],
Command::Break => [0x30, 0x93],
Command::SoftReset => [0x30, 0xA2],
Command::HeaterOn => [0x30, 0x6D],
Command::HeaterOff => [0x30, 0x66],
Command::ReadStatus => [0xF3, 0x2D],
Command::ClearStatus => [0x30, 0x41],
}
}
}
// TODO: this is from shtcx, and I need to figure out how/whether to implement
pub trait MeasurementDuration {
/// Return the maximum measurement duration according to repeatability
/// in microseconds
fn measurement_duration_us(repeat: Repeatability) -> u16;
}
#[doc(hidden)]
// Type states for one-shot and continuous modes.
pub mod marker {
pub mod mode {
#[derive(Debug)]
pub struct OneShot(());
#[derive(Debug)]
pub struct Continuous(());
}
}
/// Device Driver
#[derive(Debug, Default)]
pub struct Sts3x<I2C, MODE> {
/// The I2C device implementation
i2c: I2C,
/// The 7-bit I2C device address
address: u8,
/// The present repeatabiliy setting
repeatability: Repeatability,
/// A temperature measurement was started.
temp_measurement_started: bool,
_mode: PhantomData<MODE>,
}
// Implement struct creation for OneShot only so that it is only possible to create a one-shot version.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Write<Error = E>,
{
/// Create new instance of the Sts3x device.
/// Defaults to Low repeatability for power savings.
/// Change repeatability with set_repeatability().
///
/// By default, the device starts in one-shot mode.
pub fn new(i2c: I2C, address: PeripheralAddr) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability: Repeatability::default(),
temp_measurement_started: false,
_mode: PhantomData,
}
}
/// Create new instance of the Sts3x device, choosing a Repeatability.
///
/// By default, the device starts in one-shot mode.
pub fn new_with_repeatability(i2c: I2C, address: PeripheralAddr, repeatability: Repeatability) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability,
temp_measurement_started: false,
_mode: PhantomData,
}
}
}
// Methods shared by both single-shot and continuous modes
impl<I2C, MODE, E> Sts3x<I2C, MODE>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Destroy driver instance and return the I2C bus instance.
pub fn destroy(self) -> I2C {
self.i2c
}
// write and read private methods
/// Write an I2C command to the sensor
fn send_command(&mut self, command: Command) -> Result<(), Error<E>> {
self.i2c
.write(self.address, &command.as_bytes())
.map_err(Error::I2C)
}
/// Read and check the CRC.
/// Returns a Result with u16 corresponding to the MSB,LSB of the first
/// two bytes of the buffer.
fn read_with_crc(&mut self) -> Result<u16, Error<E>> {
let mut buf = [0u8; 3];
self.i2c.read(self.address, &mut buf).map_err(Error::I2C)?;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(x)
} else {
Err(Error::Crc)
}
}
fn convert_temp_to_float(temp: u16) -> f32 {
-45.0 + 175.0 * (temp as f32) / 65535.0
}
// method about measurement duration?
}
// Methods for one-shot mode only
// TODO: these are nonblocking, but don't utilize the nb concepts. Also make blocking types.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Start a one-shot temperature measurement using the repeatability
/// that has already been set.
pub fn trigger_temp_meas(&mut self) -> Result<(), Error<E>> {
self.send_command(Command::StartSingleShot{repeatability: self.repeatability})
}
/// Perform a one-shot temperature measurement.
///
/// This allows triggering a single temperature measurement when in
/// one-shot mode. The device returns to the low-power state at the
/// completion of the temperature conversion, reducing power
/// consumption when continuous temperature monitoring is not required.
///
/// If no temperature conversion was started yet, calling this method
/// will start one and return `nb::Error::WouldBlock`. Subsequent calls
/// will continue to return `nb::Error::WouldBlock` until the
/// temperature measurement is finished. Then it will return the
/// measured temperature in °C.
pub fn read_temperature(&mut self) -> nb::Result<f32, Error<E>> {
if !self.temp_measurement_started {
self.trigger_temp_meas()
.map_err(nb::Error::Other)?;
self.temp_measurement_started = true;
return Err(nb::Error::WouldBlock);
}
let mut buf = [0u8; 3];
let completion = self.i2c.read(self.address, &mut buf);
// What I want to do:
// match completion {
// Ok(val) => {
// // Conversion complete.
// let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
// self.temp_measurement_started = false;
// Ok(Self::convert_temp_to_float(x))
// },
// Err(stm32f3xx_hal::i2c::Error::Nack) => { // I want to replace with a generic path in embedded_hal
// // namespace because we shouldn't depend on a specific device HAL.
// Err(nb::Error::WouldBlock)
// },
// Err(e) => {
// Err(nb::Error::Other(Error::I2C(e))) // Not sure this is correct, but compiler doesn't complain.
// }
// }
// What I have to do with embedded_hal 0.2.4/0.2.5:
match completion {
Ok(_) => {
// Conversion complete.
self.temp_measurement_started = false;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(Self::convert_temp_to_float(x))
} else {
Err(nb::Error::Other(Error::Crc))
}
},
_ => {
Err(nb::Error::WouldBlock)
}
}
}
pub fn set_repeatability(&mut self, r: Repeatability) -> Repeatability{
self.repeatability = r;
r
}
// pub fn into_continuous(self, rate: ConversionRate) -> Result<Sts3x<I2C, marker::mode::Continuous>, ModeChangeError<E, Self>>
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// pub fn reset_state(&mut self)
// pub fn soft_reset(&mut self)
// pub fn get_status(&self)
// pub fn clear_status(&mut self)
// pub fn heater_on(&mut self)
// pub fn heater_off(&mut self)
}
// Methods for continuous mode only
impl<I2C, E> Sts3x<I2C, marker::mode::Continuous>
where
I2C: i2c::Write<Error = E>,
{
/// Get latest temperature reading.
/// TODO: fill out
pub fn read_temperature(&self) -> u16 {
25
}
// /// Convert to one-shot mode.
// /// TODO: add the command to change and a failure error.
// pub fn into_one_shot(self) -> Result<Sts3x<I2C, marker::mode::OneShot>, ModeChangeError<E, Self>> {
// Result(Sts3x {
// i2c: self.i2c,
// address: self.address,
// repeatability: self.repeatability,
// temp_measurement_started: false,
// _mode: PhantomData,
// })
// }
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// /// This will convert into a one-shot mode device.
// pub fn reset_state(mut self)
}
// impl MeasurementDuration for Sts3x<I2C, MODE> {
// // TODO: fill out fn measurement_duration
// fn measurement_duration_us(repeat: Repeatability) -> u16 {
// 20
// }
// }
| match r {
Repeatability::High => [0x23, 0x34],
Repeatability::Medium => [0x23, 0x22],
Repeatability::Low => [0x23, 0x29],
}
},
| conditional_block |
lib.rs | //! This is a platform-agnostic Rust driver for the Sensirion STS30, STS31, and STS35
//! high-accuracy, low-power, I2C digital temperature sensors, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! TODO: More information here.
//!
//! The driver borrows liberally from:
//! - eldruin's tmp1x2-rs driver for Texas Instruments TMP102 and TMP112, https://github.com/eldruin/tmp1x2-rs, and
//! - dbrgn's shtcx-rs driver for Sensirion SHTCx temperature/humidity sensors, https://github.com/dbrgn/shtcx-rs.
#![deny(unsafe_code)]
#![no_std]
// TODO: add deny missing docs, and doc root url
mod crc;
use core::marker::PhantomData;
use embedded_hal::blocking::i2c; // TODO: move to using nb if the crate adds a nonblocking I2C.
pub use nb;
/// Possible errors in this crate
#[derive(Debug)]
pub enum Error<E> {
/// I²C bus error
I2C(E),
/// CRC checksum validation failed
Crc,
}
/// Error type for mode changes.
/// This allows us to retrieve the unchanged device in case of an error.
#[derive(Debug)]
pub enum ModeChangeError<E, DEV> {
/// I²C bus error while changing modes
///
/// `E` is the error that happened.
/// `DEV` is the device with the mode unchanged.
I2C(E, DEV),
}
/// Conversion rate for continuous conversion mode.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConversionRate {
/// 0.5 Hz
_0_5Hz,
/// 1 Hz
_1Hz,
/// 2 Hz
_2Hz,
/// 4 Hz
_4Hz,
/// 10 Hz
_10Hz,
}
/// Repeatability condition for both one-shot and continuous modes.
/// From the datasheet: the value is 3 * standard deviation of measurements at constant ambient.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Repeatability {
/// High repeatability 0.04°C
High,
/// Medium repeatability 0.08°C
Medium,
/// Low repeatability 0.15°C
Low,
}
impl Default for Repeatability {
fn default() -> Self {
Repeatability::Low
}
}
/// Possible peripheral addresses
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PeripheralAddr {
/// Default address, with address pin held low
PinLow,
/// Address with the pin held high
PinHigh,
}
impl Default for PeripheralAddr {
fn default() -> Self {
PeripheralAddr::PinLow
}
}
impl PeripheralAddr {
/// Return the 7-bit I2C address corresponding to the enum.
fn as_byte(self) -> u8 {
match self {
PeripheralAddr::PinLow => 0x4A,
PeripheralAddr::PinHigh => 0x4B,
}
}
}
/// I²C commands sent to the sensor
#[derive(Debug)]
enum Command {
/// Initiate a single-shot conversion.
StartSingleShot {repeatability: Repeatability},
/// Change to periodic mode with the given repeatability and conversion rates.
StartPeriodic {
repeatability: Repeatability,
conversion_rate: ConversionRate,
},
/// Fetch data from the sensor when it is in continuous mode.
FetchData,
/// Break out of continuous mode and return to one-shot mdoe.
Break,
/// Issue a software reset.
SoftReset,
/// Turn the heater on for plausibility checking.
HeaterOn,
/// Turn the heater off.
HeaterOff,
/// Read the status register.
ReadStatus,
/// Clear the status register.
ClearStatus,
}
impl Command {
/// Return a slice of two bytes corresponding to the command.
/// These are the bytes the sensor expects.
fn as_bytes(self) -> [u8; 2] {
match self {
// For single shot, listing each variant directly.
Command::StartSingleShot{repeatability: Repeatability::High} => [0x24, 0x00],
Command::StartSingleShot{repeatability: Repeatability::Medium} => [0x24, 0x0B],
Command::StartSingleShot{repeatability: Repeatability::Low} => [0x24, 0x16],
// For periodic, using nested matches, more lines of code, but hopefully more readable.
Command::StartPeriodic{repeatability: r, conversion_rate: c} => {
match c {
ConversionRate::_0_5Hz => {
match r {
Repeatability::High => [0x20, 0x32],
Repeatability::Medium => [0x20, 0x24],
Repeatability::Low => [0x20, 0x2F],
}
},
ConversionRate::_1Hz => {
match r {
Repeatability::High => [0x21, 0x30],
Repeatability::Medium => [0x21, 0x26],
Repeatability::Low => [0x21, 0x2D],
}
},
ConversionRate::_2Hz => {
match r {
Repeatability::High => [0x22, 0x36],
Repeatability::Medium => [0x22, 0x20],
Repeatability::Low => [0x22, 0x2B],
}
},
ConversionRate::_4Hz => {
match r {
Repeatability::High => [0x23, 0x34],
Repeatability::Medium => [0x23, 0x22],
Repeatability::Low => [0x23, 0x29],
}
},
ConversionRate::_10Hz => {
match r {
Repeatability::High => [0x27, 0x37],
Repeatability::Medium => [0x27, 0x21],
Repeatability::Low => [0x27, 0x2A],
}
},
}
},
Command::FetchData => [0xE0, 0x00],
Command::Break => [0x30, 0x93],
Command::SoftReset => [0x30, 0xA2],
Command::HeaterOn => [0x30, 0x6D],
Command::HeaterOff => [0x30, 0x66],
Command::ReadStatus => [0xF3, 0x2D],
Command::ClearStatus => [0x30, 0x41],
}
}
}
// TODO: this is from shtcx, and I need to figure out how/whether to implement
pub trait MeasurementDuration {
/// Return the maximum measurement duration according to repeatability
/// in microseconds
fn measurement_duration_us(repeat: Repeatability) -> u16;
}
#[doc(hidden)]
// Type states for one-shot and continuous modes.
pub mod marker {
pub mod mode {
#[derive(Debug)]
pub struct OneShot(());
#[derive(Debug)]
pub struct Continuous(());
}
}
/// Device Driver
#[derive(Debug, Default)]
pub struct Sts3x<I2C, MODE> {
/// The I2C device implementation
i2c: I2C,
/// The 7-bit I2C device address
address: u8,
/// The present repeatabiliy setting
repeatability: Repeatability,
/// A temperature measurement was started.
temp_measurement_started: bool,
_mode: PhantomData<MODE>,
}
// Implement struct creation for OneShot only so that it is only possible to create a one-shot version.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Write<Error = E>,
{
/// Create new instance of the Sts3x device.
/// Defaults to Low repeatability for power savings.
/// Change repeatability with set_repeatability().
///
/// By default, the device starts in one-shot mode.
pub fn new(i2c: I2C, address: PeripheralAddr) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability: Repeatability::default(),
temp_measurement_started: false,
_mode: PhantomData,
}
}
/// Create new instance of the Sts3x device, choosing a Repeatability.
///
/// By default, the device starts in one-shot mode.
pub fn new_with_repeatability(i2c: I2C, address: PeripheralAddr, repeatability: Repeatability) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability,
temp_measurement_started: false,
_mode: PhantomData,
}
}
}
// Methods shared by both single-shot and continuous modes
impl<I2C, MODE, E> Sts3x<I2C, MODE>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Destroy driver instance and return the I2C bus instance.
pub fn destroy(self) -> I2C {
self.i2c
}
// write and read private methods
/// Write an I2C command to the sensor
fn send_command(&mut self, command: Command) -> Result<(), Error<E>> {
self.i2c
.write(self.address, &command.as_bytes())
.map_err(Error::I2C)
}
/// Read and check the CRC.
/// Returns a Result with u16 corresponding to the MSB,LSB of the first
/// two bytes of the buffer.
fn read_with_crc(&mut self) -> Result<u16, Error<E>> {
let mut buf = [0u8; 3];
self.i2c.read(self.address, &mut buf).map_err(Error::I2C)?;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(x)
} else {
Err(Error::Crc)
}
}
fn convert_temp_to_float(temp: u16) -> f32 {
-45.0 + 175.0 * (temp as f32) / 65535.0
}
// method about measurement duration?
}
// Methods for one-shot mode only
// TODO: these are nonblocking, but don't utilize the nb concepts. Also make blocking types.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Start a one-shot temperature measurement using the repeatability
/// that has already been set.
pub fn trigger_temp_meas(&mut self) -> Result<(), Error<E>> {
self.send_command(Command::StartSingleShot{repeatability: self.repeatability})
}
/// Perform a one-shot temperature measurement.
///
/// This allows triggering a single temperature measurement when in
/// one-shot mode. The device returns to the low-power state at the
/// completion of the temperature conversion, reducing power
/// consumption when continuous temperature monitoring is not required.
///
/// If no temperature conversion was started yet, calling this method
/// will start one and return `nb::Error::WouldBlock`. Subsequent calls
/// will continue to return `nb::Error::WouldBlock` until the
/// temperature measurement is finished. Then it will return the
/// measured temperature in °C.
pub fn read_temperature(&mut self) -> nb::Result<f32, Error<E>> {
if !self.temp_measurement_started {
self.trigger_temp_meas()
.map_err(nb::Error::Other)?;
self.temp_measurement_started = true;
return Err(nb::Error::WouldBlock);
}
let mut buf = [0u8; 3];
let completion = self.i2c.read(self.address, &mut buf);
// What I want to do:
// match completion {
// Ok(val) => {
// // Conversion complete.
// let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
// self.temp_measurement_started = false;
// Ok(Self::convert_temp_to_float(x))
// },
// Err(stm32f3xx_hal::i2c::Error::Nack) => { // I want to replace with a generic path in embedded_hal
// // namespace because we shouldn't depend on a specific device HAL.
// Err(nb::Error::WouldBlock)
// },
// Err(e) => {
// Err(nb::Error::Other(Error::I2C(e))) // Not sure this is correct, but compiler doesn't complain.
// }
// }
// What I have to do with embedded_hal 0.2.4/0.2.5:
match completion {
Ok(_) => {
// Conversion complete.
self.temp_measurement_started = false;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(Self::convert_temp_to_float(x))
} else {
Err(nb::Error::Other(Error::Crc))
}
},
_ => {
Err(nb::Error::WouldBlock)
}
}
}
pub fn set_rep | elf, r: Repeatability) -> Repeatability{
self.repeatability = r;
r
}
// pub fn into_continuous(self, rate: ConversionRate) -> Result<Sts3x<I2C, marker::mode::Continuous>, ModeChangeError<E, Self>>
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// pub fn reset_state(&mut self)
// pub fn soft_reset(&mut self)
// pub fn get_status(&self)
// pub fn clear_status(&mut self)
// pub fn heater_on(&mut self)
// pub fn heater_off(&mut self)
}
// Methods for continuous mode only
impl<I2C, E> Sts3x<I2C, marker::mode::Continuous>
where
I2C: i2c::Write<Error = E>,
{
/// Get latest temperature reading.
/// TODO: fill out
pub fn read_temperature(&self) -> u16 {
25
}
// /// Convert to one-shot mode.
// /// TODO: add the command to change and a failure error.
// pub fn into_one_shot(self) -> Result<Sts3x<I2C, marker::mode::OneShot>, ModeChangeError<E, Self>> {
// Result(Sts3x {
// i2c: self.i2c,
// address: self.address,
// repeatability: self.repeatability,
// temp_measurement_started: false,
// _mode: PhantomData,
// })
// }
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// /// This will convert into a one-shot mode device.
// pub fn reset_state(mut self)
}
// impl MeasurementDuration for Sts3x<I2C, MODE> {
// // TODO: fill out fn measurement_duration
// fn measurement_duration_us(repeat: Repeatability) -> u16 {
// 20
// }
// }
| eatability(&mut s | identifier_name |
enhance.py | #!/usr/bin/env python
# * coding: utf8 *
"""
enhance.py
A module that handles appending information to the geocoded csv files
"""
try:
import arcpy
except:
pass
import csv
from pathlib import Path
from timeit import default_timer
import pandas as pd
UTM = "PROJCS['NAD_1983_UTM_Zone_12N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-111.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]];-5120900 -9998100 10000;-100000 10000;-100000 10000;0.001;0.001;0.001;IsHighPrecision"
GDB_NAME = 'enhance.gdb'
enhancement_layers = [{
'table': 'political.senate_districts_2022_to_2032',
'fields': ['dist'],
'rename': ['senate_district']
}, {
'table': 'political.house_districts_2022_to_2032',
'fields': ['dist'],
'rename': ['house_district']
}, {
'table': 'boundaries.county_boundaries',
'fields': ['name'],
'rename': ['county_name']
}, {
'table': 'demographic.census_tracts_2020',
'fields': ['geoid20'],
'rename': ['census_id']
}]
def create_enhancement_gdb(parent_folder):
"""Creates the file geodatabase that will be used to store the enhanced layers
:param parent_folder: The parent path to the file geodatabase to create
:type parent_folder: Path
"""
parent_folder = Path(parent_folder)
gdb_path = parent_folder / GDB_NAME
if gdb_path.exists():
print(f'{GDB_NAME} exists. deleting and recreating with fresh data')
arcpy.management.Delete(str(gdb_path))
print('creating file geodatabase')
start = default_timer()
arcpy.management.CreateFileGDB(str(parent_folder), GDB_NAME)
print(f'file geodatabase created in {default_timer() - start} seconds')
add_enhancement_layers(parent_folder / GDB_NAME)
def add_enhancement_layers(output_gdb):
"""Adds the enhancement layers to the file geodatabase
:param output_gdb: The path to the file geodatabase to add the enhancement layers to
:type output_gdb: Path
"""
print('adding enhancement layers')
start = default_timer()
maps = Path(__file__).parent.parent.parent / 'maps'
workspace = (maps / 'opensgid.agrc.utah.gov.sde').resolve()
with arcpy.EnvManager(workspace=str(workspace)):
for layer in enhancement_layers:
table_start = default_timer()
print(f' adding {layer["table"]}')
mapping = arcpy.FieldMappings()
mapping.addTable(layer['table'])
fields = arcpy.ListFields(layer['table'])
filter_mapping(mapping, fields, layer)
arcpy.conversion.FeatureClassToFeatureClass(
in_features=layer['table'],
out_path=str(output_gdb),
out_name=layer['table'].split('.')[1],
field_mapping=mapping
)
print(f' {layer["table"]} finished in {default_timer() - table_start} seconds')
print(f'enhancement layers added in {default_timer() - start} seconds')
def merge(parent_folder):
"""Creates a single csv file containing all the enhanced data
:param parent_folder: The parent path to the results folder
:type parent_folder: Path
"""
parent_folder = Path(parent_folder)
address_csv_files = sorted(parent_folder.glob('*_step_*.csv'))
frames = []
#: read all csv's delimiter='|', quoting=csv.QUOTE_MINIMAL
for address_csv_file in address_csv_files:
temp = pd.read_csv(
address_csv_file, sep='|', encoding='utf-8', names=['type', 'id', 'county', 'senate', 'house', 'census']
)
frames.append(temp)
#: merge all csv's
merged = pd.concat(frames)
merged.to_csv(parent_folder / 'all.csv', sep='|', header=False, index=False, encoding='utf-8')
def filter_mapping(mapping, fields, table_metadata):
"""Filters the field mapping to only include the fields that are needed
:param mapping: The field mapping to filter
:type mapping: arcpy.FieldMappings
:param fields: The fields on the table
:type fields: list[arcpy.Field]
:param table_metadata: The table metadata to use to filter the field mapping
:type table_metadata: dict
"""
table_metadata['fields'].append('shape')
for field in fields:
index = mapping.findFieldMapIndex(field.name)
if index == -1:
continue
if field.name.lower() not in table_metadata['fields']:
try:
mapping.removeFieldMap(index)
except Exception as ex:
print(field.name.lower())
raise ex
else:
if field.name.lower() == 'shape':
continue
field_map = mapping.getFieldMap(index)
output_field = field_map.outputField
output_field.name = table_metadata['rename'][0]
field_map.outputField = output_field
mapping.replaceFieldMap(index, field_map)
def enhance(parent_folder):
"""enhances the csv table data from the identity tables
:param parent_folder: The parent path to the csv files to enhance
:type parent_folder: Path
"""
parent_folder = Path(parent_folder).resolve()
address_csv_files = sorted(parent_folder.glob('*.csv'))
print(f'enhancing {len(address_csv_files)} csv files in {parent_folder}')
data = Path(__file__).parent.parent.parent / 'data'
workspace = (data / 'enhanced' / GDB_NAME).resolve()
arcpy.env.workspace = str(workspace)
for address_csv in address_csv_files:
job = enhance_data(address_csv)
prepare_output(job)
convert_to_csv(job)
remove_temp_tables(job)
def enhance_data(address_csv):
"""enhance the data in the csv file
"""
table_name = address_csv.stem
| table=str(address_csv),
in_x_field='x',
in_y_field='y',
out_layer=f'{table_name}_temp',
spatial_reference=UTM,
in_z_field=None
)
else:
print(' skipping')
print(' creating feature class')
if not arcpy.Exists(f'{table_name}_step_1'):
arcpy.management.XYTableToPoint(
in_table=f'{table_name}_temp',
out_feature_class=f'{table_name}_step_1',
x_field='x',
y_field='y',
z_field=None,
coordinate_system=UTM
)
else:
print(' skipping')
print(' selecting match addresses')
if not arcpy.Exists(f'{table_name}_step_2'):
arcpy.management.SelectLayerByAttribute(
in_layer_or_view=f'{table_name}_step_1', selection_type='NEW_SELECTION', where_clause='score>0'
)
else:
print(' skipping')
print(' separating matched addresses')
if not arcpy.Exists(f'{table_name}_step_2'):
arcpy.management.CopyFeatures(in_features=f'{table_name}_step_1', out_feature_class=f'{table_name}_step_2')
else:
print(' skipping')
step = 2
for identity in enhancement_layers:
start = default_timer()
fields = "'".join(identity['fields'])
print(f'{step}. enhancing data with {fields} from {identity["table"]}')
enhance_table_name = identity['table'].split('.')[1]
if not arcpy.Exists(f'{table_name}_step_{step + 1}'):
arcpy.analysis.Identity(
in_features=f'{table_name}_step_{step}',
identity_features=enhance_table_name,
out_feature_class=f'{table_name}_step_{step + 1}',
join_attributes='NO_FID',
cluster_tolerance=None,
relationship='NO_RELATIONSHIPS'
)
else:
print(' skipping')
step = step + 1
continue
step = step + 1
print(f'completed: {default_timer() - start}')
return f'{table_name}_step_{step}'
def prepare_output(table):
"""prepares the output by splitting the primary key and the other field
"""
print('adding type field')
absolute_table = str(Path(arcpy.env.workspace) / table)
fields = arcpy.ListFields(absolute_table)
if 'type' in [field.name.lower() for field in fields]:
print(' skipping')
return
arcpy.management.AddField(absolute_table, 'type', 'TEXT', '', '', '1')
print('splitting type and id')
arcpy.management.CalculateField(
in_table=table, field='type', expression='left($feature.primary_key, 1)', expression_type='ARCADE'
)
arcpy.management.CalculateField(
in_table=table, field='primary_key', expression='mid($feature.primary_key, 1, 20)', expression_type='ARCADE'
)
def convert_to_csv(table):
"""writes table to csv
"""
print(f'writing {table} to csv')
destination = Path(__file__).parent.parent.parent / 'data' / 'results' / f'{table}.csv'
with arcpy.da.SearchCursor(
in_table=table,
field_names=['type', 'primary_key', 'county_name', 'senate_district', 'house_district', 'census_id'],
where_clause='message is null'
) as cursor, open(destination, 'w', encoding='utf-8', newline='') as result_file:
writer = csv.writer(result_file, delimiter='|', quoting=csv.QUOTE_MINIMAL)
for row in cursor:
writer.writerow(row)
def remove_temp_tables(table):
"""clean up method
"""
temp_tables = sorted(arcpy.ListFeatureClasses(wild_card=f'{table[:-1]}*', feature_type='Point'))
removed = False
print('removing ', ', '.join(temp_tables[:-1]))
try:
arcpy.management.Delete(temp_tables[:-1])
removed = True
except:
print('could not delete intermediate tables. trying one at a time')
if not removed: #: try pro < 2.9 style
for item in temp_tables[:-1]:
arcpy.management.Delete(item)
print('intermediate tables removed') | print(f'1. creating points from csv as {table_name}')
if not arcpy.Exists(f'{table_name}_step_1'):
arcpy.management.MakeXYEventLayer( | random_line_split |
enhance.py | #!/usr/bin/env python
# * coding: utf8 *
"""
enhance.py
A module that handles appending information to the geocoded csv files
"""
try:
import arcpy
except:
pass
import csv
from pathlib import Path
from timeit import default_timer
import pandas as pd
UTM = "PROJCS['NAD_1983_UTM_Zone_12N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-111.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]];-5120900 -9998100 10000;-100000 10000;-100000 10000;0.001;0.001;0.001;IsHighPrecision"
GDB_NAME = 'enhance.gdb'
enhancement_layers = [{
'table': 'political.senate_districts_2022_to_2032',
'fields': ['dist'],
'rename': ['senate_district']
}, {
'table': 'political.house_districts_2022_to_2032',
'fields': ['dist'],
'rename': ['house_district']
}, {
'table': 'boundaries.county_boundaries',
'fields': ['name'],
'rename': ['county_name']
}, {
'table': 'demographic.census_tracts_2020',
'fields': ['geoid20'],
'rename': ['census_id']
}]
def create_enhancement_gdb(parent_folder):
"""Creates the file geodatabase that will be used to store the enhanced layers
:param parent_folder: The parent path to the file geodatabase to create
:type parent_folder: Path
"""
parent_folder = Path(parent_folder)
gdb_path = parent_folder / GDB_NAME
if gdb_path.exists():
print(f'{GDB_NAME} exists. deleting and recreating with fresh data')
arcpy.management.Delete(str(gdb_path))
print('creating file geodatabase')
start = default_timer()
arcpy.management.CreateFileGDB(str(parent_folder), GDB_NAME)
print(f'file geodatabase created in {default_timer() - start} seconds')
add_enhancement_layers(parent_folder / GDB_NAME)
def add_enhancement_layers(output_gdb):
"""Adds the enhancement layers to the file geodatabase
:param output_gdb: The path to the file geodatabase to add the enhancement layers to
:type output_gdb: Path
"""
print('adding enhancement layers')
start = default_timer()
maps = Path(__file__).parent.parent.parent / 'maps'
workspace = (maps / 'opensgid.agrc.utah.gov.sde').resolve()
with arcpy.EnvManager(workspace=str(workspace)):
for layer in enhancement_layers:
table_start = default_timer()
print(f' adding {layer["table"]}')
mapping = arcpy.FieldMappings()
mapping.addTable(layer['table'])
fields = arcpy.ListFields(layer['table'])
filter_mapping(mapping, fields, layer)
arcpy.conversion.FeatureClassToFeatureClass(
in_features=layer['table'],
out_path=str(output_gdb),
out_name=layer['table'].split('.')[1],
field_mapping=mapping
)
print(f' {layer["table"]} finished in {default_timer() - table_start} seconds')
print(f'enhancement layers added in {default_timer() - start} seconds')
def merge(parent_folder):
"""Creates a single csv file containing all the enhanced data
:param parent_folder: The parent path to the results folder
:type parent_folder: Path
"""
parent_folder = Path(parent_folder)
address_csv_files = sorted(parent_folder.glob('*_step_*.csv'))
frames = []
#: read all csv's delimiter='|', quoting=csv.QUOTE_MINIMAL
for address_csv_file in address_csv_files:
temp = pd.read_csv(
address_csv_file, sep='|', encoding='utf-8', names=['type', 'id', 'county', 'senate', 'house', 'census']
)
frames.append(temp)
#: merge all csv's
merged = pd.concat(frames)
merged.to_csv(parent_folder / 'all.csv', sep='|', header=False, index=False, encoding='utf-8')
def filter_mapping(mapping, fields, table_metadata):
"""Filters the field mapping to only include the fields that are needed
:param mapping: The field mapping to filter
:type mapping: arcpy.FieldMappings
:param fields: The fields on the table
:type fields: list[arcpy.Field]
:param table_metadata: The table metadata to use to filter the field mapping
:type table_metadata: dict
"""
table_metadata['fields'].append('shape')
for field in fields:
index = mapping.findFieldMapIndex(field.name)
if index == -1:
continue
if field.name.lower() not in table_metadata['fields']:
try:
mapping.removeFieldMap(index)
except Exception as ex:
print(field.name.lower())
raise ex
else:
if field.name.lower() == 'shape':
continue
field_map = mapping.getFieldMap(index)
output_field = field_map.outputField
output_field.name = table_metadata['rename'][0]
field_map.outputField = output_field
mapping.replaceFieldMap(index, field_map)
def | (parent_folder):
"""enhances the csv table data from the identity tables
:param parent_folder: The parent path to the csv files to enhance
:type parent_folder: Path
"""
parent_folder = Path(parent_folder).resolve()
address_csv_files = sorted(parent_folder.glob('*.csv'))
print(f'enhancing {len(address_csv_files)} csv files in {parent_folder}')
data = Path(__file__).parent.parent.parent / 'data'
workspace = (data / 'enhanced' / GDB_NAME).resolve()
arcpy.env.workspace = str(workspace)
for address_csv in address_csv_files:
job = enhance_data(address_csv)
prepare_output(job)
convert_to_csv(job)
remove_temp_tables(job)
def enhance_data(address_csv):
"""enhance the data in the csv file
"""
table_name = address_csv.stem
print(f'1. creating points from csv as {table_name}')
if not arcpy.Exists(f'{table_name}_step_1'):
arcpy.management.MakeXYEventLayer(
table=str(address_csv),
in_x_field='x',
in_y_field='y',
out_layer=f'{table_name}_temp',
spatial_reference=UTM,
in_z_field=None
)
else:
print(' skipping')
print(' creating feature class')
if not arcpy.Exists(f'{table_name}_step_1'):
arcpy.management.XYTableToPoint(
in_table=f'{table_name}_temp',
out_feature_class=f'{table_name}_step_1',
x_field='x',
y_field='y',
z_field=None,
coordinate_system=UTM
)
else:
print(' skipping')
print(' selecting match addresses')
if not arcpy.Exists(f'{table_name}_step_2'):
arcpy.management.SelectLayerByAttribute(
in_layer_or_view=f'{table_name}_step_1', selection_type='NEW_SELECTION', where_clause='score>0'
)
else:
print(' skipping')
print(' separating matched addresses')
if not arcpy.Exists(f'{table_name}_step_2'):
arcpy.management.CopyFeatures(in_features=f'{table_name}_step_1', out_feature_class=f'{table_name}_step_2')
else:
print(' skipping')
step = 2
for identity in enhancement_layers:
start = default_timer()
fields = "'".join(identity['fields'])
print(f'{step}. enhancing data with {fields} from {identity["table"]}')
enhance_table_name = identity['table'].split('.')[1]
if not arcpy.Exists(f'{table_name}_step_{step + 1}'):
arcpy.analysis.Identity(
in_features=f'{table_name}_step_{step}',
identity_features=enhance_table_name,
out_feature_class=f'{table_name}_step_{step + 1}',
join_attributes='NO_FID',
cluster_tolerance=None,
relationship='NO_RELATIONSHIPS'
)
else:
print(' skipping')
step = step + 1
continue
step = step + 1
print(f'completed: {default_timer() - start}')
return f'{table_name}_step_{step}'
def prepare_output(table):
"""prepares the output by splitting the primary key and the other field
"""
print('adding type field')
absolute_table = str(Path(arcpy.env.workspace) / table)
fields = arcpy.ListFields(absolute_table)
if 'type' in [field.name.lower() for field in fields]:
print(' skipping')
return
arcpy.management.AddField(absolute_table, 'type', 'TEXT', '', '', '1')
print('splitting type and id')
arcpy.management.CalculateField(
in_table=table, field='type', expression='left($feature.primary_key, 1)', expression_type='ARCADE'
)
arcpy.management.CalculateField(
in_table=table, field='primary_key', expression='mid($feature.primary_key, 1, 20)', expression_type='ARCADE'
)
def convert_to_csv(table):
"""writes table to csv
"""
print(f'writing {table} to csv')
destination = Path(__file__).parent.parent.parent / 'data' / 'results' / f'{table}.csv'
with arcpy.da.SearchCursor(
in_table=table,
field_names=['type', 'primary_key', 'county_name', 'senate_district', 'house_district', 'census_id'],
where_clause='message is null'
) as cursor, open(destination, 'w', encoding='utf-8', newline='') as result_file:
writer = csv.writer(result_file, delimiter='|', quoting=csv.QUOTE_MINIMAL)
for row in cursor:
writer.writerow(row)
def remove_temp_tables(table):
"""clean up method
"""
temp_tables = sorted(arcpy.ListFeatureClasses(wild_card=f'{table[:-1]}*', feature_type='Point'))
removed = False
print('removing ', ', '.join(temp_tables[:-1]))
try:
arcpy.management.Delete(temp_tables[:-1])
removed = True
except:
print('could not delete intermediate tables. trying one at a time')
if not removed: #: try pro < 2.9 style
for item in temp_tables[:-1]:
arcpy.management.Delete(item)
print('intermediate tables removed')
| enhance | identifier_name |
enhance.py | #!/usr/bin/env python
# * coding: utf8 *
"""
enhance.py
A module that handles appending information to the geocoded csv files
"""
try:
import arcpy
except:
pass
import csv
from pathlib import Path
from timeit import default_timer
import pandas as pd
UTM = "PROJCS['NAD_1983_UTM_Zone_12N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-111.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]];-5120900 -9998100 10000;-100000 10000;-100000 10000;0.001;0.001;0.001;IsHighPrecision"
GDB_NAME = 'enhance.gdb'
enhancement_layers = [{
'table': 'political.senate_districts_2022_to_2032',
'fields': ['dist'],
'rename': ['senate_district']
}, {
'table': 'political.house_districts_2022_to_2032',
'fields': ['dist'],
'rename': ['house_district']
}, {
'table': 'boundaries.county_boundaries',
'fields': ['name'],
'rename': ['county_name']
}, {
'table': 'demographic.census_tracts_2020',
'fields': ['geoid20'],
'rename': ['census_id']
}]
def create_enhancement_gdb(parent_folder):
"""Creates the file geodatabase that will be used to store the enhanced layers
:param parent_folder: The parent path to the file geodatabase to create
:type parent_folder: Path
"""
parent_folder = Path(parent_folder)
gdb_path = parent_folder / GDB_NAME
if gdb_path.exists():
print(f'{GDB_NAME} exists. deleting and recreating with fresh data')
arcpy.management.Delete(str(gdb_path))
print('creating file geodatabase')
start = default_timer()
arcpy.management.CreateFileGDB(str(parent_folder), GDB_NAME)
print(f'file geodatabase created in {default_timer() - start} seconds')
add_enhancement_layers(parent_folder / GDB_NAME)
def add_enhancement_layers(output_gdb):
"""Adds the enhancement layers to the file geodatabase
:param output_gdb: The path to the file geodatabase to add the enhancement layers to
:type output_gdb: Path
"""
print('adding enhancement layers')
start = default_timer()
maps = Path(__file__).parent.parent.parent / 'maps'
workspace = (maps / 'opensgid.agrc.utah.gov.sde').resolve()
with arcpy.EnvManager(workspace=str(workspace)):
for layer in enhancement_layers:
table_start = default_timer()
print(f' adding {layer["table"]}')
mapping = arcpy.FieldMappings()
mapping.addTable(layer['table'])
fields = arcpy.ListFields(layer['table'])
filter_mapping(mapping, fields, layer)
arcpy.conversion.FeatureClassToFeatureClass(
in_features=layer['table'],
out_path=str(output_gdb),
out_name=layer['table'].split('.')[1],
field_mapping=mapping
)
print(f' {layer["table"]} finished in {default_timer() - table_start} seconds')
print(f'enhancement layers added in {default_timer() - start} seconds')
def merge(parent_folder):
"""Creates a single csv file containing all the enhanced data
:param parent_folder: The parent path to the results folder
:type parent_folder: Path
"""
parent_folder = Path(parent_folder)
address_csv_files = sorted(parent_folder.glob('*_step_*.csv'))
frames = []
#: read all csv's delimiter='|', quoting=csv.QUOTE_MINIMAL
for address_csv_file in address_csv_files:
temp = pd.read_csv(
address_csv_file, sep='|', encoding='utf-8', names=['type', 'id', 'county', 'senate', 'house', 'census']
)
frames.append(temp)
#: merge all csv's
merged = pd.concat(frames)
merged.to_csv(parent_folder / 'all.csv', sep='|', header=False, index=False, encoding='utf-8')
def filter_mapping(mapping, fields, table_metadata):
"""Filters the field mapping to only include the fields that are needed
:param mapping: The field mapping to filter
:type mapping: arcpy.FieldMappings
:param fields: The fields on the table
:type fields: list[arcpy.Field]
:param table_metadata: The table metadata to use to filter the field mapping
:type table_metadata: dict
"""
table_metadata['fields'].append('shape')
for field in fields:
index = mapping.findFieldMapIndex(field.name)
if index == -1:
continue
if field.name.lower() not in table_metadata['fields']:
try:
mapping.removeFieldMap(index)
except Exception as ex:
print(field.name.lower())
raise ex
else:
if field.name.lower() == 'shape':
continue
field_map = mapping.getFieldMap(index)
output_field = field_map.outputField
output_field.name = table_metadata['rename'][0]
field_map.outputField = output_field
mapping.replaceFieldMap(index, field_map)
def enhance(parent_folder):
"""enhances the csv table data from the identity tables
:param parent_folder: The parent path to the csv files to enhance
:type parent_folder: Path
"""
parent_folder = Path(parent_folder).resolve()
address_csv_files = sorted(parent_folder.glob('*.csv'))
print(f'enhancing {len(address_csv_files)} csv files in {parent_folder}')
data = Path(__file__).parent.parent.parent / 'data'
workspace = (data / 'enhanced' / GDB_NAME).resolve()
arcpy.env.workspace = str(workspace)
for address_csv in address_csv_files:
job = enhance_data(address_csv)
prepare_output(job)
convert_to_csv(job)
remove_temp_tables(job)
def enhance_data(address_csv):
"""enhance the data in the csv file
"""
table_name = address_csv.stem
print(f'1. creating points from csv as {table_name}')
if not arcpy.Exists(f'{table_name}_step_1'):
arcpy.management.MakeXYEventLayer(
table=str(address_csv),
in_x_field='x',
in_y_field='y',
out_layer=f'{table_name}_temp',
spatial_reference=UTM,
in_z_field=None
)
else:
print(' skipping')
print(' creating feature class')
if not arcpy.Exists(f'{table_name}_step_1'):
arcpy.management.XYTableToPoint(
in_table=f'{table_name}_temp',
out_feature_class=f'{table_name}_step_1',
x_field='x',
y_field='y',
z_field=None,
coordinate_system=UTM
)
else:
print(' skipping')
print(' selecting match addresses')
if not arcpy.Exists(f'{table_name}_step_2'):
arcpy.management.SelectLayerByAttribute(
in_layer_or_view=f'{table_name}_step_1', selection_type='NEW_SELECTION', where_clause='score>0'
)
else:
print(' skipping')
print(' separating matched addresses')
if not arcpy.Exists(f'{table_name}_step_2'):
arcpy.management.CopyFeatures(in_features=f'{table_name}_step_1', out_feature_class=f'{table_name}_step_2')
else:
print(' skipping')
step = 2
for identity in enhancement_layers:
start = default_timer()
fields = "'".join(identity['fields'])
print(f'{step}. enhancing data with {fields} from {identity["table"]}')
enhance_table_name = identity['table'].split('.')[1]
if not arcpy.Exists(f'{table_name}_step_{step + 1}'):
|
else:
print(' skipping')
step = step + 1
continue
step = step + 1
print(f'completed: {default_timer() - start}')
return f'{table_name}_step_{step}'
def prepare_output(table):
"""prepares the output by splitting the primary key and the other field
"""
print('adding type field')
absolute_table = str(Path(arcpy.env.workspace) / table)
fields = arcpy.ListFields(absolute_table)
if 'type' in [field.name.lower() for field in fields]:
print(' skipping')
return
arcpy.management.AddField(absolute_table, 'type', 'TEXT', '', '', '1')
print('splitting type and id')
arcpy.management.CalculateField(
in_table=table, field='type', expression='left($feature.primary_key, 1)', expression_type='ARCADE'
)
arcpy.management.CalculateField(
in_table=table, field='primary_key', expression='mid($feature.primary_key, 1, 20)', expression_type='ARCADE'
)
def convert_to_csv(table):
"""writes table to csv
"""
print(f'writing {table} to csv')
destination = Path(__file__).parent.parent.parent / 'data' / 'results' / f'{table}.csv'
with arcpy.da.SearchCursor(
in_table=table,
field_names=['type', 'primary_key', 'county_name', 'senate_district', 'house_district', 'census_id'],
where_clause='message is null'
) as cursor, open(destination, 'w', encoding='utf-8', newline='') as result_file:
writer = csv.writer(result_file, delimiter='|', quoting=csv.QUOTE_MINIMAL)
for row in cursor:
writer.writerow(row)
def remove_temp_tables(table):
"""clean up method
"""
temp_tables = sorted(arcpy.ListFeatureClasses(wild_card=f'{table[:-1]}*', feature_type='Point'))
removed = False
print('removing ', ', '.join(temp_tables[:-1]))
try:
arcpy.management.Delete(temp_tables[:-1])
removed = True
except:
print('could not delete intermediate tables. trying one at a time')
if not removed: #: try pro < 2.9 style
for item in temp_tables[:-1]:
arcpy.management.Delete(item)
print('intermediate tables removed')
| arcpy.analysis.Identity(
in_features=f'{table_name}_step_{step}',
identity_features=enhance_table_name,
out_feature_class=f'{table_name}_step_{step + 1}',
join_attributes='NO_FID',
cluster_tolerance=None,
relationship='NO_RELATIONSHIPS'
) | conditional_block |
enhance.py | #!/usr/bin/env python
# * coding: utf8 *
"""
enhance.py
A module that handles appending information to the geocoded csv files
"""
try:
import arcpy
except:
pass
import csv
from pathlib import Path
from timeit import default_timer
import pandas as pd
UTM = "PROJCS['NAD_1983_UTM_Zone_12N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-111.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]];-5120900 -9998100 10000;-100000 10000;-100000 10000;0.001;0.001;0.001;IsHighPrecision"
GDB_NAME = 'enhance.gdb'
enhancement_layers = [{
'table': 'political.senate_districts_2022_to_2032',
'fields': ['dist'],
'rename': ['senate_district']
}, {
'table': 'political.house_districts_2022_to_2032',
'fields': ['dist'],
'rename': ['house_district']
}, {
'table': 'boundaries.county_boundaries',
'fields': ['name'],
'rename': ['county_name']
}, {
'table': 'demographic.census_tracts_2020',
'fields': ['geoid20'],
'rename': ['census_id']
}]
def create_enhancement_gdb(parent_folder):
"""Creates the file geodatabase that will be used to store the enhanced layers
:param parent_folder: The parent path to the file geodatabase to create
:type parent_folder: Path
"""
parent_folder = Path(parent_folder)
gdb_path = parent_folder / GDB_NAME
if gdb_path.exists():
print(f'{GDB_NAME} exists. deleting and recreating with fresh data')
arcpy.management.Delete(str(gdb_path))
print('creating file geodatabase')
start = default_timer()
arcpy.management.CreateFileGDB(str(parent_folder), GDB_NAME)
print(f'file geodatabase created in {default_timer() - start} seconds')
add_enhancement_layers(parent_folder / GDB_NAME)
def add_enhancement_layers(output_gdb):
"""Adds the enhancement layers to the file geodatabase
:param output_gdb: The path to the file geodatabase to add the enhancement layers to
:type output_gdb: Path
"""
print('adding enhancement layers')
start = default_timer()
maps = Path(__file__).parent.parent.parent / 'maps'
workspace = (maps / 'opensgid.agrc.utah.gov.sde').resolve()
with arcpy.EnvManager(workspace=str(workspace)):
for layer in enhancement_layers:
table_start = default_timer()
print(f' adding {layer["table"]}')
mapping = arcpy.FieldMappings()
mapping.addTable(layer['table'])
fields = arcpy.ListFields(layer['table'])
filter_mapping(mapping, fields, layer)
arcpy.conversion.FeatureClassToFeatureClass(
in_features=layer['table'],
out_path=str(output_gdb),
out_name=layer['table'].split('.')[1],
field_mapping=mapping
)
print(f' {layer["table"]} finished in {default_timer() - table_start} seconds')
print(f'enhancement layers added in {default_timer() - start} seconds')
def merge(parent_folder):
"""Creates a single csv file containing all the enhanced data
:param parent_folder: The parent path to the results folder
:type parent_folder: Path
"""
parent_folder = Path(parent_folder)
address_csv_files = sorted(parent_folder.glob('*_step_*.csv'))
frames = []
#: read all csv's delimiter='|', quoting=csv.QUOTE_MINIMAL
for address_csv_file in address_csv_files:
temp = pd.read_csv(
address_csv_file, sep='|', encoding='utf-8', names=['type', 'id', 'county', 'senate', 'house', 'census']
)
frames.append(temp)
#: merge all csv's
merged = pd.concat(frames)
merged.to_csv(parent_folder / 'all.csv', sep='|', header=False, index=False, encoding='utf-8')
def filter_mapping(mapping, fields, table_metadata):
"""Filters the field mapping to only include the fields that are needed
:param mapping: The field mapping to filter
:type mapping: arcpy.FieldMappings
:param fields: The fields on the table
:type fields: list[arcpy.Field]
:param table_metadata: The table metadata to use to filter the field mapping
:type table_metadata: dict
"""
table_metadata['fields'].append('shape')
for field in fields:
index = mapping.findFieldMapIndex(field.name)
if index == -1:
continue
if field.name.lower() not in table_metadata['fields']:
try:
mapping.removeFieldMap(index)
except Exception as ex:
print(field.name.lower())
raise ex
else:
if field.name.lower() == 'shape':
continue
field_map = mapping.getFieldMap(index)
output_field = field_map.outputField
output_field.name = table_metadata['rename'][0]
field_map.outputField = output_field
mapping.replaceFieldMap(index, field_map)
def enhance(parent_folder):
"""enhances the csv table data from the identity tables
:param parent_folder: The parent path to the csv files to enhance
:type parent_folder: Path
"""
parent_folder = Path(parent_folder).resolve()
address_csv_files = sorted(parent_folder.glob('*.csv'))
print(f'enhancing {len(address_csv_files)} csv files in {parent_folder}')
data = Path(__file__).parent.parent.parent / 'data'
workspace = (data / 'enhanced' / GDB_NAME).resolve()
arcpy.env.workspace = str(workspace)
for address_csv in address_csv_files:
job = enhance_data(address_csv)
prepare_output(job)
convert_to_csv(job)
remove_temp_tables(job)
def enhance_data(address_csv):
"""enhance the data in the csv file
"""
table_name = address_csv.stem
print(f'1. creating points from csv as {table_name}')
if not arcpy.Exists(f'{table_name}_step_1'):
arcpy.management.MakeXYEventLayer(
table=str(address_csv),
in_x_field='x',
in_y_field='y',
out_layer=f'{table_name}_temp',
spatial_reference=UTM,
in_z_field=None
)
else:
print(' skipping')
print(' creating feature class')
if not arcpy.Exists(f'{table_name}_step_1'):
arcpy.management.XYTableToPoint(
in_table=f'{table_name}_temp',
out_feature_class=f'{table_name}_step_1',
x_field='x',
y_field='y',
z_field=None,
coordinate_system=UTM
)
else:
print(' skipping')
print(' selecting match addresses')
if not arcpy.Exists(f'{table_name}_step_2'):
arcpy.management.SelectLayerByAttribute(
in_layer_or_view=f'{table_name}_step_1', selection_type='NEW_SELECTION', where_clause='score>0'
)
else:
print(' skipping')
print(' separating matched addresses')
if not arcpy.Exists(f'{table_name}_step_2'):
arcpy.management.CopyFeatures(in_features=f'{table_name}_step_1', out_feature_class=f'{table_name}_step_2')
else:
print(' skipping')
step = 2
for identity in enhancement_layers:
start = default_timer()
fields = "'".join(identity['fields'])
print(f'{step}. enhancing data with {fields} from {identity["table"]}')
enhance_table_name = identity['table'].split('.')[1]
if not arcpy.Exists(f'{table_name}_step_{step + 1}'):
arcpy.analysis.Identity(
in_features=f'{table_name}_step_{step}',
identity_features=enhance_table_name,
out_feature_class=f'{table_name}_step_{step + 1}',
join_attributes='NO_FID',
cluster_tolerance=None,
relationship='NO_RELATIONSHIPS'
)
else:
print(' skipping')
step = step + 1
continue
step = step + 1
print(f'completed: {default_timer() - start}')
return f'{table_name}_step_{step}'
def prepare_output(table):
|
def convert_to_csv(table):
"""writes table to csv
"""
print(f'writing {table} to csv')
destination = Path(__file__).parent.parent.parent / 'data' / 'results' / f'{table}.csv'
with arcpy.da.SearchCursor(
in_table=table,
field_names=['type', 'primary_key', 'county_name', 'senate_district', 'house_district', 'census_id'],
where_clause='message is null'
) as cursor, open(destination, 'w', encoding='utf-8', newline='') as result_file:
writer = csv.writer(result_file, delimiter='|', quoting=csv.QUOTE_MINIMAL)
for row in cursor:
writer.writerow(row)
def remove_temp_tables(table):
"""clean up method
"""
temp_tables = sorted(arcpy.ListFeatureClasses(wild_card=f'{table[:-1]}*', feature_type='Point'))
removed = False
print('removing ', ', '.join(temp_tables[:-1]))
try:
arcpy.management.Delete(temp_tables[:-1])
removed = True
except:
print('could not delete intermediate tables. trying one at a time')
if not removed: #: try pro < 2.9 style
for item in temp_tables[:-1]:
arcpy.management.Delete(item)
print('intermediate tables removed')
| """prepares the output by splitting the primary key and the other field
"""
print('adding type field')
absolute_table = str(Path(arcpy.env.workspace) / table)
fields = arcpy.ListFields(absolute_table)
if 'type' in [field.name.lower() for field in fields]:
print(' skipping')
return
arcpy.management.AddField(absolute_table, 'type', 'TEXT', '', '', '1')
print('splitting type and id')
arcpy.management.CalculateField(
in_table=table, field='type', expression='left($feature.primary_key, 1)', expression_type='ARCADE'
)
arcpy.management.CalculateField(
in_table=table, field='primary_key', expression='mid($feature.primary_key, 1, 20)', expression_type='ARCADE'
) | identifier_body |
ViewFichaFinanceira.js | /**
* E-cidade Software Publico para Gestao Municipal
* Copyright (C) 2014 DBSeller Servicos de Informatica
* www.dbseller.com.br
* e-cidade@dbseller.com.br
*
* Este programa e software livre; voce pode redistribui-lo e/ou
* modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
* publicada pela Free Software Foundation; tanto a versao 2 da
* Licenca como (a seu criterio) qualquer versao mais nova.
*
* Este programa e distribuido na expectativa de ser util, mas SEM
* QUALQUER GARANTIA; sem mesmo a garantia implicita de
* COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
* PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
* detalhes.
*
* Voce deve ter recebido uma copia da Licenca Publica Geral GNU
* junto com este programa; se nao, escreva para a Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307, USA.
*
* Copia da licenca no diretorio licenca/licenca_en.txt
* licenca/licenca_pt.txt
*/
require_once('estilos/grid.style.css');
require_once('scripts/AjaxRequest.js');
require_once('scripts/widgets/windowAux.widget.js');
require_once('scripts/widgets/dbmessageBoard.widget.js'); | this.oWindowAux = null;
this.oGridValoresMensais = null;
this.oDadosFicha = {
sDescricao : null,
iOrgao : null,
iUnidade : null,
iRecurso : null,
iAnexo : null,
nValorOrcado : null,
nValorTotal : null,
nValorIndisponivel : null,
nValorProgramar : null,
aValoresMensais : []
};
this.fnCallback = null;
const URL_RPC = 'orc4_programacaofinanceira.RPC.php';
this.buildContainer = function() {
var self = this;
var oDivContainer = document.createElement('div');
oDivContainer.className = 'container';
oDivContainer.style.width = '98%';
var oFieldset = document.createElement('fieldset');
oFieldset.setAttribute('class', 'separator');
var oLegend = document.createElement('legend');
oLegend.setAttribute('class', 'bold');
oLegend.innerHTML = "Valores Mensais";
var oInputValorOrcado = document.createElement('input');
oInputValorOrcado.setAttribute('id', 'valorOrcado');
oInputValorOrcado.setAttribute('readonly', 'readonly');
oInputValorOrcado.setAttribute('class', 'readonly');
oInputValorOrcado.setAttribute('value', js_formatar(this.oDadosFicha.nValorOrcado, 'f'));
var oLabelValorOrcado = document.createElement('label');
oLabelValorOrcado.setAttribute('title', 'Valor Orçado');
oLabelValorOrcado.innerHTML = 'Valor Orçado: ';
var oInputValorTotal = document.createElement('input');
oInputValorTotal.setAttribute('id', 'valorTotal');
oInputValorTotal.setAttribute('readonly', 'readonly');
oInputValorTotal.setAttribute('class', 'readonly');
oInputValorTotal.setAttribute('value', js_formatar(this.oDadosFicha.nValorTotal, 'f'));
var oLabelValorTotal = document.createElement('label');
oLabelValorTotal.setAttribute('title', 'Valor Total');
oLabelValorTotal.innerHTML = 'Valor Total: ';
var oInputIndisponivel = document.createElement('input');
oInputIndisponivel.setAttribute('id', 'valorIndisponivel');
oInputIndisponivel.setAttribute('value', js_formatar(this.oDadosFicha.nValorIndisponivel, 'f'));
var oLabelIndisponivel = document.createElement('label');
oLabelIndisponivel.setAttribute('title', 'Indisponível');
oLabelIndisponivel.innerHTML = 'Indisponível: ';
var oInputProgramar = document.createElement('input');
oInputProgramar.setAttribute('id', 'valorProgramar');
oInputProgramar.setAttribute('value', js_formatar(this.oDadosFicha.nValorProgramar, 'f'));
var oLabelProgramar = document.createElement('label');
oLabelProgramar.setAttribute('title', 'A Programar');
oLabelProgramar.innerHTML = 'A Programar: ';
var oDivGrid = document.createElement('div');
oDivGrid.id = 'divGridValoresMensais';
var oBotaoSalvar = document.createElement('input');
oBotaoSalvar.setAttribute('type', 'button');
oBotaoSalvar.setAttribute('value', 'Salvar');
oBotaoSalvar.setAttribute('style', 'margin-top: 15px;');
oBotaoSalvar.setAttribute('onclick', sNomeInstancia + '.salvar()');
var oDivSalvar = document.createElement('div');
oDivSalvar.appendChild(oBotaoSalvar);
this.oGridValoresMensais = new DBGrid('valoresMensais');
this.oGridValoresMensais.sNameInstance = this.sNomeInstancia + '.oGridValoresMensais';
this.oGridValoresMensais.setHeader(['Mês', 'Valor']);
this.oGridValoresMensais.setCellAlign(['center', 'right']);
this.oGridValoresMensais.setCellWidth(['50%', '50%']);
this.oGridValoresMensais.setHeight(250);
oFieldset.appendChild(oLegend);
oFieldset.appendChild(oDivGrid);
var aCamposTabela = [
{ label: oLabelValorOrcado, input: oInputValorOrcado },
{ label: oLabelValorTotal, input: oInputValorTotal },
{ label: oLabelIndisponivel, input: oInputIndisponivel },
{ label: oLabelProgramar, input: oInputProgramar }
];
var oTabela = document.createElement('table');
oTabela.setAttribute('style', 'width: 100%;');
for (var iIndice = 0; iIndice < aCamposTabela.length; iIndice++) {
var oLabel = aCamposTabela[iIndice].label;
var oInput = aCamposTabela[iIndice].input;
if (iIndice % 2 == 0) {
var oLinhaTabela = document.createElement('tr');
}
oLabel.setAttribute('class', 'bold');
oLabel.setAttribute('for', oInput.getAttribute('id'));
oInput.setAttribute('type', 'text');''
oInput.setAttribute('style', 'text-align: right;');
if (oInput.getAttribute('id') != 'valorTotal' && oInput.getAttribute('id') != 'valorOrcado') {
$(oInput).observe('focus', function(){
this.value = js_strToFloat(this.value);
});
$(oInput).observe('blur', function(){
this.value = js_formatar(this.value, "f");
self.calcularValorTotal();
});
$(oInput).observe('input', function(){
var sLabel = $$('label[for="' + this.id + '"]').first().readAttribute('title');
js_ValidaCampos(this, 4, 'Campo ' + sLabel);
this.value = this.value.replace(/[^0-9\.\,]/g, "")
});
}
var oColunaLabel = document.createElement('td');
oColunaLabel.appendChild(oLabel);
var oColunaInput = document.createElement('td');
oColunaInput.appendChild(oInput);
oLinhaTabela.appendChild(oColunaLabel);
oLinhaTabela.appendChild(oColunaInput);
oTabela.appendChild(oLinhaTabela);
}
oDivContainer.appendChild(oTabela);
oDivContainer.appendChild(oFieldset);
oDivContainer.appendChild(oDivSalvar);
var sTituloJanela = 'Manutenção de Programação Financeira';
var sTitulo = 'Ficha Financeira';
var sHelp = this.oDadosFicha.sDescricao.urlDecode();
this.oWindowAux = new windowAux('oWindowAux', sTituloJanela, 550, 550);
this.oWindowAux.allowDrag(false);
this.oWindowAux.setContent(oDivContainer);
this.oWindowAux.setShutDownFunction(function() {
self.oWindowAux.destroy();
self.oWindowAux = null;
self.oDadosFicha = null;
self.iCodigo = null;
if (self.fnCallback != null) {
self.fnCallback();
}
});
var oMessageBoard = new DBMessageBoard('oMessageBoard', sTitulo, sHelp, this.oWindowAux.getContentContainer());
oMessageBoard.show();
this.oWindowAux.show();
this.oGridValoresMensais.show(oDivGrid);
this.oGridValoresMensais.clearAll(true);
for (iIndice = 0; iIndice < this.oDadosFicha.aValoresMensais.length; iIndice++) {
var oCampo = document.createElement('input');
var sLabel = this.oDadosFicha.aValoresMensais[iIndice].mes.urlDecode();
oCampo.setAttribute('name', this.sNomeInstancia + 'ValorMensal' + iIndice);
oCampo.setAttribute('class', this.sNomeInstancia + 'ValorMensal');
oCampo.setAttribute('value', js_formatar(this.oDadosFicha.aValoresMensais[iIndice].valor, 'f'));
oCampo.setAttribute('type', 'text');
oCampo.setAttribute('style', 'text-align: right; width: 100%;');
oCampo.setAttribute('onfocus', 'this.value = js_strToFloat(this.value)');
oCampo.setAttribute('onblur', 'this.value = js_formatar(this.value, "f");' + sNomeInstancia + '.calcularValorTotal()');
oCampo.setAttribute('oninput', 'js_ValidaCampos(this, 4, "Campo ' + sLabel + '"); this.value = this.value.replace(/[^0-9\.,]/g, "")');
var aLinha = [
this.oDadosFicha.aValoresMensais[iIndice].mes.urlDecode(),
oCampo.outerHTML
];
this.oGridValoresMensais.addRow(aLinha);
}
this.oGridValoresMensais.renderRows();
};
this.calcularValorTotal = function() {
var nValorTotal = 0;
var nValorIndisponivel = js_strToFloat($F('valorIndisponivel'));
var nValorProgramar = js_strToFloat($F('valorProgramar'));
$$('.' + this.sNomeInstancia + 'ValorMensal').each(function(oLinha) {
var nValor = js_strToFloat(oLinha.getValue());
nValorTotal += nValor;
});
nValorTotal = nValorTotal + nValorIndisponivel + nValorProgramar;
$('valorTotal').value = js_formatar(nValorTotal, 'f', 2);
};
this.salvar = function() {
var aMeses = [];
$$('.' + this.sNomeInstancia + 'ValorMensal').each(function(oLinha) {
aMeses.push(js_strToFloat(oLinha.getValue()));
});
var oParametros = {
'exec' : 'alterarFicha',
'iCodigoFicha' : this.iCodigo,
'nValorIndisponivel' : js_strToFloat($F('valorIndisponivel')),
'nValorProgramar' : js_strToFloat($F('valorProgramar')),
'aValoresMensais' : aMeses
};
new AjaxRequest(URL_RPC, oParametros, function(oRetorno, lErro) {
if (lErro) {
alert(oRetorno.mensagem.urlDecode());
return;
}
alert('Ficha alterada com sucesso.');
}).setMessage('Aguarde, salvando dados da ficha...')
.asynchronous(false)
.execute();
};
this.carregarDados = function() {
var lResultado = true;
var oParametros = {
'exec' : 'getDetalhesFicha',
'codigo' : this.iCodigo
};
var self = this;
new AjaxRequest(URL_RPC, oParametros, function(oRetorno, lErro) {
if (lErro) {
alert(oRetorno.mensagem.urlDecode());
lResultado = false;
return;
}
self.oDadosFicha = {
sDescricao : oRetorno.descricao,
iOrgao : oRetorno.orgao,
iUnidade : oRetorno.unidade,
iRecurso : oRetorno.recurso,
iAnexo : oRetorno.anexo,
nValorOrcado : oRetorno.valor_orcado,
nValorTotal : oRetorno.valor_total,
nValorIndisponivel : oRetorno.valor_indisponivel,
nValorProgramar : oRetorno.valor_programar,
aValoresMensais : oRetorno.meses
};
}).setMessage('Aguarde, carregando dados da ficha...')
.asynchronous(false)
.execute();
return lResultado;
};
this.show = function(iCodigoFicha) {
this.iCodigo = iCodigoFicha;
if (this.oWindowAux === null) {
if (this.carregarDados()) {
this.buildContainer();
}
}
};
/**
* Funcao de callback quando a janela for fechada
* @param fnCallback
*/
this.setCallBackFunction = function(fnCallback) {
this.fnCallback = fnCallback;
};
}; |
ViewFichaFinanceira = function(sNomeInstancia) {
this.sNomeInstancia = sNomeInstancia;
this.iCodigo = null; | random_line_split |
ViewFichaFinanceira.js | /**
* E-cidade Software Publico para Gestao Municipal
* Copyright (C) 2014 DBSeller Servicos de Informatica
* www.dbseller.com.br
* e-cidade@dbseller.com.br
*
* Este programa e software livre; voce pode redistribui-lo e/ou
* modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
* publicada pela Free Software Foundation; tanto a versao 2 da
* Licenca como (a seu criterio) qualquer versao mais nova.
*
* Este programa e distribuido na expectativa de ser util, mas SEM
* QUALQUER GARANTIA; sem mesmo a garantia implicita de
* COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
* PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
* detalhes.
*
* Voce deve ter recebido uma copia da Licenca Publica Geral GNU
* junto com este programa; se nao, escreva para a Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307, USA.
*
* Copia da licenca no diretorio licenca/licenca_en.txt
* licenca/licenca_pt.txt
*/
require_once('estilos/grid.style.css');
require_once('scripts/AjaxRequest.js');
require_once('scripts/widgets/windowAux.widget.js');
require_once('scripts/widgets/dbmessageBoard.widget.js');
ViewFichaFinanceira = function(sNomeInstancia) {
this.sNomeInstancia = sNomeInstancia;
this.iCodigo = null;
this.oWindowAux = null;
this.oGridValoresMensais = null;
this.oDadosFicha = {
sDescricao : null,
iOrgao : null,
iUnidade : null,
iRecurso : null,
iAnexo : null,
nValorOrcado : null,
nValorTotal : null,
nValorIndisponivel : null,
nValorProgramar : null,
aValoresMensais : []
};
this.fnCallback = null;
const URL_RPC = 'orc4_programacaofinanceira.RPC.php';
this.buildContainer = function() {
var self = this;
var oDivContainer = document.createElement('div');
oDivContainer.className = 'container';
oDivContainer.style.width = '98%';
var oFieldset = document.createElement('fieldset');
oFieldset.setAttribute('class', 'separator');
var oLegend = document.createElement('legend');
oLegend.setAttribute('class', 'bold');
oLegend.innerHTML = "Valores Mensais";
var oInputValorOrcado = document.createElement('input');
oInputValorOrcado.setAttribute('id', 'valorOrcado');
oInputValorOrcado.setAttribute('readonly', 'readonly');
oInputValorOrcado.setAttribute('class', 'readonly');
oInputValorOrcado.setAttribute('value', js_formatar(this.oDadosFicha.nValorOrcado, 'f'));
var oLabelValorOrcado = document.createElement('label');
oLabelValorOrcado.setAttribute('title', 'Valor Orçado');
oLabelValorOrcado.innerHTML = 'Valor Orçado: ';
var oInputValorTotal = document.createElement('input');
oInputValorTotal.setAttribute('id', 'valorTotal');
oInputValorTotal.setAttribute('readonly', 'readonly');
oInputValorTotal.setAttribute('class', 'readonly');
oInputValorTotal.setAttribute('value', js_formatar(this.oDadosFicha.nValorTotal, 'f'));
var oLabelValorTotal = document.createElement('label');
oLabelValorTotal.setAttribute('title', 'Valor Total');
oLabelValorTotal.innerHTML = 'Valor Total: ';
var oInputIndisponivel = document.createElement('input');
oInputIndisponivel.setAttribute('id', 'valorIndisponivel');
oInputIndisponivel.setAttribute('value', js_formatar(this.oDadosFicha.nValorIndisponivel, 'f'));
var oLabelIndisponivel = document.createElement('label');
oLabelIndisponivel.setAttribute('title', 'Indisponível');
oLabelIndisponivel.innerHTML = 'Indisponível: ';
var oInputProgramar = document.createElement('input');
oInputProgramar.setAttribute('id', 'valorProgramar');
oInputProgramar.setAttribute('value', js_formatar(this.oDadosFicha.nValorProgramar, 'f'));
var oLabelProgramar = document.createElement('label');
oLabelProgramar.setAttribute('title', 'A Programar');
oLabelProgramar.innerHTML = 'A Programar: ';
var oDivGrid = document.createElement('div');
oDivGrid.id = 'divGridValoresMensais';
var oBotaoSalvar = document.createElement('input');
oBotaoSalvar.setAttribute('type', 'button');
oBotaoSalvar.setAttribute('value', 'Salvar');
oBotaoSalvar.setAttribute('style', 'margin-top: 15px;');
oBotaoSalvar.setAttribute('onclick', sNomeInstancia + '.salvar()');
var oDivSalvar = document.createElement('div');
oDivSalvar.appendChild(oBotaoSalvar);
this.oGridValoresMensais = new DBGrid('valoresMensais');
this.oGridValoresMensais.sNameInstance = this.sNomeInstancia + '.oGridValoresMensais';
this.oGridValoresMensais.setHeader(['Mês', 'Valor']);
this.oGridValoresMensais.setCellAlign(['center', 'right']);
this.oGridValoresMensais.setCellWidth(['50%', '50%']);
this.oGridValoresMensais.setHeight(250);
oFieldset.appendChild(oLegend);
oFieldset.appendChild(oDivGrid);
var aCamposTabela = [
{ label: oLabelValorOrcado, input: oInputValorOrcado },
{ label: oLabelValorTotal, input: oInputValorTotal },
{ label: oLabelIndisponivel, input: oInputIndisponivel },
{ label: oLabelProgramar, input: oInputProgramar }
];
var oTabela = document.createElement('table');
oTabela.setAttribute('style', 'width: 100%;');
for (var iIndice = 0; iIndice < aCamposTabela.length; iIndice++) {
var oLabel = aCamposTabela[iIndice].label;
var oInput = aCamposTabela[iIndice].input;
if (iIndice % 2 == 0) {
var oLinhaTabela = document.createElement('tr');
}
oLabel.setAttribute('class', 'bold');
oLabel.setAttribute('for', oInput.getAttribute('id'));
oInput.setAttribute('type', 'text');''
oInput.setAttribute('style', 'text-align: right;');
if (oInput.getAttribute('id') != 'valorTotal' && oInput.getAttribute('id') != 'valorOrcado') {
$(oInput).observe('focus', function(){
this.value = js_strToFloat(this.value);
});
$(oInput).observe('blur', function(){
this.value = js_formatar(this.value, "f");
self.calcularValorTotal();
});
$(oInput).observe('input', function(){
var sLabel = $$('label[for="' + this.id + '"]').first().readAttribute('title');
js_ValidaCampos(this, 4, 'Campo ' + sLabel);
this.value = this.value.replace(/[^0-9\.\,]/g, "")
});
}
var oColunaLabel = document.createElement('td');
oColunaLabel.appendChild(oLabel);
var oColunaInput = document.createElement('td');
oColunaInput.appendChild(oInput);
oLinhaTabela.appendChild(oColunaLabel);
oLinhaTabela.appendChild(oColunaInput);
oTabela.appendChild(oLinhaTabela);
}
oDivContainer.appendChild(oTabela);
oDivContainer.appendChild(oFieldset);
oDivContainer.appendChild(oDivSalvar);
var sTituloJanela = 'Manutenção de Programação Financeira';
var sTitulo = 'Ficha Financeira';
var sHelp = this.oDadosFicha.sDescricao.urlDecode();
this.oWindowAux = new windowAux('oWindowAux', sTituloJanela, 550, 550);
this.oWindowAux.allowDrag(false);
this.oWindowAux.setContent(oDivContainer);
this.oWindowAux.setShutDownFunction(function() {
self.oWindowAux.destroy();
self.oWindowAux = null;
self.oDadosFicha = null;
self.iCodigo = null;
if (self.fnCallback != null) {
self.fnCallback();
}
});
var oMessageBoard = new DBMessageBoard('oMessageBoard', sTitulo, sHelp, this.oWindowAux.getContentContainer());
oMessageBoard.show();
this.oWindowAux.show();
this.oGridValoresMensais.show(oDivGrid);
this.oGridValoresMensais.clearAll(true);
for (iIndice = 0; iIndice < this.oDadosFicha.aValoresMensais.length; iIndice++) {
var oCampo = document.createElement('input');
var sLabel = this.oDadosFicha.aValoresMensais[iIndice].mes.urlDecode();
oCampo.setAttribute('name', this.sNomeInstancia + 'ValorMensal' + iIndice);
oCampo.setAttribute('class', this.sNomeInstancia + 'ValorMensal');
oCampo.setAttribute('value', js_formatar(this.oDadosFicha.aValoresMensais[iIndice].valor, 'f'));
oCampo.setAttribute('type', 'text');
oCampo.setAttribute('style', 'text-align: right; width: 100%;');
oCampo.setAttribute('onfocus', 'this.value = js_strToFloat(this.value)');
oCampo.setAttribute('onblur', 'this.value = js_formatar(this.value, "f");' + sNomeInstancia + '.calcularValorTotal()');
oCampo.setAttribute('oninput', 'js_ValidaCampos(this, 4, "Campo ' + sLabel + '"); this.value = this.value.replace(/[^0-9\.,]/g, "")');
var aLinha = [
this.oDadosFicha.aValoresMensais[iIndice].mes.urlDecode(),
oCampo.outerHTML
];
this.oGridValoresMensais.addRow(aLinha);
}
this.oGridValoresMensais.renderRows();
};
this.calcularValorTotal = function() {
var nValorTotal = 0;
var nValorIndisponivel = js_strToFloat($F('valorIndisponivel'));
var nValorProgramar = js_strToFloat($F('valorProgramar'));
$$('.' + this.sNomeInstancia + 'ValorMensal').each(function(oLinha) {
var nValor = js_strToFloat(oLinha.getValue());
nValorTotal += nValor;
});
nValorTotal = nValorTotal + nValorIndisponivel + nValorProgramar;
$('valorTotal').value = js_formatar(nValorTotal, 'f', 2);
};
this.salvar = function() {
var aMeses = [];
$$('.' + this.sNomeInstancia + 'ValorMensal').each(function(oLinha) {
aMeses.push(js_strToFloat(oLinha.getValue()));
});
var oParametros = {
'exec' : 'alterarFicha',
'iCodigoFicha' : this.iCodigo,
'nValorIndisponivel' : js_strToFloat($F('valorIndisponivel')),
'nValorProgramar' : js_strToFloat($F('valorProgramar')),
'aValoresMensais' : aMeses
};
new AjaxRequest(URL_RPC, oParametros, function(oRetorno, lErro) {
if (lErro) {
alert(oRetorno.mensagem.urlDecode());
return;
}
alert('Ficha alterada com sucesso.');
}).setMessage('Aguarde, salvando dados da ficha...')
.asynchronous(false)
.execute();
};
this.carregarDados = function() {
var lResultado = true;
var oParametros = {
'exec' : 'getDetalhesFicha',
'codigo' : this.iCodigo
};
var self = this;
new AjaxRequest(URL_RPC, oParametros, function(oRetorno, lErro) {
if (lErro) {
alert(oRetorno.mensagem.urlDecode());
lResultado = false;
return;
}
self.oDadosFicha = {
sDescricao : oRetorno.descricao,
iOrgao : oRetorno.orgao,
iUnidade : oRetorno.unidade,
iRecurso : oRetorno.recurso,
iAnexo : oRetorno.anexo,
nValorOrcado : oRetorno.valor_orcado,
nValorTotal : oRetorno.valor_total,
nValorIndisponivel : oRetorno.valor_indisponivel,
nValorProgramar : oRetorno.valor_programar,
aValoresMensais : oRetorno.meses
};
}).setMessage('Aguarde, carregando dados da ficha...')
.asynchronous(false)
.execute();
return lResultado;
};
this.show = function(iCodigoFicha) {
this.iCodigo = iCodigoFicha;
if (this.oWindowAux === null) {
if (this.carregarDados()) {
| };
/**
* Funcao de callback quando a janela for fechada
* @param fnCallback
*/
this.setCallBackFunction = function(fnCallback) {
this.fnCallback = fnCallback;
};
};
| this.buildContainer();
}
}
| conditional_block |
channelmessage.go | package models
import (
"encoding/json"
"fmt"
"socialapi/config"
"socialapi/request"
"strconv"
"sync"
"time"
ve "github.com/VerbalExpressions/GoVerbalExpressions"
"github.com/jinzhu/gorm"
"github.com/koding/bongo"
)
var mentionRegex = ve.New().
Find("@").
BeginCapture().
Word().
Maybe("-").
Maybe(".").
Word().
EndCapture().
Regex()
type ChannelMessage struct {
// unique identifier of the channel message
Id int64 `json:"id,string"`
// Token holds the uuid for interoperability with the bongo-client
Token string `json:"token"`
// Body of the mesage
Body string `json:"body"`
// Generated Slug for body
Slug string `json:"slug" sql:"NOT NULL;TYPE:VARCHAR(100);"`
// type of the m essage
TypeConstant string `json:"typeConstant" sql:"NOT NULL;TYPE:VARCHAR(100);"`
// Creator of the channel message
AccountId int64 `json:"accountId,string" sql:"NOT NULL"`
// in which channel this message is created
InitialChannelId int64 `json:"initialChannelId,string" sql:"NOT NULL"`
// holds troll, unsafe, etc
MetaBits MetaBits `json:"metaBits"`
// Creation date of the message
CreatedAt time.Time `json:"createdAt" sql:"DEFAULT:CURRENT_TIMESTAMP"`
// Modification date of the message
UpdatedAt time.Time `json:"updatedAt" sql:"DEFAULT:CURRENT_TIMESTAMP"`
// Deletion date of the channel message
DeletedAt time.Time `json:"deletedAt"`
// Extra data storage
Payload gorm.Hstore `json:"payload"`
// is required to identify to request in client side
ClientRequestId string `json:"clientRequestId,omitempty" sql:"-"`
}
const (
ChannelMessage_TYPE_POST = "post"
ChannelMessage_TYPE_REPLY = "reply"
ChannelMessage_TYPE_JOIN = "join"
ChannelMessage_TYPE_LEAVE = "leave"
ChannelMessage_TYPE_PRIVATE_MESSAGE = "privatemessage"
ChannelMessage_TYPE_BOT = "bot"
ChannelMessage_TYPE_SYSTEM = "system"
ChannelMessagePayloadKeyLocation = "location"
ChannelMessagePayloadKeyIntegration = "channelIntegrationId"
)
func (c *ChannelMessage) Location() *string {
return c.GetPayload(ChannelMessagePayloadKeyLocation)
}
func (c *ChannelMessage) MarkIfExempt() error {
isExempt, err := c.isExempt()
if err != nil {
return err
}
if isExempt {
c.MetaBits.Mark(Troll)
}
return nil
}
func (c *ChannelMessage) isExempt() (bool, error) {
if c.MetaBits.Is(Troll) {
return true, nil
}
accountId, err := c.getAccountId()
if err != nil {
return false, err
}
account, err := ResetAccountCache(accountId)
if err != nil {
return false, err
}
if account == nil {
return false, fmt.Errorf("account is nil, accountId:%d", c.AccountId)
}
if account.IsTroll {
return true, nil
}
return false, nil
}
// Tests are done
func (c *ChannelMessage) getAccountId() (int64, error) {
if c.AccountId != 0 {
return c.AccountId, nil
}
if c.Id == 0 {
return 0, fmt.Errorf("couldnt find accountId from content %+v", c)
}
cm := NewChannelMessage()
if err := cm.ById(c.Id); err != nil {
return 0, err
}
return cm.AccountId, nil
}
// Tests are done
func bodyLenCheck(body string) error {
if len(body) < config.MustGet().Limits.MessageBodyMinLen {
return fmt.Errorf("message body length should be greater than %d, yours is %d ", config.MustGet().Limits.MessageBodyMinLen, len(body))
}
return nil
}
type messageResponseStruct struct {
Index int
Message *ChannelMessageContainer
}
// TODO - remove this function
func (c *ChannelMessage) BuildMessages(query *request.Query, messages []ChannelMessage) ([]*ChannelMessageContainer, error) {
containers := make([]*ChannelMessageContainer, len(messages))
if len(containers) == 0 {
return containers, nil
}
var onMessage = make(chan *messageResponseStruct, len(messages))
var onError = make(chan error, 1)
var wg sync.WaitGroup
for i, message := range messages {
wg.Add(1)
go func(i int, message ChannelMessage) {
defer wg.Done()
d := NewChannelMessage()
*d = message
data, err := d.BuildMessage(query)
if err != nil {
onError <- err
return
}
onMessage <- &messageResponseStruct{Index: i, Message: data}
}(i, message)
}
wg.Wait()
for i := 1; i <= len(messages); i++ {
select {
case messageResp := <-onMessage:
containers[messageResp.Index] = messageResp.Message
case err := <-onError:
return containers, err
}
}
return containers, nil
}
// TODO - remove this function
func (c *ChannelMessage) BuildMessage(query *request.Query) (*ChannelMessageContainer, error) {
cmc := NewChannelMessageContainer()
if err := cmc.Fetch(c.Id, query); err != nil {
return nil, err
}
if cmc.Message == nil {
return cmc, nil
}
var err error
cmc.Message, err = cmc.Message.PopulatePayload()
if err != nil {
return nil, err
}
// return cmc, cmc.AddIsFollowed(query).AddIsInteracted(query).Err
return cmc, cmc.AddIsInteracted(query).Err
}
func (c *ChannelMessage) CheckIsMessageFollowed(query *request.Query) (bool, error) {
if query.AccountId == 0 {
return false, nil
}
channel := NewChannel()
if err := channel.FetchPinnedActivityChannel(query.AccountId, query.GroupName); err != nil {
if err == bongo.RecordNotFound {
return false, nil
}
return false, err
}
cml := NewChannelMessageList()
q := &bongo.Query{
Selector: map[string]interface{}{
"channel_id": channel.Id,
"message_id": c.Id,
},
}
if err := cml.One(q); err != nil {
if err == bongo.RecordNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// Tests are done.
func (c *ChannelMessage) BuildEmptyMessageContainer() (*ChannelMessageContainer, error) {
if c.Id == 0 {
return nil, ErrChannelMessageIdIsNotSet
}
container := NewChannelMessageContainer()
container.Message = c
if c.AccountId == 0 {
return container, nil
}
acc, err := Cache.Account.ById(c.AccountId)
if err != nil {
return nil, err
}
container.AccountOldId = acc.OldId
return container, nil
}
func generateMessageListQuery(q *request.Query) *bongo.Query {
messageType := q.Type
if messageType == "" {
messageType = ChannelMessage_TYPE_POST
}
query := &bongo.Query{
Selector: map[string]interface{}{
"type_constant": messageType,
},
Pagination: *bongo.NewPagination(q.Limit, q.Skip),
}
if q.GroupChannelId != 0 {
query.Selector["initial_channel_id"] = q.GroupChannelId
}
if q.AccountId != 0 {
query.Selector["account_id"] = q.AccountId
}
query.AddScope(ExcludeFields(q.Exclude))
query.AddScope(StartFrom(q.From))
query.AddScope(TillTo(q.To))
return query
}
func (c *ChannelMessage) FetchMessagesByChannelId(channelId int64, q *request.Query) ([]ChannelMessage, error) {
q.GroupChannelId = channelId
query := generateMessageListQuery(q)
query.Sort = map[string]string{
"created_at": "DESC",
}
var messages []ChannelMessage
if err := c.Some(&messages, query); err != nil {
return nil, err
}
if messages == nil {
return make([]ChannelMessage, 0), nil
}
return messages, nil
}
func (c *ChannelMessage) GetMentionedUsernames() []string {
flattened := make([]string, 0)
res := mentionRegex.FindAllStringSubmatch(c.Body, -1)
if len(res) == 0 {
return flattened
}
participants := map[string]struct{}{}
// remove duplicate mentions
for _, ele := range res {
participants[ele[1]] = struct{}{}
}
for participant := range participants {
flattened = append(flattened, participant)
}
return flattened
}
// FetchTotalMessageCount fetch the count of all messages in the channel
func (c *ChannelMessage) FetchTotalMessageCount(q *request.Query) (int, error) {
query := generateMessageListQuery(q)
query.AddScope(RemoveTrollContent(
c, q.ShowExempt,
))
return c.CountWithQuery(query)
}
// FetchMessageIds fetch id of the messages in the channel
// sorts the messages by descending order
func (c *ChannelMessage) FetchMessageIds(q *request.Query) ([]int64, error) {
query := &bongo.Query{
Selector: map[string]interface{}{
"account_id": q.AccountId,
"type_constant": q.Type,
},
Pluck: "id",
Pagination: *bongo.NewPagination(q.Limit, q.Skip),
Sort: map[string]string{
"created_at": "DESC",
},
}
query.AddScope(RemoveTrollContent(c, q.ShowExempt))
var messageIds []int64
if err := c.Some(&messageIds, query); err != nil {
return nil, err
}
if messageIds == nil {
return make([]int64, 0), nil
}
return messageIds, nil
}
// BySlug fetchs channel message by its slug
// checks if message is in the channel or not
func (c *ChannelMessage) BySlug(query *request.Query) error {
if query.Slug == "" {
return ErrSlugIsNotSet
}
// fetch message itself
q := &bongo.Query{
Selector: map[string]interface{}{
"slug": query.Slug,
},
}
q.AddScope(RemoveTrollContent(
c, query.ShowExempt,
))
if err := c.One(q); err != nil {
return err
}
query.Type = Channel_TYPE_GROUP
res, err := c.isInChannel(query, "public")
if err != nil {
return err
}
if res {
return nil
}
query.Type = Channel_TYPE_ANNOUNCEMENT
res, err = c.isInChannel(query, "changelog")
if err != nil {
return err
}
if !res {
return bongo.RecordNotFound
}
return nil
}
func (c *ChannelMessage) isInChannel(query *request.Query, channelName string) (bool, error) {
if c.Id == 0 {
return false, ErrChannelMessageIdIsNotSet
}
// fetch channel by group name
query.Name = query.GroupName
if query.GroupName == "koding" {
query.Name = channelName
}
ch := NewChannel()
channel, err := ch.ByName(query)
if err != nil {
return false, err
}
if channel.Id == 0 {
return false, ErrChannelIsNotSet
}
// check if message is in the channel
cml := NewChannelMessageList()
return cml.IsInChannel(c.Id, channel.Id)
}
// DeleteMessageDependencies deletes all records from the database that are
// dependencies of a given message. This includes interactions, optionally
// replies, and channel message lists.
func (c *ChannelMessage) DeleteMessageAndDependencies(deleteReplies bool) error {
// fetch interactions
i := NewInteraction()
i.MessageId = c.Id
interactions, err := i.FetchAll("like")
if err != nil {
return err
}
// delete interactions
for _, interaction := range interactions {
err := interaction.Delete()
if err != nil {
return err
}
}
if deleteReplies {
if err := c.DeleteReplies(); err != nil {
return err
}
}
// delete any associated channel message lists
if err = c.DeleteChannelMessageLists(); err != nil {
return err
}
err = NewMessageReply().DeleteByOrQuery(c.Id)
if err != nil {
return err
}
// delete channel message itself
return c.Delete()
}
// AddReply adds the reply message to db ,
// according to message id
func (c *ChannelMessage) AddReply(reply *ChannelMessage) (*MessageReply, error) {
if c.Id == 0 {
return nil, ErrChannelMessageIdIsNotSet
}
mr := NewMessageReply()
mr.MessageId = c.Id
mr.ReplyId = reply.Id
mr.CreatedAt = reply.CreatedAt
if err := mr.Create(); err != nil {
return nil, err
}
return mr, nil
}
// DeleteReplies deletes all the replies of a given ChannelMessage, one level deep
func (c *ChannelMessage) DeleteReplies() error {
mr := NewMessageReply()
mr.MessageId = c.Id
// list returns ChannelMessage
messageReplies, err := mr.ListAll()
if err != nil {
return err
}
// delete message replies
for _, replyMessage := range messageReplies {
err := replyMessage.DeleteMessageAndDependencies(false)
if err != nil {
return err
}
}
return nil
}
func (c *ChannelMessage) GetChannelMessageLists() ([]ChannelMessageList, error) {
var listings []ChannelMessageList
q := &bongo.Query{
Selector: map[string]interface{}{
"message_id": c.Id,
},
}
if err := NewChannelMessageList().Some(&listings, q); err != nil {
return nil, err
}
return listings, nil
}
func (c *ChannelMessage) DeleteChannelMessageLists() error {
listings, err := c.GetChannelMessageLists()
if err != nil {
return err
}
for _, listing := range listings {
if err := listing.Delete(); err != nil {
return err
}
}
return nil
}
// FetchByIds fetchs given ids from database, it doesnt add any meta bits
// properties into query
func (c *ChannelMessage) FetchByIds(ids []int64) ([]ChannelMessage, error) {
var messages []ChannelMessage
if len(ids) == 0 {
return messages, nil
}
if err := bongo.B.FetchByIds(c, &messages, ids); err != nil {
return nil, err
}
return messages, nil
}
func (c *ChannelMessage) PopulatePayload() (*ChannelMessage, error) {
cm, err := c.PopulateAddedBy()
if err != nil {
return nil, err
}
i, err := cm.PopulateIntegration()
if err != nil {
return nil, err
}
return i.PopulateInitialParticipants()
}
func (c *ChannelMessage) PopulateAddedBy() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
addedByData, ok := c.Payload["addedBy"]
if !ok {
return c, nil
}
addedBy, err := strconv.ParseInt(*addedByData, 10, 64)
if err != nil {
return c, err
}
a, err := Cache.Account.ById(addedBy)
if err != nil {
return c, err
}
*addedByData = a.Nick
newCm.Payload["addedBy"] = addedByData
return newCm, nil
}
func (c *ChannelMessage) PopulateIntegration() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
channelIntegration := c.GetPayload(ChannelMessagePayloadKeyIntegration)
if channelIntegration != nil && *channelIntegration != "" {
id, err := strconv.ParseInt(*channelIntegration, 10, 64)
if err != nil {
return c, err
}
i, err := Cache.Integration.ByChannelIntegrationId(id)
if err != nil {
return c, err
}
newCm.SetPayload("integrationTitle", i.Title)
newCm.SetPayload("integrationIconPath", i.IconPath)
return newCm, nil
}
return c, nil
}
func (c *ChannelMessage) PopulateInitialParticipants() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
initialParticipants, ok := c.Payload["initialParticipants"]
if !ok {
return c, nil
}
var participants []string
err := json.Unmarshal([]byte(*initialParticipants), &participants)
if err != nil {
return c, err
}
accountIds := make([]string, len(participants))
for i, participant := range participants {
accountId, err := strconv.ParseInt(participant, 10, 64)
if err != nil {
return c, err
}
a, err := Cache.Account.ById(accountId)
if err != nil {
return c, err
}
accountIds[i] = a.Nick
}
participantNicks, err := json.Marshal(accountIds)
if err != nil {
return c, err
}
pns := string(participantNicks)
newCm.Payload["initialParticipants"] = &pns
return newCm, nil
}
// FetchParentChannel fetches the parent channel of the message. When
// initial channel is topic, it fetches the group channel, otherwise
// it just fetches the initial channel as parent.
func (cm *ChannelMessage) FetchParentChannel() (*Channel, error) {
c, err := Cache.Channel.ById(cm.InitialChannelId)
if err != nil {
return nil, err
}
if c.TypeConstant != Channel_TYPE_TOPIC {
return c, nil
}
ch, err := Cache.Channel.ByGroupName(c.GroupName)
if err != nil {
return nil, err
}
return ch, nil
}
func (cm *ChannelMessage) SetPayload(key string, value string) {
if cm.Payload == nil { |
cm.Payload[key] = &value
}
func (cm *ChannelMessage) GetPayload(key string) *string {
if cm.Payload == nil {
return nil
}
val, ok := cm.Payload[key]
if !ok {
return nil
}
return val
}
// SearchIndexable decides if message is indexable on search engine or not
func (c *ChannelMessage) SearchIndexable() bool {
return IsIn(c.TypeConstant,
ChannelMessage_TYPE_POST,
ChannelMessage_TYPE_REPLY,
ChannelMessage_TYPE_PRIVATE_MESSAGE,
)
}
|
cm.Payload = gorm.Hstore{}
}
| conditional_block |
channelmessage.go | package models
import (
"encoding/json"
"fmt"
"socialapi/config"
"socialapi/request"
"strconv"
"sync"
"time"
ve "github.com/VerbalExpressions/GoVerbalExpressions"
"github.com/jinzhu/gorm"
"github.com/koding/bongo"
)
var mentionRegex = ve.New().
Find("@").
BeginCapture().
Word().
Maybe("-").
Maybe(".").
Word().
EndCapture().
Regex()
type ChannelMessage struct {
// unique identifier of the channel message
Id int64 `json:"id,string"`
// Token holds the uuid for interoperability with the bongo-client
Token string `json:"token"`
// Body of the mesage
Body string `json:"body"`
// Generated Slug for body
Slug string `json:"slug" sql:"NOT NULL;TYPE:VARCHAR(100);"`
// type of the m essage
TypeConstant string `json:"typeConstant" sql:"NOT NULL;TYPE:VARCHAR(100);"`
// Creator of the channel message
AccountId int64 `json:"accountId,string" sql:"NOT NULL"`
// in which channel this message is created
InitialChannelId int64 `json:"initialChannelId,string" sql:"NOT NULL"`
// holds troll, unsafe, etc
MetaBits MetaBits `json:"metaBits"`
// Creation date of the message
CreatedAt time.Time `json:"createdAt" sql:"DEFAULT:CURRENT_TIMESTAMP"`
// Modification date of the message
UpdatedAt time.Time `json:"updatedAt" sql:"DEFAULT:CURRENT_TIMESTAMP"`
// Deletion date of the channel message
DeletedAt time.Time `json:"deletedAt"`
// Extra data storage
Payload gorm.Hstore `json:"payload"`
// is required to identify to request in client side
ClientRequestId string `json:"clientRequestId,omitempty" sql:"-"`
}
const (
ChannelMessage_TYPE_POST = "post"
ChannelMessage_TYPE_REPLY = "reply"
ChannelMessage_TYPE_JOIN = "join"
ChannelMessage_TYPE_LEAVE = "leave"
ChannelMessage_TYPE_PRIVATE_MESSAGE = "privatemessage"
ChannelMessage_TYPE_BOT = "bot"
ChannelMessage_TYPE_SYSTEM = "system"
ChannelMessagePayloadKeyLocation = "location"
ChannelMessagePayloadKeyIntegration = "channelIntegrationId"
)
func (c *ChannelMessage) Location() *string {
return c.GetPayload(ChannelMessagePayloadKeyLocation)
}
func (c *ChannelMessage) MarkIfExempt() error {
isExempt, err := c.isExempt()
if err != nil {
return err
}
if isExempt {
c.MetaBits.Mark(Troll)
}
return nil
}
func (c *ChannelMessage) isExempt() (bool, error) {
if c.MetaBits.Is(Troll) {
return true, nil
}
accountId, err := c.getAccountId()
if err != nil {
return false, err
}
account, err := ResetAccountCache(accountId)
if err != nil {
return false, err
}
if account == nil {
return false, fmt.Errorf("account is nil, accountId:%d", c.AccountId)
}
if account.IsTroll {
return true, nil
}
return false, nil
}
// Tests are done
func (c *ChannelMessage) getAccountId() (int64, error) {
if c.AccountId != 0 {
return c.AccountId, nil
}
if c.Id == 0 {
return 0, fmt.Errorf("couldnt find accountId from content %+v", c)
}
cm := NewChannelMessage()
if err := cm.ById(c.Id); err != nil {
return 0, err | }
// Tests are done
func bodyLenCheck(body string) error {
if len(body) < config.MustGet().Limits.MessageBodyMinLen {
return fmt.Errorf("message body length should be greater than %d, yours is %d ", config.MustGet().Limits.MessageBodyMinLen, len(body))
}
return nil
}
type messageResponseStruct struct {
Index int
Message *ChannelMessageContainer
}
// TODO - remove this function
func (c *ChannelMessage) BuildMessages(query *request.Query, messages []ChannelMessage) ([]*ChannelMessageContainer, error) {
containers := make([]*ChannelMessageContainer, len(messages))
if len(containers) == 0 {
return containers, nil
}
var onMessage = make(chan *messageResponseStruct, len(messages))
var onError = make(chan error, 1)
var wg sync.WaitGroup
for i, message := range messages {
wg.Add(1)
go func(i int, message ChannelMessage) {
defer wg.Done()
d := NewChannelMessage()
*d = message
data, err := d.BuildMessage(query)
if err != nil {
onError <- err
return
}
onMessage <- &messageResponseStruct{Index: i, Message: data}
}(i, message)
}
wg.Wait()
for i := 1; i <= len(messages); i++ {
select {
case messageResp := <-onMessage:
containers[messageResp.Index] = messageResp.Message
case err := <-onError:
return containers, err
}
}
return containers, nil
}
// TODO - remove this function
func (c *ChannelMessage) BuildMessage(query *request.Query) (*ChannelMessageContainer, error) {
cmc := NewChannelMessageContainer()
if err := cmc.Fetch(c.Id, query); err != nil {
return nil, err
}
if cmc.Message == nil {
return cmc, nil
}
var err error
cmc.Message, err = cmc.Message.PopulatePayload()
if err != nil {
return nil, err
}
// return cmc, cmc.AddIsFollowed(query).AddIsInteracted(query).Err
return cmc, cmc.AddIsInteracted(query).Err
}
func (c *ChannelMessage) CheckIsMessageFollowed(query *request.Query) (bool, error) {
if query.AccountId == 0 {
return false, nil
}
channel := NewChannel()
if err := channel.FetchPinnedActivityChannel(query.AccountId, query.GroupName); err != nil {
if err == bongo.RecordNotFound {
return false, nil
}
return false, err
}
cml := NewChannelMessageList()
q := &bongo.Query{
Selector: map[string]interface{}{
"channel_id": channel.Id,
"message_id": c.Id,
},
}
if err := cml.One(q); err != nil {
if err == bongo.RecordNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// Tests are done.
func (c *ChannelMessage) BuildEmptyMessageContainer() (*ChannelMessageContainer, error) {
if c.Id == 0 {
return nil, ErrChannelMessageIdIsNotSet
}
container := NewChannelMessageContainer()
container.Message = c
if c.AccountId == 0 {
return container, nil
}
acc, err := Cache.Account.ById(c.AccountId)
if err != nil {
return nil, err
}
container.AccountOldId = acc.OldId
return container, nil
}
func generateMessageListQuery(q *request.Query) *bongo.Query {
messageType := q.Type
if messageType == "" {
messageType = ChannelMessage_TYPE_POST
}
query := &bongo.Query{
Selector: map[string]interface{}{
"type_constant": messageType,
},
Pagination: *bongo.NewPagination(q.Limit, q.Skip),
}
if q.GroupChannelId != 0 {
query.Selector["initial_channel_id"] = q.GroupChannelId
}
if q.AccountId != 0 {
query.Selector["account_id"] = q.AccountId
}
query.AddScope(ExcludeFields(q.Exclude))
query.AddScope(StartFrom(q.From))
query.AddScope(TillTo(q.To))
return query
}
func (c *ChannelMessage) FetchMessagesByChannelId(channelId int64, q *request.Query) ([]ChannelMessage, error) {
q.GroupChannelId = channelId
query := generateMessageListQuery(q)
query.Sort = map[string]string{
"created_at": "DESC",
}
var messages []ChannelMessage
if err := c.Some(&messages, query); err != nil {
return nil, err
}
if messages == nil {
return make([]ChannelMessage, 0), nil
}
return messages, nil
}
func (c *ChannelMessage) GetMentionedUsernames() []string {
flattened := make([]string, 0)
res := mentionRegex.FindAllStringSubmatch(c.Body, -1)
if len(res) == 0 {
return flattened
}
participants := map[string]struct{}{}
// remove duplicate mentions
for _, ele := range res {
participants[ele[1]] = struct{}{}
}
for participant := range participants {
flattened = append(flattened, participant)
}
return flattened
}
// FetchTotalMessageCount fetch the count of all messages in the channel
func (c *ChannelMessage) FetchTotalMessageCount(q *request.Query) (int, error) {
query := generateMessageListQuery(q)
query.AddScope(RemoveTrollContent(
c, q.ShowExempt,
))
return c.CountWithQuery(query)
}
// FetchMessageIds fetch id of the messages in the channel
// sorts the messages by descending order
func (c *ChannelMessage) FetchMessageIds(q *request.Query) ([]int64, error) {
query := &bongo.Query{
Selector: map[string]interface{}{
"account_id": q.AccountId,
"type_constant": q.Type,
},
Pluck: "id",
Pagination: *bongo.NewPagination(q.Limit, q.Skip),
Sort: map[string]string{
"created_at": "DESC",
},
}
query.AddScope(RemoveTrollContent(c, q.ShowExempt))
var messageIds []int64
if err := c.Some(&messageIds, query); err != nil {
return nil, err
}
if messageIds == nil {
return make([]int64, 0), nil
}
return messageIds, nil
}
// BySlug fetchs channel message by its slug
// checks if message is in the channel or not
func (c *ChannelMessage) BySlug(query *request.Query) error {
if query.Slug == "" {
return ErrSlugIsNotSet
}
// fetch message itself
q := &bongo.Query{
Selector: map[string]interface{}{
"slug": query.Slug,
},
}
q.AddScope(RemoveTrollContent(
c, query.ShowExempt,
))
if err := c.One(q); err != nil {
return err
}
query.Type = Channel_TYPE_GROUP
res, err := c.isInChannel(query, "public")
if err != nil {
return err
}
if res {
return nil
}
query.Type = Channel_TYPE_ANNOUNCEMENT
res, err = c.isInChannel(query, "changelog")
if err != nil {
return err
}
if !res {
return bongo.RecordNotFound
}
return nil
}
func (c *ChannelMessage) isInChannel(query *request.Query, channelName string) (bool, error) {
if c.Id == 0 {
return false, ErrChannelMessageIdIsNotSet
}
// fetch channel by group name
query.Name = query.GroupName
if query.GroupName == "koding" {
query.Name = channelName
}
ch := NewChannel()
channel, err := ch.ByName(query)
if err != nil {
return false, err
}
if channel.Id == 0 {
return false, ErrChannelIsNotSet
}
// check if message is in the channel
cml := NewChannelMessageList()
return cml.IsInChannel(c.Id, channel.Id)
}
// DeleteMessageDependencies deletes all records from the database that are
// dependencies of a given message. This includes interactions, optionally
// replies, and channel message lists.
func (c *ChannelMessage) DeleteMessageAndDependencies(deleteReplies bool) error {
// fetch interactions
i := NewInteraction()
i.MessageId = c.Id
interactions, err := i.FetchAll("like")
if err != nil {
return err
}
// delete interactions
for _, interaction := range interactions {
err := interaction.Delete()
if err != nil {
return err
}
}
if deleteReplies {
if err := c.DeleteReplies(); err != nil {
return err
}
}
// delete any associated channel message lists
if err = c.DeleteChannelMessageLists(); err != nil {
return err
}
err = NewMessageReply().DeleteByOrQuery(c.Id)
if err != nil {
return err
}
// delete channel message itself
return c.Delete()
}
// AddReply adds the reply message to db ,
// according to message id
func (c *ChannelMessage) AddReply(reply *ChannelMessage) (*MessageReply, error) {
if c.Id == 0 {
return nil, ErrChannelMessageIdIsNotSet
}
mr := NewMessageReply()
mr.MessageId = c.Id
mr.ReplyId = reply.Id
mr.CreatedAt = reply.CreatedAt
if err := mr.Create(); err != nil {
return nil, err
}
return mr, nil
}
// DeleteReplies deletes all the replies of a given ChannelMessage, one level deep
func (c *ChannelMessage) DeleteReplies() error {
mr := NewMessageReply()
mr.MessageId = c.Id
// list returns ChannelMessage
messageReplies, err := mr.ListAll()
if err != nil {
return err
}
// delete message replies
for _, replyMessage := range messageReplies {
err := replyMessage.DeleteMessageAndDependencies(false)
if err != nil {
return err
}
}
return nil
}
func (c *ChannelMessage) GetChannelMessageLists() ([]ChannelMessageList, error) {
var listings []ChannelMessageList
q := &bongo.Query{
Selector: map[string]interface{}{
"message_id": c.Id,
},
}
if err := NewChannelMessageList().Some(&listings, q); err != nil {
return nil, err
}
return listings, nil
}
func (c *ChannelMessage) DeleteChannelMessageLists() error {
listings, err := c.GetChannelMessageLists()
if err != nil {
return err
}
for _, listing := range listings {
if err := listing.Delete(); err != nil {
return err
}
}
return nil
}
// FetchByIds fetchs given ids from database, it doesnt add any meta bits
// properties into query
func (c *ChannelMessage) FetchByIds(ids []int64) ([]ChannelMessage, error) {
var messages []ChannelMessage
if len(ids) == 0 {
return messages, nil
}
if err := bongo.B.FetchByIds(c, &messages, ids); err != nil {
return nil, err
}
return messages, nil
}
func (c *ChannelMessage) PopulatePayload() (*ChannelMessage, error) {
cm, err := c.PopulateAddedBy()
if err != nil {
return nil, err
}
i, err := cm.PopulateIntegration()
if err != nil {
return nil, err
}
return i.PopulateInitialParticipants()
}
func (c *ChannelMessage) PopulateAddedBy() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
addedByData, ok := c.Payload["addedBy"]
if !ok {
return c, nil
}
addedBy, err := strconv.ParseInt(*addedByData, 10, 64)
if err != nil {
return c, err
}
a, err := Cache.Account.ById(addedBy)
if err != nil {
return c, err
}
*addedByData = a.Nick
newCm.Payload["addedBy"] = addedByData
return newCm, nil
}
func (c *ChannelMessage) PopulateIntegration() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
channelIntegration := c.GetPayload(ChannelMessagePayloadKeyIntegration)
if channelIntegration != nil && *channelIntegration != "" {
id, err := strconv.ParseInt(*channelIntegration, 10, 64)
if err != nil {
return c, err
}
i, err := Cache.Integration.ByChannelIntegrationId(id)
if err != nil {
return c, err
}
newCm.SetPayload("integrationTitle", i.Title)
newCm.SetPayload("integrationIconPath", i.IconPath)
return newCm, nil
}
return c, nil
}
func (c *ChannelMessage) PopulateInitialParticipants() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
initialParticipants, ok := c.Payload["initialParticipants"]
if !ok {
return c, nil
}
var participants []string
err := json.Unmarshal([]byte(*initialParticipants), &participants)
if err != nil {
return c, err
}
accountIds := make([]string, len(participants))
for i, participant := range participants {
accountId, err := strconv.ParseInt(participant, 10, 64)
if err != nil {
return c, err
}
a, err := Cache.Account.ById(accountId)
if err != nil {
return c, err
}
accountIds[i] = a.Nick
}
participantNicks, err := json.Marshal(accountIds)
if err != nil {
return c, err
}
pns := string(participantNicks)
newCm.Payload["initialParticipants"] = &pns
return newCm, nil
}
// FetchParentChannel fetches the parent channel of the message. When
// initial channel is topic, it fetches the group channel, otherwise
// it just fetches the initial channel as parent.
func (cm *ChannelMessage) FetchParentChannel() (*Channel, error) {
c, err := Cache.Channel.ById(cm.InitialChannelId)
if err != nil {
return nil, err
}
if c.TypeConstant != Channel_TYPE_TOPIC {
return c, nil
}
ch, err := Cache.Channel.ByGroupName(c.GroupName)
if err != nil {
return nil, err
}
return ch, nil
}
func (cm *ChannelMessage) SetPayload(key string, value string) {
if cm.Payload == nil {
cm.Payload = gorm.Hstore{}
}
cm.Payload[key] = &value
}
func (cm *ChannelMessage) GetPayload(key string) *string {
if cm.Payload == nil {
return nil
}
val, ok := cm.Payload[key]
if !ok {
return nil
}
return val
}
// SearchIndexable decides if message is indexable on search engine or not
func (c *ChannelMessage) SearchIndexable() bool {
return IsIn(c.TypeConstant,
ChannelMessage_TYPE_POST,
ChannelMessage_TYPE_REPLY,
ChannelMessage_TYPE_PRIVATE_MESSAGE,
)
} | }
return cm.AccountId, nil | random_line_split |
channelmessage.go | package models
import (
"encoding/json"
"fmt"
"socialapi/config"
"socialapi/request"
"strconv"
"sync"
"time"
ve "github.com/VerbalExpressions/GoVerbalExpressions"
"github.com/jinzhu/gorm"
"github.com/koding/bongo"
)
var mentionRegex = ve.New().
Find("@").
BeginCapture().
Word().
Maybe("-").
Maybe(".").
Word().
EndCapture().
Regex()
type ChannelMessage struct {
// unique identifier of the channel message
Id int64 `json:"id,string"`
// Token holds the uuid for interoperability with the bongo-client
Token string `json:"token"`
// Body of the mesage
Body string `json:"body"`
// Generated Slug for body
Slug string `json:"slug" sql:"NOT NULL;TYPE:VARCHAR(100);"`
// type of the m essage
TypeConstant string `json:"typeConstant" sql:"NOT NULL;TYPE:VARCHAR(100);"`
// Creator of the channel message
AccountId int64 `json:"accountId,string" sql:"NOT NULL"`
// in which channel this message is created
InitialChannelId int64 `json:"initialChannelId,string" sql:"NOT NULL"`
// holds troll, unsafe, etc
MetaBits MetaBits `json:"metaBits"`
// Creation date of the message
CreatedAt time.Time `json:"createdAt" sql:"DEFAULT:CURRENT_TIMESTAMP"`
// Modification date of the message
UpdatedAt time.Time `json:"updatedAt" sql:"DEFAULT:CURRENT_TIMESTAMP"`
// Deletion date of the channel message
DeletedAt time.Time `json:"deletedAt"`
// Extra data storage
Payload gorm.Hstore `json:"payload"`
// is required to identify to request in client side
ClientRequestId string `json:"clientRequestId,omitempty" sql:"-"`
}
const (
ChannelMessage_TYPE_POST = "post"
ChannelMessage_TYPE_REPLY = "reply"
ChannelMessage_TYPE_JOIN = "join"
ChannelMessage_TYPE_LEAVE = "leave"
ChannelMessage_TYPE_PRIVATE_MESSAGE = "privatemessage"
ChannelMessage_TYPE_BOT = "bot"
ChannelMessage_TYPE_SYSTEM = "system"
ChannelMessagePayloadKeyLocation = "location"
ChannelMessagePayloadKeyIntegration = "channelIntegrationId"
)
func (c *ChannelMessage) Location() *string {
return c.GetPayload(ChannelMessagePayloadKeyLocation)
}
func (c *ChannelMessage) MarkIfExempt() error {
isExempt, err := c.isExempt()
if err != nil {
return err
}
if isExempt {
c.MetaBits.Mark(Troll)
}
return nil
}
func (c *ChannelMessage) isExempt() (bool, error) {
if c.MetaBits.Is(Troll) {
return true, nil
}
accountId, err := c.getAccountId()
if err != nil {
return false, err
}
account, err := ResetAccountCache(accountId)
if err != nil {
return false, err
}
if account == nil {
return false, fmt.Errorf("account is nil, accountId:%d", c.AccountId)
}
if account.IsTroll {
return true, nil
}
return false, nil
}
// Tests are done
func (c *ChannelMessage) getAccountId() (int64, error) {
if c.AccountId != 0 {
return c.AccountId, nil
}
if c.Id == 0 {
return 0, fmt.Errorf("couldnt find accountId from content %+v", c)
}
cm := NewChannelMessage()
if err := cm.ById(c.Id); err != nil {
return 0, err
}
return cm.AccountId, nil
}
// Tests are done
func bodyLenCheck(body string) error {
if len(body) < config.MustGet().Limits.MessageBodyMinLen {
return fmt.Errorf("message body length should be greater than %d, yours is %d ", config.MustGet().Limits.MessageBodyMinLen, len(body))
}
return nil
}
type messageResponseStruct struct {
Index int
Message *ChannelMessageContainer
}
// TODO - remove this function
func (c *ChannelMessage) BuildMessages(query *request.Query, messages []ChannelMessage) ([]*ChannelMessageContainer, error) {
containers := make([]*ChannelMessageContainer, len(messages))
if len(containers) == 0 {
return containers, nil
}
var onMessage = make(chan *messageResponseStruct, len(messages))
var onError = make(chan error, 1)
var wg sync.WaitGroup
for i, message := range messages {
wg.Add(1)
go func(i int, message ChannelMessage) {
defer wg.Done()
d := NewChannelMessage()
*d = message
data, err := d.BuildMessage(query)
if err != nil {
onError <- err
return
}
onMessage <- &messageResponseStruct{Index: i, Message: data}
}(i, message)
}
wg.Wait()
for i := 1; i <= len(messages); i++ {
select {
case messageResp := <-onMessage:
containers[messageResp.Index] = messageResp.Message
case err := <-onError:
return containers, err
}
}
return containers, nil
}
// TODO - remove this function
func (c *ChannelMessage) BuildMessage(query *request.Query) (*ChannelMessageContainer, error) {
cmc := NewChannelMessageContainer()
if err := cmc.Fetch(c.Id, query); err != nil {
return nil, err
}
if cmc.Message == nil {
return cmc, nil
}
var err error
cmc.Message, err = cmc.Message.PopulatePayload()
if err != nil {
return nil, err
}
// return cmc, cmc.AddIsFollowed(query).AddIsInteracted(query).Err
return cmc, cmc.AddIsInteracted(query).Err
}
func (c *ChannelMessage) CheckIsMessageFollowed(query *request.Query) (bool, error) {
if query.AccountId == 0 {
return false, nil
}
channel := NewChannel()
if err := channel.FetchPinnedActivityChannel(query.AccountId, query.GroupName); err != nil {
if err == bongo.RecordNotFound {
return false, nil
}
return false, err
}
cml := NewChannelMessageList()
q := &bongo.Query{
Selector: map[string]interface{}{
"channel_id": channel.Id,
"message_id": c.Id,
},
}
if err := cml.One(q); err != nil {
if err == bongo.RecordNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// Tests are done.
func (c *ChannelMessage) BuildEmptyMessageContainer() (*ChannelMessageContainer, error) {
if c.Id == 0 {
return nil, ErrChannelMessageIdIsNotSet
}
container := NewChannelMessageContainer()
container.Message = c
if c.AccountId == 0 {
return container, nil
}
acc, err := Cache.Account.ById(c.AccountId)
if err != nil {
return nil, err
}
container.AccountOldId = acc.OldId
return container, nil
}
func generateMessageListQuery(q *request.Query) *bongo.Query {
messageType := q.Type
if messageType == "" {
messageType = ChannelMessage_TYPE_POST
}
query := &bongo.Query{
Selector: map[string]interface{}{
"type_constant": messageType,
},
Pagination: *bongo.NewPagination(q.Limit, q.Skip),
}
if q.GroupChannelId != 0 {
query.Selector["initial_channel_id"] = q.GroupChannelId
}
if q.AccountId != 0 {
query.Selector["account_id"] = q.AccountId
}
query.AddScope(ExcludeFields(q.Exclude))
query.AddScope(StartFrom(q.From))
query.AddScope(TillTo(q.To))
return query
}
func (c *ChannelMessage) FetchMessagesByChannelId(channelId int64, q *request.Query) ([]ChannelMessage, error) {
q.GroupChannelId = channelId
query := generateMessageListQuery(q)
query.Sort = map[string]string{
"created_at": "DESC",
}
var messages []ChannelMessage
if err := c.Some(&messages, query); err != nil {
return nil, err
}
if messages == nil {
return make([]ChannelMessage, 0), nil
}
return messages, nil
}
func (c *ChannelMessage) GetMentionedUsernames() []string {
flattened := make([]string, 0)
res := mentionRegex.FindAllStringSubmatch(c.Body, -1)
if len(res) == 0 {
return flattened
}
participants := map[string]struct{}{}
// remove duplicate mentions
for _, ele := range res {
participants[ele[1]] = struct{}{}
}
for participant := range participants {
flattened = append(flattened, participant)
}
return flattened
}
// FetchTotalMessageCount fetch the count of all messages in the channel
func (c *ChannelMessage) FetchTotalMessageCount(q *request.Query) (int, error) {
query := generateMessageListQuery(q)
query.AddScope(RemoveTrollContent(
c, q.ShowExempt,
))
return c.CountWithQuery(query)
}
// FetchMessageIds fetch id of the messages in the channel
// sorts the messages by descending order
func (c *ChannelMessage) FetchMessageIds(q *request.Query) ([]int64, error) {
query := &bongo.Query{
Selector: map[string]interface{}{
"account_id": q.AccountId,
"type_constant": q.Type,
},
Pluck: "id",
Pagination: *bongo.NewPagination(q.Limit, q.Skip),
Sort: map[string]string{
"created_at": "DESC",
},
}
query.AddScope(RemoveTrollContent(c, q.ShowExempt))
var messageIds []int64
if err := c.Some(&messageIds, query); err != nil {
return nil, err
}
if messageIds == nil {
return make([]int64, 0), nil
}
return messageIds, nil
}
// BySlug fetchs channel message by its slug
// checks if message is in the channel or not
func (c *ChannelMessage) BySlug(query *request.Query) error {
if query.Slug == "" {
return ErrSlugIsNotSet
}
// fetch message itself
q := &bongo.Query{
Selector: map[string]interface{}{
"slug": query.Slug,
},
}
q.AddScope(RemoveTrollContent(
c, query.ShowExempt,
))
if err := c.One(q); err != nil {
return err
}
query.Type = Channel_TYPE_GROUP
res, err := c.isInChannel(query, "public")
if err != nil {
return err
}
if res {
return nil
}
query.Type = Channel_TYPE_ANNOUNCEMENT
res, err = c.isInChannel(query, "changelog")
if err != nil {
return err
}
if !res {
return bongo.RecordNotFound
}
return nil
}
func (c *ChannelMessage) isInChannel(query *request.Query, channelName string) (bool, error) {
if c.Id == 0 {
return false, ErrChannelMessageIdIsNotSet
}
// fetch channel by group name
query.Name = query.GroupName
if query.GroupName == "koding" {
query.Name = channelName
}
ch := NewChannel()
channel, err := ch.ByName(query)
if err != nil {
return false, err
}
if channel.Id == 0 {
return false, ErrChannelIsNotSet
}
// check if message is in the channel
cml := NewChannelMessageList()
return cml.IsInChannel(c.Id, channel.Id)
}
// DeleteMessageDependencies deletes all records from the database that are
// dependencies of a given message. This includes interactions, optionally
// replies, and channel message lists.
func (c *ChannelMessage) DeleteMessageAndDependencies(deleteReplies bool) error {
// fetch interactions
i := NewInteraction()
i.MessageId = c.Id
interactions, err := i.FetchAll("like")
if err != nil {
return err
}
// delete interactions
for _, interaction := range interactions {
err := interaction.Delete()
if err != nil {
return err
}
}
if deleteReplies {
if err := c.DeleteReplies(); err != nil {
return err
}
}
// delete any associated channel message lists
if err = c.DeleteChannelMessageLists(); err != nil {
return err
}
err = NewMessageReply().DeleteByOrQuery(c.Id)
if err != nil {
return err
}
// delete channel message itself
return c.Delete()
}
// AddReply adds the reply message to db ,
// according to message id
func (c *ChannelMessage) AddReply(reply *ChannelMessage) (*MessageReply, error) {
if c.Id == 0 {
return nil, ErrChannelMessageIdIsNotSet
}
mr := NewMessageReply()
mr.MessageId = c.Id
mr.ReplyId = reply.Id
mr.CreatedAt = reply.CreatedAt
if err := mr.Create(); err != nil {
return nil, err
}
return mr, nil
}
// DeleteReplies deletes all the replies of a given ChannelMessage, one level deep
func (c *ChannelMessage) DeleteReplies() error {
mr := NewMessageReply()
mr.MessageId = c.Id
// list returns ChannelMessage
messageReplies, err := mr.ListAll()
if err != nil {
return err
}
// delete message replies
for _, replyMessage := range messageReplies {
err := replyMessage.DeleteMessageAndDependencies(false)
if err != nil {
return err
}
}
return nil
}
func (c *ChannelMessage) GetChannelMessageLists() ([]ChannelMessageList, error) {
var listings []ChannelMessageList
q := &bongo.Query{
Selector: map[string]interface{}{
"message_id": c.Id,
},
}
if err := NewChannelMessageList().Some(&listings, q); err != nil {
return nil, err
}
return listings, nil
}
func (c *ChannelMessage) DeleteChannelMessageLists() error {
listings, err := c.GetChannelMessageLists()
if err != nil {
return err
}
for _, listing := range listings {
if err := listing.Delete(); err != nil {
return err
}
}
return nil
}
// FetchByIds fetchs given ids from database, it doesnt add any meta bits
// properties into query
func (c *ChannelMessage) FetchByIds(ids []int64) ([]ChannelMessage, error) {
var messages []ChannelMessage
if len(ids) == 0 {
return messages, nil
}
if err := bongo.B.FetchByIds(c, &messages, ids); err != nil {
return nil, err
}
return messages, nil
}
func (c *ChannelMessage) P | ) (*ChannelMessage, error) {
cm, err := c.PopulateAddedBy()
if err != nil {
return nil, err
}
i, err := cm.PopulateIntegration()
if err != nil {
return nil, err
}
return i.PopulateInitialParticipants()
}
func (c *ChannelMessage) PopulateAddedBy() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
addedByData, ok := c.Payload["addedBy"]
if !ok {
return c, nil
}
addedBy, err := strconv.ParseInt(*addedByData, 10, 64)
if err != nil {
return c, err
}
a, err := Cache.Account.ById(addedBy)
if err != nil {
return c, err
}
*addedByData = a.Nick
newCm.Payload["addedBy"] = addedByData
return newCm, nil
}
func (c *ChannelMessage) PopulateIntegration() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
channelIntegration := c.GetPayload(ChannelMessagePayloadKeyIntegration)
if channelIntegration != nil && *channelIntegration != "" {
id, err := strconv.ParseInt(*channelIntegration, 10, 64)
if err != nil {
return c, err
}
i, err := Cache.Integration.ByChannelIntegrationId(id)
if err != nil {
return c, err
}
newCm.SetPayload("integrationTitle", i.Title)
newCm.SetPayload("integrationIconPath", i.IconPath)
return newCm, nil
}
return c, nil
}
func (c *ChannelMessage) PopulateInitialParticipants() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
initialParticipants, ok := c.Payload["initialParticipants"]
if !ok {
return c, nil
}
var participants []string
err := json.Unmarshal([]byte(*initialParticipants), &participants)
if err != nil {
return c, err
}
accountIds := make([]string, len(participants))
for i, participant := range participants {
accountId, err := strconv.ParseInt(participant, 10, 64)
if err != nil {
return c, err
}
a, err := Cache.Account.ById(accountId)
if err != nil {
return c, err
}
accountIds[i] = a.Nick
}
participantNicks, err := json.Marshal(accountIds)
if err != nil {
return c, err
}
pns := string(participantNicks)
newCm.Payload["initialParticipants"] = &pns
return newCm, nil
}
// FetchParentChannel fetches the parent channel of the message. When
// initial channel is topic, it fetches the group channel, otherwise
// it just fetches the initial channel as parent.
func (cm *ChannelMessage) FetchParentChannel() (*Channel, error) {
c, err := Cache.Channel.ById(cm.InitialChannelId)
if err != nil {
return nil, err
}
if c.TypeConstant != Channel_TYPE_TOPIC {
return c, nil
}
ch, err := Cache.Channel.ByGroupName(c.GroupName)
if err != nil {
return nil, err
}
return ch, nil
}
func (cm *ChannelMessage) SetPayload(key string, value string) {
if cm.Payload == nil {
cm.Payload = gorm.Hstore{}
}
cm.Payload[key] = &value
}
func (cm *ChannelMessage) GetPayload(key string) *string {
if cm.Payload == nil {
return nil
}
val, ok := cm.Payload[key]
if !ok {
return nil
}
return val
}
// SearchIndexable decides if message is indexable on search engine or not
func (c *ChannelMessage) SearchIndexable() bool {
return IsIn(c.TypeConstant,
ChannelMessage_TYPE_POST,
ChannelMessage_TYPE_REPLY,
ChannelMessage_TYPE_PRIVATE_MESSAGE,
)
}
| opulatePayload( | identifier_name |
channelmessage.go | package models
import (
"encoding/json"
"fmt"
"socialapi/config"
"socialapi/request"
"strconv"
"sync"
"time"
ve "github.com/VerbalExpressions/GoVerbalExpressions"
"github.com/jinzhu/gorm"
"github.com/koding/bongo"
)
var mentionRegex = ve.New().
Find("@").
BeginCapture().
Word().
Maybe("-").
Maybe(".").
Word().
EndCapture().
Regex()
type ChannelMessage struct {
// unique identifier of the channel message
Id int64 `json:"id,string"`
// Token holds the uuid for interoperability with the bongo-client
Token string `json:"token"`
// Body of the mesage
Body string `json:"body"`
// Generated Slug for body
Slug string `json:"slug" sql:"NOT NULL;TYPE:VARCHAR(100);"`
// type of the m essage
TypeConstant string `json:"typeConstant" sql:"NOT NULL;TYPE:VARCHAR(100);"`
// Creator of the channel message
AccountId int64 `json:"accountId,string" sql:"NOT NULL"`
// in which channel this message is created
InitialChannelId int64 `json:"initialChannelId,string" sql:"NOT NULL"`
// holds troll, unsafe, etc
MetaBits MetaBits `json:"metaBits"`
// Creation date of the message
CreatedAt time.Time `json:"createdAt" sql:"DEFAULT:CURRENT_TIMESTAMP"`
// Modification date of the message
UpdatedAt time.Time `json:"updatedAt" sql:"DEFAULT:CURRENT_TIMESTAMP"`
// Deletion date of the channel message
DeletedAt time.Time `json:"deletedAt"`
// Extra data storage
Payload gorm.Hstore `json:"payload"`
// is required to identify to request in client side
ClientRequestId string `json:"clientRequestId,omitempty" sql:"-"`
}
const (
ChannelMessage_TYPE_POST = "post"
ChannelMessage_TYPE_REPLY = "reply"
ChannelMessage_TYPE_JOIN = "join"
ChannelMessage_TYPE_LEAVE = "leave"
ChannelMessage_TYPE_PRIVATE_MESSAGE = "privatemessage"
ChannelMessage_TYPE_BOT = "bot"
ChannelMessage_TYPE_SYSTEM = "system"
ChannelMessagePayloadKeyLocation = "location"
ChannelMessagePayloadKeyIntegration = "channelIntegrationId"
)
func (c *ChannelMessage) Location() *string {
return c.GetPayload(ChannelMessagePayloadKeyLocation)
}
func (c *ChannelMessage) MarkIfExempt() error {
isExempt, err := c.isExempt()
if err != nil {
return err
}
if isExempt {
c.MetaBits.Mark(Troll)
}
return nil
}
func (c *ChannelMessage) isExempt() (bool, error) {
if c.MetaBits.Is(Troll) {
return true, nil
}
accountId, err := c.getAccountId()
if err != nil {
return false, err
}
account, err := ResetAccountCache(accountId)
if err != nil {
return false, err
}
if account == nil {
return false, fmt.Errorf("account is nil, accountId:%d", c.AccountId)
}
if account.IsTroll {
return true, nil
}
return false, nil
}
// Tests are done
func (c *ChannelMessage) getAccountId() (int64, error) {
if c.AccountId != 0 {
return c.AccountId, nil
}
if c.Id == 0 {
return 0, fmt.Errorf("couldnt find accountId from content %+v", c)
}
cm := NewChannelMessage()
if err := cm.ById(c.Id); err != nil {
return 0, err
}
return cm.AccountId, nil
}
// Tests are done
func bodyLenCheck(body string) error {
if len(body) < config.MustGet().Limits.MessageBodyMinLen {
return fmt.Errorf("message body length should be greater than %d, yours is %d ", config.MustGet().Limits.MessageBodyMinLen, len(body))
}
return nil
}
type messageResponseStruct struct {
Index int
Message *ChannelMessageContainer
}
// TODO - remove this function
func (c *ChannelMessage) BuildMessages(query *request.Query, messages []ChannelMessage) ([]*ChannelMessageContainer, error) {
containers := make([]*ChannelMessageContainer, len(messages))
if len(containers) == 0 {
return containers, nil
}
var onMessage = make(chan *messageResponseStruct, len(messages))
var onError = make(chan error, 1)
var wg sync.WaitGroup
for i, message := range messages {
wg.Add(1)
go func(i int, message ChannelMessage) {
defer wg.Done()
d := NewChannelMessage()
*d = message
data, err := d.BuildMessage(query)
if err != nil {
onError <- err
return
}
onMessage <- &messageResponseStruct{Index: i, Message: data}
}(i, message)
}
wg.Wait()
for i := 1; i <= len(messages); i++ {
select {
case messageResp := <-onMessage:
containers[messageResp.Index] = messageResp.Message
case err := <-onError:
return containers, err
}
}
return containers, nil
}
// TODO - remove this function
func (c *ChannelMessage) BuildMessage(query *request.Query) (*ChannelMessageContainer, error) {
cmc := NewChannelMessageContainer()
if err := cmc.Fetch(c.Id, query); err != nil {
return nil, err
}
if cmc.Message == nil {
return cmc, nil
}
var err error
cmc.Message, err = cmc.Message.PopulatePayload()
if err != nil {
return nil, err
}
// return cmc, cmc.AddIsFollowed(query).AddIsInteracted(query).Err
return cmc, cmc.AddIsInteracted(query).Err
}
func (c *ChannelMessage) CheckIsMessageFollowed(query *request.Query) (bool, error) {
if query.AccountId == 0 {
return false, nil
}
channel := NewChannel()
if err := channel.FetchPinnedActivityChannel(query.AccountId, query.GroupName); err != nil {
if err == bongo.RecordNotFound {
return false, nil
}
return false, err
}
cml := NewChannelMessageList()
q := &bongo.Query{
Selector: map[string]interface{}{
"channel_id": channel.Id,
"message_id": c.Id,
},
}
if err := cml.One(q); err != nil {
if err == bongo.RecordNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// Tests are done.
func (c *ChannelMessage) BuildEmptyMessageContainer() (*ChannelMessageContainer, error) {
if c.Id == 0 {
return nil, ErrChannelMessageIdIsNotSet
}
container := NewChannelMessageContainer()
container.Message = c
if c.AccountId == 0 {
return container, nil
}
acc, err := Cache.Account.ById(c.AccountId)
if err != nil {
return nil, err
}
container.AccountOldId = acc.OldId
return container, nil
}
func generateMessageListQuery(q *request.Query) *bongo.Query {
messageType := q.Type
if messageType == "" {
messageType = ChannelMessage_TYPE_POST
}
query := &bongo.Query{
Selector: map[string]interface{}{
"type_constant": messageType,
},
Pagination: *bongo.NewPagination(q.Limit, q.Skip),
}
if q.GroupChannelId != 0 {
query.Selector["initial_channel_id"] = q.GroupChannelId
}
if q.AccountId != 0 {
query.Selector["account_id"] = q.AccountId
}
query.AddScope(ExcludeFields(q.Exclude))
query.AddScope(StartFrom(q.From))
query.AddScope(TillTo(q.To))
return query
}
func (c *ChannelMessage) FetchMessagesByChannelId(channelId int64, q *request.Query) ([]ChannelMessage, error) {
q.GroupChannelId = channelId
query := generateMessageListQuery(q)
query.Sort = map[string]string{
"created_at": "DESC",
}
var messages []ChannelMessage
if err := c.Some(&messages, query); err != nil {
return nil, err
}
if messages == nil {
return make([]ChannelMessage, 0), nil
}
return messages, nil
}
func (c *ChannelMessage) GetMentionedUsernames() []string {
flattened := make([]string, 0)
res := mentionRegex.FindAllStringSubmatch(c.Body, -1)
if len(res) == 0 {
return flattened
}
participants := map[string]struct{}{}
// remove duplicate mentions
for _, ele := range res {
participants[ele[1]] = struct{}{}
}
for participant := range participants {
flattened = append(flattened, participant)
}
return flattened
}
// FetchTotalMessageCount fetch the count of all messages in the channel
func (c *ChannelMessage) FetchTotalMessageCount(q *request.Query) (int, error) {
query := generateMessageListQuery(q)
query.AddScope(RemoveTrollContent(
c, q.ShowExempt,
))
return c.CountWithQuery(query)
}
// FetchMessageIds fetch id of the messages in the channel
// sorts the messages by descending order
func (c *ChannelMessage) FetchMessageIds(q *request.Query) ([]int64, error) {
query := &bongo.Query{
Selector: map[string]interface{}{
"account_id": q.AccountId,
"type_constant": q.Type,
},
Pluck: "id",
Pagination: *bongo.NewPagination(q.Limit, q.Skip),
Sort: map[string]string{
"created_at": "DESC",
},
}
query.AddScope(RemoveTrollContent(c, q.ShowExempt))
var messageIds []int64
if err := c.Some(&messageIds, query); err != nil {
return nil, err
}
if messageIds == nil {
return make([]int64, 0), nil
}
return messageIds, nil
}
// BySlug fetchs channel message by its slug
// checks if message is in the channel or not
func (c *ChannelMessage) BySlug(query *request.Query) error {
if query.Slug == "" {
return ErrSlugIsNotSet
}
// fetch message itself
q := &bongo.Query{
Selector: map[string]interface{}{
"slug": query.Slug,
},
}
q.AddScope(RemoveTrollContent(
c, query.ShowExempt,
))
if err := c.One(q); err != nil {
return err
}
query.Type = Channel_TYPE_GROUP
res, err := c.isInChannel(query, "public")
if err != nil {
return err
}
if res {
return nil
}
query.Type = Channel_TYPE_ANNOUNCEMENT
res, err = c.isInChannel(query, "changelog")
if err != nil {
return err
}
if !res {
return bongo.RecordNotFound
}
return nil
}
func (c *ChannelMessage) isInChannel(query *request.Query, channelName string) (bool, error) {
if c.Id == 0 {
return false, ErrChannelMessageIdIsNotSet
}
// fetch channel by group name
query.Name = query.GroupName
if query.GroupName == "koding" {
query.Name = channelName
}
ch := NewChannel()
channel, err := ch.ByName(query)
if err != nil {
return false, err
}
if channel.Id == 0 {
return false, ErrChannelIsNotSet
}
// check if message is in the channel
cml := NewChannelMessageList()
return cml.IsInChannel(c.Id, channel.Id)
}
// DeleteMessageDependencies deletes all records from the database that are
// dependencies of a given message. This includes interactions, optionally
// replies, and channel message lists.
func (c *ChannelMessage) DeleteMessageAndDependencies(deleteReplies bool) error {
// fetch interactions
i := NewInteraction()
i.MessageId = c.Id
interactions, err := i.FetchAll("like")
if err != nil {
return err
}
// delete interactions
for _, interaction := range interactions {
err := interaction.Delete()
if err != nil {
return err
}
}
if deleteReplies {
if err := c.DeleteReplies(); err != nil {
return err
}
}
// delete any associated channel message lists
if err = c.DeleteChannelMessageLists(); err != nil {
return err
}
err = NewMessageReply().DeleteByOrQuery(c.Id)
if err != nil {
return err
}
// delete channel message itself
return c.Delete()
}
// AddReply adds the reply message to db ,
// according to message id
func (c *ChannelMessage) AddReply(reply *ChannelMessage) (*MessageReply, error) {
if c.Id == 0 {
return nil, ErrChannelMessageIdIsNotSet
}
mr := NewMessageReply()
mr.MessageId = c.Id
mr.ReplyId = reply.Id
mr.CreatedAt = reply.CreatedAt
if err := mr.Create(); err != nil {
return nil, err
}
return mr, nil
}
// DeleteReplies deletes all the replies of a given ChannelMessage, one level deep
func (c *ChannelMessage) DeleteReplies() error {
mr := NewMessageReply()
mr.MessageId = c.Id
// list returns ChannelMessage
messageReplies, err := mr.ListAll()
if err != nil {
return err
}
// delete message replies
for _, replyMessage := range messageReplies {
err := replyMessage.DeleteMessageAndDependencies(false)
if err != nil {
return err
}
}
return nil
}
func (c *ChannelMessage) GetChannelMessageLists() ([]ChannelMessageList, error) {
var listings []ChannelMessageList
q := &bongo.Query{
Selector: map[string]interface{}{
"message_id": c.Id,
},
}
if err := NewChannelMessageList().Some(&listings, q); err != nil {
return nil, err
}
return listings, nil
}
func (c *ChannelMessage) DeleteChannelMessageLists() error {
listings, err := c.GetChannelMessageLists()
if err != nil {
return err
}
for _, listing := range listings {
if err := listing.Delete(); err != nil {
return err
}
}
return nil
}
// FetchByIds fetchs given ids from database, it doesnt add any meta bits
// properties into query
func (c *ChannelMessage) FetchByIds(ids []int64) ([]ChannelMessage, error) {
var messages []ChannelMessage
if len(ids) == 0 {
return messages, nil
}
if err := bongo.B.FetchByIds(c, &messages, ids); err != nil {
return nil, err
}
return messages, nil
}
func (c *ChannelMessage) PopulatePayload() (*ChannelMessage, error) {
cm, err := c.PopulateAddedBy()
if err != nil {
return nil, err
}
i, err := cm.PopulateIntegration()
if err != nil {
return nil, err
}
return i.PopulateInitialParticipants()
}
func (c *ChannelMessage) PopulateAddedBy() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
addedByData, ok := c.Payload["addedBy"]
if !ok {
return c, nil
}
addedBy, err := strconv.ParseInt(*addedByData, 10, 64)
if err != nil {
return c, err
}
a, err := Cache.Account.ById(addedBy)
if err != nil {
return c, err
}
*addedByData = a.Nick
newCm.Payload["addedBy"] = addedByData
return newCm, nil
}
func (c *ChannelMessage) PopulateIntegration() (*ChannelMessage, error) { |
func (c *ChannelMessage) PopulateInitialParticipants() (*ChannelMessage, error) {
newCm := NewChannelMessage()
*newCm = *c
initialParticipants, ok := c.Payload["initialParticipants"]
if !ok {
return c, nil
}
var participants []string
err := json.Unmarshal([]byte(*initialParticipants), &participants)
if err != nil {
return c, err
}
accountIds := make([]string, len(participants))
for i, participant := range participants {
accountId, err := strconv.ParseInt(participant, 10, 64)
if err != nil {
return c, err
}
a, err := Cache.Account.ById(accountId)
if err != nil {
return c, err
}
accountIds[i] = a.Nick
}
participantNicks, err := json.Marshal(accountIds)
if err != nil {
return c, err
}
pns := string(participantNicks)
newCm.Payload["initialParticipants"] = &pns
return newCm, nil
}
// FetchParentChannel fetches the parent channel of the message. When
// initial channel is topic, it fetches the group channel, otherwise
// it just fetches the initial channel as parent.
func (cm *ChannelMessage) FetchParentChannel() (*Channel, error) {
c, err := Cache.Channel.ById(cm.InitialChannelId)
if err != nil {
return nil, err
}
if c.TypeConstant != Channel_TYPE_TOPIC {
return c, nil
}
ch, err := Cache.Channel.ByGroupName(c.GroupName)
if err != nil {
return nil, err
}
return ch, nil
}
func (cm *ChannelMessage) SetPayload(key string, value string) {
if cm.Payload == nil {
cm.Payload = gorm.Hstore{}
}
cm.Payload[key] = &value
}
func (cm *ChannelMessage) GetPayload(key string) *string {
if cm.Payload == nil {
return nil
}
val, ok := cm.Payload[key]
if !ok {
return nil
}
return val
}
// SearchIndexable decides if message is indexable on search engine or not
func (c *ChannelMessage) SearchIndexable() bool {
return IsIn(c.TypeConstant,
ChannelMessage_TYPE_POST,
ChannelMessage_TYPE_REPLY,
ChannelMessage_TYPE_PRIVATE_MESSAGE,
)
}
|
newCm := NewChannelMessage()
*newCm = *c
channelIntegration := c.GetPayload(ChannelMessagePayloadKeyIntegration)
if channelIntegration != nil && *channelIntegration != "" {
id, err := strconv.ParseInt(*channelIntegration, 10, 64)
if err != nil {
return c, err
}
i, err := Cache.Integration.ByChannelIntegrationId(id)
if err != nil {
return c, err
}
newCm.SetPayload("integrationTitle", i.Title)
newCm.SetPayload("integrationIconPath", i.IconPath)
return newCm, nil
}
return c, nil
}
| identifier_body |
softmax.rs | use alumina_core::{
base_ops::{OpInstance, OpSpecification},
errors::{ExecutionError, GradientError, OpBuildError, ShapePropError},
exec::ExecutionContext,
grad::GradientContext,
graph::{Graph, Node, NodeID},
shape_prop::ShapePropContext,
util::wrap_dim,
};
use indexmap::{indexset, IndexMap, IndexSet};
use ndarray::{Axis, Dimension, Zip};
use std::any::Any;
/// Calculates the combined Softmax norm of the input nodes.
///
/// Axis determines the grouping direction.
pub fn softmax<I>(logits: I, axis: isize) -> Result<Node, OpBuildError>
where
I: Into<Node>,
{
let logits = logits.into();
let axis = wrap_dim(axis, logits.shape().len());
let output = logits.graph().new_node(logits.shape());
Softmax::new(logits, output.clone(), axis).build()?;
Ok(output)
}
#[must_use = "Op builder not used, call .build()"]
#[derive(Clone, Debug)]
pub struct Softmax {
logits: Node,
output: Node,
axis: usize,
}
impl Softmax {
pub fn new<I, O>(logits: I, output: O, axis: usize) -> Self
where
I: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let output = output.into();
assert!(
logits.shape().len() == output.shape().len(),
"output and logits must have the same shape"
);
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
Softmax { logits, output, axis }
}
}
impl OpSpecification for Softmax {
type InstanceType = SoftmaxInstance;
fn type_name(&self) -> &'static str {
"Softmax"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.output.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output: mapping.get(&self.output).unwrap_or(&self.output).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxInstance {
logits: self.logits.id(),
output: self.output.id(),
axis: self.axis,
})
}
}
/// Softmax OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxInstance {
logits: NodeID,
output: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxInstance {
fn type_name(&self) -> &'static str {
"Softmax"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(Softmax {
logits: graph.node_from_id(self.logits),
output: graph.node_from_id(self.output),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.output]
}
fn gradient(&self, ctx: &mut GradientContext) -> Result<(), GradientError> {
SoftmaxBack::new(
ctx.node(&self.logits),
ctx.grad_of(&self.logits),
ctx.grad_of(&self.output),
self.axis,
)
.build()?;
Ok(())
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
ctx.merge_output_shape(&self.output, &ctx.input_shape(&self.logits).slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_output(&self.output).lanes_mut(Axis(self.axis)))
.par_for_each(|logits, outputs| {
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0.0, |sum, &v| sum + (v - max).exp());
Zip::from(logits).and(outputs).for_each(|logit, output| {
*output += (logit - max).exp() / exp_sum;
});
});
Ok(())
}
}
/// Optimised Backward pass for Softmax Op.
///
/// Input/Output naming convention matches Softmax Input/Outputs, i.e. output_grad is an input to this Op.
///
/// All inputs and grads must be unique.
#[must_use = "Op builder not used, call .build()"]
#[derive(Clone, Debug)]
pub struct SoftmaxBack {
logits: Node,
logits_grad: Node,
output_grad: Node,
axis: usize,
}
impl SoftmaxBack {
pub fn new<I1, I2, O>(logits: I1, logits_grad: O, output_grad: I2, axis: usize) -> Self
where
I1: Into<Node>,
I2: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let logits_grad = logits_grad.into();
let output_grad = output_grad.into();
assert!(logits.shape().len() == logits_grad.shape().len());
assert!(logits.shape().len() == output_grad.shape().len());
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
SoftmaxBack {
logits,
logits_grad,
output_grad,
axis,
}
}
}
impl OpSpecification for SoftmaxBack {
type InstanceType = SoftmaxBackInstance;
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone(), self.output_grad.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.logits_grad.clone()]
}
fn | (&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output_grad: mapping.get(&self.output_grad).unwrap_or(&self.output_grad).clone(),
logits_grad: mapping.get(&self.logits_grad).unwrap_or(&self.logits_grad).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxBackInstance {
logits: self.logits.id(),
logits_grad: self.logits_grad.id(),
output_grad: self.output_grad.id(),
axis: self.axis,
})
}
}
/// SoftmaxBack OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxBackInstance {
logits: NodeID,
logits_grad: NodeID,
output_grad: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxBackInstance {
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(SoftmaxBack {
logits: graph.node_from_id(self.logits),
logits_grad: graph.node_from_id(self.logits_grad),
output_grad: graph.node_from_id(self.output_grad),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits, self.output_grad]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.logits_grad]
}
fn gradient(&self, _ctx: &mut GradientContext) -> Result<(), GradientError> {
Err(GradientError::Unimplemented)
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
let logits_shape = ctx.input_shape(&self.logits).clone();
let output_grad_shape = ctx.input_shape(&self.output_grad).clone();
if logits_shape.ndim() == 0 {
return Err(format!(
"Softmax requires logit and label shapes to have at least one axis: {:?}",
logits_shape.slice(),
)
.into());
}
if output_grad_shape != logits_shape {
return Err(format!("SoftmaxBack requires the output grad to have the shape of the logits: logits:{:?} output_grad:{:?}, axis: {}", logits_shape.slice(), output_grad_shape.slice(), self.axis).into());
}
ctx.merge_output_shape(&self.logits_grad, &logits_shape.slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_output(&self.logits_grad).lanes_mut(Axis(self.axis)))
.and(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_input(&self.output_grad).lanes(Axis(self.axis)))
.par_for_each(|mut logits_grad, logits, output_grad| {
let len = logits.len();
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0., |sum, &v| sum + (v - max).exp());
// let exp_sum_ln = exp_sum.ln();
for (i, grad) in output_grad.iter().enumerate() {
if grad.abs() > 0.0 {
// hopefully output gradients are sparse, eg from cross entropy loss
let a = logits[i] - max;
// let x = (a - exp_sum_ln).exp();
let x = a.exp() / exp_sum;
let g_x = grad * x;
let mut other_sum = 0.0;
for j in 0..i {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b - exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
// logits_grad[i] += g_x - g_x * x;
// inpd_n[i] += - inp_n.iter().enumerate().fold(0., |sum, (ind, v)| sum + if ind != i
// {(v-max).exp()} else {0.0})*(mult/exp_sum);
for j in i + 1..len {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b- exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
logits_grad[i] += g_x * other_sum;
}
}
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::softmax;
use crate::elementwise::mul::mul;
use alumina_core::graph::Node;
use alumina_test::{grad_numeric_test::GradNumericTest, relatively_close::RelClose};
use indexmap::indexset;
use ndarray::arr2;
#[test]
fn forward_test() {
let logits = Node::new(&[4, 4])
.set_value(arr2(&[
[0.2, 0.4, 0.6, 0.8],
[1.2, 1.4, 1.6, 1.8],
[2.2, 2.4, 2.6, 2.8],
[3.2, 3.4, 3.6, 3.8],
]))
.set_name("logits");
let hor_groups = softmax(&logits, -1).unwrap();
let vert_groups = softmax(&logits, 0).unwrap();
assert!(hor_groups
.calc()
.unwrap()
.all_relatively_close(&arr2(&[[0.180_657_18, 0.220_655_17, 0.269_508_84, 0.329_178_84]]), 1e-4));
assert!(vert_groups.calc().unwrap().all_relatively_close(
&arr2(&[[0.032_058_604], [0.087_144_32f32], [0.236_882_82], [0.643_914_3]]),
1e-4
));
}
#[test]
fn grad_numeric_rand_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let rand = Node::new(&[13, 33]).set_name("rand"); // multiply output by random amounts to prevent gradient cancellation
let output = mul(&softmax(&logits, -1).unwrap(), &rand).unwrap();
GradNumericTest::new(&output, &indexset![&logits, &rand])
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
#[test]
fn grad_numeric_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let output = softmax(&logits, -1).unwrap();
GradNumericTest::new(&output, &indexset![&logits])
.expect_zero(&logits, 20.0 * ::std::f32::EPSILON) // under a uniform output gradient the gradient of the logits should cancel out to zero
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
}
| clone_with_nodes_changed | identifier_name |
softmax.rs | use alumina_core::{
base_ops::{OpInstance, OpSpecification},
errors::{ExecutionError, GradientError, OpBuildError, ShapePropError},
exec::ExecutionContext,
grad::GradientContext,
graph::{Graph, Node, NodeID},
shape_prop::ShapePropContext,
util::wrap_dim,
};
use indexmap::{indexset, IndexMap, IndexSet};
use ndarray::{Axis, Dimension, Zip};
use std::any::Any;
/// Calculates the combined Softmax norm of the input nodes.
///
/// Axis determines the grouping direction.
pub fn softmax<I>(logits: I, axis: isize) -> Result<Node, OpBuildError>
where
I: Into<Node>,
{
let logits = logits.into();
let axis = wrap_dim(axis, logits.shape().len());
let output = logits.graph().new_node(logits.shape());
Softmax::new(logits, output.clone(), axis).build()?;
Ok(output)
}
#[must_use = "Op builder not used, call .build()"]
#[derive(Clone, Debug)]
pub struct Softmax {
logits: Node,
output: Node,
axis: usize,
}
impl Softmax {
pub fn new<I, O>(logits: I, output: O, axis: usize) -> Self
where
I: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let output = output.into();
assert!(
logits.shape().len() == output.shape().len(),
"output and logits must have the same shape"
);
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
Softmax { logits, output, axis }
}
}
impl OpSpecification for Softmax {
type InstanceType = SoftmaxInstance;
fn type_name(&self) -> &'static str {
"Softmax"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.output.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output: mapping.get(&self.output).unwrap_or(&self.output).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxInstance {
logits: self.logits.id(),
output: self.output.id(),
axis: self.axis,
})
}
}
/// Softmax OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxInstance {
logits: NodeID,
output: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxInstance {
fn type_name(&self) -> &'static str {
"Softmax"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(Softmax {
logits: graph.node_from_id(self.logits),
output: graph.node_from_id(self.output),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.output]
}
fn gradient(&self, ctx: &mut GradientContext) -> Result<(), GradientError> {
SoftmaxBack::new(
ctx.node(&self.logits),
ctx.grad_of(&self.logits),
ctx.grad_of(&self.output),
self.axis,
)
.build()?;
Ok(())
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
ctx.merge_output_shape(&self.output, &ctx.input_shape(&self.logits).slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_output(&self.output).lanes_mut(Axis(self.axis)))
.par_for_each(|logits, outputs| {
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0.0, |sum, &v| sum + (v - max).exp());
Zip::from(logits).and(outputs).for_each(|logit, output| {
*output += (logit - max).exp() / exp_sum;
});
});
Ok(())
}
}
/// Optimised Backward pass for Softmax Op.
///
/// Input/Output naming convention matches Softmax Input/Outputs, i.e. output_grad is an input to this Op.
///
/// All inputs and grads must be unique.
#[must_use = "Op builder not used, call .build()"]
#[derive(Clone, Debug)]
pub struct SoftmaxBack {
logits: Node,
logits_grad: Node,
output_grad: Node,
axis: usize,
}
impl SoftmaxBack {
pub fn new<I1, I2, O>(logits: I1, logits_grad: O, output_grad: I2, axis: usize) -> Self
where
I1: Into<Node>,
I2: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let logits_grad = logits_grad.into();
let output_grad = output_grad.into();
assert!(logits.shape().len() == logits_grad.shape().len());
assert!(logits.shape().len() == output_grad.shape().len());
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
SoftmaxBack {
logits,
logits_grad,
output_grad,
axis,
}
}
}
impl OpSpecification for SoftmaxBack {
type InstanceType = SoftmaxBackInstance;
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone(), self.output_grad.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.logits_grad.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output_grad: mapping.get(&self.output_grad).unwrap_or(&self.output_grad).clone(),
logits_grad: mapping.get(&self.logits_grad).unwrap_or(&self.logits_grad).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxBackInstance {
logits: self.logits.id(),
logits_grad: self.logits_grad.id(),
output_grad: self.output_grad.id(), | })
}
}
/// SoftmaxBack OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxBackInstance {
logits: NodeID,
logits_grad: NodeID,
output_grad: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxBackInstance {
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(SoftmaxBack {
logits: graph.node_from_id(self.logits),
logits_grad: graph.node_from_id(self.logits_grad),
output_grad: graph.node_from_id(self.output_grad),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits, self.output_grad]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.logits_grad]
}
fn gradient(&self, _ctx: &mut GradientContext) -> Result<(), GradientError> {
Err(GradientError::Unimplemented)
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
let logits_shape = ctx.input_shape(&self.logits).clone();
let output_grad_shape = ctx.input_shape(&self.output_grad).clone();
if logits_shape.ndim() == 0 {
return Err(format!(
"Softmax requires logit and label shapes to have at least one axis: {:?}",
logits_shape.slice(),
)
.into());
}
if output_grad_shape != logits_shape {
return Err(format!("SoftmaxBack requires the output grad to have the shape of the logits: logits:{:?} output_grad:{:?}, axis: {}", logits_shape.slice(), output_grad_shape.slice(), self.axis).into());
}
ctx.merge_output_shape(&self.logits_grad, &logits_shape.slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_output(&self.logits_grad).lanes_mut(Axis(self.axis)))
.and(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_input(&self.output_grad).lanes(Axis(self.axis)))
.par_for_each(|mut logits_grad, logits, output_grad| {
let len = logits.len();
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0., |sum, &v| sum + (v - max).exp());
// let exp_sum_ln = exp_sum.ln();
for (i, grad) in output_grad.iter().enumerate() {
if grad.abs() > 0.0 {
// hopefully output gradients are sparse, eg from cross entropy loss
let a = logits[i] - max;
// let x = (a - exp_sum_ln).exp();
let x = a.exp() / exp_sum;
let g_x = grad * x;
let mut other_sum = 0.0;
for j in 0..i {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b - exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
// logits_grad[i] += g_x - g_x * x;
// inpd_n[i] += - inp_n.iter().enumerate().fold(0., |sum, (ind, v)| sum + if ind != i
// {(v-max).exp()} else {0.0})*(mult/exp_sum);
for j in i + 1..len {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b- exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
logits_grad[i] += g_x * other_sum;
}
}
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::softmax;
use crate::elementwise::mul::mul;
use alumina_core::graph::Node;
use alumina_test::{grad_numeric_test::GradNumericTest, relatively_close::RelClose};
use indexmap::indexset;
use ndarray::arr2;
#[test]
fn forward_test() {
let logits = Node::new(&[4, 4])
.set_value(arr2(&[
[0.2, 0.4, 0.6, 0.8],
[1.2, 1.4, 1.6, 1.8],
[2.2, 2.4, 2.6, 2.8],
[3.2, 3.4, 3.6, 3.8],
]))
.set_name("logits");
let hor_groups = softmax(&logits, -1).unwrap();
let vert_groups = softmax(&logits, 0).unwrap();
assert!(hor_groups
.calc()
.unwrap()
.all_relatively_close(&arr2(&[[0.180_657_18, 0.220_655_17, 0.269_508_84, 0.329_178_84]]), 1e-4));
assert!(vert_groups.calc().unwrap().all_relatively_close(
&arr2(&[[0.032_058_604], [0.087_144_32f32], [0.236_882_82], [0.643_914_3]]),
1e-4
));
}
#[test]
fn grad_numeric_rand_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let rand = Node::new(&[13, 33]).set_name("rand"); // multiply output by random amounts to prevent gradient cancellation
let output = mul(&softmax(&logits, -1).unwrap(), &rand).unwrap();
GradNumericTest::new(&output, &indexset![&logits, &rand])
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
#[test]
fn grad_numeric_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let output = softmax(&logits, -1).unwrap();
GradNumericTest::new(&output, &indexset![&logits])
.expect_zero(&logits, 20.0 * ::std::f32::EPSILON) // under a uniform output gradient the gradient of the logits should cancel out to zero
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
} | axis: self.axis, | random_line_split |
softmax.rs | use alumina_core::{
base_ops::{OpInstance, OpSpecification},
errors::{ExecutionError, GradientError, OpBuildError, ShapePropError},
exec::ExecutionContext,
grad::GradientContext,
graph::{Graph, Node, NodeID},
shape_prop::ShapePropContext,
util::wrap_dim,
};
use indexmap::{indexset, IndexMap, IndexSet};
use ndarray::{Axis, Dimension, Zip};
use std::any::Any;
/// Calculates the combined Softmax norm of the input nodes.
///
/// Axis determines the grouping direction.
pub fn softmax<I>(logits: I, axis: isize) -> Result<Node, OpBuildError>
where
I: Into<Node>,
{
let logits = logits.into();
let axis = wrap_dim(axis, logits.shape().len());
let output = logits.graph().new_node(logits.shape());
Softmax::new(logits, output.clone(), axis).build()?;
Ok(output)
}
#[must_use = "Op builder not used, call .build()"]
#[derive(Clone, Debug)]
pub struct Softmax {
logits: Node,
output: Node,
axis: usize,
}
impl Softmax {
pub fn new<I, O>(logits: I, output: O, axis: usize) -> Self
where
I: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let output = output.into();
assert!(
logits.shape().len() == output.shape().len(),
"output and logits must have the same shape"
);
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
Softmax { logits, output, axis }
}
}
impl OpSpecification for Softmax {
type InstanceType = SoftmaxInstance;
fn type_name(&self) -> &'static str {
"Softmax"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.output.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output: mapping.get(&self.output).unwrap_or(&self.output).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> |
}
/// Softmax OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxInstance {
logits: NodeID,
output: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxInstance {
fn type_name(&self) -> &'static str {
"Softmax"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(Softmax {
logits: graph.node_from_id(self.logits),
output: graph.node_from_id(self.output),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.output]
}
fn gradient(&self, ctx: &mut GradientContext) -> Result<(), GradientError> {
SoftmaxBack::new(
ctx.node(&self.logits),
ctx.grad_of(&self.logits),
ctx.grad_of(&self.output),
self.axis,
)
.build()?;
Ok(())
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
ctx.merge_output_shape(&self.output, &ctx.input_shape(&self.logits).slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_output(&self.output).lanes_mut(Axis(self.axis)))
.par_for_each(|logits, outputs| {
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0.0, |sum, &v| sum + (v - max).exp());
Zip::from(logits).and(outputs).for_each(|logit, output| {
*output += (logit - max).exp() / exp_sum;
});
});
Ok(())
}
}
/// Optimised Backward pass for Softmax Op.
///
/// Input/Output naming convention matches Softmax Input/Outputs, i.e. output_grad is an input to this Op.
///
/// All inputs and grads must be unique.
#[must_use = "Op builder not used, call .build()"]
#[derive(Clone, Debug)]
pub struct SoftmaxBack {
logits: Node,
logits_grad: Node,
output_grad: Node,
axis: usize,
}
impl SoftmaxBack {
pub fn new<I1, I2, O>(logits: I1, logits_grad: O, output_grad: I2, axis: usize) -> Self
where
I1: Into<Node>,
I2: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let logits_grad = logits_grad.into();
let output_grad = output_grad.into();
assert!(logits.shape().len() == logits_grad.shape().len());
assert!(logits.shape().len() == output_grad.shape().len());
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
SoftmaxBack {
logits,
logits_grad,
output_grad,
axis,
}
}
}
impl OpSpecification for SoftmaxBack {
type InstanceType = SoftmaxBackInstance;
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone(), self.output_grad.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.logits_grad.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output_grad: mapping.get(&self.output_grad).unwrap_or(&self.output_grad).clone(),
logits_grad: mapping.get(&self.logits_grad).unwrap_or(&self.logits_grad).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxBackInstance {
logits: self.logits.id(),
logits_grad: self.logits_grad.id(),
output_grad: self.output_grad.id(),
axis: self.axis,
})
}
}
/// SoftmaxBack OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxBackInstance {
logits: NodeID,
logits_grad: NodeID,
output_grad: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxBackInstance {
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(SoftmaxBack {
logits: graph.node_from_id(self.logits),
logits_grad: graph.node_from_id(self.logits_grad),
output_grad: graph.node_from_id(self.output_grad),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits, self.output_grad]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.logits_grad]
}
fn gradient(&self, _ctx: &mut GradientContext) -> Result<(), GradientError> {
Err(GradientError::Unimplemented)
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
let logits_shape = ctx.input_shape(&self.logits).clone();
let output_grad_shape = ctx.input_shape(&self.output_grad).clone();
if logits_shape.ndim() == 0 {
return Err(format!(
"Softmax requires logit and label shapes to have at least one axis: {:?}",
logits_shape.slice(),
)
.into());
}
if output_grad_shape != logits_shape {
return Err(format!("SoftmaxBack requires the output grad to have the shape of the logits: logits:{:?} output_grad:{:?}, axis: {}", logits_shape.slice(), output_grad_shape.slice(), self.axis).into());
}
ctx.merge_output_shape(&self.logits_grad, &logits_shape.slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_output(&self.logits_grad).lanes_mut(Axis(self.axis)))
.and(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_input(&self.output_grad).lanes(Axis(self.axis)))
.par_for_each(|mut logits_grad, logits, output_grad| {
let len = logits.len();
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0., |sum, &v| sum + (v - max).exp());
// let exp_sum_ln = exp_sum.ln();
for (i, grad) in output_grad.iter().enumerate() {
if grad.abs() > 0.0 {
// hopefully output gradients are sparse, eg from cross entropy loss
let a = logits[i] - max;
// let x = (a - exp_sum_ln).exp();
let x = a.exp() / exp_sum;
let g_x = grad * x;
let mut other_sum = 0.0;
for j in 0..i {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b - exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
// logits_grad[i] += g_x - g_x * x;
// inpd_n[i] += - inp_n.iter().enumerate().fold(0., |sum, (ind, v)| sum + if ind != i
// {(v-max).exp()} else {0.0})*(mult/exp_sum);
for j in i + 1..len {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b- exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
logits_grad[i] += g_x * other_sum;
}
}
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::softmax;
use crate::elementwise::mul::mul;
use alumina_core::graph::Node;
use alumina_test::{grad_numeric_test::GradNumericTest, relatively_close::RelClose};
use indexmap::indexset;
use ndarray::arr2;
#[test]
fn forward_test() {
let logits = Node::new(&[4, 4])
.set_value(arr2(&[
[0.2, 0.4, 0.6, 0.8],
[1.2, 1.4, 1.6, 1.8],
[2.2, 2.4, 2.6, 2.8],
[3.2, 3.4, 3.6, 3.8],
]))
.set_name("logits");
let hor_groups = softmax(&logits, -1).unwrap();
let vert_groups = softmax(&logits, 0).unwrap();
assert!(hor_groups
.calc()
.unwrap()
.all_relatively_close(&arr2(&[[0.180_657_18, 0.220_655_17, 0.269_508_84, 0.329_178_84]]), 1e-4));
assert!(vert_groups.calc().unwrap().all_relatively_close(
&arr2(&[[0.032_058_604], [0.087_144_32f32], [0.236_882_82], [0.643_914_3]]),
1e-4
));
}
#[test]
fn grad_numeric_rand_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let rand = Node::new(&[13, 33]).set_name("rand"); // multiply output by random amounts to prevent gradient cancellation
let output = mul(&softmax(&logits, -1).unwrap(), &rand).unwrap();
GradNumericTest::new(&output, &indexset![&logits, &rand])
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
#[test]
fn grad_numeric_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let output = softmax(&logits, -1).unwrap();
GradNumericTest::new(&output, &indexset![&logits])
.expect_zero(&logits, 20.0 * ::std::f32::EPSILON) // under a uniform output gradient the gradient of the logits should cancel out to zero
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
}
| {
Ok(SoftmaxInstance {
logits: self.logits.id(),
output: self.output.id(),
axis: self.axis,
})
} | identifier_body |
softmax.rs | use alumina_core::{
base_ops::{OpInstance, OpSpecification},
errors::{ExecutionError, GradientError, OpBuildError, ShapePropError},
exec::ExecutionContext,
grad::GradientContext,
graph::{Graph, Node, NodeID},
shape_prop::ShapePropContext,
util::wrap_dim,
};
use indexmap::{indexset, IndexMap, IndexSet};
use ndarray::{Axis, Dimension, Zip};
use std::any::Any;
/// Calculates the combined Softmax norm of the input nodes.
///
/// Axis determines the grouping direction.
pub fn softmax<I>(logits: I, axis: isize) -> Result<Node, OpBuildError>
where
I: Into<Node>,
{
let logits = logits.into();
let axis = wrap_dim(axis, logits.shape().len());
let output = logits.graph().new_node(logits.shape());
Softmax::new(logits, output.clone(), axis).build()?;
Ok(output)
}
#[must_use = "Op builder not used, call .build()"]
#[derive(Clone, Debug)]
pub struct Softmax {
logits: Node,
output: Node,
axis: usize,
}
impl Softmax {
pub fn new<I, O>(logits: I, output: O, axis: usize) -> Self
where
I: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let output = output.into();
assert!(
logits.shape().len() == output.shape().len(),
"output and logits must have the same shape"
);
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
Softmax { logits, output, axis }
}
}
impl OpSpecification for Softmax {
type InstanceType = SoftmaxInstance;
fn type_name(&self) -> &'static str {
"Softmax"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.output.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output: mapping.get(&self.output).unwrap_or(&self.output).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxInstance {
logits: self.logits.id(),
output: self.output.id(),
axis: self.axis,
})
}
}
/// Softmax OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxInstance {
logits: NodeID,
output: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxInstance {
fn type_name(&self) -> &'static str {
"Softmax"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(Softmax {
logits: graph.node_from_id(self.logits),
output: graph.node_from_id(self.output),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.output]
}
fn gradient(&self, ctx: &mut GradientContext) -> Result<(), GradientError> {
SoftmaxBack::new(
ctx.node(&self.logits),
ctx.grad_of(&self.logits),
ctx.grad_of(&self.output),
self.axis,
)
.build()?;
Ok(())
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
ctx.merge_output_shape(&self.output, &ctx.input_shape(&self.logits).slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_output(&self.output).lanes_mut(Axis(self.axis)))
.par_for_each(|logits, outputs| {
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0.0, |sum, &v| sum + (v - max).exp());
Zip::from(logits).and(outputs).for_each(|logit, output| {
*output += (logit - max).exp() / exp_sum;
});
});
Ok(())
}
}
/// Optimised Backward pass for Softmax Op.
///
/// Input/Output naming convention matches Softmax Input/Outputs, i.e. output_grad is an input to this Op.
///
/// All inputs and grads must be unique.
#[must_use = "Op builder not used, call .build()"]
#[derive(Clone, Debug)]
pub struct SoftmaxBack {
logits: Node,
logits_grad: Node,
output_grad: Node,
axis: usize,
}
impl SoftmaxBack {
pub fn new<I1, I2, O>(logits: I1, logits_grad: O, output_grad: I2, axis: usize) -> Self
where
I1: Into<Node>,
I2: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let logits_grad = logits_grad.into();
let output_grad = output_grad.into();
assert!(logits.shape().len() == logits_grad.shape().len());
assert!(logits.shape().len() == output_grad.shape().len());
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
SoftmaxBack {
logits,
logits_grad,
output_grad,
axis,
}
}
}
impl OpSpecification for SoftmaxBack {
type InstanceType = SoftmaxBackInstance;
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone(), self.output_grad.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.logits_grad.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output_grad: mapping.get(&self.output_grad).unwrap_or(&self.output_grad).clone(),
logits_grad: mapping.get(&self.logits_grad).unwrap_or(&self.logits_grad).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxBackInstance {
logits: self.logits.id(),
logits_grad: self.logits_grad.id(),
output_grad: self.output_grad.id(),
axis: self.axis,
})
}
}
/// SoftmaxBack OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxBackInstance {
logits: NodeID,
logits_grad: NodeID,
output_grad: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxBackInstance {
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(SoftmaxBack {
logits: graph.node_from_id(self.logits),
logits_grad: graph.node_from_id(self.logits_grad),
output_grad: graph.node_from_id(self.output_grad),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits, self.output_grad]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.logits_grad]
}
fn gradient(&self, _ctx: &mut GradientContext) -> Result<(), GradientError> {
Err(GradientError::Unimplemented)
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
let logits_shape = ctx.input_shape(&self.logits).clone();
let output_grad_shape = ctx.input_shape(&self.output_grad).clone();
if logits_shape.ndim() == 0 {
return Err(format!(
"Softmax requires logit and label shapes to have at least one axis: {:?}",
logits_shape.slice(),
)
.into());
}
if output_grad_shape != logits_shape |
ctx.merge_output_shape(&self.logits_grad, &logits_shape.slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_output(&self.logits_grad).lanes_mut(Axis(self.axis)))
.and(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_input(&self.output_grad).lanes(Axis(self.axis)))
.par_for_each(|mut logits_grad, logits, output_grad| {
let len = logits.len();
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0., |sum, &v| sum + (v - max).exp());
// let exp_sum_ln = exp_sum.ln();
for (i, grad) in output_grad.iter().enumerate() {
if grad.abs() > 0.0 {
// hopefully output gradients are sparse, eg from cross entropy loss
let a = logits[i] - max;
// let x = (a - exp_sum_ln).exp();
let x = a.exp() / exp_sum;
let g_x = grad * x;
let mut other_sum = 0.0;
for j in 0..i {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b - exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
// logits_grad[i] += g_x - g_x * x;
// inpd_n[i] += - inp_n.iter().enumerate().fold(0., |sum, (ind, v)| sum + if ind != i
// {(v-max).exp()} else {0.0})*(mult/exp_sum);
for j in i + 1..len {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b- exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
logits_grad[i] += g_x * other_sum;
}
}
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::softmax;
use crate::elementwise::mul::mul;
use alumina_core::graph::Node;
use alumina_test::{grad_numeric_test::GradNumericTest, relatively_close::RelClose};
use indexmap::indexset;
use ndarray::arr2;
#[test]
fn forward_test() {
let logits = Node::new(&[4, 4])
.set_value(arr2(&[
[0.2, 0.4, 0.6, 0.8],
[1.2, 1.4, 1.6, 1.8],
[2.2, 2.4, 2.6, 2.8],
[3.2, 3.4, 3.6, 3.8],
]))
.set_name("logits");
let hor_groups = softmax(&logits, -1).unwrap();
let vert_groups = softmax(&logits, 0).unwrap();
assert!(hor_groups
.calc()
.unwrap()
.all_relatively_close(&arr2(&[[0.180_657_18, 0.220_655_17, 0.269_508_84, 0.329_178_84]]), 1e-4));
assert!(vert_groups.calc().unwrap().all_relatively_close(
&arr2(&[[0.032_058_604], [0.087_144_32f32], [0.236_882_82], [0.643_914_3]]),
1e-4
));
}
#[test]
fn grad_numeric_rand_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let rand = Node::new(&[13, 33]).set_name("rand"); // multiply output by random amounts to prevent gradient cancellation
let output = mul(&softmax(&logits, -1).unwrap(), &rand).unwrap();
GradNumericTest::new(&output, &indexset![&logits, &rand])
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
#[test]
fn grad_numeric_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let output = softmax(&logits, -1).unwrap();
GradNumericTest::new(&output, &indexset![&logits])
.expect_zero(&logits, 20.0 * ::std::f32::EPSILON) // under a uniform output gradient the gradient of the logits should cancel out to zero
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
}
| {
return Err(format!("SoftmaxBack requires the output grad to have the shape of the logits: logits:{:?} output_grad:{:?}, axis: {}", logits_shape.slice(), output_grad_shape.slice(), self.axis).into());
} | conditional_block |
Image_Video_Download_fromnew_API.py | import tweepy # To consume Twitter's API
import pandas as pd # To handle data
import numpy as np # For number computing
import csv
# For plotting and visualization:
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
import json
import os
from urllib.request import urlopen ,URLError, HTTPError,Request
import urllib.error
import urllib
import glob
from glob import glob
import argparse
import shutil
import requests
from bs4 import BeautifulSoup
import json
import urllib.parse
import m3u8
from pathlib import Path
import re
import ffmpeg
import os
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def download(video_url):
video_player_url_prefix = 'https://twitter.com/i/videos/tweet/'
video_host = ''
output_dir = './output'
# Parse the tweet ID
video_url = video_url.split('?', 1)[0]
tweet_user = video_url.split('/')[3]
tweet_id = video_url.split('/')[5]
# Grab the video client HTML
video_player_url = video_player_url_prefix + tweet_id
video_player_response = requests.get(video_player_url)
# Get the JS file with the Bearer token to talk to the API.
js_file_soup = BeautifulSoup(video_player_response.text, 'html.parser')
js_file_url = js_file_soup.find('script')['src']
js_file_response = requests.get(js_file_url)
# Pull the bearer token out
bearer_token_pattern = re.compile('Bearer ([a-zA-Z0-9%-])+')
bearer_token = bearer_token_pattern.search(js_file_response.text)
bearer_token = bearer_token.group(0)
# Talk to the API to get the m3u8 URL
api_string = 'https://api.twitter.com/1.1/videos/tweet/config/' + tweet_id + '.json'
player_config = requests.get(api_string, headers={'Authorization': bearer_token})
m3u8_url_get = json.loads(player_config.text)
try:
m3u8_url_get = m3u8_url_get['track']['playbackUrl']
# Get m3u8
m3u8_response = requests.get(m3u8_url_get, headers = {'Authorization': bearer_token})
m3u8_url_parse = urllib.parse.urlparse(m3u8_url_get)
video_host = m3u8_url_parse.scheme + '://' + m3u8_url_parse.hostname
m3u8_parse = m3u8.loads(m3u8_response.text)
if m3u8_parse.is_variant and len(m3u8_parse.playlists) > 1:
playlist = m3u8_parse.playlists[len(m3u8_parse.playlists)-1]
resolution = str(playlist.stream_info.resolution[0]) + 'x' + str(playlist.stream_info.resolution[1])
resolution_file = Path('/media/nano/Nanoyotta/Maplytiks_Social/Events/CSK_Training_Camp/Twitter/') / Path(tweet_id + '.mp4')
if not os.path.exists(resolution_file):
# print('[+] Downloading ' + tweet_id)
playlist_url = video_host + playlist.uri
ts_m3u8_response = requests.get(playlist_url)
ts_m3u8_parse = m3u8.loads(ts_m3u8_response.text)
ts_list = []
for ts_uri in ts_m3u8_parse.segments.uri:
ts_list.append(video_host + ts_uri)
# Convert TS to MP4
ts_streams = [ ffmpeg.input(str(_)) for _ in ts_list ]
ffmpeg.concat(*ts_streams).output(str(resolution_file), strict= -2,loglevel='error').overwrite_output().run()
print('[+] Downloaded non embded' + tweet_id)
else :
print ("This video file is already exists")
except ffmpeg.Error as e:
print ("ffmpeg error with this tweet",tweet_id)
print("ffmpeg error")
print(e)
except:
error_response = json.loads(player_config.text)
if error_response['errors'][0]['code'] == 88:
with open('./twitter_video_fails.txt', 'a') as f:
f.write(api_string+"\n")
def get_image_video_url_from_tweet(tweet):
images_and_videos_links =[]
if "retweeted_status" in tweet:
if tweet["retweeted_status"]["truncated"]:
if "extended_tweet" in tweet["retweeted_status"] and 'extended_entities' in tweet["retweeted_status"]['extended_tweet'].keys():
medias = tweet["retweeted_status"]["extended_tweet"]['extended_entities']['media']
else:
medias="NaN"
else:
if "extended_entities" in tweet["retweeted_status"] and 'media' in tweet["retweeted_status"]['extended_entities'].keys():
medias = tweet["retweeted_status"]['extended_entities']['media']
else:
medias ="NaN"
elif "quoted_status" in tweet:
if tweet["quoted_status"]["truncated"]:
if "extended_tweet" in tweet["quoted_status"] and 'extended_entities' in tweet["quoted_status"]['extended_tweet'].keys():
medias = tweet["quoted_status"]["extended_tweet"]['extended_entities']['media']
else:
medias="NaN"
else:
if "extended_entities" in tweet["quoted_status"] and 'media' in tweet["quoted_status"]['extended_entities'].keys():
medias = tweet["quoted_status"]['extended_entities']['media']
else:
medias ="NaN"
else:
if tweet["truncated"]:
if "extended_tweet" in tweet and 'extended_entities' in tweet['extended_tweet'].keys():
medias = tweet["extended_tweet"]['extended_entities']['media']
else:
medias ="NaN"
else:
if "extended_entities" in tweet and 'media' in tweet['extended_entities'].keys():
medias = tweet['extended_entities']['media']
else:
medias ="NaN"
try:
if medias!= "NaN":
for media in medias:
if media['type'] in {"video" , "animated_gif"}:
videos = media["video_info"]["variants"]
bitrate = 0
index = 0
for i in range(0, len(videos)):
if videos[i]['content_type'] == 'video/mp4':
br = int(videos[i]['bitrate'])
if br > bitrate:
bitrate = br
index = i
images_and_videos_links.append(videos[index]['url'])
elif (media['type'] not in {'video' , "animated_gif"} and media['expanded_url'].split('/')[6] =='video'):
videos = media['expanded_url']
videos_spt =videos.split('/video')[0]
images_and_videos_links.append(videos_spt)
else :
images_url = media["media_url"]
images_and_videos_links.append(images_url)
else:
images_and_videos_links =[]
except AttributeError:
pass
return images_and_videos_links
def download_images_videos_to_local_dir(tweet):
images_and_videos_links =get_image_video_url_from_tweet(tweet)
if "retweeted_status" in tweet.keys():
tweetid = tweet["retweeted_status"]["id"]
print ("This is a retweet",tweetid)
else:
tweetid= tweet["id"]
if not len(images_and_videos_links) ==0:
for count,media_link in enumerate(images_and_videos_links):
print ("This is the media link",media_link)
try:
if os.path.basename(media_link)==str(tweetid):
download(media_link)
elif (os.path.splitext(media_link))[1] in [".jpg",".png"] :
req = Request(media_link ,headers=headers)
rsp = urlopen(req)
image_file_name ='/media/nano/Nanoyotta/Maplytiks_Social/Events/CSK_Training_Camp/Twitter/'+'/' + str(tweetid)+ '_' +str(count) +str((os.path.splitext(media_link))[1])
if not os.path.exists(image_file_name):
with open(image_file_name,'wb') as f:
f.write(rsp.read())
print ("Downloaded images",tweetid)
else:
print ("This image file is already exists")
else:
req = Request(media_link ,headers=headers)
rsp = urlopen(req)
video_file_name ='./CSK_Training_Camp/Twitter/'+'/' + str(tweetid)+ '_'+ str(count)+'.mp4'
if not os.path.exists(video_file_name):
# print ("Downloading non nonebedded video",tweetid)
with open(video_file_name,'wb') as f:
f.write(rsp.read())
print ("Downloaded embded video",tweetid)
else :
print ("This video file is already exists")
except urllib.error.URLError as e:
if hasattr(e,'code'):
|
if hasattr(e,'reason'):
print (e.reason)
except urllib.error.HTTPError as e:
if hasattr(e,'code'):
print(e.code)
if hasattr(e,'reason'):
print(e.reason)
print('HTTPError!!!')
def write_tweets(tweets, filename):
''' Function that appends tweets to a file. '''
with open(filename, 'a') as f:
for tweet in tweets:
json.dump(tweet, f)
f.write('\n')
def find_unique_tweets(unique_tweet_ids,all_tweets):
unique_tweets =[]
for current_tweet in all_tweets:
if str(current_tweet['id']) not in unique_tweet_ids:
unique_tweets.append(current_tweet)
return unique_tweets
def collect_tweets(tweet_files):
all_tweets =[]
for file in tweet_files:
with open(file, 'r') as f:
for j ,line in enumerate(f.readlines()):
try:
all_tweets.append(json.loads(line))
except ValueError:
print ("{} failed the JSON load".format(line))
return all_tweets
if __name__ == '__main__':
tweet_path = "./CSK_Training_Camp/Twitter/"
tweet_files = glob(tweet_path +"/" +"*.json")
for file in tweet_files:
with open(file, 'r') as f:
for j ,line in enumerate(f.readlines()):
try:
# tweet_id =(os.path.splitext(line))[0].split("/")[7]
# tweet_remain_ids.append(tweet_id)
# print (line.split("/")[7])
download_images_videos_to_local_dir(json.loads(line))
except ValueError:
print ("{} failed the JSON load".format(line))
# unique_tweets = find_unique_tweets(tweet_remain_ids,all_tweets)
# write_tweets(unique_tweets,tweet_remain_filepath)
#https://api.twitter.com/1.1/videos/tweet/config/1041280536646434816.json
| print (e.code) | conditional_block |
Image_Video_Download_fromnew_API.py | import tweepy # To consume Twitter's API
import pandas as pd # To handle data
import numpy as np # For number computing
import csv
# For plotting and visualization:
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
import json
import os
from urllib.request import urlopen ,URLError, HTTPError,Request
import urllib.error
import urllib
import glob
from glob import glob
import argparse
import shutil
import requests
from bs4 import BeautifulSoup
import json
import urllib.parse
import m3u8
from pathlib import Path
import re
import ffmpeg
import os
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def | (video_url):
video_player_url_prefix = 'https://twitter.com/i/videos/tweet/'
video_host = ''
output_dir = './output'
# Parse the tweet ID
video_url = video_url.split('?', 1)[0]
tweet_user = video_url.split('/')[3]
tweet_id = video_url.split('/')[5]
# Grab the video client HTML
video_player_url = video_player_url_prefix + tweet_id
video_player_response = requests.get(video_player_url)
# Get the JS file with the Bearer token to talk to the API.
js_file_soup = BeautifulSoup(video_player_response.text, 'html.parser')
js_file_url = js_file_soup.find('script')['src']
js_file_response = requests.get(js_file_url)
# Pull the bearer token out
bearer_token_pattern = re.compile('Bearer ([a-zA-Z0-9%-])+')
bearer_token = bearer_token_pattern.search(js_file_response.text)
bearer_token = bearer_token.group(0)
# Talk to the API to get the m3u8 URL
api_string = 'https://api.twitter.com/1.1/videos/tweet/config/' + tweet_id + '.json'
player_config = requests.get(api_string, headers={'Authorization': bearer_token})
m3u8_url_get = json.loads(player_config.text)
try:
m3u8_url_get = m3u8_url_get['track']['playbackUrl']
# Get m3u8
m3u8_response = requests.get(m3u8_url_get, headers = {'Authorization': bearer_token})
m3u8_url_parse = urllib.parse.urlparse(m3u8_url_get)
video_host = m3u8_url_parse.scheme + '://' + m3u8_url_parse.hostname
m3u8_parse = m3u8.loads(m3u8_response.text)
if m3u8_parse.is_variant and len(m3u8_parse.playlists) > 1:
playlist = m3u8_parse.playlists[len(m3u8_parse.playlists)-1]
resolution = str(playlist.stream_info.resolution[0]) + 'x' + str(playlist.stream_info.resolution[1])
resolution_file = Path('/media/nano/Nanoyotta/Maplytiks_Social/Events/CSK_Training_Camp/Twitter/') / Path(tweet_id + '.mp4')
if not os.path.exists(resolution_file):
# print('[+] Downloading ' + tweet_id)
playlist_url = video_host + playlist.uri
ts_m3u8_response = requests.get(playlist_url)
ts_m3u8_parse = m3u8.loads(ts_m3u8_response.text)
ts_list = []
for ts_uri in ts_m3u8_parse.segments.uri:
ts_list.append(video_host + ts_uri)
# Convert TS to MP4
ts_streams = [ ffmpeg.input(str(_)) for _ in ts_list ]
ffmpeg.concat(*ts_streams).output(str(resolution_file), strict= -2,loglevel='error').overwrite_output().run()
print('[+] Downloaded non embded' + tweet_id)
else :
print ("This video file is already exists")
except ffmpeg.Error as e:
print ("ffmpeg error with this tweet",tweet_id)
print("ffmpeg error")
print(e)
except:
error_response = json.loads(player_config.text)
if error_response['errors'][0]['code'] == 88:
with open('./twitter_video_fails.txt', 'a') as f:
f.write(api_string+"\n")
def get_image_video_url_from_tweet(tweet):
images_and_videos_links =[]
if "retweeted_status" in tweet:
if tweet["retweeted_status"]["truncated"]:
if "extended_tweet" in tweet["retweeted_status"] and 'extended_entities' in tweet["retweeted_status"]['extended_tweet'].keys():
medias = tweet["retweeted_status"]["extended_tweet"]['extended_entities']['media']
else:
medias="NaN"
else:
if "extended_entities" in tweet["retweeted_status"] and 'media' in tweet["retweeted_status"]['extended_entities'].keys():
medias = tweet["retweeted_status"]['extended_entities']['media']
else:
medias ="NaN"
elif "quoted_status" in tweet:
if tweet["quoted_status"]["truncated"]:
if "extended_tweet" in tweet["quoted_status"] and 'extended_entities' in tweet["quoted_status"]['extended_tweet'].keys():
medias = tweet["quoted_status"]["extended_tweet"]['extended_entities']['media']
else:
medias="NaN"
else:
if "extended_entities" in tweet["quoted_status"] and 'media' in tweet["quoted_status"]['extended_entities'].keys():
medias = tweet["quoted_status"]['extended_entities']['media']
else:
medias ="NaN"
else:
if tweet["truncated"]:
if "extended_tweet" in tweet and 'extended_entities' in tweet['extended_tweet'].keys():
medias = tweet["extended_tweet"]['extended_entities']['media']
else:
medias ="NaN"
else:
if "extended_entities" in tweet and 'media' in tweet['extended_entities'].keys():
medias = tweet['extended_entities']['media']
else:
medias ="NaN"
try:
if medias!= "NaN":
for media in medias:
if media['type'] in {"video" , "animated_gif"}:
videos = media["video_info"]["variants"]
bitrate = 0
index = 0
for i in range(0, len(videos)):
if videos[i]['content_type'] == 'video/mp4':
br = int(videos[i]['bitrate'])
if br > bitrate:
bitrate = br
index = i
images_and_videos_links.append(videos[index]['url'])
elif (media['type'] not in {'video' , "animated_gif"} and media['expanded_url'].split('/')[6] =='video'):
videos = media['expanded_url']
videos_spt =videos.split('/video')[0]
images_and_videos_links.append(videos_spt)
else :
images_url = media["media_url"]
images_and_videos_links.append(images_url)
else:
images_and_videos_links =[]
except AttributeError:
pass
return images_and_videos_links
def download_images_videos_to_local_dir(tweet):
images_and_videos_links =get_image_video_url_from_tweet(tweet)
if "retweeted_status" in tweet.keys():
tweetid = tweet["retweeted_status"]["id"]
print ("This is a retweet",tweetid)
else:
tweetid= tweet["id"]
if not len(images_and_videos_links) ==0:
for count,media_link in enumerate(images_and_videos_links):
print ("This is the media link",media_link)
try:
if os.path.basename(media_link)==str(tweetid):
download(media_link)
elif (os.path.splitext(media_link))[1] in [".jpg",".png"] :
req = Request(media_link ,headers=headers)
rsp = urlopen(req)
image_file_name ='/media/nano/Nanoyotta/Maplytiks_Social/Events/CSK_Training_Camp/Twitter/'+'/' + str(tweetid)+ '_' +str(count) +str((os.path.splitext(media_link))[1])
if not os.path.exists(image_file_name):
with open(image_file_name,'wb') as f:
f.write(rsp.read())
print ("Downloaded images",tweetid)
else:
print ("This image file is already exists")
else:
req = Request(media_link ,headers=headers)
rsp = urlopen(req)
video_file_name ='./CSK_Training_Camp/Twitter/'+'/' + str(tweetid)+ '_'+ str(count)+'.mp4'
if not os.path.exists(video_file_name):
# print ("Downloading non nonebedded video",tweetid)
with open(video_file_name,'wb') as f:
f.write(rsp.read())
print ("Downloaded embded video",tweetid)
else :
print ("This video file is already exists")
except urllib.error.URLError as e:
if hasattr(e,'code'):
print (e.code)
if hasattr(e,'reason'):
print (e.reason)
except urllib.error.HTTPError as e:
if hasattr(e,'code'):
print(e.code)
if hasattr(e,'reason'):
print(e.reason)
print('HTTPError!!!')
def write_tweets(tweets, filename):
''' Function that appends tweets to a file. '''
with open(filename, 'a') as f:
for tweet in tweets:
json.dump(tweet, f)
f.write('\n')
def find_unique_tweets(unique_tweet_ids,all_tweets):
unique_tweets =[]
for current_tweet in all_tweets:
if str(current_tweet['id']) not in unique_tweet_ids:
unique_tweets.append(current_tweet)
return unique_tweets
def collect_tweets(tweet_files):
all_tweets =[]
for file in tweet_files:
with open(file, 'r') as f:
for j ,line in enumerate(f.readlines()):
try:
all_tweets.append(json.loads(line))
except ValueError:
print ("{} failed the JSON load".format(line))
return all_tweets
if __name__ == '__main__':
tweet_path = "./CSK_Training_Camp/Twitter/"
tweet_files = glob(tweet_path +"/" +"*.json")
for file in tweet_files:
with open(file, 'r') as f:
for j ,line in enumerate(f.readlines()):
try:
# tweet_id =(os.path.splitext(line))[0].split("/")[7]
# tweet_remain_ids.append(tweet_id)
# print (line.split("/")[7])
download_images_videos_to_local_dir(json.loads(line))
except ValueError:
print ("{} failed the JSON load".format(line))
# unique_tweets = find_unique_tweets(tweet_remain_ids,all_tweets)
# write_tweets(unique_tweets,tweet_remain_filepath)
#https://api.twitter.com/1.1/videos/tweet/config/1041280536646434816.json
| download | identifier_name |
Image_Video_Download_fromnew_API.py | import tweepy # To consume Twitter's API
import pandas as pd # To handle data
import numpy as np # For number computing
import csv
# For plotting and visualization:
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
import json
import os
from urllib.request import urlopen ,URLError, HTTPError,Request
import urllib.error
import urllib
import glob
from glob import glob
import argparse
import shutil
import requests
from bs4 import BeautifulSoup
import json
import urllib.parse
import m3u8
from pathlib import Path
import re
import ffmpeg
import os
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def download(video_url):
video_player_url_prefix = 'https://twitter.com/i/videos/tweet/'
video_host = ''
output_dir = './output'
# Parse the tweet ID
video_url = video_url.split('?', 1)[0]
tweet_user = video_url.split('/')[3]
tweet_id = video_url.split('/')[5]
# Grab the video client HTML
video_player_url = video_player_url_prefix + tweet_id
video_player_response = requests.get(video_player_url)
# Get the JS file with the Bearer token to talk to the API.
js_file_soup = BeautifulSoup(video_player_response.text, 'html.parser') | js_file_response = requests.get(js_file_url)
# Pull the bearer token out
bearer_token_pattern = re.compile('Bearer ([a-zA-Z0-9%-])+')
bearer_token = bearer_token_pattern.search(js_file_response.text)
bearer_token = bearer_token.group(0)
# Talk to the API to get the m3u8 URL
api_string = 'https://api.twitter.com/1.1/videos/tweet/config/' + tweet_id + '.json'
player_config = requests.get(api_string, headers={'Authorization': bearer_token})
m3u8_url_get = json.loads(player_config.text)
try:
m3u8_url_get = m3u8_url_get['track']['playbackUrl']
# Get m3u8
m3u8_response = requests.get(m3u8_url_get, headers = {'Authorization': bearer_token})
m3u8_url_parse = urllib.parse.urlparse(m3u8_url_get)
video_host = m3u8_url_parse.scheme + '://' + m3u8_url_parse.hostname
m3u8_parse = m3u8.loads(m3u8_response.text)
if m3u8_parse.is_variant and len(m3u8_parse.playlists) > 1:
playlist = m3u8_parse.playlists[len(m3u8_parse.playlists)-1]
resolution = str(playlist.stream_info.resolution[0]) + 'x' + str(playlist.stream_info.resolution[1])
resolution_file = Path('/media/nano/Nanoyotta/Maplytiks_Social/Events/CSK_Training_Camp/Twitter/') / Path(tweet_id + '.mp4')
if not os.path.exists(resolution_file):
# print('[+] Downloading ' + tweet_id)
playlist_url = video_host + playlist.uri
ts_m3u8_response = requests.get(playlist_url)
ts_m3u8_parse = m3u8.loads(ts_m3u8_response.text)
ts_list = []
for ts_uri in ts_m3u8_parse.segments.uri:
ts_list.append(video_host + ts_uri)
# Convert TS to MP4
ts_streams = [ ffmpeg.input(str(_)) for _ in ts_list ]
ffmpeg.concat(*ts_streams).output(str(resolution_file), strict= -2,loglevel='error').overwrite_output().run()
print('[+] Downloaded non embded' + tweet_id)
else :
print ("This video file is already exists")
except ffmpeg.Error as e:
print ("ffmpeg error with this tweet",tweet_id)
print("ffmpeg error")
print(e)
except:
error_response = json.loads(player_config.text)
if error_response['errors'][0]['code'] == 88:
with open('./twitter_video_fails.txt', 'a') as f:
f.write(api_string+"\n")
def get_image_video_url_from_tweet(tweet):
images_and_videos_links =[]
if "retweeted_status" in tweet:
if tweet["retweeted_status"]["truncated"]:
if "extended_tweet" in tweet["retweeted_status"] and 'extended_entities' in tweet["retweeted_status"]['extended_tweet'].keys():
medias = tweet["retweeted_status"]["extended_tweet"]['extended_entities']['media']
else:
medias="NaN"
else:
if "extended_entities" in tweet["retweeted_status"] and 'media' in tweet["retweeted_status"]['extended_entities'].keys():
medias = tweet["retweeted_status"]['extended_entities']['media']
else:
medias ="NaN"
elif "quoted_status" in tweet:
if tweet["quoted_status"]["truncated"]:
if "extended_tweet" in tweet["quoted_status"] and 'extended_entities' in tweet["quoted_status"]['extended_tweet'].keys():
medias = tweet["quoted_status"]["extended_tweet"]['extended_entities']['media']
else:
medias="NaN"
else:
if "extended_entities" in tweet["quoted_status"] and 'media' in tweet["quoted_status"]['extended_entities'].keys():
medias = tweet["quoted_status"]['extended_entities']['media']
else:
medias ="NaN"
else:
if tweet["truncated"]:
if "extended_tweet" in tweet and 'extended_entities' in tweet['extended_tweet'].keys():
medias = tweet["extended_tweet"]['extended_entities']['media']
else:
medias ="NaN"
else:
if "extended_entities" in tweet and 'media' in tweet['extended_entities'].keys():
medias = tweet['extended_entities']['media']
else:
medias ="NaN"
try:
if medias!= "NaN":
for media in medias:
if media['type'] in {"video" , "animated_gif"}:
videos = media["video_info"]["variants"]
bitrate = 0
index = 0
for i in range(0, len(videos)):
if videos[i]['content_type'] == 'video/mp4':
br = int(videos[i]['bitrate'])
if br > bitrate:
bitrate = br
index = i
images_and_videos_links.append(videos[index]['url'])
elif (media['type'] not in {'video' , "animated_gif"} and media['expanded_url'].split('/')[6] =='video'):
videos = media['expanded_url']
videos_spt =videos.split('/video')[0]
images_and_videos_links.append(videos_spt)
else :
images_url = media["media_url"]
images_and_videos_links.append(images_url)
else:
images_and_videos_links =[]
except AttributeError:
pass
return images_and_videos_links
def download_images_videos_to_local_dir(tweet):
images_and_videos_links =get_image_video_url_from_tweet(tweet)
if "retweeted_status" in tweet.keys():
tweetid = tweet["retweeted_status"]["id"]
print ("This is a retweet",tweetid)
else:
tweetid= tweet["id"]
if not len(images_and_videos_links) ==0:
for count,media_link in enumerate(images_and_videos_links):
print ("This is the media link",media_link)
try:
if os.path.basename(media_link)==str(tweetid):
download(media_link)
elif (os.path.splitext(media_link))[1] in [".jpg",".png"] :
req = Request(media_link ,headers=headers)
rsp = urlopen(req)
image_file_name ='/media/nano/Nanoyotta/Maplytiks_Social/Events/CSK_Training_Camp/Twitter/'+'/' + str(tweetid)+ '_' +str(count) +str((os.path.splitext(media_link))[1])
if not os.path.exists(image_file_name):
with open(image_file_name,'wb') as f:
f.write(rsp.read())
print ("Downloaded images",tweetid)
else:
print ("This image file is already exists")
else:
req = Request(media_link ,headers=headers)
rsp = urlopen(req)
video_file_name ='./CSK_Training_Camp/Twitter/'+'/' + str(tweetid)+ '_'+ str(count)+'.mp4'
if not os.path.exists(video_file_name):
# print ("Downloading non nonebedded video",tweetid)
with open(video_file_name,'wb') as f:
f.write(rsp.read())
print ("Downloaded embded video",tweetid)
else :
print ("This video file is already exists")
except urllib.error.URLError as e:
if hasattr(e,'code'):
print (e.code)
if hasattr(e,'reason'):
print (e.reason)
except urllib.error.HTTPError as e:
if hasattr(e,'code'):
print(e.code)
if hasattr(e,'reason'):
print(e.reason)
print('HTTPError!!!')
def write_tweets(tweets, filename):
''' Function that appends tweets to a file. '''
with open(filename, 'a') as f:
for tweet in tweets:
json.dump(tweet, f)
f.write('\n')
def find_unique_tweets(unique_tweet_ids,all_tweets):
unique_tweets =[]
for current_tweet in all_tweets:
if str(current_tweet['id']) not in unique_tweet_ids:
unique_tweets.append(current_tweet)
return unique_tweets
def collect_tweets(tweet_files):
all_tweets =[]
for file in tweet_files:
with open(file, 'r') as f:
for j ,line in enumerate(f.readlines()):
try:
all_tweets.append(json.loads(line))
except ValueError:
print ("{} failed the JSON load".format(line))
return all_tweets
if __name__ == '__main__':
tweet_path = "./CSK_Training_Camp/Twitter/"
tweet_files = glob(tweet_path +"/" +"*.json")
for file in tweet_files:
with open(file, 'r') as f:
for j ,line in enumerate(f.readlines()):
try:
# tweet_id =(os.path.splitext(line))[0].split("/")[7]
# tweet_remain_ids.append(tweet_id)
# print (line.split("/")[7])
download_images_videos_to_local_dir(json.loads(line))
except ValueError:
print ("{} failed the JSON load".format(line))
# unique_tweets = find_unique_tweets(tweet_remain_ids,all_tweets)
# write_tweets(unique_tweets,tweet_remain_filepath)
#https://api.twitter.com/1.1/videos/tweet/config/1041280536646434816.json | js_file_url = js_file_soup.find('script')['src'] | random_line_split |
Image_Video_Download_fromnew_API.py | import tweepy # To consume Twitter's API
import pandas as pd # To handle data
import numpy as np # For number computing
import csv
# For plotting and visualization:
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
import json
import os
from urllib.request import urlopen ,URLError, HTTPError,Request
import urllib.error
import urllib
import glob
from glob import glob
import argparse
import shutil
import requests
from bs4 import BeautifulSoup
import json
import urllib.parse
import m3u8
from pathlib import Path
import re
import ffmpeg
import os
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def download(video_url):
video_player_url_prefix = 'https://twitter.com/i/videos/tweet/'
video_host = ''
output_dir = './output'
# Parse the tweet ID
video_url = video_url.split('?', 1)[0]
tweet_user = video_url.split('/')[3]
tweet_id = video_url.split('/')[5]
# Grab the video client HTML
video_player_url = video_player_url_prefix + tweet_id
video_player_response = requests.get(video_player_url)
# Get the JS file with the Bearer token to talk to the API.
js_file_soup = BeautifulSoup(video_player_response.text, 'html.parser')
js_file_url = js_file_soup.find('script')['src']
js_file_response = requests.get(js_file_url)
# Pull the bearer token out
bearer_token_pattern = re.compile('Bearer ([a-zA-Z0-9%-])+')
bearer_token = bearer_token_pattern.search(js_file_response.text)
bearer_token = bearer_token.group(0)
# Talk to the API to get the m3u8 URL
api_string = 'https://api.twitter.com/1.1/videos/tweet/config/' + tweet_id + '.json'
player_config = requests.get(api_string, headers={'Authorization': bearer_token})
m3u8_url_get = json.loads(player_config.text)
try:
m3u8_url_get = m3u8_url_get['track']['playbackUrl']
# Get m3u8
m3u8_response = requests.get(m3u8_url_get, headers = {'Authorization': bearer_token})
m3u8_url_parse = urllib.parse.urlparse(m3u8_url_get)
video_host = m3u8_url_parse.scheme + '://' + m3u8_url_parse.hostname
m3u8_parse = m3u8.loads(m3u8_response.text)
if m3u8_parse.is_variant and len(m3u8_parse.playlists) > 1:
playlist = m3u8_parse.playlists[len(m3u8_parse.playlists)-1]
resolution = str(playlist.stream_info.resolution[0]) + 'x' + str(playlist.stream_info.resolution[1])
resolution_file = Path('/media/nano/Nanoyotta/Maplytiks_Social/Events/CSK_Training_Camp/Twitter/') / Path(tweet_id + '.mp4')
if not os.path.exists(resolution_file):
# print('[+] Downloading ' + tweet_id)
playlist_url = video_host + playlist.uri
ts_m3u8_response = requests.get(playlist_url)
ts_m3u8_parse = m3u8.loads(ts_m3u8_response.text)
ts_list = []
for ts_uri in ts_m3u8_parse.segments.uri:
ts_list.append(video_host + ts_uri)
# Convert TS to MP4
ts_streams = [ ffmpeg.input(str(_)) for _ in ts_list ]
ffmpeg.concat(*ts_streams).output(str(resolution_file), strict= -2,loglevel='error').overwrite_output().run()
print('[+] Downloaded non embded' + tweet_id)
else :
print ("This video file is already exists")
except ffmpeg.Error as e:
print ("ffmpeg error with this tweet",tweet_id)
print("ffmpeg error")
print(e)
except:
error_response = json.loads(player_config.text)
if error_response['errors'][0]['code'] == 88:
with open('./twitter_video_fails.txt', 'a') as f:
f.write(api_string+"\n")
def get_image_video_url_from_tweet(tweet):
images_and_videos_links =[]
if "retweeted_status" in tweet:
if tweet["retweeted_status"]["truncated"]:
if "extended_tweet" in tweet["retweeted_status"] and 'extended_entities' in tweet["retweeted_status"]['extended_tweet'].keys():
medias = tweet["retweeted_status"]["extended_tweet"]['extended_entities']['media']
else:
medias="NaN"
else:
if "extended_entities" in tweet["retweeted_status"] and 'media' in tweet["retweeted_status"]['extended_entities'].keys():
medias = tweet["retweeted_status"]['extended_entities']['media']
else:
medias ="NaN"
elif "quoted_status" in tweet:
if tweet["quoted_status"]["truncated"]:
if "extended_tweet" in tweet["quoted_status"] and 'extended_entities' in tweet["quoted_status"]['extended_tweet'].keys():
medias = tweet["quoted_status"]["extended_tweet"]['extended_entities']['media']
else:
medias="NaN"
else:
if "extended_entities" in tweet["quoted_status"] and 'media' in tweet["quoted_status"]['extended_entities'].keys():
medias = tweet["quoted_status"]['extended_entities']['media']
else:
medias ="NaN"
else:
if tweet["truncated"]:
if "extended_tweet" in tweet and 'extended_entities' in tweet['extended_tweet'].keys():
medias = tweet["extended_tweet"]['extended_entities']['media']
else:
medias ="NaN"
else:
if "extended_entities" in tweet and 'media' in tweet['extended_entities'].keys():
medias = tweet['extended_entities']['media']
else:
medias ="NaN"
try:
if medias!= "NaN":
for media in medias:
if media['type'] in {"video" , "animated_gif"}:
videos = media["video_info"]["variants"]
bitrate = 0
index = 0
for i in range(0, len(videos)):
if videos[i]['content_type'] == 'video/mp4':
br = int(videos[i]['bitrate'])
if br > bitrate:
bitrate = br
index = i
images_and_videos_links.append(videos[index]['url'])
elif (media['type'] not in {'video' , "animated_gif"} and media['expanded_url'].split('/')[6] =='video'):
videos = media['expanded_url']
videos_spt =videos.split('/video')[0]
images_and_videos_links.append(videos_spt)
else :
images_url = media["media_url"]
images_and_videos_links.append(images_url)
else:
images_and_videos_links =[]
except AttributeError:
pass
return images_and_videos_links
def download_images_videos_to_local_dir(tweet):
images_and_videos_links =get_image_video_url_from_tweet(tweet)
if "retweeted_status" in tweet.keys():
tweetid = tweet["retweeted_status"]["id"]
print ("This is a retweet",tweetid)
else:
tweetid= tweet["id"]
if not len(images_and_videos_links) ==0:
for count,media_link in enumerate(images_and_videos_links):
print ("This is the media link",media_link)
try:
if os.path.basename(media_link)==str(tweetid):
download(media_link)
elif (os.path.splitext(media_link))[1] in [".jpg",".png"] :
req = Request(media_link ,headers=headers)
rsp = urlopen(req)
image_file_name ='/media/nano/Nanoyotta/Maplytiks_Social/Events/CSK_Training_Camp/Twitter/'+'/' + str(tweetid)+ '_' +str(count) +str((os.path.splitext(media_link))[1])
if not os.path.exists(image_file_name):
with open(image_file_name,'wb') as f:
f.write(rsp.read())
print ("Downloaded images",tweetid)
else:
print ("This image file is already exists")
else:
req = Request(media_link ,headers=headers)
rsp = urlopen(req)
video_file_name ='./CSK_Training_Camp/Twitter/'+'/' + str(tweetid)+ '_'+ str(count)+'.mp4'
if not os.path.exists(video_file_name):
# print ("Downloading non nonebedded video",tweetid)
with open(video_file_name,'wb') as f:
f.write(rsp.read())
print ("Downloaded embded video",tweetid)
else :
print ("This video file is already exists")
except urllib.error.URLError as e:
if hasattr(e,'code'):
print (e.code)
if hasattr(e,'reason'):
print (e.reason)
except urllib.error.HTTPError as e:
if hasattr(e,'code'):
print(e.code)
if hasattr(e,'reason'):
print(e.reason)
print('HTTPError!!!')
def write_tweets(tweets, filename):
|
def find_unique_tweets(unique_tweet_ids,all_tweets):
unique_tweets =[]
for current_tweet in all_tweets:
if str(current_tweet['id']) not in unique_tweet_ids:
unique_tweets.append(current_tweet)
return unique_tweets
def collect_tweets(tweet_files):
all_tweets =[]
for file in tweet_files:
with open(file, 'r') as f:
for j ,line in enumerate(f.readlines()):
try:
all_tweets.append(json.loads(line))
except ValueError:
print ("{} failed the JSON load".format(line))
return all_tweets
if __name__ == '__main__':
tweet_path = "./CSK_Training_Camp/Twitter/"
tweet_files = glob(tweet_path +"/" +"*.json")
for file in tweet_files:
with open(file, 'r') as f:
for j ,line in enumerate(f.readlines()):
try:
# tweet_id =(os.path.splitext(line))[0].split("/")[7]
# tweet_remain_ids.append(tweet_id)
# print (line.split("/")[7])
download_images_videos_to_local_dir(json.loads(line))
except ValueError:
print ("{} failed the JSON load".format(line))
# unique_tweets = find_unique_tweets(tweet_remain_ids,all_tweets)
# write_tweets(unique_tweets,tweet_remain_filepath)
#https://api.twitter.com/1.1/videos/tweet/config/1041280536646434816.json
| ''' Function that appends tweets to a file. '''
with open(filename, 'a') as f:
for tweet in tweets:
json.dump(tweet, f)
f.write('\n') | identifier_body |
main.go | package main
import (
"bufio"
"flag"
"fmt"
"github.com/manifoldco/promptui"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/vsteffen/42_api/reqAPI42"
"github.com/vsteffen/42_api/tools"
cst "github.com/vsteffen/42_api/tools/constants"
"os"
"regexp"
"time"
)
type projectParent struct {
this *reqAPI42.API42ProjectParent
childs []*reqAPI42.API42Project
}
type projectsPerType struct {
parents map[uint]*projectParent
directs []*reqAPI42.API42Project
}
func askStringClean(askStr string) string {
fmt.Print(askStr)
scannerStdin := bufio.NewScanner(os.Stdin)
scannerStdin.Scan()
if err := scannerStdin.Err(); err != nil {
log.Fatal().Err(err).Msg("askString: Failed to read user input")
}
regexWhitespace := regexp.MustCompile(`\s+`)
str := regexWhitespace.ReplaceAllString(scannerStdin.Text(), " ")
return str
}
func findProjectName(searchStr string, projects *[]*reqAPI42.API42Project) ([]*reqAPI42.API42Project, []string, bool) {
matchProjects := make([]*reqAPI42.API42Project, cst.FindNameMaxResults)
matchCosts := make([]int, cst.FindNameMaxResults)
highestCost := cst.MaxInt
for indexInit := range matchCosts {
matchCosts[indexInit] = cst.MaxInt
}
for indexProject, project := range *projects {
currentCost := tools.EditDistance(searchStr, project.Name)
if currentCost == 0 {
matchCosts[0] = currentCost
matchProjects[0] = (*projects)[indexProject]
return matchProjects, []string{project.Name}, true
}
if currentCost < highestCost {
for indexMatchCost, cost := range matchCosts {
if currentCost < cost {
copy(matchCosts[indexMatchCost+1:], matchCosts[indexMatchCost:])
copy(matchProjects[indexMatchCost+1:], matchProjects[indexMatchCost:])
matchCosts[indexMatchCost] = currentCost
matchProjects[indexMatchCost] = (*projects)[indexProject]
if indexMatchCost+1 == cst.FindNameMaxResults {
highestCost = currentCost
}
break
}
}
}
}
matchStrings := make([]string, 0)
for _, project := range matchProjects {
if project == nil {
break
}
matchStrings = append(matchStrings, project.Name)
}
return matchProjects, matchStrings, false
}
func findProjectParentName(searchStr string, parents *map[uint]*projectParent) ([]*projectParent, []string, bool) {
matchParent := make([]*projectParent, cst.FindNameMaxResults)
matchCosts := make([]int, cst.FindNameMaxResults)
highestCost := cst.MaxInt
for indexInit := range matchCosts {
matchCosts[indexInit] = cst.MaxInt
}
for indexProject, project := range *parents {
currentCost := tools.EditDistance(searchStr, project.this.Name)
if currentCost == 0 {
matchCosts[0] = currentCost
matchParent[0] = (*parents)[indexProject]
return matchParent, []string{project.this.Name}, true
}
if currentCost < highestCost {
for indexMatchCost, cost := range matchCosts {
if currentCost < cost {
copy(matchCosts[indexMatchCost+1:], matchCosts[indexMatchCost:])
copy(matchParent[indexMatchCost+1:], matchParent[indexMatchCost:])
matchCosts[indexMatchCost] = currentCost
matchParent[indexMatchCost] = (*parents)[indexProject]
if indexMatchCost+1 == cst.FindNameMaxResults {
highestCost = currentCost
}
break
}
}
}
}
matchStrings := make([]string, 0)
for _, parent := range matchParent {
matchStrings = append(matchStrings, parent.this.Name)
}
return matchParent, matchStrings, false
}
func getIndexNameChoice(items []string) int {
items = append(items, "Cancel")
prompt := promptui.Select{
Label: "Found these projects name. Choose or cancel",
Items: items,
HideHelp: true,
}
indexProjectFind, _, err := prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("PromptUI: failed")
}
if indexProjectFind == cst.FindNameMaxResults {
return -1
}
return indexProjectFind
}
func findExaminer(api42 *reqAPI42.API42, allProjects *projectsPerType, usersLogged *map[uint]*reqAPI42.API42Location, usersLvl21 *[]*reqAPI42.API42User) {
if allProjects == nil {
log.Error().Msg("Prompt: list of projects empty")
return
}
if usersLogged == nil {
log.Error().Msg("Prompt: map of users logged empty")
return
}
prompt := promptui.Select{
Label: "Does your project have a parent",
Items: []string{"Yes", "No"},
HideHelp: true,
}
indexAction, _, err := prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("PromptUI: failed")
}
var realProjectsToSearch *[]*reqAPI42.API42Project
if indexAction == 0 {
parentProjectName := askStringClean("Please, enter the parent project name: ")
parentFind, parentsFindNames, fullMatch := findProjectParentName(parentProjectName, &allProjects.parents)
if fullMatch {
realProjectsToSearch = &(parentFind[0].childs)
} else {
indexChoose := getIndexNameChoice(parentsFindNames)
if indexChoose == -1 {
return
}
realProjectsToSearch = &(parentFind[indexChoose].childs)
}
} else {
realProjectsToSearch = &allProjects.directs
}
projectName := askStringClean("Please, enter the project name: ")
projectsFind, projectsFindNames, fullMatch := findProjectName(projectName, realProjectsToSearch)
var projectSelected *reqAPI42.API42Project
if fullMatch {
projectSelected = projectsFind[0]
} else {
indexChoose := getIndexNameChoice(projectsFindNames)
if indexChoose == -1 {
return
}
projectSelected = projectsFind[indexChoose]
}
projectsUsers := api42.GetUsersOfProjectsUsers((*projectSelected).ID)
if projectsUsers == nil {
return
}
var i uint = 1
if projectsUsers != nil && len(*projectsUsers) > 0 {
fmt.Println("Users which did or are doing \"" + (*projectSelected).Name + "\":")
for _, projectsUsers := range *projectsUsers {
if examinerLogged, ok := (*usersLogged)[projectsUsers.User.ID]; ok {
fmt.Printf("%-2d: %-8s %-8s - %s\n", i, examinerLogged.Host, examinerLogged.User.Login, cst.ProfileUserURL+examinerLogged.User.Login)
i++
}
}
if i == 1 {
fmt.Println("No user which did or is doing \"" + (*projectSelected).Name + "\" is logged in")
}
}
var j uint = 1
if usersLvl21 != nil {
fmt.Println("Users level 21:")
for _, userLvl21 := range *usersLvl21 {
if examinerLogged, ok := (*usersLogged)[userLvl21.ID]; ok {
fmt.Printf("%-2d: %-8s %-8s - %s\n", j, examinerLogged.Host, examinerLogged.User.Login, cst.ProfileUserURL+examinerLogged.User.Login)
j++
}
}
if j == 1 {
fmt.Println("No user level21 is logged in")
}
}
if i + j == 2 {
log.Error().Msg("findExaminer: no examiner available")
}
}
func sortProjectsPerType(api42Projects *[]reqAPI42.API42Project) *projectsPerType {
if api42Projects == nil {
return nil
}
var allProjects projectsPerType
allProjects.parents = make(map[uint]*projectParent)
allProjects.directs = make([]*reqAPI42.API42Project, 0)
for index, project := range *api42Projects {
if project.Parent == nil {
allProjects.directs = append(allProjects.directs, &(*api42Projects)[index])
} else {
projectDeref := (*api42Projects)[index]
if parentMapValue, ok := allProjects.parents[projectDeref.Parent.ID]; !ok {
allProjects.parents[projectDeref.Parent.ID] = &projectParent{projectDeref.Parent, []*reqAPI42.API42Project{&(*api42Projects)[index]}}
} else {
parentMapValue.childs = append(parentMapValue.childs, &(*api42Projects)[index])
}
}
}
return &allProjects
}
func locationsToUsersIDMap(locations *[]reqAPI42.API42Location, me *reqAPI42.API42User) *map[uint]*reqAPI42.API42Location |
func filterLvl21InCursusUser(cursusUsers *[]reqAPI42.API42CursusUser) *[]*reqAPI42.API42User {
if cursusUsers == nil {
return nil
}
usersLvl21 := make([]*reqAPI42.API42User, 0)
for index, cursusUser := range *cursusUsers {
if cursusUser.Level >= 21.0 {
usersLvl21 = append(usersLvl21, &(*cursusUsers)[index].User)
}
}
return &usersLvl21
}
func debugPrintProjectsPerType(allProjects *projectsPerType) {
fmt.Println("###################################")
for _, parent := range allProjects.parents {
fmt.Println(parent.this.Name)
for _, son := range parent.childs {
fmt.Println("-> " + son.Name)
}
fmt.Println("----------------")
}
fmt.Println("+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+")
for _, direct := range allProjects.directs {
fmt.Println(direct.Name)
}
fmt.Println("###################################")
}
func main() {
flags := []interface{}{}
flags = append(flags, flag.Bool("refresh", false, "force to refresh token"))
flags = append(flags, flag.Bool("check-default-values", false, "send a request to verify the default values"))
flags = append(flags, flag.Bool("no-check-lvl21", false, "don't send a request (send by default) to check users lvl 21 logged in"))
flag.Parse()
nonFlags := flag.Args()
if len(nonFlags) > 0 {
flag.Usage()
os.Exit(1)
}
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.Stamp})
fmt.Print(cst.MenuHello)
api42 := reqAPI42.New(flags)
allProjects := sortProjectsPerType(api42.GetProjects())
usersLogged := locationsToUsersIDMap(api42.GetLocations(), api42.GetMe())
var usersLvl21 *[]*reqAPI42.API42User
if *flags[2].(*bool) == false {
usersLvl21 = filterLvl21InCursusUser(api42.GetCursusUsers())
}
var indexAction int
var err error
menuActions := []string{
cst.MenuActionFind,
cst.MenuActionUpdateLocations,
cst.MenuActionUpdateProjects,
cst.MenuActionUpdateUsersLvl21,
cst.MenuActionUpdateCursus,
cst.MenuActionUpdateCampus,
cst.MenuActionRefreshTokens,
cst.MenuActionQuit,
}
for {
prompt := promptui.Select{
Label: "Choose an action",
Items: menuActions,
HideHelp: true,
}
indexAction, _, err = prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("Prompt: failed")
}
switch menuActions[indexAction] {
case cst.MenuActionFind:
findExaminer(api42, allProjects, usersLogged, usersLvl21)
case cst.MenuActionUpdateLocations:
usersLogged = locationsToUsersIDMap(api42.GetLocations(), api42.GetMe())
case cst.MenuActionUpdateProjects:
allProjects = sortProjectsPerType(api42.GetProjects())
case cst.MenuActionUpdateUsersLvl21:
usersLvl21 = filterLvl21InCursusUser(api42.GetCursusUsers())
case cst.MenuActionUpdateCursus:
cursusName := askStringClean("Please, enter the cursus name: ")
api42.UpdateCursus(cursusName)
case cst.MenuActionUpdateCampus:
campusName := askStringClean("Please, enter the campus name: ")
api42.UpdateCampus(campusName)
case cst.MenuActionRefreshTokens:
api42.RefreshToken()
case cst.MenuActionQuit:
fmt.Println("Goodbye!")
os.Exit(0)
default:
log.Fatal().Msg("Prompt: indexAction out of bound")
}
}
}
| {
if locations == nil {
return nil
}
usersLogged := make(map[uint]*reqAPI42.API42Location)
for index := range *locations {
if (*locations)[index].User.ID == me.ID {
continue
}
usersLogged[(*locations)[index].User.ID] = &(*locations)[index]
}
return &usersLogged
} | identifier_body |
main.go | package main
import (
"bufio"
"flag"
"fmt"
"github.com/manifoldco/promptui"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/vsteffen/42_api/reqAPI42"
"github.com/vsteffen/42_api/tools"
cst "github.com/vsteffen/42_api/tools/constants"
"os"
"regexp"
"time"
)
type projectParent struct {
this *reqAPI42.API42ProjectParent
childs []*reqAPI42.API42Project
}
type projectsPerType struct {
parents map[uint]*projectParent
directs []*reqAPI42.API42Project
}
func askStringClean(askStr string) string {
fmt.Print(askStr)
scannerStdin := bufio.NewScanner(os.Stdin)
scannerStdin.Scan()
if err := scannerStdin.Err(); err != nil {
log.Fatal().Err(err).Msg("askString: Failed to read user input")
}
regexWhitespace := regexp.MustCompile(`\s+`)
str := regexWhitespace.ReplaceAllString(scannerStdin.Text(), " ")
return str
}
func findProjectName(searchStr string, projects *[]*reqAPI42.API42Project) ([]*reqAPI42.API42Project, []string, bool) {
matchProjects := make([]*reqAPI42.API42Project, cst.FindNameMaxResults)
matchCosts := make([]int, cst.FindNameMaxResults)
highestCost := cst.MaxInt
for indexInit := range matchCosts {
matchCosts[indexInit] = cst.MaxInt
}
for indexProject, project := range *projects {
currentCost := tools.EditDistance(searchStr, project.Name)
if currentCost == 0 {
matchCosts[0] = currentCost
matchProjects[0] = (*projects)[indexProject]
return matchProjects, []string{project.Name}, true
}
if currentCost < highestCost {
for indexMatchCost, cost := range matchCosts {
if currentCost < cost {
copy(matchCosts[indexMatchCost+1:], matchCosts[indexMatchCost:])
copy(matchProjects[indexMatchCost+1:], matchProjects[indexMatchCost:])
matchCosts[indexMatchCost] = currentCost
matchProjects[indexMatchCost] = (*projects)[indexProject]
if indexMatchCost+1 == cst.FindNameMaxResults {
highestCost = currentCost
}
break
}
}
}
}
matchStrings := make([]string, 0)
for _, project := range matchProjects {
if project == nil {
break
}
matchStrings = append(matchStrings, project.Name)
}
return matchProjects, matchStrings, false
}
func findProjectParentName(searchStr string, parents *map[uint]*projectParent) ([]*projectParent, []string, bool) {
matchParent := make([]*projectParent, cst.FindNameMaxResults)
matchCosts := make([]int, cst.FindNameMaxResults)
highestCost := cst.MaxInt
for indexInit := range matchCosts {
matchCosts[indexInit] = cst.MaxInt
}
for indexProject, project := range *parents {
currentCost := tools.EditDistance(searchStr, project.this.Name)
if currentCost == 0 {
matchCosts[0] = currentCost
matchParent[0] = (*parents)[indexProject]
return matchParent, []string{project.this.Name}, true
}
if currentCost < highestCost {
for indexMatchCost, cost := range matchCosts {
if currentCost < cost |
}
}
}
matchStrings := make([]string, 0)
for _, parent := range matchParent {
matchStrings = append(matchStrings, parent.this.Name)
}
return matchParent, matchStrings, false
}
func getIndexNameChoice(items []string) int {
items = append(items, "Cancel")
prompt := promptui.Select{
Label: "Found these projects name. Choose or cancel",
Items: items,
HideHelp: true,
}
indexProjectFind, _, err := prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("PromptUI: failed")
}
if indexProjectFind == cst.FindNameMaxResults {
return -1
}
return indexProjectFind
}
func findExaminer(api42 *reqAPI42.API42, allProjects *projectsPerType, usersLogged *map[uint]*reqAPI42.API42Location, usersLvl21 *[]*reqAPI42.API42User) {
if allProjects == nil {
log.Error().Msg("Prompt: list of projects empty")
return
}
if usersLogged == nil {
log.Error().Msg("Prompt: map of users logged empty")
return
}
prompt := promptui.Select{
Label: "Does your project have a parent",
Items: []string{"Yes", "No"},
HideHelp: true,
}
indexAction, _, err := prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("PromptUI: failed")
}
var realProjectsToSearch *[]*reqAPI42.API42Project
if indexAction == 0 {
parentProjectName := askStringClean("Please, enter the parent project name: ")
parentFind, parentsFindNames, fullMatch := findProjectParentName(parentProjectName, &allProjects.parents)
if fullMatch {
realProjectsToSearch = &(parentFind[0].childs)
} else {
indexChoose := getIndexNameChoice(parentsFindNames)
if indexChoose == -1 {
return
}
realProjectsToSearch = &(parentFind[indexChoose].childs)
}
} else {
realProjectsToSearch = &allProjects.directs
}
projectName := askStringClean("Please, enter the project name: ")
projectsFind, projectsFindNames, fullMatch := findProjectName(projectName, realProjectsToSearch)
var projectSelected *reqAPI42.API42Project
if fullMatch {
projectSelected = projectsFind[0]
} else {
indexChoose := getIndexNameChoice(projectsFindNames)
if indexChoose == -1 {
return
}
projectSelected = projectsFind[indexChoose]
}
projectsUsers := api42.GetUsersOfProjectsUsers((*projectSelected).ID)
if projectsUsers == nil {
return
}
var i uint = 1
if projectsUsers != nil && len(*projectsUsers) > 0 {
fmt.Println("Users which did or are doing \"" + (*projectSelected).Name + "\":")
for _, projectsUsers := range *projectsUsers {
if examinerLogged, ok := (*usersLogged)[projectsUsers.User.ID]; ok {
fmt.Printf("%-2d: %-8s %-8s - %s\n", i, examinerLogged.Host, examinerLogged.User.Login, cst.ProfileUserURL+examinerLogged.User.Login)
i++
}
}
if i == 1 {
fmt.Println("No user which did or is doing \"" + (*projectSelected).Name + "\" is logged in")
}
}
var j uint = 1
if usersLvl21 != nil {
fmt.Println("Users level 21:")
for _, userLvl21 := range *usersLvl21 {
if examinerLogged, ok := (*usersLogged)[userLvl21.ID]; ok {
fmt.Printf("%-2d: %-8s %-8s - %s\n", j, examinerLogged.Host, examinerLogged.User.Login, cst.ProfileUserURL+examinerLogged.User.Login)
j++
}
}
if j == 1 {
fmt.Println("No user level21 is logged in")
}
}
if i + j == 2 {
log.Error().Msg("findExaminer: no examiner available")
}
}
func sortProjectsPerType(api42Projects *[]reqAPI42.API42Project) *projectsPerType {
if api42Projects == nil {
return nil
}
var allProjects projectsPerType
allProjects.parents = make(map[uint]*projectParent)
allProjects.directs = make([]*reqAPI42.API42Project, 0)
for index, project := range *api42Projects {
if project.Parent == nil {
allProjects.directs = append(allProjects.directs, &(*api42Projects)[index])
} else {
projectDeref := (*api42Projects)[index]
if parentMapValue, ok := allProjects.parents[projectDeref.Parent.ID]; !ok {
allProjects.parents[projectDeref.Parent.ID] = &projectParent{projectDeref.Parent, []*reqAPI42.API42Project{&(*api42Projects)[index]}}
} else {
parentMapValue.childs = append(parentMapValue.childs, &(*api42Projects)[index])
}
}
}
return &allProjects
}
func locationsToUsersIDMap(locations *[]reqAPI42.API42Location, me *reqAPI42.API42User) *map[uint]*reqAPI42.API42Location {
if locations == nil {
return nil
}
usersLogged := make(map[uint]*reqAPI42.API42Location)
for index := range *locations {
if (*locations)[index].User.ID == me.ID {
continue
}
usersLogged[(*locations)[index].User.ID] = &(*locations)[index]
}
return &usersLogged
}
func filterLvl21InCursusUser(cursusUsers *[]reqAPI42.API42CursusUser) *[]*reqAPI42.API42User {
if cursusUsers == nil {
return nil
}
usersLvl21 := make([]*reqAPI42.API42User, 0)
for index, cursusUser := range *cursusUsers {
if cursusUser.Level >= 21.0 {
usersLvl21 = append(usersLvl21, &(*cursusUsers)[index].User)
}
}
return &usersLvl21
}
func debugPrintProjectsPerType(allProjects *projectsPerType) {
fmt.Println("###################################")
for _, parent := range allProjects.parents {
fmt.Println(parent.this.Name)
for _, son := range parent.childs {
fmt.Println("-> " + son.Name)
}
fmt.Println("----------------")
}
fmt.Println("+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+")
for _, direct := range allProjects.directs {
fmt.Println(direct.Name)
}
fmt.Println("###################################")
}
func main() {
flags := []interface{}{}
flags = append(flags, flag.Bool("refresh", false, "force to refresh token"))
flags = append(flags, flag.Bool("check-default-values", false, "send a request to verify the default values"))
flags = append(flags, flag.Bool("no-check-lvl21", false, "don't send a request (send by default) to check users lvl 21 logged in"))
flag.Parse()
nonFlags := flag.Args()
if len(nonFlags) > 0 {
flag.Usage()
os.Exit(1)
}
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.Stamp})
fmt.Print(cst.MenuHello)
api42 := reqAPI42.New(flags)
allProjects := sortProjectsPerType(api42.GetProjects())
usersLogged := locationsToUsersIDMap(api42.GetLocations(), api42.GetMe())
var usersLvl21 *[]*reqAPI42.API42User
if *flags[2].(*bool) == false {
usersLvl21 = filterLvl21InCursusUser(api42.GetCursusUsers())
}
var indexAction int
var err error
menuActions := []string{
cst.MenuActionFind,
cst.MenuActionUpdateLocations,
cst.MenuActionUpdateProjects,
cst.MenuActionUpdateUsersLvl21,
cst.MenuActionUpdateCursus,
cst.MenuActionUpdateCampus,
cst.MenuActionRefreshTokens,
cst.MenuActionQuit,
}
for {
prompt := promptui.Select{
Label: "Choose an action",
Items: menuActions,
HideHelp: true,
}
indexAction, _, err = prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("Prompt: failed")
}
switch menuActions[indexAction] {
case cst.MenuActionFind:
findExaminer(api42, allProjects, usersLogged, usersLvl21)
case cst.MenuActionUpdateLocations:
usersLogged = locationsToUsersIDMap(api42.GetLocations(), api42.GetMe())
case cst.MenuActionUpdateProjects:
allProjects = sortProjectsPerType(api42.GetProjects())
case cst.MenuActionUpdateUsersLvl21:
usersLvl21 = filterLvl21InCursusUser(api42.GetCursusUsers())
case cst.MenuActionUpdateCursus:
cursusName := askStringClean("Please, enter the cursus name: ")
api42.UpdateCursus(cursusName)
case cst.MenuActionUpdateCampus:
campusName := askStringClean("Please, enter the campus name: ")
api42.UpdateCampus(campusName)
case cst.MenuActionRefreshTokens:
api42.RefreshToken()
case cst.MenuActionQuit:
fmt.Println("Goodbye!")
os.Exit(0)
default:
log.Fatal().Msg("Prompt: indexAction out of bound")
}
}
}
| {
copy(matchCosts[indexMatchCost+1:], matchCosts[indexMatchCost:])
copy(matchParent[indexMatchCost+1:], matchParent[indexMatchCost:])
matchCosts[indexMatchCost] = currentCost
matchParent[indexMatchCost] = (*parents)[indexProject]
if indexMatchCost+1 == cst.FindNameMaxResults {
highestCost = currentCost
}
break
} | conditional_block |
main.go | package main
import (
"bufio"
"flag"
"fmt"
"github.com/manifoldco/promptui"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/vsteffen/42_api/reqAPI42"
"github.com/vsteffen/42_api/tools"
cst "github.com/vsteffen/42_api/tools/constants"
"os"
"regexp"
"time"
)
type projectParent struct {
this *reqAPI42.API42ProjectParent
childs []*reqAPI42.API42Project
}
type projectsPerType struct {
parents map[uint]*projectParent
directs []*reqAPI42.API42Project
}
func askStringClean(askStr string) string {
fmt.Print(askStr)
scannerStdin := bufio.NewScanner(os.Stdin)
scannerStdin.Scan()
if err := scannerStdin.Err(); err != nil {
log.Fatal().Err(err).Msg("askString: Failed to read user input")
}
regexWhitespace := regexp.MustCompile(`\s+`)
str := regexWhitespace.ReplaceAllString(scannerStdin.Text(), " ")
return str
}
func findProjectName(searchStr string, projects *[]*reqAPI42.API42Project) ([]*reqAPI42.API42Project, []string, bool) {
matchProjects := make([]*reqAPI42.API42Project, cst.FindNameMaxResults)
matchCosts := make([]int, cst.FindNameMaxResults)
highestCost := cst.MaxInt
for indexInit := range matchCosts {
matchCosts[indexInit] = cst.MaxInt
}
for indexProject, project := range *projects {
currentCost := tools.EditDistance(searchStr, project.Name)
if currentCost == 0 {
matchCosts[0] = currentCost
matchProjects[0] = (*projects)[indexProject]
return matchProjects, []string{project.Name}, true
}
if currentCost < highestCost {
for indexMatchCost, cost := range matchCosts {
if currentCost < cost {
copy(matchCosts[indexMatchCost+1:], matchCosts[indexMatchCost:])
copy(matchProjects[indexMatchCost+1:], matchProjects[indexMatchCost:])
matchCosts[indexMatchCost] = currentCost
matchProjects[indexMatchCost] = (*projects)[indexProject]
if indexMatchCost+1 == cst.FindNameMaxResults {
highestCost = currentCost
}
break
}
}
}
}
matchStrings := make([]string, 0)
for _, project := range matchProjects {
if project == nil {
break
}
matchStrings = append(matchStrings, project.Name)
}
return matchProjects, matchStrings, false
}
func findProjectParentName(searchStr string, parents *map[uint]*projectParent) ([]*projectParent, []string, bool) {
matchParent := make([]*projectParent, cst.FindNameMaxResults)
matchCosts := make([]int, cst.FindNameMaxResults)
highestCost := cst.MaxInt
for indexInit := range matchCosts {
matchCosts[indexInit] = cst.MaxInt
}
for indexProject, project := range *parents {
currentCost := tools.EditDistance(searchStr, project.this.Name)
if currentCost == 0 {
matchCosts[0] = currentCost
matchParent[0] = (*parents)[indexProject]
return matchParent, []string{project.this.Name}, true
}
if currentCost < highestCost {
for indexMatchCost, cost := range matchCosts {
if currentCost < cost {
copy(matchCosts[indexMatchCost+1:], matchCosts[indexMatchCost:])
copy(matchParent[indexMatchCost+1:], matchParent[indexMatchCost:])
matchCosts[indexMatchCost] = currentCost
matchParent[indexMatchCost] = (*parents)[indexProject]
if indexMatchCost+1 == cst.FindNameMaxResults {
highestCost = currentCost
}
break
}
}
}
}
matchStrings := make([]string, 0)
for _, parent := range matchParent {
matchStrings = append(matchStrings, parent.this.Name)
}
return matchParent, matchStrings, false
}
func getIndexNameChoice(items []string) int {
items = append(items, "Cancel")
prompt := promptui.Select{
Label: "Found these projects name. Choose or cancel",
Items: items,
HideHelp: true,
}
indexProjectFind, _, err := prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("PromptUI: failed")
}
if indexProjectFind == cst.FindNameMaxResults {
return -1
}
return indexProjectFind
}
func findExaminer(api42 *reqAPI42.API42, allProjects *projectsPerType, usersLogged *map[uint]*reqAPI42.API42Location, usersLvl21 *[]*reqAPI42.API42User) {
if allProjects == nil {
log.Error().Msg("Prompt: list of projects empty")
return
}
if usersLogged == nil {
log.Error().Msg("Prompt: map of users logged empty")
return
}
prompt := promptui.Select{
Label: "Does your project have a parent",
Items: []string{"Yes", "No"},
HideHelp: true,
}
indexAction, _, err := prompt.Run()
if err != nil { | log.Fatal().Err(err).Msg("PromptUI: failed")
}
var realProjectsToSearch *[]*reqAPI42.API42Project
if indexAction == 0 {
parentProjectName := askStringClean("Please, enter the parent project name: ")
parentFind, parentsFindNames, fullMatch := findProjectParentName(parentProjectName, &allProjects.parents)
if fullMatch {
realProjectsToSearch = &(parentFind[0].childs)
} else {
indexChoose := getIndexNameChoice(parentsFindNames)
if indexChoose == -1 {
return
}
realProjectsToSearch = &(parentFind[indexChoose].childs)
}
} else {
realProjectsToSearch = &allProjects.directs
}
projectName := askStringClean("Please, enter the project name: ")
projectsFind, projectsFindNames, fullMatch := findProjectName(projectName, realProjectsToSearch)
var projectSelected *reqAPI42.API42Project
if fullMatch {
projectSelected = projectsFind[0]
} else {
indexChoose := getIndexNameChoice(projectsFindNames)
if indexChoose == -1 {
return
}
projectSelected = projectsFind[indexChoose]
}
projectsUsers := api42.GetUsersOfProjectsUsers((*projectSelected).ID)
if projectsUsers == nil {
return
}
var i uint = 1
if projectsUsers != nil && len(*projectsUsers) > 0 {
fmt.Println("Users which did or are doing \"" + (*projectSelected).Name + "\":")
for _, projectsUsers := range *projectsUsers {
if examinerLogged, ok := (*usersLogged)[projectsUsers.User.ID]; ok {
fmt.Printf("%-2d: %-8s %-8s - %s\n", i, examinerLogged.Host, examinerLogged.User.Login, cst.ProfileUserURL+examinerLogged.User.Login)
i++
}
}
if i == 1 {
fmt.Println("No user which did or is doing \"" + (*projectSelected).Name + "\" is logged in")
}
}
var j uint = 1
if usersLvl21 != nil {
fmt.Println("Users level 21:")
for _, userLvl21 := range *usersLvl21 {
if examinerLogged, ok := (*usersLogged)[userLvl21.ID]; ok {
fmt.Printf("%-2d: %-8s %-8s - %s\n", j, examinerLogged.Host, examinerLogged.User.Login, cst.ProfileUserURL+examinerLogged.User.Login)
j++
}
}
if j == 1 {
fmt.Println("No user level21 is logged in")
}
}
if i + j == 2 {
log.Error().Msg("findExaminer: no examiner available")
}
}
func sortProjectsPerType(api42Projects *[]reqAPI42.API42Project) *projectsPerType {
if api42Projects == nil {
return nil
}
var allProjects projectsPerType
allProjects.parents = make(map[uint]*projectParent)
allProjects.directs = make([]*reqAPI42.API42Project, 0)
for index, project := range *api42Projects {
if project.Parent == nil {
allProjects.directs = append(allProjects.directs, &(*api42Projects)[index])
} else {
projectDeref := (*api42Projects)[index]
if parentMapValue, ok := allProjects.parents[projectDeref.Parent.ID]; !ok {
allProjects.parents[projectDeref.Parent.ID] = &projectParent{projectDeref.Parent, []*reqAPI42.API42Project{&(*api42Projects)[index]}}
} else {
parentMapValue.childs = append(parentMapValue.childs, &(*api42Projects)[index])
}
}
}
return &allProjects
}
func locationsToUsersIDMap(locations *[]reqAPI42.API42Location, me *reqAPI42.API42User) *map[uint]*reqAPI42.API42Location {
if locations == nil {
return nil
}
usersLogged := make(map[uint]*reqAPI42.API42Location)
for index := range *locations {
if (*locations)[index].User.ID == me.ID {
continue
}
usersLogged[(*locations)[index].User.ID] = &(*locations)[index]
}
return &usersLogged
}
func filterLvl21InCursusUser(cursusUsers *[]reqAPI42.API42CursusUser) *[]*reqAPI42.API42User {
if cursusUsers == nil {
return nil
}
usersLvl21 := make([]*reqAPI42.API42User, 0)
for index, cursusUser := range *cursusUsers {
if cursusUser.Level >= 21.0 {
usersLvl21 = append(usersLvl21, &(*cursusUsers)[index].User)
}
}
return &usersLvl21
}
func debugPrintProjectsPerType(allProjects *projectsPerType) {
fmt.Println("###################################")
for _, parent := range allProjects.parents {
fmt.Println(parent.this.Name)
for _, son := range parent.childs {
fmt.Println("-> " + son.Name)
}
fmt.Println("----------------")
}
fmt.Println("+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+")
for _, direct := range allProjects.directs {
fmt.Println(direct.Name)
}
fmt.Println("###################################")
}
func main() {
flags := []interface{}{}
flags = append(flags, flag.Bool("refresh", false, "force to refresh token"))
flags = append(flags, flag.Bool("check-default-values", false, "send a request to verify the default values"))
flags = append(flags, flag.Bool("no-check-lvl21", false, "don't send a request (send by default) to check users lvl 21 logged in"))
flag.Parse()
nonFlags := flag.Args()
if len(nonFlags) > 0 {
flag.Usage()
os.Exit(1)
}
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.Stamp})
fmt.Print(cst.MenuHello)
api42 := reqAPI42.New(flags)
allProjects := sortProjectsPerType(api42.GetProjects())
usersLogged := locationsToUsersIDMap(api42.GetLocations(), api42.GetMe())
var usersLvl21 *[]*reqAPI42.API42User
if *flags[2].(*bool) == false {
usersLvl21 = filterLvl21InCursusUser(api42.GetCursusUsers())
}
var indexAction int
var err error
menuActions := []string{
cst.MenuActionFind,
cst.MenuActionUpdateLocations,
cst.MenuActionUpdateProjects,
cst.MenuActionUpdateUsersLvl21,
cst.MenuActionUpdateCursus,
cst.MenuActionUpdateCampus,
cst.MenuActionRefreshTokens,
cst.MenuActionQuit,
}
for {
prompt := promptui.Select{
Label: "Choose an action",
Items: menuActions,
HideHelp: true,
}
indexAction, _, err = prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("Prompt: failed")
}
switch menuActions[indexAction] {
case cst.MenuActionFind:
findExaminer(api42, allProjects, usersLogged, usersLvl21)
case cst.MenuActionUpdateLocations:
usersLogged = locationsToUsersIDMap(api42.GetLocations(), api42.GetMe())
case cst.MenuActionUpdateProjects:
allProjects = sortProjectsPerType(api42.GetProjects())
case cst.MenuActionUpdateUsersLvl21:
usersLvl21 = filterLvl21InCursusUser(api42.GetCursusUsers())
case cst.MenuActionUpdateCursus:
cursusName := askStringClean("Please, enter the cursus name: ")
api42.UpdateCursus(cursusName)
case cst.MenuActionUpdateCampus:
campusName := askStringClean("Please, enter the campus name: ")
api42.UpdateCampus(campusName)
case cst.MenuActionRefreshTokens:
api42.RefreshToken()
case cst.MenuActionQuit:
fmt.Println("Goodbye!")
os.Exit(0)
default:
log.Fatal().Msg("Prompt: indexAction out of bound")
}
}
} | random_line_split | |
main.go | package main
import (
"bufio"
"flag"
"fmt"
"github.com/manifoldco/promptui"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/vsteffen/42_api/reqAPI42"
"github.com/vsteffen/42_api/tools"
cst "github.com/vsteffen/42_api/tools/constants"
"os"
"regexp"
"time"
)
type projectParent struct {
this *reqAPI42.API42ProjectParent
childs []*reqAPI42.API42Project
}
type projectsPerType struct {
parents map[uint]*projectParent
directs []*reqAPI42.API42Project
}
func askStringClean(askStr string) string {
fmt.Print(askStr)
scannerStdin := bufio.NewScanner(os.Stdin)
scannerStdin.Scan()
if err := scannerStdin.Err(); err != nil {
log.Fatal().Err(err).Msg("askString: Failed to read user input")
}
regexWhitespace := regexp.MustCompile(`\s+`)
str := regexWhitespace.ReplaceAllString(scannerStdin.Text(), " ")
return str
}
func | (searchStr string, projects *[]*reqAPI42.API42Project) ([]*reqAPI42.API42Project, []string, bool) {
matchProjects := make([]*reqAPI42.API42Project, cst.FindNameMaxResults)
matchCosts := make([]int, cst.FindNameMaxResults)
highestCost := cst.MaxInt
for indexInit := range matchCosts {
matchCosts[indexInit] = cst.MaxInt
}
for indexProject, project := range *projects {
currentCost := tools.EditDistance(searchStr, project.Name)
if currentCost == 0 {
matchCosts[0] = currentCost
matchProjects[0] = (*projects)[indexProject]
return matchProjects, []string{project.Name}, true
}
if currentCost < highestCost {
for indexMatchCost, cost := range matchCosts {
if currentCost < cost {
copy(matchCosts[indexMatchCost+1:], matchCosts[indexMatchCost:])
copy(matchProjects[indexMatchCost+1:], matchProjects[indexMatchCost:])
matchCosts[indexMatchCost] = currentCost
matchProjects[indexMatchCost] = (*projects)[indexProject]
if indexMatchCost+1 == cst.FindNameMaxResults {
highestCost = currentCost
}
break
}
}
}
}
matchStrings := make([]string, 0)
for _, project := range matchProjects {
if project == nil {
break
}
matchStrings = append(matchStrings, project.Name)
}
return matchProjects, matchStrings, false
}
// findProjectParentName fuzzy-searches parent projects by name using edit
// distance and keeps the cst.FindNameMaxResults cheapest matches sorted by
// ascending cost. Returns the matched parents, their names, and whether an
// exact match (cost 0) was found — in which case only slot 0 is meaningful.
func findProjectParentName(searchStr string, parents *map[uint]*projectParent) ([]*projectParent, []string, bool) {
	matchParent := make([]*projectParent, cst.FindNameMaxResults)
	matchCosts := make([]int, cst.FindNameMaxResults)
	highestCost := cst.MaxInt
	// Every slot starts at the worst possible cost.
	for indexInit := range matchCosts {
		matchCosts[indexInit] = cst.MaxInt
	}
	for indexProject, project := range *parents {
		currentCost := tools.EditDistance(searchStr, project.this.Name)
		// Exact name match: short-circuit with a single result.
		if currentCost == 0 {
			matchCosts[0] = currentCost
			matchParent[0] = (*parents)[indexProject]
			return matchParent, []string{project.this.Name}, true
		}
		if currentCost < highestCost {
			// Insertion sort: shift worse entries right and insert in order.
			for indexMatchCost, cost := range matchCosts {
				if currentCost < cost {
					copy(matchCosts[indexMatchCost+1:], matchCosts[indexMatchCost:])
					copy(matchParent[indexMatchCost+1:], matchParent[indexMatchCost:])
					matchCosts[indexMatchCost] = currentCost
					matchParent[indexMatchCost] = (*parents)[indexProject]
					if indexMatchCost+1 == cst.FindNameMaxResults {
						highestCost = currentCost
					}
					break
				}
			}
		}
	}
	matchStrings := make([]string, 0)
	for _, parent := range matchParent {
		// BUG FIX: stop at the first empty slot. Previously a nil entry was
		// dereferenced (parent.this.Name) whenever fewer than
		// cst.FindNameMaxResults parents matched, crashing the program.
		// The sibling findProjectName already guards this case.
		if parent == nil {
			break
		}
		matchStrings = append(matchStrings, parent.this.Name)
	}
	return matchParent, matchStrings, false
}
// getIndexNameChoice shows a selection menu over the given project names
// plus a trailing "Cancel" entry. Returns the chosen index into items, or
// -1 when the user picked Cancel. Aborts the program if the prompt fails.
func getIndexNameChoice(items []string) int {
	items = append(items, "Cancel")
	prompt := promptui.Select{
		Label: "Found these projects name. Choose or cancel",
		Items: items,
		HideHelp: true,
	}
	indexProjectFind, _, err := prompt.Run()
	if err != nil {
		log.Fatal().Err(err).Msg("PromptUI: failed")
	}
	// BUG FIX: "Cancel" is always the LAST entry of items, which equals
	// cst.FindNameMaxResults only when the caller passed a full result list.
	// With a shorter list the old comparison let Cancel fall through and the
	// caller then dereferenced a nil slot of its match slice.
	if indexProjectFind == len(items)-1 {
		return -1
	}
	return indexProjectFind
}
// findExaminer interactively helps pick an exam examiner: the user selects a
// project (optionally narrowed through its parent project first), then every
// currently-logged user who did or is doing that project is printed,
// followed by the logged users of level 21+. Logs an error when no examiner
// candidate is available at all.
func findExaminer(api42 *reqAPI42.API42, allProjects *projectsPerType, usersLogged *map[uint]*reqAPI42.API42Location, usersLvl21 *[]*reqAPI42.API42User) {
if allProjects == nil {
log.Error().Msg("Prompt: list of projects empty")
return
}
if usersLogged == nil {
log.Error().Msg("Prompt: map of users logged empty")
return
}
prompt := promptui.Select{
Label: "Does your project have a parent",
Items: []string{"Yes", "No"},
HideHelp: true,
}
indexAction, _, err := prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("PromptUI: failed")
}
// Narrow the search space to the chosen parent's children, or to the
// parentless ("direct") projects.
var realProjectsToSearch *[]*reqAPI42.API42Project
if indexAction == 0 {
parentProjectName := askStringClean("Please, enter the parent project name: ")
parentFind, parentsFindNames, fullMatch := findProjectParentName(parentProjectName, &allProjects.parents)
if fullMatch {
realProjectsToSearch = &(parentFind[0].childs)
} else {
indexChoose := getIndexNameChoice(parentsFindNames)
if indexChoose == -1 {
return
}
realProjectsToSearch = &(parentFind[indexChoose].childs)
}
} else {
realProjectsToSearch = &allProjects.directs
}
projectName := askStringClean("Please, enter the project name: ")
projectsFind, projectsFindNames, fullMatch := findProjectName(projectName, realProjectsToSearch)
var projectSelected *reqAPI42.API42Project
if fullMatch {
projectSelected = projectsFind[0]
} else {
indexChoose := getIndexNameChoice(projectsFindNames)
if indexChoose == -1 {
return
}
projectSelected = projectsFind[indexChoose]
}
projectsUsers := api42.GetUsersOfProjectsUsers((*projectSelected).ID)
if projectsUsers == nil {
return
}
// i is the 1-based display index of printed project users; it stays 1
// when none of them is currently logged in.
var i uint = 1
// NOTE(review): projectsUsers != nil is redundant here — the nil case
// already returned above.
if projectsUsers != nil && len(*projectsUsers) > 0 {
fmt.Println("Users which did or are doing \"" + (*projectSelected).Name + "\":")
// NOTE(review): the loop variable shadows the outer projectsUsers
// slice — confusing but harmless.
for _, projectsUsers := range *projectsUsers {
if examinerLogged, ok := (*usersLogged)[projectsUsers.User.ID]; ok {
fmt.Printf("%-2d: %-8s %-8s - %s\n", i, examinerLogged.Host, examinerLogged.User.Login, cst.ProfileUserURL+examinerLogged.User.Login)
i++
}
}
if i == 1 {
fmt.Println("No user which did or is doing \"" + (*projectSelected).Name + "\" is logged in")
}
}
// j is the same kind of display index for level-21 users.
var j uint = 1
if usersLvl21 != nil {
fmt.Println("Users level 21:")
for _, userLvl21 := range *usersLvl21 {
if examinerLogged, ok := (*usersLogged)[userLvl21.ID]; ok {
fmt.Printf("%-2d: %-8s %-8s - %s\n", j, examinerLogged.Host, examinerLogged.User.Login, cst.ProfileUserURL+examinerLogged.User.Login)
j++
}
}
if j == 1 {
fmt.Println("No user level21 is logged in")
}
}
// Both counters still at 1 means neither list printed anyone.
if i + j == 2 {
log.Error().Msg("findExaminer: no examiner available")
}
}
// sortProjectsPerType partitions the flat project list into parent-grouped
// projects and direct (parentless) projects. Pointers are taken into the
// backing array of *api42Projects via the index, so they remain valid as
// long as the caller keeps that slice alive (this function never appends to
// it, so no reallocation happens here).
func sortProjectsPerType(api42Projects *[]reqAPI42.API42Project) *projectsPerType {
if api42Projects == nil {
return nil
}
var allProjects projectsPerType
allProjects.parents = make(map[uint]*projectParent)
allProjects.directs = make([]*reqAPI42.API42Project, 0)
for index, project := range *api42Projects {
if project.Parent == nil {
allProjects.directs = append(allProjects.directs, &(*api42Projects)[index])
} else {
projectDeref := (*api42Projects)[index]
// The first child creates the parent entry; later children append.
if parentMapValue, ok := allProjects.parents[projectDeref.Parent.ID]; !ok {
allProjects.parents[projectDeref.Parent.ID] = &projectParent{projectDeref.Parent, []*reqAPI42.API42Project{&(*api42Projects)[index]}}
} else {
parentMapValue.childs = append(parentMapValue.childs, &(*api42Projects)[index])
}
}
}
return &allProjects
}
// locationsToUsersIDMap indexes the given locations by the ID of the user
// logged at each one, skipping the caller's own session (me). Returns nil
// when locations is nil.
func locationsToUsersIDMap(locations *[]reqAPI42.API42Location, me *reqAPI42.API42User) *map[uint]*reqAPI42.API42Location {
	if locations == nil {
		return nil
	}
	usersLogged := make(map[uint]*reqAPI42.API42Location)
	for index := range *locations {
		location := &(*locations)[index]
		if location.User.ID != me.ID {
			usersLogged[location.User.ID] = location
		}
	}
	return &usersLogged
}
// filterLvl21InCursusUser keeps the users whose cursus level is at least 21.
// Pointers refer into the backing array of *cursusUsers; returns nil when
// the input is nil.
func filterLvl21InCursusUser(cursusUsers *[]reqAPI42.API42CursusUser) *[]*reqAPI42.API42User {
	if cursusUsers == nil {
		return nil
	}
	usersLvl21 := make([]*reqAPI42.API42User, 0)
	for index := range *cursusUsers {
		if (*cursusUsers)[index].Level >= 21.0 {
			usersLvl21 = append(usersLvl21, &(*cursusUsers)[index].User)
		}
	}
	return &usersLvl21
}
// debugPrintProjectsPerType dumps the project partition to stdout for
// debugging: each parent with its children first, then the direct
// (parentless) projects.
func debugPrintProjectsPerType(allProjects *projectsPerType) {
	border := "###################################"
	fmt.Println(border)
	for _, parent := range allProjects.parents {
		fmt.Println(parent.this.Name)
		for _, son := range parent.childs {
			fmt.Println("-> " + son.Name)
		}
		fmt.Println("----------------")
	}
	fmt.Println("+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+")
	for _, direct := range allProjects.directs {
		fmt.Println(direct.Name)
	}
	fmt.Println(border)
}
// main wires up the CLI: parses flags, authenticates against the 42 API,
// preloads projects / logged locations / (optionally) level-21 users, then
// loops over an interactive action menu until the user quits.
func main() {
// flags[0]=refresh, flags[1]=check-default-values, flags[2]=no-check-lvl21
flags := []interface{}{}
flags = append(flags, flag.Bool("refresh", false, "force to refresh token"))
flags = append(flags, flag.Bool("check-default-values", false, "send a request to verify the default values"))
flags = append(flags, flag.Bool("no-check-lvl21", false, "don't send a request (send by default) to check users lvl 21 logged in"))
flag.Parse()
// This tool takes no positional arguments.
nonFlags := flag.Args()
if len(nonFlags) > 0 {
flag.Usage()
os.Exit(1)
}
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.Stamp})
fmt.Print(cst.MenuHello)
api42 := reqAPI42.New(flags)
allProjects := sortProjectsPerType(api42.GetProjects())
usersLogged := locationsToUsersIDMap(api42.GetLocations(), api42.GetMe())
var usersLvl21 *[]*reqAPI42.API42User
// Skip the extra request when -no-check-lvl21 was given.
if *flags[2].(*bool) == false {
usersLvl21 = filterLvl21InCursusUser(api42.GetCursusUsers())
}
var indexAction int
var err error
menuActions := []string{
cst.MenuActionFind,
cst.MenuActionUpdateLocations,
cst.MenuActionUpdateProjects,
cst.MenuActionUpdateUsersLvl21,
cst.MenuActionUpdateCursus,
cst.MenuActionUpdateCampus,
cst.MenuActionRefreshTokens,
cst.MenuActionQuit,
}
// Main interactive loop: only MenuActionQuit (or a fatal error) exits.
for {
prompt := promptui.Select{
Label: "Choose an action",
Items: menuActions,
HideHelp: true,
}
indexAction, _, err = prompt.Run()
if err != nil {
log.Fatal().Err(err).Msg("Prompt: failed")
}
switch menuActions[indexAction] {
case cst.MenuActionFind:
findExaminer(api42, allProjects, usersLogged, usersLvl21)
case cst.MenuActionUpdateLocations:
usersLogged = locationsToUsersIDMap(api42.GetLocations(), api42.GetMe())
case cst.MenuActionUpdateProjects:
allProjects = sortProjectsPerType(api42.GetProjects())
case cst.MenuActionUpdateUsersLvl21:
usersLvl21 = filterLvl21InCursusUser(api42.GetCursusUsers())
case cst.MenuActionUpdateCursus:
cursusName := askStringClean("Please, enter the cursus name: ")
api42.UpdateCursus(cursusName)
case cst.MenuActionUpdateCampus:
campusName := askStringClean("Please, enter the campus name: ")
api42.UpdateCampus(campusName)
case cst.MenuActionRefreshTokens:
api42.RefreshToken()
case cst.MenuActionQuit:
fmt.Println("Goodbye!")
os.Exit(0)
default:
log.Fatal().Msg("Prompt: indexAction out of bound")
}
}
}
| findProjectName | identifier_name |
analyserUtil.go | package analyser
import (
"fmt"
"sort"
"strings"
"github.com/helloworldpark/govaluate"
"github.com/helloworldpark/tickle-stock-watcher/commons"
"github.com/helloworldpark/tickle-stock-watcher/structs"
"github.com/sdcoffey/techan"
)
// Operator Precedence
// Higher value binds tighter. Note that precedenceOf (below) returns 8 for
// function names (VARIABLE) and 7 for unary prefix before consulting this
// table.
var opPrecedence = map[string]int{
"*": 6, "/": 6, "**": 6,
"+": 5, "-": 5,
"<": 4, "<=": 4, ">": 4, ">=": 4, "==": 4,
"(": 3, ")": 3,
"&&": 2, "||": 2,
}
// Indicator Map
// Function Name: Indicator Generator Function
var indicatorMap = make(map[string]indicatorGen)
// Rule Map
// Function Name: Rule Generator Function
var ruleMap = make(map[string]ruleGen)
// Error Convenience
// newError prefixes every error message with the "Analyser" tag.
var newError = commons.NewTaggedError("Analyser")
// Cache functions
// init populates indicatorMap and ruleMap exactly once at package load.
func init() {
cacheIndicators()
cacheRules()
}
// cacheIndicators registers every supported indicator generator in
// indicatorMap, keyed by the name used in strategy statements.
// NOTE(review): tidyTokens lower-cases function names before lookup, so the
// mixed-case aliases registered here (e.g. "moneyFlowIndex") are never hit.
func cacheIndicators() {
	// modifierAppender registers an arithmetic operator (+ - * /) that
	// combines two indicators into a new one.
	modifierAppender := func(operator string, ctor func(lhs, rhs techan.Indicator) techan.Indicator) {
		f := func(series *techan.TimeSeries, args ...interface{}) (techan.Indicator, error) {
			if len(args) != 2 {
				// BUG FIX: the old message said "need more or equal to 2"
				// although the check demands exactly 2 arguments.
				return nil, newError(fmt.Sprintf("[+-*/] Wrong number of parameters: got %d, need exactly 2", len(args)))
			}
			lhs, ok := args[0].(techan.Indicator)
			if !ok {
				return nil, newError(fmt.Sprintf("[+-*/] First argument must be of type techan.Indicator, you are %v", args[0]))
			}
			rhs, ok := args[1].(techan.Indicator)
			if !ok {
				return nil, newError(fmt.Sprintf("[+-*/] Second argument must be of type techan.Indicator, you are %v", args[1]))
			}
			return ctor(lhs, rhs), nil
		}
		indicatorMap[operator] = f
	}
	modifierAppender("+", newPlusIndicator)
	modifierAppender("-", newMinusIndicator)
	modifierAppender("*", newMultiplyIndicator)
	modifierAppender("/", newDivideIndicator)
	// MACD
	indicatorMap["macd"] = makeMACD(false)
	indicatorMap["macdhist"] = makeMACD(true)
	indicatorMap["macdoscillator"] = makeMACD(true)
	// RSI
	indicatorMap["rsi"] = makeRSI()
	// Close Price
	funcClose := makeClosePrice()
	indicatorMap["close"] = funcClose
	indicatorMap["price"] = funcClose
	indicatorMap["closeprice"] = funcClose
	// Increase
	indicatorMap["increase"] = makeIncrease()
	// Local Extrema
	indicatorMap["extrema"] = makeExtrema()
	// Money Flow Index
	funcMoneyFlow := makeMoneyFlowIndex()
	indicatorMap["moneyflowindex"] = funcMoneyFlow
	indicatorMap["moneyFlowIndex"] = funcMoneyFlow
	indicatorMap["moneyflow"] = funcMoneyFlow
	indicatorMap["moneyFlow"] = funcMoneyFlow
	indicatorMap["mFlow"] = funcMoneyFlow
	indicatorMap["mflow"] = funcMoneyFlow
	// Zero
	funcIsZero := makeIsZero()
	indicatorMap["isZero"] = funcIsZero
	indicatorMap["iszero"] = funcIsZero
	indicatorMap["zero"] = funcIsZero
}
// cacheRules registers every rule generator in ruleMap: logical combinators
// (&& ||) over two rules, and comparison operators over two indicators.
func cacheRules() {
	// appendRuleComparer registers an operator combining two techan.Rule.
	appendRuleComparer := func(op string, ctor func(lhs, rhs techan.Rule) techan.Rule) {
		f := func(args ...interface{}) (techan.Rule, error) {
			if len(args) != 2 {
				return nil, newError(fmt.Sprintf("Arguments for rule '%s' must be 2, you are %d", op, len(args)))
			}
			r1, ok := args[0].(techan.Rule)
			if !ok {
				return nil, newError(fmt.Sprintf("First argument must be of type techan.Rule, you are %v", args[0]))
			}
			r2, ok := args[1].(techan.Rule)
			if !ok {
				return nil, newError(fmt.Sprintf("Second argument must be of type techan.Rule, you are %v", args[1]))
			}
			return ctor(r1, r2), nil
		}
		ruleMap[op] = f
	}
	appendRuleComparer("&&", techan.And)
	appendRuleComparer("||", techan.Or)
	// appendIndicatorComparer registers a comparison operator producing a
	// rule from two techan.Indicator values.
	appendIndicatorComparer := func(op string, ctor func(lhs, rhs techan.Indicator) techan.Rule) {
		f := func(args ...interface{}) (techan.Rule, error) {
			if len(args) != 2 {
				return nil, newError(fmt.Sprintf("Arguments for rule '%s' must be 2, you are %d", op, len(args)))
			}
			r1, ok := args[0].(techan.Indicator)
			if !ok {
				// BUG FIX: these two messages used to claim techan.Rule even
				// though the type assertion requires techan.Indicator.
				return nil, newError(fmt.Sprintf("First argument must be of type techan.Indicator, you are %v", args[0]))
			}
			r2, ok := args[1].(techan.Indicator)
			if !ok {
				return nil, newError(fmt.Sprintf("Second argument must be of type techan.Indicator, you are %v", args[1]))
			}
			return ctor(r1, r2), nil
		}
		ruleMap[op] = f
	}
	appendIndicatorComparer("<=", NewCrossLTEIndicatorRule)
	appendIndicatorComparer("<", NewCrossLTIndicatorRule)
	appendIndicatorComparer(">=", NewCrossGTEIndicatorRule)
	appendIndicatorComparer(">", NewCrossGTIndicatorRule)
	appendIndicatorComparer("==", NewCrossEqualIndicatorRule)
}
// Utility functions to parse strategy
// tidyTokens normalises a parsed token stream in place: function names
// (VARIABLE tokens) are lower-cased and validated against indicatorMap, and
// clause tokens get their textual "(" / ")" values restored. Returns an
// error for any unknown function name.
func tidyTokens(tokens []token) ([]token, error) {
for i := range tokens {
t := &(tokens[i])
if t.Kind == govaluate.VARIABLE {
// Change function name to lower case
t.Value = strings.ToLower(t.Value.(string))
_, ok := indicatorMap[t.Value.(string)]
if !ok {
return nil, newError(fmt.Sprintf("Unsupported function used: %s", t.Value.(string)))
}
} else if t.Kind == govaluate.CLAUSE {
t.Value = "("
} else if t.Kind == govaluate.CLAUSE_CLOSE {
t.Value = ")"
}
}
return tokens, nil
}
// parseTokens lexes a strategy statement into govaluate tokens; no
// user-defined functions are passed to the parser.
func parseTokens(statement string) ([]token, error) {
return govaluate.ParseTokens(statement, nil)
}
// function pairs a parsed token with the number of arguments it consumes
// when evaluated in postfix order.
type function struct {
t token
argc int
}
// newFunction wraps a token with its argument count: literals and clauses
// consume none, a prefix operator one, a function (VARIABLE) the supplied
// argc, and every other operator two.
func newFunction(t token, argc int) *function {
	count := 2
	switch t.Kind {
	case govaluate.NUMERIC, govaluate.CLAUSE, govaluate.CLAUSE_CLOSE:
		count = 0
	case govaluate.PREFIX:
		count = 1
	case govaluate.VARIABLE:
		count = argc
	}
	return &function{t: t, argc: count}
}
// Operates recursively.
// findFuncArgumentCount walks tokens[startIdx..endIdx] and counts, for each
// function-name token, how many arguments it receives. clauses maps an
// opening-clause index to its closing index. Returns the per-token counts
// and how many tokens were consumed.
// NOTE(review): the declared return type is map[token]int while the
// literals below are map[govaluate.ExpressionToken]int — token is
// presumably an alias declared elsewhere in the package; confirm.
func findFuncArgumentCount(tokens *[]token, clauses map[int]int, startIdx, endIdx int) (map[token]int, int, error) {
if startIdx == len(*tokens)-1 {
return make(map[govaluate.ExpressionToken]int), 0, nil
}
result := make(map[govaluate.ExpressionToken]int)
startedSearch := false
tokenIdx := startIdx
fcnNameIdx := -1
for tokenIdx <= endIdx {
t := (*tokens)[tokenIdx]
switch t.Kind {
case govaluate.VARIABLE:
if startedSearch {
// A nested function call: recurse over its clause span, merge
// its counts, and treat it as one argument of the current
// function.
subEndIdx := clauses[tokenIdx+1]
subFuncArgs, idxToSkip, err := findFuncArgumentCount(tokens, clauses, tokenIdx, subEndIdx)
if err != nil {
return nil, (tokenIdx + 1 - startIdx), err
}
for subFunc := range subFuncArgs {
subArgc := subFuncArgs[subFunc]
result[subFunc] = subArgc
}
result[(*tokens)[fcnNameIdx]]++
tokenIdx += idxToSkip
} else {
// Parentheses may be omitted when a function takes no
// arguments; handle that exception here.
if tokenIdx < endIdx && (*tokens)[tokenIdx+1].Kind != govaluate.CLAUSE {
result[t] = 0
tokenIdx++
continue
}
startedSearch = true
fcnNameIdx = tokenIdx
result[t] = 0
tokenIdx++
}
case govaluate.NUMERIC:
// A numeric literal counts as one argument of the open function.
if startedSearch {
result[(*tokens)[fcnNameIdx]]++
}
tokenIdx++
case govaluate.CLAUSE_CLOSE: // stop for a function, can proceed
startedSearch = false
fcnNameIdx = -1
tokenIdx++
default:
tokenIdx++
}
}
return result, tokenIdx - startIdx, nil
}
// clausePair records the token indices of a matching pair of parentheses.
// (The previous "clauseMap: true if clause" note described an older design.)
type clausePair struct {
openIdx int
closeIdx int
}
// inspectClausePairs validates that parentheses in tokens are balanced and
// returns a map from each closing-clause token index to its open/close pair.
// Unbalanced input yields an error and a nil map.
func inspectClausePairs(tokens *[]token) (closeMap map[int]*clausePair, err error) {
	closeMap = make(map[int]*clausePair)
	var openStack []*clausePair
	for idx, tok := range *tokens {
		switch tok.Kind {
		case govaluate.CLAUSE:
			openStack = append(openStack, &clausePair{idx, -1})
		case govaluate.CLAUSE_CLOSE:
			if len(openStack) == 0 {
				return nil, newError("Invalid pairing of clauses: Pairs do not match.")
			}
			top := openStack[len(openStack)-1]
			openStack = openStack[:len(openStack)-1]
			top.closeIdx = idx
			closeMap[idx] = top
		}
	}
	if len(openStack) > 0 {
		return nil, newError(fmt.Sprintf("Invalid pairing of clauses: Some clauses are left(%v)", tokens))
	}
	return closeMap, nil
}
// reorderTokenByPostfix converts an infix token stream to postfix
// (reverse-Polish) order with a shunting-yard-style algorithm, after
// stripping redundant grouping parentheses, and finally annotates every
// function with its argument count.
func reorderTokenByPostfix(tokens []token) ([]function, error) {
// Convert tokens into techan strategy
// Tokens are reordered by postfix notation
// operators:
// functions: 8
// -: 7(Negation)
// * /: 6
// + -: 5
// < <= == >= >: 4
// && ||: 3
// ( ): 2
postfixToken := make([]function, 0)
operatorStack := make([]*function, 0)
closeClauseMap, _ := inspectClausePairs(&tokens)
// Trim clauses that add nothing: a pair opening at position 0 or right
// after a comparator/logical operator is pure grouping.
clauseList := make([]*clausePair, 0)
for _, v := range closeClauseMap {
clauseList = append(clauseList, v)
}
sort.Slice(clauseList, func(i, j int) bool {
return clauseList[i].openIdx < clauseList[j].openIdx
})
for _, pair := range clauseList {
dummy := false
if pair.openIdx == 0 {
dummy = true
} else if tokens[pair.openIdx-1].Kind == govaluate.COMPARATOR {
dummy = true
} else if tokens[pair.openIdx-1].Kind == govaluate.LOGICALOP {
dummy = true
}
if dummy {
// Drop the close token first, then the open token, and shift every
// other recorded pair's indices accordingly.
tokens = append(tokens[:pair.closeIdx], tokens[pair.closeIdx+1:]...)
tokens = append(tokens[:pair.openIdx], tokens[pair.openIdx+1:]...)
for _, subPair := range clauseList {
subPair.openIdx--
subPair.closeIdx--
if pair.closeIdx < subPair.closeIdx {
subPair.closeIdx--
}
if pair.closeIdx < subPair.openIdx {
subPair.openIdx--
}
}
}
}
// Re-scan: indices in the old map are stale after trimming.
closeClauseMap, _ = inspectClausePairs(&tokens)
for i := range tokens {
t := tokens[i]
switch t.Kind {
case govaluate.NUMERIC:
postfixToken = append(postfixToken, *newFunction(t, 0))
case govaluate.COMPARATOR, govaluate.LOGICALOP, govaluate.VARIABLE, govaluate.PREFIX, govaluate.MODIFIER:
p := precedenceOf(t)
for j := len(operatorStack) - 1; j >= 0; j-- {
o := operatorStack[j]
// If this operator binds tighter than the stack top (larger
// number), push it; otherwise keep popping stack entries onto
// postfixToken until it does.
op := precedenceOf(o.t)
if p > op {
break
} else {
postfixToken = append(postfixToken, *o)
operatorStack = operatorStack[:j]
}
}
operatorStack = append(operatorStack, newFunction(t, 0))
case govaluate.CLAUSE:
operatorStack = append(operatorStack, newFunction(t, 0))
case govaluate.CLAUSE_CLOSE:
// Pop until the matching open clause is found.
for {
o := operatorStack[len(operatorStack)-1]
operatorStack = operatorStack[:len(operatorStack)-1]
if o.t.Kind == govaluate.CLAUSE {
break
} else {
postfixToken = append(postfixToken, *o)
}
}
openClauseIdx := closeClauseMap[i].openIdx
// A function preceding its argument clause is popped from the
// operator stack onto the postfix stack as well.
if openClauseIdx-1 >= 0 && tokens[openClauseIdx-1].Kind == govaluate.VARIABLE {
o := operatorStack[len(operatorStack)-1]
operatorStack = operatorStack[:len(operatorStack)-1]
postfixToken = append(postfixToken, *o)
}
case govaluate.SEPARATOR:
continue
default:
return nil, newError(fmt.Sprintf("Invalid token: %v", t))
}
}
// Flush whatever operators remain (clauses are discarded).
for j := len(operatorStack) - 1; j >= 0; j-- {
if operatorStack[j].t.Kind != govaluate.CLAUSE && operatorStack[j].t.Kind != govaluate.CLAUSE_CLOSE {
postfixToken = append(postfixToken, *operatorStack[j])
}
operatorStack = operatorStack[:j]
}
// Fill in each function's argument count.
openCloseClauseMap := make(map[int]int)
for _, v := range closeClauseMap {
openCloseClauseMap[v.openIdx] = v.closeIdx
}
funcArgcMap, _, _ := findFuncArgumentCount(&tokens, openCloseClauseMap, 0, len(tokens)-1)
for idx := range postfixToken {
argc, funcExists := funcArgcMap[postfixToken[idx].t]
if funcExists {
postfixToken[idx].argc = argc
}
}
return postfixToken, nil
}
// precedenceOf returns the parsing precedence of a token: function names
// bind tightest (8), then unary prefix (7); every other operator is looked
// up in the opPrecedence table.
func precedenceOf(t token) int {
	switch t.Kind {
	case govaluate.VARIABLE:
		return 8
	case govaluate.PREFIX:
		return 7
	default:
		return opPrecedence[t.Value.(string)]
	}
}
// createRule folds a postfix-ordered function/operator list into a single
// techan.Rule, building intermediate indicators against a.timeSeries.
// Malformed input now yields an error instead of an index-out-of-range
// panic (only the COMPARATOR case was guarded before).
func (a *Analyser) createRule(fcns []function) (techan.Rule, error) {
	indicators := make([]interface{}, 0)
	rules := make([]techan.Rule, 0)
	for len(fcns) > 0 {
		f := fcns[0]
		fcns = fcns[1:]
		switch f.t.Kind {
		case govaluate.NUMERIC:
			indicators = append(indicators, f.t.Value.(float64))
		case govaluate.VARIABLE:
			// Build the function: pop its arguments off the stack and
			// create the indicator.
			if len(indicators) < f.argc {
				return nil, newError("Invalid syntax")
			}
			args := indicators[len(indicators)-f.argc:]
			indicators = indicators[:len(indicators)-f.argc]
			gen, ok := indicatorMap[f.t.Value.(string)]
			if !ok {
				return nil, newError("Not implemented function")
			}
			indicator, err := gen(a.timeSeries, args...)
			if err != nil {
				return nil, err
			}
			indicators = append(indicators, indicator)
		case govaluate.PREFIX:
			// ROBUSTNESS: an empty operand stack used to panic here.
			if len(indicators) < 1 {
				return nil, newError("Cannot negate: no indicator to apply prefix to")
			}
			v := indicators[len(indicators)-1]
			indicators = indicators[:(len(indicators) - 1)]
			indi, ok := v.(techan.Indicator)
			if ok {
				indicators = append(indicators, newNegateIndicator(indi))
			} else {
				indicators = append(indicators, newNegateIndicatorFromFloat(v.(float64)))
			}
		case govaluate.COMPARATOR:
			if len(indicators) < 2 {
				return nil, newError(fmt.Sprintf("Cannot compose a comparing rule with %d indicators", len(indicators)))
			}
			rhs := indicators[len(indicators)-1]
			lhs := indicators[len(indicators)-2]
			indicators = indicators[:(len(indicators) - 2)]
			ruleMaker := ruleMap[f.t.Value.(string)]
			// Bare numbers compare as constant indicators.
			rhsIndicator, ok := rhs.(techan.Indicator)
			if !ok {
				rhsIndicator = techan.NewConstantIndicator(rhs.(float64))
			}
			lhsIndicator, ok := lhs.(techan.Indicator)
			if !ok {
				lhsIndicator = techan.NewConstantIndicator(lhs.(float64))
			}
			rule, err := ruleMaker(lhsIndicator, rhsIndicator)
			if err != nil {
				return nil, err
			}
			rules = append(rules, rule)
		case govaluate.LOGICALOP:
			// ROBUSTNESS: mirror the COMPARATOR guard; fewer than two rules
			// used to panic on the slice indexing below.
			if len(rules) < 2 {
				return nil, newError(fmt.Sprintf("Cannot compose a logical rule with %d rules", len(rules)))
			}
			rhs := rules[len(rules)-1]
			lhs := rules[len(rules)-2]
			rules = rules[:(len(rules) - 2)]
			ruleMaker := ruleMap[f.t.Value.(string)]
			rule, err := ruleMaker(lhs, rhs)
			if err != nil {
				return nil, err
			}
			rules = append(rules, rule)
		case govaluate.MODIFIER:
			// ROBUSTNESS: guard arithmetic operators the same way.
			if len(indicators) < 2 {
				return nil, newError(fmt.Sprintf("Cannot apply an operator with %d indicators", len(indicators)))
			}
			rhs := indicators[len(indicators)-1]
			lhs := indicators[len(indicators)-2]
			indicators = indicators[:(len(indicators) - 2)]
			rhsIndicator, ok := rhs.(techan.Indicator)
			if !ok {
				rhsIndicator = techan.NewConstantIndicator(rhs.(float64))
			}
			lhsIndicator, ok := lhs.(techan.Indicator)
			if !ok {
				lhsIndicator = techan.NewConstantIndicator(lhs.(float64))
			}
			operated, err := indicatorMap[f.t.Value.(string)](nil, lhsIndicator, rhsIndicator)
			if err != nil {
				return nil, err
			}
			indicators = append(indicators, operated)
		}
	}
	if len(rules) != 1 {
		// Something wrong
		return nil, newError(fmt.Sprintf("Rule must exist and be unique: %d rules generated", len(rules)))
	}
	return rules[0], nil
}
// candleToStockPrice flattens a techan.Candle into a structs.StockPrice for
// stock stockID. The timestamp is taken from the period end when useEndTime
// is set, otherwise from the period start; a nil candle yields the zero
// value.
func candleToStockPrice(stockID string, c *techan.Candle, useEndTime bool) structs.StockPrice {
	if c == nil {
		return structs.StockPrice{}
	}
	var timestamp int64
	if useEndTime {
		timestamp = c.Period.End.Unix()
	} else {
		timestamp = c.Period.Start.Unix()
	}
	return structs.StockPrice{
		StockID:   stockID,
		Timestamp: timestamp,
		Open:      int(c.OpenPrice.Float()),
		Close:     int(c.ClosePrice.Float()),
		High:      int(c.MaxPrice.Float()),
		Low:       int(c.MinPrice.Float()),
		Volume:    c.Volume.Float(),
	}
}
| cators = a | identifier_name |
analyserUtil.go | package analyser
import (
"fmt"
"sort"
"strings"
"github.com/helloworldpark/govaluate"
"github.com/helloworldpark/tickle-stock-watcher/commons"
"github.com/helloworldpark/tickle-stock-watcher/structs"
"github.com/sdcoffey/techan"
)
// Operator Precedence
var opPrecedence = map[string]int{
"*": 6, "/": 6, "**": 6,
"+": 5, "-": 5,
"<": 4, "<=": 4, ">": 4, ">=": 4, "==": 4,
"(": 3, ")": 3,
"&&": 2, "||": 2,
}
// Indicator Map
// Function Name: Indicator Generator Function
var indicatorMap = make(map[string]indicatorGen)
// Rule Map
// Function Name: Rule Generator Function
var ruleMap = make(map[string]ruleGen)
// Error Convenience
var newError = commons.NewTaggedError("Analyser")
// Cache functions
func init() {
cacheIndicators()
cacheRules()
}
func cacheIndicators() {
// +-*/
modifierAppender := func(operator string, ctor func(lhs, rhs techan.Indicator) techan.Indicator) {
f := func(series *techan.TimeSeries, args ...interface{}) (techan.Indicator, error) {
if len(args) != 2 {
return nil, newError(fmt.Sprintf("[+-*/] Not enough parameters: got %d, need more or equal to 2", len(args)))
}
lhs, ok := args[0].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("[+-*/] First argument must be of type techan.Indicator, you are %v", args[0]))
}
rhs, ok := args[1].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("[+-*/] Second argument must be of type techan.Indicator, you are %v", args[1]))
}
return ctor(lhs, rhs), nil
}
indicatorMap[operator] = f
}
modifierAppender("+", newPlusIndicator)
modifierAppender("-", newMinusIndicator)
modifierAppender("*", newMultiplyIndicator)
modifierAppender("/", newDivideIndicator)
// MACD | indicatorMap["macdhist"] = makeMACD(true)
indicatorMap["macdoscillator"] = makeMACD(true)
// RSI
indicatorMap["rsi"] = makeRSI()
// Close Price
funcClose := makeClosePrice()
indicatorMap["close"] = funcClose
indicatorMap["price"] = funcClose
indicatorMap["closeprice"] = funcClose
// Increase
indicatorMap["increase"] = makeIncrease()
// Local Extrema
indicatorMap["extrema"] = makeExtrema()
// Money Flow Index
funcMoneyFlow := makeMoneyFlowIndex()
indicatorMap["moneyflowindex"] = funcMoneyFlow
indicatorMap["moneyFlowIndex"] = funcMoneyFlow
indicatorMap["moneyflow"] = funcMoneyFlow
indicatorMap["moneyFlow"] = funcMoneyFlow
indicatorMap["mFlow"] = funcMoneyFlow
indicatorMap["mflow"] = funcMoneyFlow
// Zero
funcIsZero := makeIsZero()
indicatorMap["isZero"] = funcIsZero
indicatorMap["iszero"] = funcIsZero
indicatorMap["zero"] = funcIsZero
}
func cacheRules() {
appendRuleComparer := func(op string, ctor func(lhs, rhs techan.Rule) techan.Rule) {
f := func(args ...interface{}) (techan.Rule, error) {
if len(args) != 2 {
return nil, newError(fmt.Sprintf("Arguments for rule '%s' must be 2, you are %d", op, len(args)))
}
r1, ok := args[0].(techan.Rule)
if !ok {
return nil, newError(fmt.Sprintf("First argument must be of type techan.Rule, you are %v", args[0]))
}
r2, ok := args[1].(techan.Rule)
if !ok {
return nil, newError(fmt.Sprintf("Second argument must be of type techan.Rule, you are %v", args[1]))
}
return ctor(r1, r2), nil
}
ruleMap[op] = f
}
appendRuleComparer("&&", techan.And)
appendRuleComparer("||", techan.Or)
appendIndicatorComparer := func(op string, ctor func(lhs, rhs techan.Indicator) techan.Rule) {
f := func(args ...interface{}) (techan.Rule, error) {
if len(args) != 2 {
return nil, newError(fmt.Sprintf("Arguments for rule '%s' must be 2, you are %d", op, len(args)))
}
r1, ok := args[0].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("First argument must be of type techan.Rule, you are %v", args[0]))
}
r2, ok := args[1].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("Second argument must be of type techan.Rule, you are %v", args[1]))
}
return ctor(r1, r2), nil
}
ruleMap[op] = f
}
appendIndicatorComparer("<=", NewCrossLTEIndicatorRule)
appendIndicatorComparer("<", NewCrossLTIndicatorRule)
appendIndicatorComparer(">=", NewCrossGTEIndicatorRule)
appendIndicatorComparer(">", NewCrossGTIndicatorRule)
appendIndicatorComparer("==", NewCrossEqualIndicatorRule)
}
// Utility functions to parse strategy
func tidyTokens(tokens []token) ([]token, error) {
for i := range tokens {
t := &(tokens[i])
if t.Kind == govaluate.VARIABLE {
// Change function name to lower case
t.Value = strings.ToLower(t.Value.(string))
_, ok := indicatorMap[t.Value.(string)]
if !ok {
return nil, newError(fmt.Sprintf("Unsupported function used: %s", t.Value.(string)))
}
} else if t.Kind == govaluate.CLAUSE {
t.Value = "("
} else if t.Kind == govaluate.CLAUSE_CLOSE {
t.Value = ")"
}
}
return tokens, nil
}
func parseTokens(statement string) ([]token, error) {
return govaluate.ParseTokens(statement, nil)
}
type function struct {
t token
argc int
}
func newFunction(t token, argc int) *function {
f := function{t: t}
switch t.Kind {
case govaluate.NUMERIC, govaluate.CLAUSE, govaluate.CLAUSE_CLOSE:
f.argc = 0
case govaluate.PREFIX:
f.argc = 1
case govaluate.VARIABLE:
f.argc = argc
default:
f.argc = 2
}
return &f
}
// 재귀함수로 동작
func findFuncArgumentCount(tokens *[]token, clauses map[int]int, startIdx, endIdx int) (map[token]int, int, error) {
if startIdx == len(*tokens)-1 {
return make(map[govaluate.ExpressionToken]int), 0, nil
}
result := make(map[govaluate.ExpressionToken]int)
startedSearch := false
tokenIdx := startIdx
fcnNameIdx := -1
for tokenIdx <= endIdx {
t := (*tokens)[tokenIdx]
switch t.Kind {
case govaluate.VARIABLE:
if startedSearch {
subEndIdx := clauses[tokenIdx+1]
subFuncArgs, idxToSkip, err := findFuncArgumentCount(tokens, clauses, tokenIdx, subEndIdx)
if err != nil {
return nil, (tokenIdx + 1 - startIdx), err
}
for subFunc := range subFuncArgs {
subArgc := subFuncArgs[subFunc]
result[subFunc] = subArgc
}
result[(*tokens)[fcnNameIdx]]++
tokenIdx += idxToSkip
} else {
// 인자가 없는 경우 괄호를 생략하기도 함
// 이에 대한 예외처리
if tokenIdx < endIdx && (*tokens)[tokenIdx+1].Kind != govaluate.CLAUSE {
result[t] = 0
tokenIdx++
continue
}
startedSearch = true
fcnNameIdx = tokenIdx
result[t] = 0
tokenIdx++
}
case govaluate.NUMERIC:
if startedSearch {
result[(*tokens)[fcnNameIdx]]++
}
tokenIdx++
case govaluate.CLAUSE_CLOSE: // stop for a function, can proceed
startedSearch = false
fcnNameIdx = -1
tokenIdx++
default:
tokenIdx++
}
}
return result, tokenIdx - startIdx, nil
}
// clauseMap: true if clause, false if clauseClose
type clausePair struct {
openIdx int
closeIdx int
}
func inspectClausePairs(tokens *[]token) (closeMap map[int]*clausePair, err error) {
stack := make([]*clausePair, 0)
closeMap = make(map[int]*clausePair)
err = nil
for idx, tok := range *tokens {
if tok.Kind == govaluate.CLAUSE {
stack = append(stack, &clausePair{idx, -1})
} else if tok.Kind == govaluate.CLAUSE_CLOSE {
if len(stack) == 0 {
return nil, newError("Invalid pairing of clauses: Pairs do not match.")
}
popped := stack[len(stack)-1]
stack = stack[:len(stack)-1]
popped.closeIdx = idx
closeMap[idx] = popped
}
}
if len(stack) > 0 {
return nil, newError(fmt.Sprintf("Invalid pairing of clauses: Some clauses are left(%v)", tokens))
}
return closeMap, err
}
func reorderTokenByPostfix(tokens []token) ([]function, error) {
// Convert tokens into techan strategy
// Tokens are reordered by postfix notation
// operators:
// functions: 8
// -: 7(Negation)
// * /: 6
// + -: 5
// < <= == >= >: 4
// && ||: 3
// ( ): 2
postfixToken := make([]function, 0)
operatorStack := make([]*function, 0)
closeClauseMap, _ := inspectClausePairs(&tokens)
// 불필요한 괄호들은 trim한다
clauseList := make([]*clausePair, 0)
for _, v := range closeClauseMap {
clauseList = append(clauseList, v)
}
sort.Slice(clauseList, func(i, j int) bool {
return clauseList[i].openIdx < clauseList[j].openIdx
})
for _, pair := range clauseList {
dummy := false
if pair.openIdx == 0 {
dummy = true
} else if tokens[pair.openIdx-1].Kind == govaluate.COMPARATOR {
dummy = true
} else if tokens[pair.openIdx-1].Kind == govaluate.LOGICALOP {
dummy = true
}
if dummy {
tokens = append(tokens[:pair.closeIdx], tokens[pair.closeIdx+1:]...)
tokens = append(tokens[:pair.openIdx], tokens[pair.openIdx+1:]...)
for _, subPair := range clauseList {
subPair.openIdx--
subPair.closeIdx--
if pair.closeIdx < subPair.closeIdx {
subPair.closeIdx--
}
if pair.closeIdx < subPair.openIdx {
subPair.openIdx--
}
}
}
}
closeClauseMap, _ = inspectClausePairs(&tokens)
for i := range tokens {
t := tokens[i]
switch t.Kind {
case govaluate.NUMERIC:
postfixToken = append(postfixToken, *newFunction(t, 0))
case govaluate.COMPARATOR, govaluate.LOGICALOP, govaluate.VARIABLE, govaluate.PREFIX, govaluate.MODIFIER:
p := precedenceOf(t)
for j := len(operatorStack) - 1; j >= 0; j-- {
o := operatorStack[j]
// 내 연산자 순위가 스택보다 높으면(즉, 숫자가 크면)
// 내가 들어간다
// 아니면
// 내가 스택보다 순위가 높을 때까지 애들을 다 postfixToken에 옮긴다
op := precedenceOf(o.t)
if p > op {
break
} else {
postfixToken = append(postfixToken, *o)
operatorStack = operatorStack[:j]
}
}
operatorStack = append(operatorStack, newFunction(t, 0))
case govaluate.CLAUSE:
operatorStack = append(operatorStack, newFunction(t, 0))
case govaluate.CLAUSE_CLOSE:
for {
o := operatorStack[len(operatorStack)-1]
operatorStack = operatorStack[:len(operatorStack)-1]
if o.t.Kind == govaluate.CLAUSE {
break
} else {
postfixToken = append(postfixToken, *o)
}
}
openClauseIdx := closeClauseMap[i].openIdx
// 함수도 operator stack에서 pop하고 postfix stack으로 옮긴다
if openClauseIdx-1 >= 0 && tokens[openClauseIdx-1].Kind == govaluate.VARIABLE {
o := operatorStack[len(operatorStack)-1]
operatorStack = operatorStack[:len(operatorStack)-1]
postfixToken = append(postfixToken, *o)
}
case govaluate.SEPARATOR:
continue
default:
return nil, newError(fmt.Sprintf("Invalid token: %v", t))
}
}
for j := len(operatorStack) - 1; j >= 0; j-- {
if operatorStack[j].t.Kind != govaluate.CLAUSE && operatorStack[j].t.Kind != govaluate.CLAUSE_CLOSE {
postfixToken = append(postfixToken, *operatorStack[j])
}
operatorStack = operatorStack[:j]
}
// 함수 인자의 수를 넣어준다
openCloseClauseMap := make(map[int]int)
for _, v := range closeClauseMap {
openCloseClauseMap[v.openIdx] = v.closeIdx
}
funcArgcMap, _, _ := findFuncArgumentCount(&tokens, openCloseClauseMap, 0, len(tokens)-1)
for idx := range postfixToken {
argc, funcExists := funcArgcMap[postfixToken[idx].t]
if funcExists {
postfixToken[idx].argc = argc
}
}
return postfixToken, nil
}
func precedenceOf(t token) int {
if t.Kind == govaluate.VARIABLE {
return 8
}
if t.Kind == govaluate.PREFIX {
return 7
}
return opPrecedence[t.Value.(string)]
}
func (a *Analyser) createRule(fcns []function) (techan.Rule, error) {
indicators := make([]interface{}, 0)
rules := make([]techan.Rule, 0)
for len(fcns) > 0 {
f := fcns[0]
fcns = fcns[1:]
switch f.t.Kind {
case govaluate.NUMERIC:
indicators = append(indicators, f.t.Value.(float64))
case govaluate.VARIABLE:
// 함수를 구성한다
// 인자를 슬라이스에 담고
// indicator를 만든다
if len(indicators) < f.argc {
return nil, newError("Invalid syntax")
}
args := indicators[len(indicators)-f.argc:]
indicators = indicators[:len(indicators)-f.argc]
gen, ok := indicatorMap[f.t.Value.(string)]
if !ok {
return nil, newError("Not implemented function")
}
indicator, err := gen(a.timeSeries, args...)
if err != nil {
return nil, err
}
indicators = append(indicators, indicator)
case govaluate.PREFIX:
v := indicators[len(indicators)-1]
indicators = indicators[:(len(indicators) - 1)]
indi, ok := v.(techan.Indicator)
if ok {
indicators = append(indicators, newNegateIndicator(indi))
} else {
indicators = append(indicators, newNegateIndicatorFromFloat(v.(float64)))
}
case govaluate.COMPARATOR:
if len(indicators) < 2 {
return nil, newError(fmt.Sprintf("Cannot compose a comparing rule with %d indicators", len(indicators)))
}
rhs := indicators[len(indicators)-1]
lhs := indicators[len(indicators)-2]
indicators = indicators[:(len(indicators) - 2)]
ruleMaker := ruleMap[f.t.Value.(string)]
rhsIndicator, ok := rhs.(techan.Indicator)
if !ok {
rhsIndicator = techan.NewConstantIndicator(rhs.(float64))
}
lhsIndicator, ok := lhs.(techan.Indicator)
if !ok {
lhsIndicator = techan.NewConstantIndicator(lhs.(float64))
}
rule, err := ruleMaker(lhsIndicator, rhsIndicator)
if err != nil {
return nil, err
}
rules = append(rules, rule)
case govaluate.LOGICALOP:
rhs := rules[len(rules)-1]
lhs := rules[len(rules)-2]
rules = rules[:(len(rules) - 2)]
ruleMaker := ruleMap[f.t.Value.(string)]
rule, err := ruleMaker(lhs, rhs)
if err != nil {
return nil, err
}
rules = append(rules, rule)
case govaluate.MODIFIER:
rhs := indicators[len(indicators)-1]
lhs := indicators[len(indicators)-2]
indicators = indicators[:(len(indicators) - 2)]
rhsIndicator, ok := rhs.(techan.Indicator)
if !ok {
rhsIndicator = techan.NewConstantIndicator(rhs.(float64))
}
lhsIndicator, ok := lhs.(techan.Indicator)
if !ok {
lhsIndicator = techan.NewConstantIndicator(lhs.(float64))
}
operated, err := indicatorMap[f.t.Value.(string)](nil, lhsIndicator, rhsIndicator)
if err != nil {
return nil, err
}
indicators = append(indicators, operated)
}
}
if len(rules) != 1 {
// Something wrong
return nil, newError(fmt.Sprintf("Rule must exist and be unique: %d rules generated", len(rules)))
}
return rules[0], nil
}
func candleToStockPrice(stockID string, c *techan.Candle, useEndTime bool) structs.StockPrice {
if c == nil {
return structs.StockPrice{}
}
timestamp := c.Period.Start.Unix()
if useEndTime {
timestamp = c.Period.End.Unix()
}
return structs.StockPrice{
StockID: stockID,
Timestamp: timestamp,
Open: int(c.OpenPrice.Float()),
Close: int(c.ClosePrice.Float()),
High: int(c.MaxPrice.Float()),
Low: int(c.MinPrice.Float()),
Volume: c.Volume.Float(),
}
} | indicatorMap["macd"] = makeMACD(false) | random_line_split |
analyserUtil.go | package analyser
import (
"fmt"
"sort"
"strings"
"github.com/helloworldpark/govaluate"
"github.com/helloworldpark/tickle-stock-watcher/commons"
"github.com/helloworldpark/tickle-stock-watcher/structs"
"github.com/sdcoffey/techan"
)
// Operator Precedence
var opPrecedence = map[string]int{
"*": 6, "/": 6, "**": 6,
"+": 5, "-": 5,
"<": 4, "<=": 4, ">": 4, ">=": 4, "==": 4,
"(": 3, ")": 3,
"&&": 2, "||": 2,
}
// Indicator Map
// Function Name: Indicator Generator Function
var indicatorMap = make(map[string]indicatorGen)
// Rule Map
// Function Name: Rule Generator Function
var ruleMap = make(map[string]ruleGen)
// Error Convenience
var newError = commons.NewTaggedError("Analyser")
// Cache functions
func init() {
cacheIndicators()
cacheRules()
}
func cacheIndicators() {
// +-*/
modifierAppender := func(operator string, ctor func(lhs, rhs techan.Indicator) techan.Indicator) {
f := func(series *techan.TimeSeries, args ...interface{}) (techan.Indicator, error) {
if len(args) != 2 {
return nil, newError(fmt.Sprintf("[+-*/] Not enough parameters: got %d, need more or equal to 2", len(args)))
}
lhs, ok := args[0].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("[+-*/] First argument must be of type techan.Indicator, you are %v", args[0]))
}
rhs, ok := args[1].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("[+-*/] Second argument must be of type techan.Indicator, you are %v", args[1]))
}
return ctor(lhs, rhs), nil
}
indicatorMap[operator] = f
}
modifierAppender("+", newPlusIndicator)
modifierAppender("-", newMinusIndicator)
modifierAppender("*", newMultiplyIndicator)
modifierAppender("/", newDivideIndicator)
// MACD
indicatorMap["macd"] = makeMACD(false)
indicatorMap["macdhist"] = makeMACD(true)
indicatorMap["macdoscillator"] = makeMACD(true)
// RSI
indicatorMap["rsi"] = makeRSI()
// Close Price
funcClose := makeClosePrice()
indicatorMap["close"] = funcClose
indicatorMap["price"] = funcClose
indicatorMap["closeprice"] = funcClose
// Increase
indicatorMap["increase"] = makeIncrease()
// Local Extrema
indicatorMap["extrema"] = makeExtrema()
// Money Flow Index
funcMoneyFlow := makeMoneyFlowIndex()
indicatorMap["moneyflowindex"] = funcMoneyFlow
indicatorMap["moneyFlowIndex"] = funcMoneyFlow
indicatorMap["moneyflow"] = funcMoneyFlow
indicatorMap["moneyFlow"] = funcMoneyFlow
indicatorMap["mFlow"] = funcMoneyFlow
indicatorMap["mflow"] = funcMoneyFlow
// Zero
funcIsZero := makeIsZero()
indicatorMap["isZero"] = funcIsZero
indicatorMap["iszero"] = funcIsZero
indicatorMap["zero"] = funcIsZero
}
func cacheRules() {
appendRuleComparer := func(op string, ctor func(lhs, rhs techan.Rule) techan.Rule) {
f := func(args ...interface{}) (techan.Rule, error) {
if len(args) != 2 {
return nil, newError(fmt.Sprintf("Arguments for rule '%s' must be 2, you are %d", op, len(args)))
}
r1, ok := args[0].(techan.Rule)
if !ok {
return nil, newError(fmt.Sprintf("First argument must be of type techan.Rule, you are %v", args[0]))
}
r2, ok := args[1].(techan.Rule)
if !ok {
return nil, newError(fmt.Sprintf("Second argument must be of type techan.Rule, you are %v", args[1]))
}
return ctor(r1, r2), nil
}
ruleMap[op] = f
}
appendRuleComparer("&&", techan.And)
appendRuleComparer("||", techan.Or)
appendIndicatorComparer := func(op string, ctor func(lhs, rhs techan.Indicator) techan.Rule) {
f := func(args ...interface{}) (techan.Rule, error) {
if len(args) != 2 {
return nil, newError(fmt.Sprintf("Arguments for rule '%s' must be 2, you are %d", op, len(args)))
}
r1, ok := args[0].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("First argument must be of type techan.Rule, you are %v", args[0]))
}
r2, ok := args[1].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("Second argument must be of type techan.Rule, you are %v", args[1]))
}
return ctor(r1, r2), nil
}
ruleMap[op] = f
}
appendIndicatorComparer("<=", NewCrossLTEIndicatorRule)
appendIndicatorComparer("<", NewCrossLTIndicatorRule)
appendIndicatorComparer(">=", NewCrossGTEIndicatorRule)
appendIndicatorComparer(">", NewCrossGTIndicatorRule)
appendIndicatorComparer("==", NewCrossEqualIndicatorRule)
}
// Utility functions to parse strategy
func tidyTokens(tokens []token) ([]token, error) {
for i := range tokens {
t := &(tokens[i])
if t.Kind == govaluate.VARIABLE {
// Change function name to lower case
t.Value = strings.ToLower(t.Value.(string))
_, ok := indicatorMap[t.Value.(string)]
if !ok {
return nil, newError(fmt.Sprintf("Unsupported function used: %s", t.Value.(string)))
}
} else if t.Kind == govaluate.CLAUSE {
t.Value = "("
} else if t.Kind == govaluate.CLAUSE_CLOSE {
t.Value = ")"
}
}
return tokens, nil
}
func parseTokens(statement string) ([]token, error) {
return govaluate.ParseTokens(statement, nil)
}
type function struct {
t token
argc int
}
func newFunction(t token, argc int) *function {
f := function{t: t}
switch t.Kind {
case govaluate.NUMERIC, govaluate.CLAUSE, govaluate.CLAUSE_CLOSE:
f.argc = 0
case govaluate.PREFIX:
f.argc = 1
case govaluate.VARIABLE:
f.argc = argc
default:
f.argc = 2
}
return &f
}
// 재귀함수로 동작
func findFuncArgumentCount(tokens *[]token, clauses map[int]int, startIdx, endIdx int) (map[token]int, int, error) {
if startIdx == len(*tokens)-1 {
return make(map[govaluate.ExpressionToken]int), 0, nil
}
result := make(map[govaluate.ExpressionToken]int)
startedSearch := false
tokenIdx := startIdx
fcnNameIdx := -1
for tokenIdx <= endIdx {
t := (*tokens)[tokenIdx]
switch t.Kind {
case govaluate.VARIABLE:
if startedSearch {
subEndIdx := clauses[tokenIdx+1]
subFuncArgs, idxToSkip, err := findFuncArgumentCount(tokens, clauses, tokenIdx, subEndIdx)
if err != nil {
return nil, (tokenIdx + 1 - startIdx), err
}
for subFunc := range subFuncArgs {
subArgc | tokens)[fcnNameIdx]]++
tokenIdx += idxToSkip
} else {
// 인자가 없는 경우 괄호를 생략하기도 함
// 이에 대한 예외처리
if tokenIdx < endIdx && (*tokens)[tokenIdx+1].Kind != govaluate.CLAUSE {
result[t] = 0
tokenIdx++
continue
}
startedSearch = true
fcnNameIdx = tokenIdx
result[t] = 0
tokenIdx++
}
case govaluate.NUMERIC:
if startedSearch {
result[(*tokens)[fcnNameIdx]]++
}
tokenIdx++
case govaluate.CLAUSE_CLOSE: // stop for a function, can proceed
startedSearch = false
fcnNameIdx = -1
tokenIdx++
default:
tokenIdx++
}
}
return result, tokenIdx - startIdx, nil
}
// clauseMap: true if clause, false if clauseClose
type clausePair struct {
openIdx int
closeIdx int
}
func inspectClausePairs(tokens *[]token) (closeMap map[int]*clausePair, err error) {
stack := make([]*clausePair, 0)
closeMap = make(map[int]*clausePair)
err = nil
for idx, tok := range *tokens {
if tok.Kind == govaluate.CLAUSE {
stack = append(stack, &clausePair{idx, -1})
} else if tok.Kind == govaluate.CLAUSE_CLOSE {
if len(stack) == 0 {
return nil, newError("Invalid pairing of clauses: Pairs do not match.")
}
popped := stack[len(stack)-1]
stack = stack[:len(stack)-1]
popped.closeIdx = idx
closeMap[idx] = popped
}
}
if len(stack) > 0 {
return nil, newError(fmt.Sprintf("Invalid pairing of clauses: Some clauses are left(%v)", tokens))
}
return closeMap, err
}
func reorderTokenByPostfix(tokens []token) ([]function, error) {
// Convert tokens into techan strategy
// Tokens are reordered by postfix notation
// operators:
// functions: 8
// -: 7(Negation)
// * /: 6
// + -: 5
// < <= == >= >: 4
// && ||: 3
// ( ): 2
postfixToken := make([]function, 0)
operatorStack := make([]*function, 0)
closeClauseMap, _ := inspectClausePairs(&tokens)
// 불필요한 괄호들은 trim한다
clauseList := make([]*clausePair, 0)
for _, v := range closeClauseMap {
clauseList = append(clauseList, v)
}
sort.Slice(clauseList, func(i, j int) bool {
return clauseList[i].openIdx < clauseList[j].openIdx
})
for _, pair := range clauseList {
dummy := false
if pair.openIdx == 0 {
dummy = true
} else if tokens[pair.openIdx-1].Kind == govaluate.COMPARATOR {
dummy = true
} else if tokens[pair.openIdx-1].Kind == govaluate.LOGICALOP {
dummy = true
}
if dummy {
tokens = append(tokens[:pair.closeIdx], tokens[pair.closeIdx+1:]...)
tokens = append(tokens[:pair.openIdx], tokens[pair.openIdx+1:]...)
for _, subPair := range clauseList {
subPair.openIdx--
subPair.closeIdx--
if pair.closeIdx < subPair.closeIdx {
subPair.closeIdx--
}
if pair.closeIdx < subPair.openIdx {
subPair.openIdx--
}
}
}
}
closeClauseMap, _ = inspectClausePairs(&tokens)
for i := range tokens {
t := tokens[i]
switch t.Kind {
case govaluate.NUMERIC:
postfixToken = append(postfixToken, *newFunction(t, 0))
case govaluate.COMPARATOR, govaluate.LOGICALOP, govaluate.VARIABLE, govaluate.PREFIX, govaluate.MODIFIER:
p := precedenceOf(t)
for j := len(operatorStack) - 1; j >= 0; j-- {
o := operatorStack[j]
// 내 연산자 순위가 스택보다 높으면(즉, 숫자가 크면)
// 내가 들어간다
// 아니면
// 내가 스택보다 순위가 높을 때까지 애들을 다 postfixToken에 옮긴다
op := precedenceOf(o.t)
if p > op {
break
} else {
postfixToken = append(postfixToken, *o)
operatorStack = operatorStack[:j]
}
}
operatorStack = append(operatorStack, newFunction(t, 0))
case govaluate.CLAUSE:
operatorStack = append(operatorStack, newFunction(t, 0))
case govaluate.CLAUSE_CLOSE:
for {
o := operatorStack[len(operatorStack)-1]
operatorStack = operatorStack[:len(operatorStack)-1]
if o.t.Kind == govaluate.CLAUSE {
break
} else {
postfixToken = append(postfixToken, *o)
}
}
openClauseIdx := closeClauseMap[i].openIdx
// 함수도 operator stack에서 pop하고 postfix stack으로 옮긴다
if openClauseIdx-1 >= 0 && tokens[openClauseIdx-1].Kind == govaluate.VARIABLE {
o := operatorStack[len(operatorStack)-1]
operatorStack = operatorStack[:len(operatorStack)-1]
postfixToken = append(postfixToken, *o)
}
case govaluate.SEPARATOR:
continue
default:
return nil, newError(fmt.Sprintf("Invalid token: %v", t))
}
}
for j := len(operatorStack) - 1; j >= 0; j-- {
if operatorStack[j].t.Kind != govaluate.CLAUSE && operatorStack[j].t.Kind != govaluate.CLAUSE_CLOSE {
postfixToken = append(postfixToken, *operatorStack[j])
}
operatorStack = operatorStack[:j]
}
// 함수 인자의 수를 넣어준다
openCloseClauseMap := make(map[int]int)
for _, v := range closeClauseMap {
openCloseClauseMap[v.openIdx] = v.closeIdx
}
funcArgcMap, _, _ := findFuncArgumentCount(&tokens, openCloseClauseMap, 0, len(tokens)-1)
for idx := range postfixToken {
argc, funcExists := funcArgcMap[postfixToken[idx].t]
if funcExists {
postfixToken[idx].argc = argc
}
}
return postfixToken, nil
}
func precedenceOf(t token) int {
if t.Kind == govaluate.VARIABLE {
return 8
}
if t.Kind == govaluate.PREFIX {
return 7
}
return opPrecedence[t.Value.(string)]
}
func (a *Analyser) createRule(fcns []function) (techan.Rule, error) {
indicators := make([]interface{}, 0)
rules := make([]techan.Rule, 0)
for len(fcns) > 0 {
f := fcns[0]
fcns = fcns[1:]
switch f.t.Kind {
case govaluate.NUMERIC:
indicators = append(indicators, f.t.Value.(float64))
case govaluate.VARIABLE:
// 함수를 구성한다
// 인자를 슬라이스에 담고
// indicator를 만든다
if len(indicators) < f.argc {
return nil, newError("Invalid syntax")
}
args := indicators[len(indicators)-f.argc:]
indicators = indicators[:len(indicators)-f.argc]
gen, ok := indicatorMap[f.t.Value.(string)]
if !ok {
return nil, newError("Not implemented function")
}
indicator, err := gen(a.timeSeries, args...)
if err != nil {
return nil, err
}
indicators = append(indicators, indicator)
case govaluate.PREFIX:
v := indicators[len(indicators)-1]
indicators = indicators[:(len(indicators) - 1)]
indi, ok := v.(techan.Indicator)
if ok {
indicators = append(indicators, newNegateIndicator(indi))
} else {
indicators = append(indicators, newNegateIndicatorFromFloat(v.(float64)))
}
case govaluate.COMPARATOR:
if len(indicators) < 2 {
return nil, newError(fmt.Sprintf("Cannot compose a comparing rule with %d indicators", len(indicators)))
}
rhs := indicators[len(indicators)-1]
lhs := indicators[len(indicators)-2]
indicators = indicators[:(len(indicators) - 2)]
ruleMaker := ruleMap[f.t.Value.(string)]
rhsIndicator, ok := rhs.(techan.Indicator)
if !ok {
rhsIndicator = techan.NewConstantIndicator(rhs.(float64))
}
lhsIndicator, ok := lhs.(techan.Indicator)
if !ok {
lhsIndicator = techan.NewConstantIndicator(lhs.(float64))
}
rule, err := ruleMaker(lhsIndicator, rhsIndicator)
if err != nil {
return nil, err
}
rules = append(rules, rule)
case govaluate.LOGICALOP:
rhs := rules[len(rules)-1]
lhs := rules[len(rules)-2]
rules = rules[:(len(rules) - 2)]
ruleMaker := ruleMap[f.t.Value.(string)]
rule, err := ruleMaker(lhs, rhs)
if err != nil {
return nil, err
}
rules = append(rules, rule)
case govaluate.MODIFIER:
rhs := indicators[len(indicators)-1]
lhs := indicators[len(indicators)-2]
indicators = indicators[:(len(indicators) - 2)]
rhsIndicator, ok := rhs.(techan.Indicator)
if !ok {
rhsIndicator = techan.NewConstantIndicator(rhs.(float64))
}
lhsIndicator, ok := lhs.(techan.Indicator)
if !ok {
lhsIndicator = techan.NewConstantIndicator(lhs.(float64))
}
operated, err := indicatorMap[f.t.Value.(string)](nil, lhsIndicator, rhsIndicator)
if err != nil {
return nil, err
}
indicators = append(indicators, operated)
}
}
if len(rules) != 1 {
// Something wrong
return nil, newError(fmt.Sprintf("Rule must exist and be unique: %d rules generated", len(rules)))
}
return rules[0], nil
}
func candleToStockPrice(stockID string, c *techan.Candle, useEndTime bool) structs.StockPrice {
if c == nil {
return structs.StockPrice{}
}
timestamp := c.Period.Start.Unix()
if useEndTime {
timestamp = c.Period.End.Unix()
}
return structs.StockPrice{
StockID: stockID,
Timestamp: timestamp,
Open: int(c.OpenPrice.Float()),
Close: int(c.ClosePrice.Float()),
High: int(c.MaxPrice.Float()),
Low: int(c.MinPrice.Float()),
Volume: c.Volume.Float(),
}
}
| := subFuncArgs[subFunc]
result[subFunc] = subArgc
}
result[(* | conditional_block |
analyserUtil.go | package analyser
import (
"fmt"
"sort"
"strings"
"github.com/helloworldpark/govaluate"
"github.com/helloworldpark/tickle-stock-watcher/commons"
"github.com/helloworldpark/tickle-stock-watcher/structs"
"github.com/sdcoffey/techan"
)
// Operator Precedence
var opPrecedence = map[string]int{
"*": 6, "/": 6, "**": 6,
"+": 5, "-": 5,
"<": 4, "<=": 4, ">": 4, ">=": 4, "==": 4,
"(": 3, ")": 3,
"&&": 2, "||": 2,
}
// Indicator Map
// Function Name: Indicator Generator Function
var indicatorMap = make(map[string]indicatorGen)
// Rule Map
// Function Name: Rule Generator Function
var ruleMap = make(map[string]ruleGen)
// Error Convenience
var newError = commons.NewTaggedError("Analyser")
// Cache functions
func init() {
cacheIndicators()
cacheRules()
}
func cacheIndicators() {
// +-*/
modifierAppender := func(operator string, ctor func(lhs, rhs techan.Indicator) techan.Indicator) {
f := func(series *techan.TimeSeries, args ...interface{}) (techan.Indicator, error) {
if len(args) != 2 {
return nil, newError(fmt.Sprintf("[+-*/] Not enough parameters: got %d, need more or equal to 2", len(args)))
}
lhs, ok := args[0].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("[+-*/] First argument must be of type techan.Indicator, you are %v", args[0]))
}
rhs, ok := args[1].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("[+-*/] Second argument must be of type techan.Indicator, you are %v", args[1]))
}
return ctor(lhs, rhs), nil
}
indicatorMap[operator] = f
}
modifierAppender("+", newPlusIndicator)
modifierAppender("-", newMinusIndicator)
modifierAppender("*", newMultiplyIndicator)
modifierAppender("/", newDivideIndicator)
// MACD
indicatorMap["macd"] = makeMACD(false)
indicatorMap["macdhist"] = makeMACD(true)
indicatorMap["macdoscillator"] = makeMACD(true)
// RSI
indicatorMap["rsi"] = makeRSI()
// Close Price
funcClose := makeClosePrice()
indicatorMap["close"] = funcClose
indicatorMap["price"] = funcClose
indicatorMap["closeprice"] = funcClose
// Increase
indicatorMap["increase"] = makeIncrease()
// Local Extrema
indicatorMap["extrema"] = makeExtrema()
// Money Flow Index
funcMoneyFlow := makeMoneyFlowIndex()
indicatorMap["moneyflowindex"] = funcMoneyFlow
indicatorMap["moneyFlowIndex"] = funcMoneyFlow
indicatorMap["moneyflow"] = funcMoneyFlow
indicatorMap["moneyFlow"] = funcMoneyFlow
indicatorMap["mFlow"] = funcMoneyFlow
indicatorMap["mflow"] = funcMoneyFlow
// Zero
funcIsZero := makeIsZero()
indicatorMap["isZero"] = funcIsZero
indicatorMap["iszero"] = funcIsZero
indicatorMap["zero"] = funcIsZero
}
func cacheRules() {
appendRuleComparer := func(op string, ctor func(lhs, rhs techan.Rule) techan.Rule) {
f := func(args ...interface{}) (techan.Rule, error) {
if len(args) != 2 {
return nil, newError(fmt.Sprintf("Arguments for rule '%s' must be 2, you are %d", op, len(args)))
}
r1, ok := args[0].(techan.Rule)
if !ok {
return nil, newError(fmt.Sprintf("First argument must be of type techan.Rule, you are %v", args[0]))
}
r2, ok := args[1].(techan.Rule)
if !ok {
return nil, newError(fmt.Sprintf("Second argument must be of type techan.Rule, you are %v", args[1]))
}
return ctor(r1, r2), nil
}
ruleMap[op] = f
}
appendRuleComparer("&&", techan.And)
appendRuleComparer("||", techan.Or)
appendIndicatorComparer := func(op string, ctor func(lhs, rhs techan.Indicator) techan.Rule) {
f := func(args ...interface{}) (techan.Rule, error) {
if len(args) != 2 {
return nil, newError(fmt.Sprintf("Arguments for rule '%s' must be 2, you are %d", op, len(args)))
}
r1, ok := args[0].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("First argument must be of type techan.Rule, you are %v", args[0]))
}
r2, ok := args[1].(techan.Indicator)
if !ok {
return nil, newError(fmt.Sprintf("Second argument must be of type techan.Rule, you are %v", args[1]))
}
return ctor(r1, r2), nil
}
ruleMap[op] = f
}
appendIndicatorComparer("<=", NewCrossLTEIndicatorRule)
appendIndicatorComparer("<", NewCrossLTIndicatorRule)
appendIndicatorComparer(">=", NewCrossGTEIndicatorRule)
appendIndicatorComparer(">", NewCrossGTIndicatorRule)
appendIndicatorComparer("==", NewCrossEqualIndicatorRule)
}
// Utility functions to parse strategy
func tidyTokens(tokens []token) ([]token, error) {
for i := range tokens {
t := &(tokens[i])
if t.Kind == govaluate.VARIABLE {
// Change function name to lower case
t.Value = strings.ToLower(t.Value.(string))
_, ok := indicatorMap[t.Value.(string)]
if !ok {
return nil, newError(fmt.Sprintf("Unsupported function used: %s", t.Value.(string)))
}
} else if t.Kind == govaluate.CLAUSE {
t.Value = "("
} else if t.Kind == govaluate.CLAUSE_CLOSE {
t.Value = ")"
}
}
return tokens, nil
}
func parseTokens(statement string) ([]token, error) {
return govaluate.ParseTokens(statement, nil)
}
type function struct {
t token
argc int
}
func newFunction(t token, argc int) *function {
f := function{t: t}
switch t.Kind {
case govaluate.NUMERIC, govaluate.CLAUSE, govaluate.CLAUSE_CLOSE:
f.argc = 0
case govaluate.PREFIX:
f.argc = 1
case govaluate.VARIABLE:
f.argc = argc
default:
f.argc = 2
}
return &f
}
// 재귀함수로 동작
func findFuncArgumentCount(tokens *[]token, clauses map[int]int, startIdx, endIdx int) (map[token]int, int, error) {
if startIdx == len(*tokens)-1 {
return make(map[govaluate.ExpressionToken]int), 0, nil
}
result := make(map[govaluate.ExpressionToken]int)
startedSearch := false
tokenIdx := startIdx
fcnNameIdx := -1
for tokenIdx <= endIdx {
t := (*tokens)[tokenIdx]
switch t.Kind {
case govaluate.VARIABLE:
if startedSearch {
subEndIdx := clauses[tokenIdx+1]
subFuncArgs, idxToSkip, err := findFuncArgumentCount(tokens, clauses, tokenIdx, subEndIdx)
if err != nil {
return nil, (tokenIdx + 1 - startIdx), err
}
for subFunc := range subFuncArgs {
subArgc := subFuncArgs[subFunc]
result[subFunc] = subArgc
}
result[(*tokens)[fcnNameIdx]]++
tokenIdx += idxToSkip
} else {
// 인자가 없는 경우 괄호를 생략하기도 함
// 이에 대한 예외처리
if tokenIdx < endIdx && (*tokens)[tokenIdx+1].Kind != govaluate.CLAUSE {
result[t] = 0
tokenIdx++
continue
}
startedSearch = true
fcnNameIdx = tokenIdx
result[t] = 0
tokenIdx++
}
case govaluate.NUMERIC:
if startedSearch {
result[(*tokens)[fcnNameIdx]]++
}
tokenIdx++
case govaluate.CLAUSE_CLOSE: // stop for a function, can proceed
startedSearch = false
fcnNameIdx = -1
tokenIdx++
default:
tokenIdx++
}
}
return result, tokenIdx - startIdx, nil
}
// clauseMap: true if clause, false if clauseClose
type clausePair struct {
openIdx int
closeIdx int
}
func inspectClausePairs(tokens *[]token) (closeMap map[int]*clausePair, err error) {
stack := make([]*clausePair, 0)
closeMap = make(map[int]*c | r) {
// Convert tokens into techan strategy
// Tokens are reordered by postfix notation
// operators:
// functions: 8
// -: 7(Negation)
// * /: 6
// + -: 5
// < <= == >= >: 4
// && ||: 3
// ( ): 2
postfixToken := make([]function, 0)
operatorStack := make([]*function, 0)
closeClauseMap, _ := inspectClausePairs(&tokens)
// 불필요한 괄호들은 trim한다
clauseList := make([]*clausePair, 0)
for _, v := range closeClauseMap {
clauseList = append(clauseList, v)
}
sort.Slice(clauseList, func(i, j int) bool {
return clauseList[i].openIdx < clauseList[j].openIdx
})
for _, pair := range clauseList {
dummy := false
if pair.openIdx == 0 {
dummy = true
} else if tokens[pair.openIdx-1].Kind == govaluate.COMPARATOR {
dummy = true
} else if tokens[pair.openIdx-1].Kind == govaluate.LOGICALOP {
dummy = true
}
if dummy {
tokens = append(tokens[:pair.closeIdx], tokens[pair.closeIdx+1:]...)
tokens = append(tokens[:pair.openIdx], tokens[pair.openIdx+1:]...)
for _, subPair := range clauseList {
subPair.openIdx--
subPair.closeIdx--
if pair.closeIdx < subPair.closeIdx {
subPair.closeIdx--
}
if pair.closeIdx < subPair.openIdx {
subPair.openIdx--
}
}
}
}
closeClauseMap, _ = inspectClausePairs(&tokens)
for i := range tokens {
t := tokens[i]
switch t.Kind {
case govaluate.NUMERIC:
postfixToken = append(postfixToken, *newFunction(t, 0))
case govaluate.COMPARATOR, govaluate.LOGICALOP, govaluate.VARIABLE, govaluate.PREFIX, govaluate.MODIFIER:
p := precedenceOf(t)
for j := len(operatorStack) - 1; j >= 0; j-- {
o := operatorStack[j]
// 내 연산자 순위가 스택보다 높으면(즉, 숫자가 크면)
// 내가 들어간다
// 아니면
// 내가 스택보다 순위가 높을 때까지 애들을 다 postfixToken에 옮긴다
op := precedenceOf(o.t)
if p > op {
break
} else {
postfixToken = append(postfixToken, *o)
operatorStack = operatorStack[:j]
}
}
operatorStack = append(operatorStack, newFunction(t, 0))
case govaluate.CLAUSE:
operatorStack = append(operatorStack, newFunction(t, 0))
case govaluate.CLAUSE_CLOSE:
for {
o := operatorStack[len(operatorStack)-1]
operatorStack = operatorStack[:len(operatorStack)-1]
if o.t.Kind == govaluate.CLAUSE {
break
} else {
postfixToken = append(postfixToken, *o)
}
}
openClauseIdx := closeClauseMap[i].openIdx
// 함수도 operator stack에서 pop하고 postfix stack으로 옮긴다
if openClauseIdx-1 >= 0 && tokens[openClauseIdx-1].Kind == govaluate.VARIABLE {
o := operatorStack[len(operatorStack)-1]
operatorStack = operatorStack[:len(operatorStack)-1]
postfixToken = append(postfixToken, *o)
}
case govaluate.SEPARATOR:
continue
default:
return nil, newError(fmt.Sprintf("Invalid token: %v", t))
}
}
for j := len(operatorStack) - 1; j >= 0; j-- {
if operatorStack[j].t.Kind != govaluate.CLAUSE && operatorStack[j].t.Kind != govaluate.CLAUSE_CLOSE {
postfixToken = append(postfixToken, *operatorStack[j])
}
operatorStack = operatorStack[:j]
}
// 함수 인자의 수를 넣어준다
openCloseClauseMap := make(map[int]int)
for _, v := range closeClauseMap {
openCloseClauseMap[v.openIdx] = v.closeIdx
}
funcArgcMap, _, _ := findFuncArgumentCount(&tokens, openCloseClauseMap, 0, len(tokens)-1)
for idx := range postfixToken {
argc, funcExists := funcArgcMap[postfixToken[idx].t]
if funcExists {
postfixToken[idx].argc = argc
}
}
return postfixToken, nil
}
func precedenceOf(t token) int {
if t.Kind == govaluate.VARIABLE {
return 8
}
if t.Kind == govaluate.PREFIX {
return 7
}
return opPrecedence[t.Value.(string)]
}
func (a *Analyser) createRule(fcns []function) (techan.Rule, error) {
indicators := make([]interface{}, 0)
rules := make([]techan.Rule, 0)
for len(fcns) > 0 {
f := fcns[0]
fcns = fcns[1:]
switch f.t.Kind {
case govaluate.NUMERIC:
indicators = append(indicators, f.t.Value.(float64))
case govaluate.VARIABLE:
// 함수를 구성한다
// 인자를 슬라이스에 담고
// indicator를 만든다
if len(indicators) < f.argc {
return nil, newError("Invalid syntax")
}
args := indicators[len(indicators)-f.argc:]
indicators = indicators[:len(indicators)-f.argc]
gen, ok := indicatorMap[f.t.Value.(string)]
if !ok {
return nil, newError("Not implemented function")
}
indicator, err := gen(a.timeSeries, args...)
if err != nil {
return nil, err
}
indicators = append(indicators, indicator)
case govaluate.PREFIX:
v := indicators[len(indicators)-1]
indicators = indicators[:(len(indicators) - 1)]
indi, ok := v.(techan.Indicator)
if ok {
indicators = append(indicators, newNegateIndicator(indi))
} else {
indicators = append(indicators, newNegateIndicatorFromFloat(v.(float64)))
}
case govaluate.COMPARATOR:
if len(indicators) < 2 {
return nil, newError(fmt.Sprintf("Cannot compose a comparing rule with %d indicators", len(indicators)))
}
rhs := indicators[len(indicators)-1]
lhs := indicators[len(indicators)-2]
indicators = indicators[:(len(indicators) - 2)]
ruleMaker := ruleMap[f.t.Value.(string)]
rhsIndicator, ok := rhs.(techan.Indicator)
if !ok {
rhsIndicator = techan.NewConstantIndicator(rhs.(float64))
}
lhsIndicator, ok := lhs.(techan.Indicator)
if !ok {
lhsIndicator = techan.NewConstantIndicator(lhs.(float64))
}
rule, err := ruleMaker(lhsIndicator, rhsIndicator)
if err != nil {
return nil, err
}
rules = append(rules, rule)
case govaluate.LOGICALOP:
rhs := rules[len(rules)-1]
lhs := rules[len(rules)-2]
rules = rules[:(len(rules) - 2)]
ruleMaker := ruleMap[f.t.Value.(string)]
rule, err := ruleMaker(lhs, rhs)
if err != nil {
return nil, err
}
rules = append(rules, rule)
case govaluate.MODIFIER:
rhs := indicators[len(indicators)-1]
lhs := indicators[len(indicators)-2]
indicators = indicators[:(len(indicators) - 2)]
rhsIndicator, ok := rhs.(techan.Indicator)
if !ok {
rhsIndicator = techan.NewConstantIndicator(rhs.(float64))
}
lhsIndicator, ok := lhs.(techan.Indicator)
if !ok {
lhsIndicator = techan.NewConstantIndicator(lhs.(float64))
}
operated, err := indicatorMap[f.t.Value.(string)](nil, lhsIndicator, rhsIndicator)
if err != nil {
return nil, err
}
indicators = append(indicators, operated)
}
}
if len(rules) != 1 {
// Something wrong
return nil, newError(fmt.Sprintf("Rule must exist and be unique: %d rules generated", len(rules)))
}
return rules[0], nil
}
func candleToStockPrice(stockID string, c *techan.Candle, useEndTime bool) structs.StockPrice {
if c == nil {
return structs.StockPrice{}
}
timestamp := c.Period.Start.Unix()
if useEndTime {
timestamp = c.Period.End.Unix()
}
return structs.StockPrice{
StockID: stockID,
Timestamp: timestamp,
Open: int(c.OpenPrice.Float()),
Close: int(c.ClosePrice.Float()),
High: int(c.MaxPrice.Float()),
Low: int(c.MinPrice.Float()),
Volume: c.Volume.Float(),
}
}
| lausePair)
err = nil
for idx, tok := range *tokens {
if tok.Kind == govaluate.CLAUSE {
stack = append(stack, &clausePair{idx, -1})
} else if tok.Kind == govaluate.CLAUSE_CLOSE {
if len(stack) == 0 {
return nil, newError("Invalid pairing of clauses: Pairs do not match.")
}
popped := stack[len(stack)-1]
stack = stack[:len(stack)-1]
popped.closeIdx = idx
closeMap[idx] = popped
}
}
if len(stack) > 0 {
return nil, newError(fmt.Sprintf("Invalid pairing of clauses: Some clauses are left(%v)", tokens))
}
return closeMap, err
}
func reorderTokenByPostfix(tokens []token) ([]function, erro | identifier_body |
index.umd.js | (function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else if(typeof exports === 'object')
exports["index"] = factory();
else
root["index"] = factory();
})((typeof self !== 'undefined' ? self : this), function() {
return /******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = "fae3");
/******/ })
/************************************************************************/
/******/ ({
/***/ "f6fd":
/***/ (function(module, exports) {
// document.currentScript polyfill by Adam Miller
// MIT license
(function(document){
var currentScript = "currentScript",
scripts = document.getElementsByTagName('script'); // Live NodeList collection
// If browser needs currentScript polyfill, add get currentScript() to the document object
if (!(currentScript in document)) {
Object.defineProperty(document, currentScript, {
get: function(){
// IE 6-10 supports script readyState
// IE 10+ support stack trace
try { throw new Error(); }
catch (err) {
// Find the second match for the "at" string to get file src url from stack.
// Specifically works with the format of stack traces in IE.
var i, res = ((/.*at [^\(]*\((.*):.+:.+\)$/ig).exec(err.stack) || [false])[1];
// For all scripts on the page, if src matches or if ready state is interactive, return the script tag
for(i in scripts){
if(scripts[i].src == res || scripts[i].readyState == "interactive"){
return scripts[i];
}
}
// If no match, return null
return null;
}
}
});
}
})(document);
/***/ }),
/***/ "fae3":
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
// CONCATENATED MODULE: ./node_modules/@vue/cli-service/lib/commands/build/setPublicPath.js
// This file is imported into lib/wc client bundles.
if (typeof window !== 'undefined') {
if (true) {
__webpack_require__("f6fd")
}
var i
if ((i = window.document.currentScript) && (i = i.src.match(/(.+\/)[^/]+\.js(\?.*)?$/))) {
__webpack_require__.p = i[1] // eslint-disable-line
}
}
// Indicate to webpack that this file can be concatenated
/* harmony default export */ var setPublicPath = (null);
// CONCATENATED MODULE: ./src/js/Resource.js
class Resource {
constructor(url, owner) {
this.url = url;
this.owner = owner ? owner : {
"$set"(target, attribute, value) {
target[attribute] = value;
}
};
}
array(params = {}, index = []) {
this.index(params, index);
return index;
}
row(dataOrId = null, params = {}, record = {}) {
if (dataOrId instanceof Object) {
this.load(null, dataOrId, params);
return params;
} else {
this.load(dataOrId, params, record);
return record;
}
}
index(params = {}, index = null) {
return this.get(params, index, this.url).then(response => response.data);
}
load(id = null, params = {}, record = null) {
return this.get(params, record, id ? `${this.url}/${id}` : this.url).then(response => response.data.data);
}
refresh(record, params = {}, initial = []) {
return record instanceof Array ? this.index(params, record.splice(0, record.length, ...(initial || [])) && record).then(data => data.data) : this.load(record.id, params, record);
}
get(params = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "get",
params
});
}
post(data = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "post",
data: {
data
}
});
}
/**
* Usage:
* this.call(id, 'method', {param1:value, param2:value}, response)
* this.call(id, 'method', {param1:value, param2:value})
* this.call(id, 'method')
* this.call('method', {param1:value, param2:value})
* this.call('method')
*
* @param {*} id
* @param {*} method
* @param {*} parameters
* @param {*} result
*/
call(id, method = {}, parameters = {}, result = null) {
if (typeof id === 'string' && method instanceof Object) {
result = parameters;
parameters = method;
method = id;
id = null;
}
return this.axios(result, {
url: this.url + (id ? '/' + id : ''),
method: "post",
data: {
call: {
method,
parameters
}
}
}).then(response => {
result instanceof Array ? result.push(...response.data.response) : result instanceof Object ? this.assign(result, response.data.response) : null;
return response.data.response;
});
}
rowCall(id, method = {}, parameters = {}, response = {}) {
if (typeof id === 'string' && method instanceof Object) {
this.call(id, method, parameters, response);
return parameters;
} else {
this.call(id, method, parameters, response);
return response;
}
}
arrayCall(id, method = {}, parameters = [], response = []) {
if (typeof id === 'string' && method instanceof Object) {
this.call(id, method, parameters, response);
return parameters;
} else {
this.call(id, method, parameters, response);
return response;
}
}
put(data = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "put",
data: {
data
}
});
}
patch(data = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "patch",
data: {
data
}
});
}
save(data = {}, response = null) {
return this.put(data, response, `${this.url}/${data.id}`);
}
delete(dataOrId = null, response = null) {
return this.axios(response, {
url: dataOrId ? this.url + '/' + (isNaN(dataOrId) ? dataOrId.id : dataOrId) : this.url,
method: "delete"
});
}
assign(target, source) {
Object.keys(source).forEach(attribute => {
this.owner.$set(target, attribute, source[attribute]);
});
}
axios(result, params) {
return window.axios(params).then(response => {
response.data.data ? result instanceof Array ? result.push(...response.data.data) : result instanceof Object ? this.assign(result, response.data.data) : null : null;
return response;
});
}
}
/* harmony default export */ var js_Resource = (Resource);
// CONCATENATED MODULE: ./src/js/ResourceMixin.js
/**
* Usage:
*
* {
* data() {
* return {
* apiIndex: {
* users:
* }
* };
* }
* }
*
* this.$api.user[1].row() User with id=1
* this.$api.user[1].roleObject.row() Role object of User with id=1
* this.$api.user[1].roleObject.users.array() Users of RoleObject of User with id=1
*/
// Reserved names (by Vue/debugger)
const reserved = ['_isVue', '_vm', 'toJSON', 'state', 'render'];
const ResourceHandler = {
get(resource, index) {
if (typeof index === 'symbol' || reserved.includes(index)) {
return undefined;
}
if (resource[index] !== undefined) {
return resource[index];
}
return buildResource(index, resource, resource.owner);
}
};
function | (index, base = null, owner = null) {
const url = base ? `${base.url}/${index}` : index;
return new Proxy(new js_Resource(url, owner), ResourceHandler);
}
/* harmony default export */ var ResourceMixin = ({
beforeCreate() {
const owner = this;
this.$api = new Proxy({}, {
get(resources, name) {
return resources[name] ? resources[name] : resources[name] = buildResource(name, null, owner);
}
});
},
data() {
return {
apiPrevIndex: {},
apiIsRunning: false
};
},
watch: {
apiIndex: {
handler(apiIndex) {
for (let data in apiIndex) {
let jParams = JSON.stringify(apiIndex[data]);
if (jParams !== JSON.stringify(this.apiPrevIndex[data] === undefined ? null : this.apiPrevIndex[data])) {
let params = JSON.parse(jParams);
let api = params.$api ? params.$api : data;
let call = params.$call ? params.$call : null;
let id = params.$id ? params.$id : null;
delete params.$api;
delete params.$call;
delete params.$id;
this.apiIsRunning = true;
(call ? this.$api[api].call(id, call, params).then(response => window._.set(this, data, response)) : this.$api[api].refresh(window._.get(this, data), params)).then(response => {
this.apiIsRunning = false;
return response;
});
}
}
this.apiPrevIndex = JSON.parse(JSON.stringify(apiIndex === undefined ? {} : apiIndex));
},
deep: true,
immediate: true
}
}
});
// CONCATENATED MODULE: ./src/index.js
window.Resource = js_Resource;
window.ResourceMixin = ResourceMixin;
// CONCATENATED MODULE: ./node_modules/@vue/cli-service/lib/commands/build/entry-lib-no-default.js
/***/ })
/******/ });
});
//# sourceMappingURL=index.umd.js.map | buildResource | identifier_name |
index.umd.js | (function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else if(typeof exports === 'object')
exports["index"] = factory();
else
root["index"] = factory();
})((typeof self !== 'undefined' ? self : this), function() {
return /******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = "fae3");
/******/ })
/************************************************************************/
/******/ ({
/***/ "f6fd":
/***/ (function(module, exports) {
// document.currentScript polyfill by Adam Miller
// MIT license
(function(document){
var currentScript = "currentScript",
scripts = document.getElementsByTagName('script'); // Live NodeList collection
// If browser needs currentScript polyfill, add get currentScript() to the document object
if (!(currentScript in document)) {
Object.defineProperty(document, currentScript, {
get: function(){
// IE 6-10 supports script readyState
// IE 10+ support stack trace
try { throw new Error(); }
catch (err) {
// Find the second match for the "at" string to get file src url from stack.
// Specifically works with the format of stack traces in IE.
var i, res = ((/.*at [^\(]*\((.*):.+:.+\)$/ig).exec(err.stack) || [false])[1];
// For all scripts on the page, if src matches or if ready state is interactive, return the script tag
for(i in scripts){
if(scripts[i].src == res || scripts[i].readyState == "interactive"){
return scripts[i];
}
}
// If no match, return null
return null;
}
}
});
}
})(document);
/***/ }),
/***/ "fae3":
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
// CONCATENATED MODULE: ./node_modules/@vue/cli-service/lib/commands/build/setPublicPath.js
// This file is imported into lib/wc client bundles.
if (typeof window !== 'undefined') {
if (true) {
__webpack_require__("f6fd")
}
var i
if ((i = window.document.currentScript) && (i = i.src.match(/(.+\/)[^/]+\.js(\?.*)?$/))) {
__webpack_require__.p = i[1] // eslint-disable-line
}
}
// Indicate to webpack that this file can be concatenated
/* harmony default export */ var setPublicPath = (null);
// CONCATENATED MODULE: ./src/js/Resource.js
class Resource {
constructor(url, owner) {
this.url = url;
this.owner = owner ? owner : {
"$set"(target, attribute, value) {
target[attribute] = value;
}
};
}
array(params = {}, index = []) {
this.index(params, index);
return index;
}
row(dataOrId = null, params = {}, record = {}) {
if (dataOrId instanceof Object) {
this.load(null, dataOrId, params);
return params;
} else {
this.load(dataOrId, params, record);
return record;
}
}
index(params = {}, index = null) {
return this.get(params, index, this.url).then(response => response.data);
}
load(id = null, params = {}, record = null) {
return this.get(params, record, id ? `${this.url}/${id}` : this.url).then(response => response.data.data);
}
refresh(record, params = {}, initial = []) {
return record instanceof Array ? this.index(params, record.splice(0, record.length, ...(initial || [])) && record).then(data => data.data) : this.load(record.id, params, record);
}
get(params = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "get",
params
});
}
post(data = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "post",
data: {
data
}
});
}
/**
* Usage:
* this.call(id, 'method', {param1:value, param2:value}, response)
* this.call(id, 'method', {param1:value, param2:value})
* this.call(id, 'method')
* this.call('method', {param1:value, param2:value})
* this.call('method')
*
* @param {*} id
* @param {*} method
* @param {*} parameters
* @param {*} result
*/
| if (typeof id === 'string' && method instanceof Object) {
result = parameters;
parameters = method;
method = id;
id = null;
}
return this.axios(result, {
url: this.url + (id ? '/' + id : ''),
method: "post",
data: {
call: {
method,
parameters
}
}
}).then(response => {
result instanceof Array ? result.push(...response.data.response) : result instanceof Object ? this.assign(result, response.data.response) : null;
return response.data.response;
});
}
rowCall(id, method = {}, parameters = {}, response = {}) {
if (typeof id === 'string' && method instanceof Object) {
this.call(id, method, parameters, response);
return parameters;
} else {
this.call(id, method, parameters, response);
return response;
}
}
arrayCall(id, method = {}, parameters = [], response = []) {
if (typeof id === 'string' && method instanceof Object) {
this.call(id, method, parameters, response);
return parameters;
} else {
this.call(id, method, parameters, response);
return response;
}
}
put(data = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "put",
data: {
data
}
});
}
patch(data = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "patch",
data: {
data
}
});
}
save(data = {}, response = null) {
return this.put(data, response, `${this.url}/${data.id}`);
}
delete(dataOrId = null, response = null) {
return this.axios(response, {
url: dataOrId ? this.url + '/' + (isNaN(dataOrId) ? dataOrId.id : dataOrId) : this.url,
method: "delete"
});
}
assign(target, source) {
Object.keys(source).forEach(attribute => {
this.owner.$set(target, attribute, source[attribute]);
});
}
axios(result, params) {
return window.axios(params).then(response => {
response.data.data ? result instanceof Array ? result.push(...response.data.data) : result instanceof Object ? this.assign(result, response.data.data) : null : null;
return response;
});
}
}
/* harmony default export */ var js_Resource = (Resource);
// CONCATENATED MODULE: ./src/js/ResourceMixin.js
/**
* Usage:
*
* {
* data() {
* return {
* apiIndex: {
* users:
* }
* };
* }
* }
*
* this.$api.user[1].row() User with id=1
* this.$api.user[1].roleObject.row() Role object of User with id=1
* this.$api.user[1].roleObject.users.array() Users of RoleObject of User with id=1
*/
// Reserved names (by Vue/debugger)
const reserved = ['_isVue', '_vm', 'toJSON', 'state', 'render'];
const ResourceHandler = {
get(resource, index) {
if (typeof index === 'symbol' || reserved.includes(index)) {
return undefined;
}
if (resource[index] !== undefined) {
return resource[index];
}
return buildResource(index, resource, resource.owner);
}
};
function buildResource(index, base = null, owner = null) {
const url = base ? `${base.url}/${index}` : index;
return new Proxy(new js_Resource(url, owner), ResourceHandler);
}
/* harmony default export */ var ResourceMixin = ({
beforeCreate() {
const owner = this;
this.$api = new Proxy({}, {
get(resources, name) {
return resources[name] ? resources[name] : resources[name] = buildResource(name, null, owner);
}
});
},
data() {
return {
apiPrevIndex: {},
apiIsRunning: false
};
},
watch: {
apiIndex: {
handler(apiIndex) {
for (let data in apiIndex) {
let jParams = JSON.stringify(apiIndex[data]);
if (jParams !== JSON.stringify(this.apiPrevIndex[data] === undefined ? null : this.apiPrevIndex[data])) {
let params = JSON.parse(jParams);
let api = params.$api ? params.$api : data;
let call = params.$call ? params.$call : null;
let id = params.$id ? params.$id : null;
delete params.$api;
delete params.$call;
delete params.$id;
this.apiIsRunning = true;
(call ? this.$api[api].call(id, call, params).then(response => window._.set(this, data, response)) : this.$api[api].refresh(window._.get(this, data), params)).then(response => {
this.apiIsRunning = false;
return response;
});
}
}
this.apiPrevIndex = JSON.parse(JSON.stringify(apiIndex === undefined ? {} : apiIndex));
},
deep: true,
immediate: true
}
}
});
// CONCATENATED MODULE: ./src/index.js
window.Resource = js_Resource;
window.ResourceMixin = ResourceMixin;
// CONCATENATED MODULE: ./node_modules/@vue/cli-service/lib/commands/build/entry-lib-no-default.js
/***/ })
/******/ });
});
//# sourceMappingURL=index.umd.js.map | call(id, method = {}, parameters = {}, result = null) { | random_line_split |
index.umd.js | (function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else if(typeof exports === 'object')
exports["index"] = factory();
else
root["index"] = factory();
})((typeof self !== 'undefined' ? self : this), function() {
return /******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = "fae3");
/******/ })
/************************************************************************/
/******/ ({
/***/ "f6fd":
/***/ (function(module, exports) {
// document.currentScript polyfill by Adam Miller
// MIT license
(function(document){
var currentScript = "currentScript",
scripts = document.getElementsByTagName('script'); // Live NodeList collection
// If browser needs currentScript polyfill, add get currentScript() to the document object
if (!(currentScript in document)) {
Object.defineProperty(document, currentScript, {
get: function(){
// IE 6-10 supports script readyState
// IE 10+ support stack trace
try { throw new Error(); }
catch (err) {
// Find the second match for the "at" string to get file src url from stack.
// Specifically works with the format of stack traces in IE.
var i, res = ((/.*at [^\(]*\((.*):.+:.+\)$/ig).exec(err.stack) || [false])[1];
// For all scripts on the page, if src matches or if ready state is interactive, return the script tag
for(i in scripts){
if(scripts[i].src == res || scripts[i].readyState == "interactive"){
return scripts[i];
}
}
// If no match, return null
return null;
}
}
});
}
})(document);
/***/ }),
/***/ "fae3":
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
// CONCATENATED MODULE: ./node_modules/@vue/cli-service/lib/commands/build/setPublicPath.js
// This file is imported into lib/wc client bundles.
if (typeof window !== 'undefined') {
if (true) {
__webpack_require__("f6fd")
}
var i
if ((i = window.document.currentScript) && (i = i.src.match(/(.+\/)[^/]+\.js(\?.*)?$/))) {
__webpack_require__.p = i[1] // eslint-disable-line
}
}
// Indicate to webpack that this file can be concatenated
/* harmony default export */ var setPublicPath = (null);
// CONCATENATED MODULE: ./src/js/Resource.js
class Resource {
constructor(url, owner) {
this.url = url;
this.owner = owner ? owner : {
"$set"(target, attribute, value) {
target[attribute] = value;
}
};
}
array(params = {}, index = []) {
this.index(params, index);
return index;
}
row(dataOrId = null, params = {}, record = {}) {
if (dataOrId instanceof Object) {
this.load(null, dataOrId, params);
return params;
} else {
this.load(dataOrId, params, record);
return record;
}
}
index(params = {}, index = null) {
return this.get(params, index, this.url).then(response => response.data);
}
load(id = null, params = {}, record = null) {
return this.get(params, record, id ? `${this.url}/${id}` : this.url).then(response => response.data.data);
}
refresh(record, params = {}, initial = []) |
get(params = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "get",
params
});
}
post(data = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "post",
data: {
data
}
});
}
/**
* Usage:
* this.call(id, 'method', {param1:value, param2:value}, response)
* this.call(id, 'method', {param1:value, param2:value})
* this.call(id, 'method')
* this.call('method', {param1:value, param2:value})
* this.call('method')
*
* @param {*} id
* @param {*} method
* @param {*} parameters
* @param {*} result
*/
call(id, method = {}, parameters = {}, result = null) {
if (typeof id === 'string' && method instanceof Object) {
result = parameters;
parameters = method;
method = id;
id = null;
}
return this.axios(result, {
url: this.url + (id ? '/' + id : ''),
method: "post",
data: {
call: {
method,
parameters
}
}
}).then(response => {
result instanceof Array ? result.push(...response.data.response) : result instanceof Object ? this.assign(result, response.data.response) : null;
return response.data.response;
});
}
rowCall(id, method = {}, parameters = {}, response = {}) {
if (typeof id === 'string' && method instanceof Object) {
this.call(id, method, parameters, response);
return parameters;
} else {
this.call(id, method, parameters, response);
return response;
}
}
arrayCall(id, method = {}, parameters = [], response = []) {
if (typeof id === 'string' && method instanceof Object) {
this.call(id, method, parameters, response);
return parameters;
} else {
this.call(id, method, parameters, response);
return response;
}
}
put(data = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "put",
data: {
data
}
});
}
patch(data = {}, response = null, url = this.url) {
return this.axios(response, {
url,
method: "patch",
data: {
data
}
});
}
save(data = {}, response = null) {
return this.put(data, response, `${this.url}/${data.id}`);
}
delete(dataOrId = null, response = null) {
return this.axios(response, {
url: dataOrId ? this.url + '/' + (isNaN(dataOrId) ? dataOrId.id : dataOrId) : this.url,
method: "delete"
});
}
assign(target, source) {
Object.keys(source).forEach(attribute => {
this.owner.$set(target, attribute, source[attribute]);
});
}
axios(result, params) {
return window.axios(params).then(response => {
response.data.data ? result instanceof Array ? result.push(...response.data.data) : result instanceof Object ? this.assign(result, response.data.data) : null : null;
return response;
});
}
}
/* harmony default export */ var js_Resource = (Resource);
// CONCATENATED MODULE: ./src/js/ResourceMixin.js
/**
* Usage:
*
* {
* data() {
* return {
* apiIndex: {
* users:
* }
* };
* }
* }
*
* this.$api.user[1].row() User with id=1
* this.$api.user[1].roleObject.row() Role object of User with id=1
* this.$api.user[1].roleObject.users.array() Users of RoleObject of User with id=1
*/
// Reserved names (by Vue/debugger)
const reserved = ['_isVue', '_vm', 'toJSON', 'state', 'render'];
// Proxy trap shared by every Resource: serve cached members, refuse
// symbol/reserved keys (Vue and debugger probes), and lazily build a
// nested child resource for any other property access.
const ResourceHandler = {
    get(resource, index) {
        if (typeof index === 'symbol' || reserved.includes(index)) {
            return undefined;
        }
        const existing = resource[index];
        if (existing !== undefined) {
            return existing;
        }
        return buildResource(index, resource, resource.owner);
    }
};
// Construct a proxied Resource rooted at `base` (nested URL) or at the
// top level (the index itself becomes the URL).
function buildResource(index, base = null, owner = null) {
    let url = index;
    if (base) {
        url = `${base.url}/${index}`;
    }
    return new Proxy(new js_Resource(url, owner), ResourceHandler);
}
/* harmony default export */ var ResourceMixin = ({
    beforeCreate() {
        // Capture the component instance so lazily-built resources can reach
        // Vue's reactivity helpers ($set) through their `owner` reference.
        const owner = this;
        // `this.$api.<name>` lazily builds and caches a proxied Resource.
        this.$api = new Proxy({}, {
            get(resources, name) {
                return resources[name] ? resources[name] : resources[name] = buildResource(name, null, owner);
            }
        });
    },
    data() {
        return {
            // Last-seen deep snapshot of `apiIndex`, used to detect changes.
            apiPrevIndex: {},
            // True while any watcher-triggered API request is in flight.
            apiIsRunning: false
        };
    },
    watch: {
        apiIndex: {
            handler(apiIndex) {
                for (let data in apiIndex) {
                    // Compare serialized parameters against the previous
                    // snapshot; only re-fetch when something actually changed.
                    let jParams = JSON.stringify(apiIndex[data]);
                    if (jParams !== JSON.stringify(this.apiPrevIndex[data] === undefined ? null : this.apiPrevIndex[data])) {
                        // Round-trip through JSON to get a detached copy.
                        let params = JSON.parse(jParams);
                        // $api / $call / $id are meta keys, not request params.
                        let api = params.$api ? params.$api : data;
                        let call = params.$call ? params.$call : null;
                        let id = params.$id ? params.$id : null;
                        delete params.$api;
                        delete params.$call;
                        delete params.$id;
                        this.apiIsRunning = true;
                        // Either invoke a named call and store its result at
                        // `data`, or refresh the existing value in place.
                        (call ? this.$api[api].call(id, call, params).then(response => window._.set(this, data, response)) : this.$api[api].refresh(window._.get(this, data), params)).then(response => {
                            this.apiIsRunning = false;
                            return response;
                        });
                    }
                }
                // Remember a deep copy for the next comparison round.
                this.apiPrevIndex = JSON.parse(JSON.stringify(apiIndex === undefined ? {} : apiIndex));
            },
            deep: true,
            immediate: true
        }
    }
});
// CONCATENATED MODULE: ./src/index.js
window.Resource = js_Resource;
window.ResourceMixin = ResourceMixin;
// CONCATENATED MODULE: ./node_modules/@vue/cli-service/lib/commands/build/entry-lib-no-default.js
/***/ })
/******/ });
});
//# sourceMappingURL=index.umd.js.map | {
return record instanceof Array ? this.index(params, record.splice(0, record.length, ...(initial || [])) && record).then(data => data.data) : this.load(record.id, params, record);
} | identifier_body |
mod task;
mod worker;
use crate::executor::Executor;
use crossbeam::{
deque::{Injector, Stealer, Worker},
queue::ArrayQueue,
};
use futures::Future;
use std::{
io,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use task::Task;
/// An executor which distributes tasks across multiple threads using a work-stealing
/// scheduler. Tasks can be spawned on it by calling the [`spawn`][`Executor::spawn`]
/// method on the `ThreadPool`. Note that since this executor moves futures between different
/// threads, the future in question *must* be [`Send`].
///
/// # Examples
/// ```
/// use std::io;
/// use threader::{
/// executor::Executor,
/// thread_pool::ThreadPool,
/// net::tcp::TcpStream,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut pool = ThreadPool::new()?;
/// let addr = "10.0.0.1:80".parse().unwrap();
///
/// pool.spawn(async move {
/// let _stream = TcpStream::connect(&addr);
/// });
///
/// pool.shutdown_on_idle();
/// Ok(())
/// }
/// ```
pub struct ThreadPool {
    // Worker threads paired with the shared handles used to signal them.
    workers: Vec<(JoinHandle<()>, Arc<worker::Handle>)>,
    // Number of worker threads in the pool.
    count: usize,
    // Set once a shutdown has been initiated.
    shutdown: bool,
    // State shared with every worker: injector, sleep queue, and stealers.
    shared: Arc<Shared>,
}
impl ThreadPool {
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to the number of logical CPU cores in a given machine.
/// Returns any errors that may have occurred in creating the
/// thread pool.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::new()?;
/// Ok(())
/// }
/// ```
pub fn new() -> io::Result<ThreadPool> {
ThreadPool::new_priv(None)
}
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to `count`. `count` must not be zero, or this method
/// will panic. Like [`ThreadPool::new`], this method returns
/// any errors that occurred when creating the thread pool.
///
/// # Panics
/// Panics if `count` is equal to zero.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::with_threads(1)?;
/// Ok(())
/// }
/// ```
pub fn with_threads(count: usize) -> io::Result<ThreadPool> {
ThreadPool::new_priv(Some(count))
}
/// Shuts down the `ThreadPool` when all worker threads are idle. This method
/// blocks the current thread until all of the worker threads have been joined.
pub fn shutdown_on_idle(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_IDLE);
}
/// Shuts down the `ThreadPool` immediately, sending a message to all worker
/// threads to shut down. This emethod blocks the current thread until all
/// worker threads have been joined, but this blocking shouldn't be noticeable.
pub fn shutdown_now(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_NOW);
}
// Private method used to reduce code duplication.
fn | (&mut self, shutdown: usize) {
self.shutdown = true;
for (_, handle) in &self.workers {
handle.state.store(shutdown, Ordering::Release);
handle.unparker.unpark();
}
while let Some((thread, _)) = self.workers.pop() {
let _ = thread.join();
}
}
// Private method used to reduce code duplication.
fn new_priv(count: Option<usize>) -> io::Result<ThreadPool> {
if let Some(0) = count {
panic!("Can not create a thread pool with 0 threads.");
}
let count = count.unwrap_or(num_cpus::get());
let queues = {
let mut vec = Vec::with_capacity(count);
for _ in 0..count {
vec.push(Worker::new_fifo());
}
vec
};
let stealers: Vec<_> = queues.iter().map(|queue| queue.stealer()).collect();
let shared = Arc::new(Shared {
injector: Injector::new(),
sleep_queue: ArrayQueue::new(count),
stealers,
});
let workers = {
let mut vec = Vec::with_capacity(count);
for queue in queues {
let thread = worker::create_worker(Arc::clone(&shared), queue)?;
vec.push(thread);
}
vec
};
for (_, handle) in &workers {
let handle = Arc::clone(handle);
// Unwrap here since this is a programmer error
// if this fails.
shared.sleep_queue.push(handle).unwrap();
}
Ok(ThreadPool {
workers,
shutdown: false,
count,
shared,
})
}
}
impl<F> Executor<F> for ThreadPool
where
    F: Future<Output = ()> + Send + 'static,
{
    /// Schedules `future` on the pool's global injector queue and wakes at
    /// most one parked worker so the new task gets picked up promptly.
    fn spawn(&self, future: F) {
        let task = Task::new(future, Arc::downgrade(&self.shared));
        self.shared.injector.push(task);
        // Popping an empty queue simply errs, so no emptiness pre-check
        // is required before trying to wake a sleeper.
        if let Ok(handle) = self.shared.sleep_queue.pop() {
            handle.state.store(worker::NEW_TASK, Ordering::Release);
            handle.unparker.unpark();
        }
    }
}
impl Drop for ThreadPool {
    fn drop(&mut self) {
        // Ensure worker threads are signalled and joined even if the user
        // never called one of the explicit shutdown methods.
        self.shutdown_now();
    }
}
pub(crate) struct Shared {
    // Global queue that `spawn` pushes new tasks onto; workers take from it.
    injector: Injector<Task>,
    // Handles of parked workers waiting to be woken when work arrives.
    sleep_queue: ArrayQueue<Arc<worker::Handle>>,
    // Stealers for every worker's local queue, enabling work stealing.
    stealers: Vec<Stealer<Task>>,
}
#[cfg(test)]
mod tests {
    use super::*;
    use crossbeam::channel;
    use futures::future;
    use futures::task::{Context, Waker};
    use parking_lot::Mutex;
    use std::pin::Pin;
    use std::sync::atomic::AtomicBool;
    use std::task::Poll;
    use std::thread;
    use std::time::{Duration, Instant};

    // Number of timing iterations used by the benchmark-style test below.
    static TIMES: usize = 100;

    // Smoke test: a task spawned on the pool runs without crashing.
    #[test]
    fn simple() {
        let executor = ThreadPool::new().unwrap();
        executor.spawn(async {
            println!("Hello, world!");
        });
        thread::sleep(Duration::from_secs(1));
    }

    // A future that returns Pending once and is woken from another thread;
    // verifies that rescheduled tasks get polled again and complete.
    #[test]
    fn reschedule() {
        struct CustomFuture {
            // Waker stored on first poll, taken by the waking thread.
            waker: Arc<Mutex<Option<Waker>>>,
            // Flag flipped by the waking thread once the wake has fired.
            shared: Arc<AtomicBool>,
        }
        impl CustomFuture {
            fn new() -> CustomFuture {
                let waker: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
                let waker_thread = Arc::clone(&waker);
                let shared = Arc::new(AtomicBool::new(false));
                let shared_thread = Arc::clone(&shared);
                // Wake the future from a separate thread after a delay.
                thread::spawn(move || {
                    thread::sleep(Duration::from_secs(1));
                    if let Some(waker) = waker_thread.lock().take() {
                        waker.wake();
                        shared_thread.store(true, Ordering::SeqCst);
                    }
                });
                CustomFuture { waker, shared }
            }
        }
        impl Future for CustomFuture {
            type Output = ();
            fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                if self.shared.load(Ordering::SeqCst) {
                    Poll::Ready(())
                } else {
                    // Not ready yet: register the current waker and yield.
                    *(self.waker.lock()) = Some(cx.waker().clone());
                    Poll::Pending
                }
            }
        }
        let (tx, rx) = channel::unbounded();
        let executor = ThreadPool::with_threads(12).unwrap();
        executor.spawn(async move {
            CustomFuture::new().await;
            tx.send(0).unwrap();
        });
        // Give the pool ample time to poll, park, wake, and re-poll.
        thread::sleep(Duration::from_secs(4));
        assert_eq!(rx.try_recv(), Ok(0));
    }

    // `with_threads(0)` must panic (enforced by `new_priv`).
    #[test]
    #[should_panic]
    fn zero_threads() {
        let executor = ThreadPool::with_threads(0).unwrap();
        executor.spawn(async {});
    }

    // A pool larger than the core count should still construct and run.
    #[test]
    fn custom_thread_count() {
        let executor = ThreadPool::with_threads(32).unwrap();
        executor.spawn(async {});
    }

    // Stress test (ignored by default): a misbehaving future that keeps
    // waking itself after returning Ready must not wedge the pool.
    #[test]
    #[ignore]
    fn bad_future() {
        // A future that spawns a thread, returns Poll::Ready(()), and
        // keeps trying to reschedule itself on the thread_pool.
        struct BadFuture {
            shared: Arc<Mutex<Option<Waker>>>,
        }
        impl BadFuture {
            fn new() -> BadFuture {
                let shared: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
                let thread_shared = Arc::clone(&shared);
                // Busy-loop thread that re-wakes the task forever.
                thread::spawn(move || loop {
                    let guard = thread_shared.lock();
                    if let Some(waker) = guard.as_ref() {
                        waker.clone().wake();
                    }
                });
                BadFuture { shared }
            }
        }
        impl Future for BadFuture {
            type Output = ();
            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                let mut guard = self.shared.lock();
                *guard = Some(cx.waker().clone());
                Poll::Ready(())
            }
        }
        let executor = ThreadPool::new().unwrap();
        for _ in 0..50 {
            executor.spawn(BadFuture::new());
        }
    }

    // Benchmark-style timing test (ignored by default): measures spawn
    // throughput and shutdown latency; prints averages to stderr.
    #[test]
    #[ignore]
    fn time_threader() {
        let mut executor = ThreadPool::with_threads(1).unwrap();
        let mut results = Vec::with_capacity(TIMES);
        eprintln!("\nthreader time test starting...");
        let total_start = Instant::now();
        for _ in 0..TIMES {
            let start = Instant::now();
            for _ in 0..50_000 {
                executor.spawn(async {
                    future::ready(()).await;
                });
            }
            let end = start.elapsed();
            // eprintln!("threader: {:?}", end);
            results.push(end.as_millis());
        }
        let shutdown_start = Instant::now();
        executor.shutdown_on_idle();
        eprintln!("threader shutdown: {:?}", shutdown_start.elapsed());
        eprintln!("threader total: {:?}", total_start.elapsed());
        let average = {
            let sum: u128 = results.into_iter().sum();
            (sum as f64) / (TIMES as f64)
        };
        eprintln!("threader average: {:?}ms", average);
    }
}
| shutdown_priv | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.