index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
548 | EricHughesABC/T2EPGviewer | refs/heads/master | /simple_pandas_plot.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 10:29:38 2017
@author: neh69
"""
import os
import sys
import numpy as np
import pandas as pd
import lmfit as lm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from PyQt5 import QtCore, QtWidgets
import visionplot_widgets
import mriplotwidget
from ImageData import T2imageData
def openStudyDir():
    """Prompt the user to pick a study directory and echo the selection."""
    dialog = QtWidgets.QFileDialog()
    chosen_dir = dialog.getExistingDirectory(None, "Study Directory", "")
    print("openStudyDir\n", chosen_dir, type(chosen_dir))
    # tree_window.setRootIndex(tree_window.model.index(chosen_dir))
def openNiftiAnalyzeFile():
    """Prompt the user for a nifti/analyze image file and echo the selection.

    Starts browsing from the module-level global `procDataDirPath`.
    """
    dialog = QtWidgets.QFileDialog()
    selection = dialog.getOpenFileName(None, "MRI data nifti/analyze", procDataDirPath, "nii files (*.nii);;analyze files (*.img);;All files (*)")
    print(selection)
def getH5file():
    """Ask the user for a results CSV file and, when one is chosen, load the
    matching image/results data and refresh every plot window.

    Relies on module-level globals created in the __main__ block:
    procDataDirPath, imageData, mainWindow, mri_window, hist_window, bar_window.
    """
    dlg = QtWidgets.QFileDialog()
    returned_data = dlg.getOpenFileName(None, "select results file", procDataDirPath, "CSV files (*.csv);;All files (*)")
    pathandfilename = returned_data[0]
    if len(pathandfilename) > 0:
        # Attempt to extract details from data.
        print(pathandfilename)
        imageData.readin_alldata_from_results_filename(os.path.abspath(pathandfilename))
        if imageData.read_T2_img_hdr_files():
            print("just before read_T2_data()")
            if imageData.read_T2_data():
                imageData.read_Dixon_data()
                print("just after read_T2_data()")
                mainWindow.setWindowTitle(imageData.T2resultsFilenameAndPath)
                # Update image displayed in window.
                imageData.overlayRoisOnImage(0, imageData.fittingParam)
                mri_window.update_plot(imageData.mriSliceIMG, imageData.maskedROIs)
                print("type(imageData.ImageDataT2)", type(imageData.ImageDataT2))
                hist_window.update_plot([1, imageData.T2slices, imageData.dixonSlices],
                                        [imageData.t2_data_summary_df, imageData.dixon_data_summary_df], "T2m")
                bar_window.update_plot([1, imageData.T2slices, imageData.dixonSlices],
                                       [imageData.t2_data_summary_df, imageData.dixon_data_summary_df], "T2m")
                # Set min/max on sliders.
                mri_window.slicesSlider.setMinimum(0)
                mri_window.slicesSlider.setMaximum(imageData.numSlicesT2 - 1)
                mri_window.slicesSlider.setValue(0)
                mri_window.echoesSlider.setMinimum(0)
                mri_window.echoesSlider.setMaximum(imageData.numEchoesT2 - 1)
                # BUG FIX: was `mri_window.slicesSlider.setValue(0)` a second
                # time (copy-paste); the echoes slider was never reset.
                mri_window.echoesSlider.setValue(0)
        else:
            print(imageData.t2_image_hdr_pathfilename, " not found")
def fileQuit(self):
    """Close the window/application object this handler is bound to."""
    self.close()
def closeEvent(self, ce):
    """Qt close-event hook: route window close through fileQuit."""
    self.fileQuit()
if __name__ == "__main__":
    # ---- default lmfit parameter sets for the EPG and Azzabou T2 models ----
    lmparams = {}
    epgt2fitparams = lm.Parameters()
    azzt2fitparams = lm.Parameters()
    epgt2fitparams.add('T2fat', value=180.0, min=0, max=5000, vary=False)
    epgt2fitparams.add('T2muscle', value=35, min=0, max=100, vary=True)
    epgt2fitparams.add('Afat', value=0.20, min=0, max=10, vary=True)
    epgt2fitparams.add('Amuscle', value=0.80, min=0, max=10, vary=True)
    epgt2fitparams.add('T1fat', value=365.0, vary=False)
    epgt2fitparams.add('T1muscle', value=1400, vary=False)
    epgt2fitparams.add('echo', value=10.0, vary=False)
    epgt2fitparams.add('B1scale', value=1.0, min=0, max=2, vary=True)
    # add_many tuples: (name, value, vary, min, max, expr)
    azzt2fitparams.add_many(('Afat', 60.0, True, 0, 250, None),
                            ('Amuscle', 40.0, True, 0, 250, None),
                            ('T2muscle', 40.0, True, 0, 100, None),
                            ('c_l', 0.55, False, 0, 2000, None),
                            ('c_s', 0.45, False, 0, 2000, None),
                            ('t2_fl', 250.0, False, 0, 2000, None),
                            ('t2_fs', 43.0, False, 0, 2000, None),
                            ('echo', 10.0, False, 0, 2000, None))
    lmparams['epgt2fitparams'] = epgt2fitparams
    lmparams['azzt2fitparams'] = azzt2fitparams
    params = azzt2fitparams

    # ---- plotting backend / style ----
    # NOTE(review): matplotlib.use() is called after pyplot was imported at the
    # top of the file; ideally the backend is selected before importing pyplot.
    matplotlib.use('Qt5Agg')
    # BUG FIX: plt.style.context() returns a context manager and has no effect
    # unless used in a `with` block; use() applies the style globally.
    plt.style.use('seaborn-colorblind')
    sns.set(font_scale=0.6)
    # sns.set_palette("pastel")

    procDataDirPath = r"/home/eric/Documents/projects/programming/2019/mri_progs/T2EPGviewer/studyData/testStudy/HC-001/sess-1/upperleg/T2/results/muscle/AzzEPG"
    progname = os.path.basename(sys.argv[0])

    # ---- Qt application, main window and menu ----
    qApp = QtWidgets.QApplication(sys.argv)
    imageData = T2imageData()
    print("imageData.fittingParam:", imageData.fittingParam)
    mainWindow = QtWidgets.QMainWindow()
    mainWindow.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    mainWindow.setWindowTitle("application main window")
    file_menu = QtWidgets.QMenu('&File', mainWindow)
    # file_menu.addAction("&Open study Directory", openStudyDir)
    file_menu.addAction('&Choose Study Results File', getH5file, QtCore.Qt.CTRL + QtCore.Qt.Key_H)
    # file_menu.addAction('&Open nifti/analyze image File', openNiftiAnalyzeFile)
    # file_menu.addAction('&Choose Rois', imageData.getRoiFiles, QtCore.Qt.CTRL + QtCore.Qt.Key_R)
    # file_menu.addAction('&Quit', fileQuit, QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
    mainWindow.menuBar().addMenu(file_menu)
    main_widget = QtWidgets.QWidget(mainWindow)
    mainlayout = QtWidgets.QHBoxLayout(main_widget)

    # ---- random placeholder data so plots can render before a file is chosen ----
    npts = 256 * 100
    iii = np.random.permutation(np.arange(255 * 255))[:npts]
    ddd = np.random.randn(npts) * 100 + 500
    data_df = pd.DataFrame({'iii': iii, 'ddd': ddd})

    # ---- plot widgets: MRI + radio buttons on the left, T2/bar/histogram right ----
    leftwindow = QtWidgets.QWidget()
    rightwindow = QtWidgets.QWidget()
    splitHwidget = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
    hlayout = QtWidgets.QHBoxLayout(leftwindow)
    vlayout = QtWidgets.QVBoxLayout(rightwindow)
    mri_window = mriplotwidget.MRIPlotWidget(imageData=imageData)
    rbtns_window = visionplot_widgets.radiobuttons_fitWidget(mri_window=mri_window)
    t2plot_window = visionplot_widgets.T2PlotWidget(lmparams, showToolbar=False)
    bar_window = visionplot_widgets.BarPlotWidget(showToolbar=False, data_df=data_df, image_size=256)
    hist_window = visionplot_widgets.HistogramPlotWidget(mri_plot=mri_window, showToolbar=True, data_df=data_df, image_size=256)
    mainlayout.addWidget(splitHwidget)
    hlayout.addWidget(rbtns_window)
    hlayout.addWidget(mri_window)
    vlayout.addWidget(t2plot_window)
    vlayout.addWidget(bar_window)
    vlayout.addWidget(hist_window)
    splitHwidget.addWidget(leftwindow)
    splitHwidget.addWidget(rightwindow)
    mri_window.register_PlotWidgets(t2plot_window, bar_window, hist_window, rbtns_window)
    main_widget.setFocus()
    mainWindow.setCentralWidget(main_widget)
    mainWindow.show()
    sys.exit(qApp.exec_())
| {"/visionplot_widgets.py": ["/t2fit.py", "/ImageData.py", "/epgT2paramsDialog.py", "/azzT2paramsDialog.py"], "/simple_pandas_plot.py": ["/visionplot_widgets.py", "/mriplotwidget.py", "/ImageData.py"]} |
549 | EricHughesABC/T2EPGviewer | refs/heads/master | /ImageData.py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 6 14:55:05 2018
@author: ERIC
"""
import os
import numpy as np
import pandas as pd
import nibabel
class T2imageData():
    """Holds the T2 and Dixon MRI image volumes, per-pixel results tables and
    the directory bookkeeping needed to locate them from a chosen results
    file."""

    def __init__(self):
        # Slice/echo currently displayed.
        self.currentSlice = None
        self.currentEcho = None
        # Directory bookkeeping, filled in by set_dataDir_and_results_filenames.
        self.T2imagesDirpath = None
        self.dixonImagesDirpath = None
        self.dixonResultsDirpath = None
        self.T2resultsDirpath = None
        self.root = None
        self.studyName = None
        self.subject = None
        self.session = None
        self.imagedRegion = None
        self.protocol = None
        self.results = None
        self.roiType = None
        self.fitModel = None
        self.imagedRegionType = self.roiType  # alias of roiType (still None here)
        # Image files and their formats ("nifti" or "analyze").
        self.T2imageType = None
        self.T2MRIimageFilenameAndPath = ""
        self.dixonImageType = None
        self.dixonMRIimageFilenameAndPath = ""
        # Results CSV files.
        self.T2resultsFilenameAndPath = ""
        self.dixonResultsFilenameAndPath = ""
        # Fitted parameter overlaid on the MRI image by default.
        self.fittingParam = "T2m"
        # Volume geometry, filled in by read_T2_img_hdr_files.
        self.numRowsT2 = None
        self.numColsT2 = None
        self.numSlicesT2 = None
        self.numEchoesT2 = None
        self.dixonSlices = None
        self.T2slices = None
        self.ImageDataT2 = None
        self.mriSliceIMG = None
        # Per-pixel results tables (pandas DataFrames once loaded).
        self.t2_data_summary_df = None
        self.dixon_data_summary_df = None
def readin_alldata_from_results_filename(self, fn):
print("inside readin_alldata_from_results_filename")
self.set_dataDir_and_results_filenames(fn)
self.set_T2imageData_filename_and_type()
self.set_dixonImageData_filename_and_type()
print("T2resultsDirpath :: ",self.T2resultsDirpath)
print("dixonResultsDirpath :: ", self.dixonResultsDirpath)
print("T2imagesDirpath :: ", self.T2imagesDirpath)
print("dixonImagesDirpath :: ", self.dixonImagesDirpath)
print("T2imageType :: ", self.T2imageType)
print("T2MRIimageFilenameAndPath :: ", self.T2MRIimageFilenameAndPath)
print("dixonImageType :: ", self.dixonImageType)
print("dixonMRIimageFilenameAndPath ::", self.dixonMRIimageFilenameAndPath)
print("T2resultsFilenameAndPath :: ", self.T2resultsFilenameAndPath)
print("dixonResultsFilenameAndPath :: ", self.dixonResultsFilenameAndPath)
def set_T2imageData_filename_and_type(self):
"""Searches for image data in directory
can be nifti or analyze sets the type and filename"""
print("inside set_T2imageData_filename_and_type")
print("self.T2imagesDirpath", self.T2imagesDirpath)
if self.T2imagesDirpath == None:
self.T2imageType = None
return False
else:
imgFilenameList = [ os.path.join(self.T2imagesDirpath,fn)
for fn in os.listdir(self.T2imagesDirpath)
if "nii" in fn or "img" in fn]
if len(imgFilenameList) == 0:
self.T2imageType = None
self.T2MRIimageFilenameAndPath = None
return False
else:
self.T2MRIimageFilenameAndPath = imgFilenameList[0]
if "nii" in self.T2MRIimageFilenameAndPath:
self.T2imageType = "nifti"
else:
self.T2imageType = "analyze"
return True
def set_dixonImageData_filename_and_type(self):
"""Searches for image data in directory
can be nifti or analyze sets the type and filename
filename must have fatPC. in it"""
print( "inside set_dixonImageData_filename_and_type")
print("self.dixonImagesDirpath",self.dixonImagesDirpath)
if self.dixonImagesDirpath == None:
self.dionImageType = None
return False
else:
imgFilenameList = [ os.path.join(self.dixonImagesDirpath,fn)
for fn in os.listdir(self.dixonImagesDirpath)
if "fatPC." in fn and ("nii" in fn or "img" in fn)]
if len(imgFilenameList) == 0:
self.dixonImageType = None
self.dixonMRIimageFilenameAndPath = None
return False
else:
self.dixonMRIimageFilenameAndPath = imgFilenameList[0]
if "nii" in self.dixonMRIimageFilenameAndPath:
self.dixonImageType = "nifti"
else:
self.dixonImageType = "analyze"
return True
def set_results_dir(self,protocol, resultsDir):
resultsDirpath = None
# resultsDirpath1 = resultsDir
dirpath = os.path.join(self.root,self.studyName,self.subject,self.session,
self.imagedRegion,protocol, self.results,self.roiType,self.fitModel)
if os.path.exists(dirpath):
resultsDirpath = dirpath
else:
dirpath = os.path.join(self.root,self.studyName,self.subject,self.session,
self.imagedRegion,protocol, self.results,self.roiType)
if os.path.exists(dirpath):
fitModels = [f for f in os.listdir(dirpath)]
if len(fitModels)> 0:
resultsDirpath = os.path.join(dirpath, fitModels[0])
return resultsDir, resultsDirpath
def set_dataDir_and_results_filenames( self, fn):
print("inside set_dataDir_and_results_filenames")
print("fn", fn)
resultsDir, resultsFilename = os.path.split(fn)
print("resultsDir", resultsDir)
print("resultsFilename", resultsFilename)
resultsDirList = resultsDir.split(os.path.sep)
print("resultsDirList",resultsDirList, )
sessionIndex = [ i for i,w in enumerate(resultsDirList) if "sess" in w]
print("sessionIndex",sessionIndex)
if len(sessionIndex):
si = sessionIndex[0]
print("si",si)
print("resultsDirList",resultsDirList)
print("resultsDirList[0]",resultsDirList[0])
# print("resultsDirList[0][-1]",resultsDirList[0][-1])
if len(resultsDirList[0])>0:
if ":" == resultsDirList[0][-1]: # add path seperator if root ends in :
resultsDirList[0] = resultsDirList[0]+os.path.sep
print("resultsDirList[0]", resultsDirList[0])
self.root = os.path.sep.join(resultsDirList[:si-2])
self.studyName = resultsDirList[si-2]
self.subject = resultsDirList[si-1]
self.session = resultsDirList[si]
self.imagedRegion = resultsDirList[si+1]
self.protocol = resultsDirList[si+2]
self.results = resultsDirList[si+3]
self.roiType = imagedRegionType = resultsDirList[si+4]
self.fitModel = resultsDirList[si+5]
print("self.root",self.root)
### create directory paths to T2 and Dixon results and image path
# T2_images_dirPath
# dixon_images_dirPath
# dixon_results_dirPath
# T2_results_dirPath
## T2 image path
dirpath = os.path.join(self.root,self.studyName,self.subject,
self.session,self.imagedRegion,"T2")
if os.path.exists(dirpath):
self.T2imagesDirpath = dirpath
## dixon image path
dirpath = os.path.join(self.root,self.studyName,self.subject,self.session,
self.imagedRegion,"dixon")
if os.path.exists(dirpath):
self.dixonImagesDirpath = dirpath
## set T2 and dixon results path
if self.protocol.lower() == "t2":
self.T2resultsDirpath, self.dixonResultsDirpath, = self.set_results_dir("dixon", resultsDir)
elif self.protocol.lower() == "dixon":
self.dixonResultsDirpath, self.T2resultsDirpath, = self.set_results_dir("T2", resultsDir)
print("self.dixonResultsDirpath", self.dixonResultsDirpath)
print("self.T2resultsDirpath", self.T2resultsDirpath)
## set csv results path name for T2 and dixon
if "T2".lower() in fn.lower():
self.T2resultsFilenameAndPath = fn
resultFilenameList = [ os.path.join(self.dixonResultsDirpath,fi)
for fi in os.listdir(self.dixonResultsDirpath)
if "results." in fi.lower() and (".csv" in fi.lower() )]
if resultFilenameList:
self.dixonResultsFilenameAndPath = resultFilenameList[0]
elif "dixon" in fn.lower():
self.dixonResultsFilenameAndPath = fn
resultFilenameList = [ os.path.join(self.T2resultsDirpath,fi)
for fi in os.listdir(self.T2ResultsDirpath)
if "results." in fi.lower() and (".csv" in fi.lower() )]
if resultFilenameList:
self.T2resultsFilenameAndPath = resultFilenameList[0]
def read_T2_data(self):
print("read_T2_data function entered")
print("self.T2resultsFilenameAndPath", self.T2resultsFilenameAndPath)
if os.path.exists(self.T2resultsFilenameAndPath):
print(self.T2resultsFilenameAndPath, "exists")
self.t2_data_summary_df = pd.read_csv(self.T2resultsFilenameAndPath)
self.T2slices = list(self.t2_data_summary_df["slice"].unique())
return(True)
else:
print(self.T2resultsFilenameAndPath, "not Found" )
return(False)
def read_Dixon_data(self):
print("read_Dixon_data function entered")
print("self.dixonResultsFilenameAndPath",self.dixonResultsFilenameAndPath)
if os.path.exists(self.dixonResultsFilenameAndPath):
print(self.dixonResultsFilenameAndPath, "exists")
self.dixon_data_summary_df = pd.read_csv(self.dixonResultsFilenameAndPath)
self.dixonSlices = list(self.dixon_data_summary_df["slice"].unique())
return(True)
else:
print(self.dixonResultsFilenameAndPath, "not Found" )
self.dixon_data_summary_df = pd.DataFrame()
return(False)
def read_T2_img_hdr_files(self):
    """Load the T2 MRI volume with nibabel, reorient it, and initialise the
    geometry attributes and the displayed slice.

    Returns True when the image file exists and was loaded, False otherwise."""
    if not os.path.exists(self.T2MRIimageFilenameAndPath):
        return False
    print(self.T2MRIimageFilenameAndPath, " found")
    self.t2_imghdr = nibabel.load(self.T2MRIimageFilenameAndPath)
    # NOTE(review): get_data() is deprecated/removed in newer nibabel
    # releases — get_fdata() is the replacement, but it changes the dtype
    # to float; confirm before switching.
    volume = self.t2_imghdr.get_data()
    # Swap the first two axes and flip vertically so rows/cols display
    # the way the rest of the viewer expects.
    volume = np.flipud(volume.swapaxes(1, 0))
    self.update_imageDataT2(volume)
    # Volume is assumed 4-D: rows x cols x slices x echoes.
    self.numRowsT2, self.numColsT2, self.numSlicesT2, self.numEchoesT2 = self.ImageDataT2.shape
    self.mriSliceIMG = np.zeros((self.numRowsT2, self.numColsT2), dtype=np.double)
    # Show the first slice / first echo initially (copy, not a view).
    self.mriSliceIMG = self.ImageDataT2[:, :, 0, 0] * 1.0
    self.currentEcho = 0
    self.currentSlice = 0
    return True
def update_imageDataT2(self, imageData):
self.ImageDataT2 = imageData
def overlayRoisOnImage(self, slice_pos, roi_data):
print("Entering overlayRoisOnImage", slice_pos)
print("roi_data",roi_data)
if roi_data in self.t2_data_summary_df.columns:
roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2)
t2_data_query_df = self.t2_data_summary_df.query('slice == {}'.format(str(slice_pos)))
roi_image_layer[t2_data_query_df.pixel_index] = t2_data_query_df[roi_data]
self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
elif roi_data in self.dixon_data_summary_df.columns:
# print("slice_pos", slice_pos)
# print("self.T2slices.index(slice_pos)",self.T2slices.index(slice_pos))
# print("self.dixonSlices[self.T2slices.index(slice_pos)]",self.dixonSlices[self.T2slices.index(slice_pos)])
if slice_pos in self.T2slices:
dixon_slice = self.dixonSlices[self.T2slices.index(slice_pos)]
else:
dixon_slice = slice_pos
roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2)
#df_t2 = self.t2_data_summary_df[roi_data, 'pixel_index','roi'].groupby('slice')
dixon_data_query_df = self.dixon_data_summary_df.query('slice == {}'.format(str(dixon_slice)))
# roi_image_layer[dixon_data_query_df.pixels] = dixon_data_query_df[roi_data]/dixon_data_query_df[roi_data].max()
roi_image_layer[dixon_data_query_df.pixel_index] = dixon_data_query_df[roi_data]
# self.img1[:,:,2] = roi_image_layer.reshape((self.numRowsT2,self.numColsT2))
self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
else:
roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2)
self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
| {"/visionplot_widgets.py": ["/t2fit.py", "/ImageData.py", "/epgT2paramsDialog.py", "/azzT2paramsDialog.py"], "/simple_pandas_plot.py": ["/visionplot_widgets.py", "/mriplotwidget.py", "/ImageData.py"]} |
550 | EricHughesABC/T2EPGviewer | refs/heads/master | /azzT2paramsDialog.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'azz_fit_parameters_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class AzzT2paramsDialog(object):
    """Qt dialog (generated from 'azz_fit_parameters_dialog.ui') for editing
    the Azzabou T2 model fit parameters held in an lmfit Parameters set."""

    def __init__(self, lmparams):
        # Keep the shared parameter dict and a shortcut to the Azzabou subset.
        self.lmparams = lmparams
        self.params = lmparams['azzt2fitparams']
def setupAzzT2paramsDialog(self, Dialog):
    """Build the dialog's widgets (layout generated by Qt Designer) and wire
    up the OK/Cancel button box."""
    self.dialog = Dialog
    Dialog.setObjectName("Azzabou")
    Dialog.resize(398, 335)

    # OK / Cancel buttons.
    self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
    self.buttonBox.setGeometry(QtCore.QRect(230, 280, 156, 23))
    self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
    self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
    self.buttonBox.setObjectName("buttonBox")

    # Grid layout holding one row per fit parameter.
    self.layoutWidget = QtWidgets.QWidget(Dialog)
    self.layoutWidget.setGeometry(QtCore.QRect(20, 10, 361, 252))
    self.layoutWidget.setObjectName("layoutWidget")
    self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
    self.gridLayout.setContentsMargins(0, 0, 0, 0)
    self.gridLayout.setObjectName("gridLayout")

    # Labels, line edits (float-validated) and optimize checkboxes.
    self.label_11 = QtWidgets.QLabel(self.layoutWidget)
    self.label_11.setObjectName("label_11")
    self.gridLayout.addWidget(self.label_11, 7, 0, 1, 1)
    self.label_12 = QtWidgets.QLabel(self.layoutWidget)
    self.label_12.setObjectName("label_12")
    self.gridLayout.addWidget(self.label_12, 8, 0, 1, 1)
    self.echoTimeValue = QtWidgets.QLineEdit(self.layoutWidget)
    self.echoTimeValue.setValidator(QtGui.QDoubleValidator())
    self.echoTimeValue.setObjectName("echoTimeValue")
    self.gridLayout.addWidget(self.echoTimeValue, 8, 1, 1, 1)
    self.longFatT2value = QtWidgets.QLineEdit(self.layoutWidget)
    self.longFatT2value.setValidator(QtGui.QDoubleValidator())
    self.longFatT2value.setObjectName("longFatT2value")
    self.gridLayout.addWidget(self.longFatT2value, 6, 1, 1, 1)
    self.shortFatT2value = QtWidgets.QLineEdit(self.layoutWidget)
    self.shortFatT2value.setValidator(QtGui.QDoubleValidator())
    self.shortFatT2value.setObjectName("shortFatT2value")
    self.gridLayout.addWidget(self.shortFatT2value, 7, 1, 1, 1)
    self.label_2 = QtWidgets.QLabel(self.layoutWidget)
    self.label_2.setObjectName("label_2")
    self.gridLayout.addWidget(self.label_2, 0, 2, 1, 1)
    self.label_3 = QtWidgets.QLabel(self.layoutWidget)
    self.label_3.setObjectName("label_3")
    self.gridLayout.addWidget(self.label_3, 0, 3, 1, 1)
    self.muscleT2minimum = QtWidgets.QLineEdit(self.layoutWidget)
    self.muscleT2minimum.setValidator(QtGui.QDoubleValidator())
    self.muscleT2minimum.setObjectName("muscleT2minimum")
    self.gridLayout.addWidget(self.muscleT2minimum, 1, 2, 1, 1)
    self.fatFractionMinimum = QtWidgets.QLineEdit(self.layoutWidget)
    self.fatFractionMinimum.setValidator(QtGui.QDoubleValidator())
    self.fatFractionMinimum.setObjectName("fatFractionMinimum")
    self.gridLayout.addWidget(self.fatFractionMinimum, 3, 2, 1, 1)
    self.fatFractionMaximum = QtWidgets.QLineEdit(self.layoutWidget)
    self.fatFractionMaximum.setValidator(QtGui.QDoubleValidator())
    self.fatFractionMaximum.setObjectName("fatFractionMaximum")
    self.gridLayout.addWidget(self.fatFractionMaximum, 3, 3, 1, 1)
    self.muscleFractionMinimum = QtWidgets.QLineEdit(self.layoutWidget)
    self.muscleFractionMinimum.setValidator(QtGui.QDoubleValidator())
    self.muscleFractionMinimum.setObjectName("muscleFractionMinimum")
    self.gridLayout.addWidget(self.muscleFractionMinimum, 2, 2, 1, 1)
    self.optimizeMuscleFraction = QtWidgets.QCheckBox(self.layoutWidget)
    self.optimizeMuscleFraction.setText("")
    self.optimizeMuscleFraction.setChecked(True)
    self.optimizeMuscleFraction.setObjectName("optimizeMuscleFraction")
    self.gridLayout.addWidget(self.optimizeMuscleFraction, 2, 4, 1, 1)
    self.muscleFractionMaximum = QtWidgets.QLineEdit(self.layoutWidget)
    self.muscleFractionMaximum.setValidator(QtGui.QDoubleValidator())
    self.muscleFractionMaximum.setObjectName("muscleFractionMaximum")
    self.gridLayout.addWidget(self.muscleFractionMaximum, 2, 3, 1, 1)
    self.optimizeFatFraction = QtWidgets.QCheckBox(self.layoutWidget)
    self.optimizeFatFraction.setText("")
    self.optimizeFatFraction.setChecked(True)
    self.optimizeFatFraction.setObjectName("optimizeFatFraction")
    self.gridLayout.addWidget(self.optimizeFatFraction, 3, 4, 1, 1)
    self.label_7 = QtWidgets.QLabel(self.layoutWidget)
    self.label_7.setObjectName("label_7")
    self.gridLayout.addWidget(self.label_7, 3, 0, 1, 1)
    self.label_8 = QtWidgets.QLabel(self.layoutWidget)
    self.label_8.setObjectName("label_8")
    self.gridLayout.addWidget(self.label_8, 4, 0, 1, 1)
    self.optimizeMuscleT2 = QtWidgets.QCheckBox(self.layoutWidget)
    self.optimizeMuscleT2.setText("")
    self.optimizeMuscleT2.setChecked(True)
    self.optimizeMuscleT2.setObjectName("optimizeMuscleT2")
    self.gridLayout.addWidget(self.optimizeMuscleT2, 1, 4, 1, 1)
    self.fatFractionLongT2value = QtWidgets.QLineEdit(self.layoutWidget)
    self.fatFractionLongT2value.setValidator(QtGui.QDoubleValidator())
    self.fatFractionLongT2value.setObjectName("fatFractionLongT2value")
    self.gridLayout.addWidget(self.fatFractionLongT2value, 4, 1, 1, 1)
    self.label_4 = QtWidgets.QLabel(self.layoutWidget)
    self.label_4.setObjectName("label_4")
    self.gridLayout.addWidget(self.label_4, 0, 4, 1, 1)
    # NOTE(review): unlike the other numeric fields, muscleT2value has no
    # QDoubleValidator — confirm whether that is intentional.
    self.muscleT2value = QtWidgets.QLineEdit(self.layoutWidget)
    self.muscleT2value.setObjectName("muscleT2value")
    self.gridLayout.addWidget(self.muscleT2value, 1, 1, 1, 1)
    self.fatFractionShortT2value = QtWidgets.QLineEdit(self.layoutWidget)
    self.fatFractionShortT2value.setValidator(QtGui.QDoubleValidator())
    self.fatFractionShortT2value.setObjectName("fatFractionShortT2value")
    self.gridLayout.addWidget(self.fatFractionShortT2value, 5, 1, 1, 1)
    self.label_5 = QtWidgets.QLabel(self.layoutWidget)
    self.label_5.setObjectName("label_5")
    self.gridLayout.addWidget(self.label_5, 1, 0, 1, 1)
    self.label_6 = QtWidgets.QLabel(self.layoutWidget)
    self.label_6.setObjectName("label_6")
    self.gridLayout.addWidget(self.label_6, 2, 0, 1, 1)
    self.label_9 = QtWidgets.QLabel(self.layoutWidget)
    self.label_9.setObjectName("label_9")
    self.gridLayout.addWidget(self.label_9, 5, 0, 1, 1)
    self.muscleT2maximum = QtWidgets.QLineEdit(self.layoutWidget)
    self.muscleT2maximum.setValidator(QtGui.QDoubleValidator())
    self.muscleT2maximum.setObjectName("muscleT2maximum")
    self.gridLayout.addWidget(self.muscleT2maximum, 1, 3, 1, 1)
    self.label_10 = QtWidgets.QLabel(self.layoutWidget)
    self.label_10.setObjectName("label_10")
    self.gridLayout.addWidget(self.label_10, 6, 0, 1, 1)
    self.muscleFractionValue = QtWidgets.QLineEdit(self.layoutWidget)
    self.muscleFractionValue.setValidator(QtGui.QDoubleValidator())
    self.muscleFractionValue.setObjectName("muscleFractionValue")
    self.gridLayout.addWidget(self.muscleFractionValue, 2, 1, 1, 1)
    self.label = QtWidgets.QLabel(self.layoutWidget)
    self.label.setObjectName("label")
    self.gridLayout.addWidget(self.label, 0, 1, 1, 1)
    self.fatFractionValue = QtWidgets.QLineEdit(self.layoutWidget)
    self.fatFractionValue.setValidator(QtGui.QDoubleValidator())
    self.fatFractionValue.setObjectName("fatFractionValue")
    self.gridLayout.addWidget(self.fatFractionValue, 3, 1, 1, 1)

    # Fill display text and connect the button-box signals.
    self.retranslateUi(Dialog)
    self.buttonBox.accepted.connect(self.dialog_ok_clicked)
    self.buttonBox.rejected.connect(Dialog.reject)
    QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
    """Assign the (translatable) display text and defaults for every widget."""
    _translate = QtCore.QCoreApplication.translate
    Dialog.setWindowTitle(_translate("Azzabou", "Azzabout T2 model"))
    # Row labels.
    self.label_11.setText(_translate("Azzabou", "Short Fat T<sub>2</sub> (ms)"))
    self.label_12.setText(_translate("Azzabou", "Echo Time (ms)"))
    self.label_2.setText(_translate("Azzabou", "minimum"))
    self.label_3.setText(_translate("Azzabou", "maximum"))
    self.label_7.setText(_translate("Azzabou", "Fat Fraction"))
    self.label_8.setText(_translate("Azzabou", "Fat Fraction (Long T<sub>2</sub>)"))
    self.label_4.setText(_translate("Azzabou", "optimized"))
    self.label_5.setText(_translate("Azzabou", "Muscle T<sub>2</sub> (ms)"))
    self.label_6.setText(_translate("Azzabou", "Muscle Fraction"))
    self.label_9.setText(_translate("Azzabou", "Fat Fraction (Short T<sub>2</sub>)"))
    self.label_10.setText(_translate("Azzabou", "Long Fat T<sub>2</sub> (ms)"))
    self.label.setText(_translate("Azzabou", "value"))
    # Default numeric values.
    self.echoTimeValue.setText(_translate("Azzabou", "10.0"))
    self.longFatT2value.setText(_translate("Azzabou", "250.0"))
    self.shortFatT2value.setText(_translate("Azzabou", "43.0"))
    self.muscleT2minimum.setText(_translate("Azzabou", "0.0"))
    self.fatFractionMinimum.setText(_translate("Azzabou", "0.0"))
    self.fatFractionMaximum.setText(_translate("Azzabou", "10.0"))
    self.muscleFractionMinimum.setText(_translate("Azzabou", "0.0"))
    self.muscleFractionMaximum.setText(_translate("Azzabou", "10.0"))
    self.fatFractionLongT2value.setText(_translate("Azzabou", "0.6"))
    self.muscleT2value.setText(_translate("Azzabou", "35.0"))
    self.fatFractionShortT2value.setText(_translate("Azzabou", "0.4"))
    self.muscleT2maximum.setText(_translate("Azzabou", "100.0"))
    self.muscleFractionValue.setText(_translate("Azzabou", "0.8"))
    self.fatFractionValue.setText(_translate("Azzabou", "0.2"))
def dialog_ok_clicked(self):
print("dialog_ok_clicked")
self.dialog.setResult(1)
worked =self.get_fitparameters()
if worked:
self.params.pretty_print()
self.dialog.accept()
def get_fitparameters( self ):
print("self.optimizeFatFraction.isChecked()", self.optimizeFatFraction.isChecked() )
#epgt2fitparams = lm.Parameters()
worked = True
try:
self.params.add(name='T2muscle', value = float(self.muscleT2value.text()),
min = float(self.muscleT2minimum.text()),
max = float(self.muscleT2maximum.text()),
vary = self.optimizeMuscleT2.isChecked())
self.params.add(name='Amuscle', value = float(self.muscleFractionValue.text()),
min = float(self.muscleFractionMinimum.text()),
max = float(self.muscleFractionMaximum.text()),
vary = self.optimizeMuscleFraction.isChecked())
self.params.add(name='Afat', value = float(self.fatFractionValue.text()),
min = float(self.fatFractionMinimum.text()),
max = float(self.fatFractionMaximum.text()),
vary = self.optimizeFatFraction.isChecked())
self.params.add(name='c_l', value=float(self.fatFractionLongT2value.text()), vary=False)
self.params.add(name='c_s', value=float(self.fatFractionShortT2value.text()), vary=False)
self.params.add(name='t2_fl', value=float(self.longFatT2value.text()), vary=False)
self.params.add(name='t2_fs', value=float(self.shortFatT2value.text()), vary=False)
self.params.add(name='echo', value=float(self.echoTimeValue.text()), vary=False)
buttonsUnChecked = [not self.optimizeFatFraction.isChecked(),
not self.optimizeMuscleFraction.isChecked(),
not self.optimizeMuscleT2.isChecked()]
print(buttonsUnChecked)
if all(buttonsUnChecked):
print("all buttuns unchecked")
worked=False
self.lmparams['azzt2fitparams'] = self.params
except:
print("exception occurred")
worked = False
return worked
if __name__ == "__main__":
    # Stand-alone demo: build default parameter sets and show the dialog.
    import sys
    import lmfit as lm

    lmparams = {}
    epgt2fitparams = lm.Parameters()
    azzt2fitparams = lm.Parameters()
    epgt2fitparams.add('T2fat', value=180.0, min=0, max=5000, vary=False)
    epgt2fitparams.add('T2muscle', value=35, min=0, max=100, vary=True)
    epgt2fitparams.add('Afat', value=0.20, min=0, max=10, vary=True)
    epgt2fitparams.add('Amuscle', value=0.80, min=0, max=10, vary=True)
    epgt2fitparams.add('T1fat', value=365.0, vary=False)
    epgt2fitparams.add('T1muscle', value=1400, vary=False)
    epgt2fitparams.add('echo', value=10.0, vary=False)
    # add_many tuples: (name, value, vary, min, max, expr)
    azzt2fitparams.add_many(('Afat', 60.0, True, 0, 250, None),
                            ('Amuscle', 40.0, True, 0, 250, None),
                            ('T2muscle', 40.0, True, 0, 100, None),
                            ('c_l', 0.55, False, 0, 2000, None),
                            ('c_s', 0.45, False, 0, 2000, None),
                            ('t2_fl', 250.0, False, 0, 2000, None),
                            ('t2_fs', 43.0, False, 0, 2000, None),
                            ('echo', 10.0, False, 0, 2000, None))
    lmparams['epgt2fitparams'] = epgt2fitparams
    lmparams['azzt2fitparams'] = azzt2fitparams

    app = QtWidgets.QApplication(sys.argv)
    Azzabou = QtWidgets.QDialog()
    ui = AzzT2paramsDialog(lmparams)
    ui.setupAzzT2paramsDialog(Azzabou)
    Azzabou.show()
    sys.exit(app.exec_())
| {"/visionplot_widgets.py": ["/t2fit.py", "/ImageData.py", "/epgT2paramsDialog.py", "/azzT2paramsDialog.py"], "/simple_pandas_plot.py": ["/visionplot_widgets.py", "/mriplotwidget.py", "/ImageData.py"]} |
553 | olof98johansson/SentimentAnalysisNLP | refs/heads/main | /main.py | import train
import preprocessing
def run():
    '''
    Training function to run the training process after specifying parameters:
    configures the preprocessing paths/labels/keywords, trains the RNN, then
    renders the training-progress plot and animation.
    '''
    # Six depressive and six non-depressive scrape files, in that order.
    depressive = ['./training_data/depressive{}.json'.format(i) for i in range(1, 7)]
    non_depressive = ['./training_data/non-depressive{}.json'.format(i) for i in range(1, 7)]
    preprocessing.config.paths = depressive + non_depressive
    preprocessing.config.save_path = './training_data/all_training_data.csv'
    preprocessing.config.labels = ['depressive'] * 6 + ['not-depressive'] * 6
    # One search keyword per input file, same order as `paths`.
    preprocessing.config.keywords = ['depressed', 'lonely', 'sad', 'depression', 'tired', 'anxious',
                                     'happy', 'joy', 'thankful', 'health', 'hopeful', 'glad']
    preprocessing.config.nr_of_tweets = [1000] * 12
    # collect=False if the data has already been scraped to disk.
    history, early_stop_check = train.train_rnn(save_path='./weights/lstm_model_2.pth', collect=True)
    train.show_progress(history=history, save_name='./plots/training_progress.png')
    train.animate_progress(history=history, save_path='./plots/training_animation_progress_REAL.gif',
                           early_stop_check=early_stop_check)
# BUG FIX: an unguarded run() call launches the whole training pipeline on a
# mere `import main`; guard it so the module is importable.
if __name__ == "__main__":
    run()
| {"/main.py": ["/train.py", "/preprocessing.py"], "/train.py": ["/models.py", "/preprocessing.py"], "/models.py": ["/preprocessing.py"], "/preprocessing.py": ["/data_cleaning.py", "/twint_scraping.py"], "/predict.py": ["/models.py", "/train.py", "/preprocessing.py", "/data_cleaning.py", "/twint_scraping.py"]} |
554 | olof98johansson/SentimentAnalysisNLP | refs/heads/main | /train.py | import torch
import torch.nn as nn
import models
import preprocessing
from collections import defaultdict
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
from celluloid import Camera
def Logger(elapsed_time, epoch, epochs, tr_loss, tr_acc, val_loss, val_acc):
    '''
    Print a one-line summary of training progress.

    Input: elapsed_time - seconds elapsed since training started
           epoch - current epoch
           epochs - total number of epochs
           tr_loss/val_loss - current training/validation loss
           tr_acc/val_acc - current training/validation accuracy (percent)
    '''
    # Pick the largest sensible unit for the elapsed time.
    if 60 < elapsed_time <= 3600:
        shown, unit = elapsed_time / 60, 'min'
    elif elapsed_time > 3600:
        shown, unit = elapsed_time / 3600, 'hrs'
    else:
        shown, unit = elapsed_time, 'sec'
    shown = format(shown, '.2f')
    print(f'Elapsed time: {shown} {unit} Epoch: {epoch}/{epochs} ',
          f'Train Loss: {tr_loss:.4f} Val Loss: {val_loss:.4f} ',
          f'Train Acc: {tr_acc:.2f}% Val Acc: {val_acc:.2f}%')
class EarlyStopping(object):
    '''
    Tracks the best validation loss seen so far and signals that training
    should stop once the loss has failed to improve for `patience`
    consecutive updates, to avoid overfitting.
    '''
    def __init__(self, patience):
        super().__init__()
        # Sentinel "worst" loss so the first real loss always improves on it.
        self.best_loss = 1e5
        self.patience = patience
        self.nr_no_improved = 0

    def update(self, curr_loss):
        '''
        Record one validation loss. Returns True when patience is exhausted
        (training should stop), False otherwise.
        '''
        if curr_loss < self.best_loss:
            # New best: remember it and reset the stagnation counter.
            self.best_loss = curr_loss
            self.nr_no_improved = 0
            return False
        self.nr_no_improved += 1
        if self.nr_no_improved < self.patience:
            return False
        print(f'Early stopping! Model did not improve for last {self.nr_no_improved} epochs')
        return True
class rnn_params:
    '''
    Configuration to store and tune RNN specific hyperparameters
    '''
    # Which recurrent cell to build in models.RNNModel: 'rnn', 'lstm' or 'gru'.
    rnn_type = 'lstm'
    # Dimensionality of the word-embedding layer.
    emb_dim = 64
    # Number of hidden units per recurrent layer.
    rnn_size = 64
    # Number of stacked recurrent layers.
    nr_layers = 1
    # Dropout probability (inter-layer RNN dropout only applies when nr_layers > 1).
    dropout = 0.5
    # Adam learning rate.
    lr = 1e-3
    # Batch size used by both the data loaders and init_hidden.
    batch_size = 64
    # Total number of training epochs.
    n_epochs = 30
    # L2 weight decay passed to the Adam optimizer.
    decay = 1e-5
    # Train on GPU when available, otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Epochs without validation improvement before EarlyStopping triggers.
    patience = 5
def train_rnn(save_path = None, collect=True):
    '''
    Training function for the rnn model that trains and validates the models performance

    Input: save_path - path and file name to where to save the trained weights (type: string)
           collect - specify if to collect data or not (type: boolean)

    Output: history - history of the models training progression (type: defaultdict of lists)
            early_stop_check - if early stopping has been executed or not (type: boolean)

    Fixes vs. the previous revision:
      * the epoch loop now runs all rnn_params.n_epochs epochs (range(1, n_epochs) was off by one),
      * validation runs under torch.no_grad() so no gradient graph is built,
      * the hidden-state detach handles both tuple (LSTM) and tensor (RNN/GRU) states,
      * accuracies are scaled to percent for the Logger printout only; history still
        stores fractions in [0, 1] for backward compatibility with the plotters,
      * saving is skipped when save_path is None (previously crashed inside
        ModelUtils.save_model on an early stop).
    '''
    dataloaders, vocab_size, n_classes = preprocessing.preprocess(rnn_params.batch_size, collect=collect)
    train_loader, val_loader = dataloaders

    model = models.RNNModel(rnn_type=rnn_params.rnn_type, nr_layers=rnn_params.nr_layers,
                            voc_size=vocab_size, emb_dim=rnn_params.emb_dim, rnn_size=rnn_params.rnn_size,
                            dropout=rnn_params.dropout, n_classes=n_classes)
    loss_fn = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=rnn_params.lr, weight_decay=rnn_params.decay)
    model.to(rnn_params.device)

    # Normalize the weight-file extension once, up front, so both save sites agree.
    if save_path:
        root, ext = os.path.splitext(save_path)
        save_path = root + '.pth'

    history = defaultdict(list)
    init_training_time = time.time()
    early_stopping = EarlyStopping(patience=rnn_params.patience)
    early_stop_check = False

    def detach_hidden(h):
        # Detach the hidden state from the previous batch's graph; LSTM uses an
        # (h, c) tuple while plain RNN/GRU use a single tensor.
        return tuple(each.data for each in h) if isinstance(h, tuple) else h.data

    for epoch in range(1, rnn_params.n_epochs + 1):
        # ---- training pass ----
        model.train()
        h = model.init_hidden(rnn_params.batch_size, device=rnn_params.device)
        n_correct, n_instances, total_loss = 0, 0, 0
        for inputs, labels in train_loader:
            inputs = inputs.to(rnn_params.device)
            labels = labels.to(rnn_params.device)
            h = detach_hidden(h)
            optimizer.zero_grad()
            outputs, h = model(inputs, h)
            loss = loss_fn(outputs.squeeze(), labels.float())
            total_loss += loss.item()
            n_instances += labels.shape[0]
            # Sigmoid outputs rounded to a hard 0/1 class decision.
            predictions = torch.round(outputs.squeeze())
            n_correct += (torch.sum(predictions == labels.float())).item()
            loss.backward()
            optimizer.step()
        epoch_loss = total_loss / len(train_loader)
        epoch_acc = n_correct / n_instances

        # ---- validation pass (no gradients) ----
        model.eval()
        val_h = model.init_hidden(rnn_params.batch_size, device=rnn_params.device)
        n_correct_val, n_instances_val, total_loss_val = 0, 0, 0
        with torch.no_grad():
            for val_inp, val_lab in val_loader:
                val_inp = val_inp.to(rnn_params.device)
                val_lab = val_lab.to(rnn_params.device)
                val_h = detach_hidden(val_h)
                val_out, val_h = model(val_inp, val_h)
                val_loss = loss_fn(val_out.squeeze(), val_lab.float())
                total_loss_val += val_loss.item()
                n_instances_val += val_lab.shape[0]
                val_preds = torch.round(val_out.squeeze())
                n_correct_val += (torch.sum(val_preds == val_lab.float())).item()
        epoch_val_loss = total_loss_val / len(val_loader)
        epoch_val_acc = n_correct_val / n_instances_val

        # Accuracies are fractions in [0, 1]; scale to percent for display only.
        curr_time = time.time()
        Logger(curr_time - init_training_time, epoch, rnn_params.n_epochs, epoch_loss,
               100.0 * epoch_acc, epoch_val_loss, 100.0 * epoch_val_acc)

        history['training loss'].append(epoch_loss)
        history['training acc'].append(epoch_acc)
        history['validation loss'].append(epoch_val_loss)
        history['validation acc'].append(epoch_val_acc)

        early_stop_check = early_stopping.update(epoch_val_loss)
        if early_stop_check:
            if save_path:
                models.ModelUtils.save_model(save_path=save_path, model=model)
            return history, early_stop_check

    if save_path:
        models.ModelUtils.save_model(save_path=save_path, model=model)
    return history, early_stop_check
def show_progress(history, save_name = None):
    '''
    Plot the training/validation loss and accuracy curves side by side.
    Input: history - training history as produced by train_rnn (dict of lists)
           save_name - optional path to save the figure to
    '''
    fig, (loss_ax, acc_ax) = plt.subplots(1, 2, figsize=(21, 7))
    fig.suptitle('Training progression', fontsize=18)
    # (axis, training-curve key, validation-curve key, y label, panel title)
    panels = [
        (loss_ax, 'training loss', 'validation loss', r'$\mathcal{L}(\hat{y}, y)$', 'Losses'),
        (acc_ax, 'training acc', 'validation acc', r'%', 'Accuracies'),
    ]
    for ax, train_key, val_key, y_label, title in panels:
        ax.plot(history[train_key], linewidth=2, color='#99ccff', alpha=0.9, label='Training')
        ax.plot(history[val_key], linewidth=2, color='#cc99ff', alpha=0.9, label='Validation')
        ax.set_xlabel(xlabel='Epochs', fontsize=12)
        ax.set_ylabel(ylabel=y_label, fontsize=12)
        ax.set_title(label=title, fontsize=14)
        ax.legend()
    if save_name:
        plt.savefig(save_name, bbox_inches='tight')
    plt.show()
def animate_progress(history, save_path, early_stop_check):
    '''
    Render the training progression as an animated GIF: one frame per epoch
    growing the curves, then 10 frames of the full curves so the final state
    lingers. `early_stop_check` is accepted for interface compatibility but
    not used in the rendering itself.
    '''
    root, ext = os.path.splitext(save_path)
    save_path = root + '.gif'

    fig, (loss_ax, acc_ax) = plt.subplots(1, 2, figsize=(15, 6))
    camera = Camera(fig)
    fig.suptitle('Training progression', fontsize=18)
    loss_ax.set_xlabel(xlabel='Epochs', fontsize=12)
    loss_ax.set_ylabel(ylabel=r'$\mathcal{L}(\hat{y}, y)$', fontsize=12)
    loss_ax.set_title(label='Losses', fontsize=14)
    acc_ax.set_xlabel(xlabel='Epochs', fontsize=12)
    acc_ax.set_ylabel(ylabel=r'%', fontsize=12)
    acc_ax.set_title(label='Accuracies', fontsize=14)

    epochs = np.arange(len(history['training loss']))

    def snap_frame(sl):
        # Draw both panels for the slice `sl` of the history and snapshot it.
        loss_ax.plot(epochs[sl], history['training loss'][sl], linewidth=2, color='#99ccff')
        loss_ax.plot(epochs[sl], history['validation loss'][sl], linewidth=2, color='#cc99ff')
        acc_ax.plot(epochs[sl], history['training acc'][sl], linewidth=2, color='#99ccff')
        acc_ax.plot(epochs[sl], history['validation acc'][sl], linewidth=2, color='#cc99ff')
        loss_ax.legend(['Training', 'Validation'])
        acc_ax.legend(['Training', 'Validation'])
        camera.snap()

    for e in epochs:
        snap_frame(slice(None, e))
    for i in range(10):
        snap_frame(slice(None))

    animation = camera.animate()
    animation.save(save_path, writer='imagemagick')
| {"/main.py": ["/train.py", "/preprocessing.py"], "/train.py": ["/models.py", "/preprocessing.py"], "/models.py": ["/preprocessing.py"], "/preprocessing.py": ["/data_cleaning.py", "/twint_scraping.py"], "/predict.py": ["/models.py", "/train.py", "/preprocessing.py", "/data_cleaning.py", "/twint_scraping.py"]} |
555 | olof98johansson/SentimentAnalysisNLP | refs/heads/main | /twint_scraping.py | # NOTE: TWINT NEEDS TO BE INSTALLEED BY THE FOLLOWING COMMAND:
# pip install --user --upgrade git+https://github.com/twintproject/twint.git@origin/master#egg=twint
# OTHERWISE IT WON'T WORK
import twint
import nest_asyncio
nest_asyncio.apply()
from dateutil import rrule
from datetime import datetime, timedelta
def get_weeks(start_date, end_date):
    '''
    Finds collection of weeks chronologically from a starting date to a final date

    Input: start_date - date of which to start collecting with format [year, month, day] (type: list of ints)
           end_date - date of which to stop collecting with format [year, month, day] (type: list of ints)

    Output: weeks - list containing the lists of starting and ending date for each week with format
                    "%Y-%m-%d %h-%m-%s" (type: list of lists of strings)
    '''
    start_year, start_month, start_day = start_date
    final_year, final_month, final_day = end_date
    # Weekly recurrence between the two endpoints, materialized for pairing.
    dates = list(rrule.rrule(rrule.WEEKLY,
                             dtstart=datetime(start_year, start_month, start_day),
                             until=datetime(final_year, final_month, final_day)))
    fmt = '%Y-%m-%d %H:%M:%S'
    # Each week is a [start, end] pair of consecutive recurrence dates; the
    # time-of-day component is always midnight since only dates are formatted.
    return [[week_start.date().strftime(fmt), week_end.date().strftime(fmt)]
            for week_start, week_end in zip(dates, dates[1:])]
def collect_tweets(keywords = None, nr_tweets = None,
                   output_file=None, coord=None, timespan=[None, None]):
    '''
    Collecting tweets using twint based on different attributes and save to json file

    Input: keywords - keywords that the tweet should contain (type: string)
           nr_tweets - number of tweets to collect (type: int)
           output_file - path and name to where the file should be saved (type: string, extension: .json)
           coord - geo filter "lat,long,radius" of where the tweets were tweeted (type: string)
           timespan - [since, until] span of when the tweet was tweeted, each in
                      format "%Y-%m-%d %h-%m-%s" (type: list of strings)

    Output: Returns the twint module (the search results are written to output_file)
    '''
    # NOTE(review): the mutable default `timespan=[None, None]` is shared across
    # calls; it is never mutated here, so this is safe, but worth confirming.
    # configuration
    config = twint.Config()
    # Search keyword
    config.Search = keywords
    # Language
    config.Lang = "en"
    # Number of tweets
    config.Limit = nr_tweets
    #Dates
    config.Since = timespan[0]
    config.Until = timespan[1]
    # Output file format (alternatives: json, csv, SQLite)
    config.Store_json = True
    # Name of output file with format extension (i.e NAME.json, NAME.csv etc)
    config.Output = output_file
    # Geographic filter, e.g. "54.25,-4.46,550km"
    config.Geo = coord
    # running search
    twint.run.Search(config)
    return twint
# EXAMPLE
def test():
    '''
    Example/smoke test: scrapes 10 English tweets posted near London in a fixed
    one-month window of 2016 and stores them in test2.json.
    '''
    config = twint.Config()
    config.Search = None
    config.Near = "london"
    config.Lang = "en"
    config.Limit = 10
    config.Since = "2016-10-29 00:00:00"
    config.Until = "2016-11-29 12:15:19"
    config.Store_json = True
    config.Output = "test2.json"
    #running search
    twint.run.Search(config)
#test()
| {"/main.py": ["/train.py", "/preprocessing.py"], "/train.py": ["/models.py", "/preprocessing.py"], "/models.py": ["/preprocessing.py"], "/preprocessing.py": ["/data_cleaning.py", "/twint_scraping.py"], "/predict.py": ["/models.py", "/train.py", "/preprocessing.py", "/data_cleaning.py", "/twint_scraping.py"]} |
556 | olof98johansson/SentimentAnalysisNLP | refs/heads/main | /models.py | import torch
import torch.nn as nn
import preprocessing
import os
import numpy as np
class ModelUtils:
    '''
    A utility class to save and load model weights.

    Fixes vs. the previous revision: both helpers are now proper
    @staticmethods (they take no self/cls and were only callable through the
    class by accident of Python 3 semantics), and the success message typo
    ("saved to model to") is corrected.
    '''
    @staticmethod
    def save_model(save_path, model):
        '''
        Save a model's state_dict to disk.

        Input: save_path - destination path; '.pth' is appended when no extension is given
               model - torch module whose weights are saved
        Returns None on failure (the error is printed, not raised).
        '''
        root, ext = os.path.splitext(save_path)
        if not ext:
            save_path = root + '.pth'
        try:
            torch.save(model.state_dict(), save_path)
            print(f'Successfully saved the model to "{save_path}"!')
        except Exception as e:
            print(f'Unable to save model, check save path!')
            print(f'Exception:\n{e}')
            return None

    @staticmethod
    def load_model(load_path, model):
        '''
        Load weights from load_path into model (in place).

        Input: load_path - path to a saved state_dict
               model - torch module with a matching architecture
        Returns None on failure (the error is printed, not raised).
        '''
        try:
            model.load_state_dict(torch.load(load_path))
            print(f'Successfully loaded the model from path "{load_path}"')
        except Exception as e:
            print(f'Unable to load the weights, check if different model or incorrect path!')
            print(f'Exception:\n{e}')
            return None
class RNNModel(nn.Module):
    '''
    RNN classifier with different available RNN types (basic RNN, LSTM, GRU).

    Fix vs. the previous revision: init_hidden now returns a single hidden
    tensor for 'rnn'/'gru' and an (h, c) tuple only for 'lstm'. Previously a
    tuple was returned for every type, which crashed forward() for the
    non-LSTM cells (nn.RNN/nn.GRU expect a tensor, not a tuple).
    '''
    # Map from rnn_type string to the torch recurrent module to build.
    _RNN_CLASSES = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}

    def __init__(self, rnn_type, nr_layers, voc_size, emb_dim, rnn_size, dropout, n_classes):
        '''
        Initiates the RNN model

        Input: rnn_type - specifies the rnn model type between "rnn", "lstm" or "gru" (type: string)
               nr_layers - number of rnn layers (type: int)
               voc_size - size of vocabulary of the encoded input data (type: int)
               emb_dim - size of embedding layer (type: int)
               rnn_size - number of hidden layers in RNN model (type: int)
               dropout - probability of dropout layers (type: float in between [0, 1])
               n_classes - number of different classes/labels (type: int)
        '''
        super().__init__()
        self.rnn_size = rnn_size
        self.rnn_type = rnn_type
        self.nr_layers = nr_layers
        self.embedding = nn.Embedding(voc_size, emb_dim)
        if self.rnn_type in self._RNN_CLASSES:
            # Inter-layer dropout is only defined for stacked RNNs; torch warns
            # when it is set with a single layer, so disable it there.
            self.rnn = self._RNN_CLASSES[self.rnn_type](
                input_size=emb_dim, hidden_size=rnn_size,
                dropout=dropout if nr_layers > 1 else 0,
                bidirectional=False, num_layers=nr_layers, batch_first=True)
        else:
            print('Invalid or no choice for RNN type, please choose one of "rnn", "lstm" or "gru"')
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(in_features=rnn_size, out_features=n_classes)
        self.sigmoid = nn.Sigmoid()

    def forward(self, X, hidden):
        '''
        Forward propagation of the RNN model

        Input: X - batch of input data (type: torch tensor)
               hidden - batch of input to the hidden cells (type: torch tensor,
                        or (h, c) tuple for LSTM)

        Output: out - model prediction, sigmoid score of the last time step
                      per batch element (type: torch tensor)
                hidden - output of the hidden cells (torch.tensor / tuple)
        '''
        self.batch_size = X.size(0)
        embedded = self.embedding(X)
        if self.rnn_type not in self._RNN_CLASSES:
            print(f'Invalid rnn type! Rebuild the model with a correct rnn type!')
            return None
        rnn_out, hidden = self.rnn(embedded, hidden)
        # Flatten (batch, seq, rnn_size) to (batch*seq, rnn_size) for the head.
        rnn_out = rnn_out.contiguous().view(-1, self.rnn_size)
        out = self.sigmoid(self.linear(self.dropout(rnn_out)))
        # Reshape such that batch size is first and keep only the last time step.
        out = out.view(self.batch_size, -1)
        out = out[:, -1]
        return out, hidden

    def init_hidden(self, batch_size, device):
        '''
        Initializes the hidden state with zeros: an (h0, c0) tuple for LSTM,
        a single h0 tensor for plain RNN and GRU.
        '''
        h0 = torch.zeros((self.nr_layers, batch_size, self.rnn_size)).to(device)
        if self.rnn_type == 'lstm':
            c0 = torch.zeros((self.nr_layers, batch_size, self.rnn_size)).to(device)
            return (h0, c0)
        return h0
| {"/main.py": ["/train.py", "/preprocessing.py"], "/train.py": ["/models.py", "/preprocessing.py"], "/models.py": ["/preprocessing.py"], "/preprocessing.py": ["/data_cleaning.py", "/twint_scraping.py"], "/predict.py": ["/models.py", "/train.py", "/preprocessing.py", "/data_cleaning.py", "/twint_scraping.py"]} |
557 | olof98johansson/SentimentAnalysisNLP | refs/heads/main | /preprocessing.py | import data_cleaning
import twint_scraping
import os
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
import torch
class config:
    '''
    Configuration class to store and tune global variables
    '''
    # Special vocabulary tokens for padding and out-of-vocabulary words.
    PAD = '___PAD___'
    UNKNOWN = '___UNKNOWN___'
    # One scrape output file per keyword below (same order).
    paths = ['./training_data/depressive1.json',
             './training_data/depressive2.json',
             './training_data/depressive3.json',
             './training_data/depressive4.json',
             './training_data/depressive5.json',
             './training_data/depressive6.json',
             './training_data/non-depressive1.json',
             './training_data/non-depressive2.json',
             './training_data/non-depressive3.json',
             './training_data/non-depressive4.json',
             './training_data/non-depressive5.json',
             './training_data/non-depressive6.json']
    # Class label assigned to each file in `paths` (parallel list).
    labels = ['depressive', 'depressive', 'depressive', 'depressive', 'depressive', 'depressive',
              'not-depressive', 'not-depressive', 'not-depressive', 'not-depressive',
              'not-depressive', 'not-depressive']
    # Combined, cleaned training data ends up here.
    save_path = './training_data/all_training_data.csv'
    # Search keyword per scrape; first six proxy "depressive", last six "not-depressive".
    keywords = ['depressed', 'lonely', 'sad', 'depression', 'tired', 'anxious',
                'happy', 'joy', 'thankful', 'hope', 'hopeful', 'glad']
    # Tweets to collect per keyword (parallel to `keywords`).
    nr_of_tweets = [5000, 5000, 5000, 5000, 5000, 5000,
                    5000, 5000, 5000, 5000, 5000, 5000]
    hashtags_to_remove = []
    # Filled in by preprocess(): fitted LabelEncoder and built Vocab, plus sizes.
    encoder = None
    vocab = None
    vocab_size = 0
    n_classes = 0
def collect_dataset(paths, keywords, nr_of_tweets, hashtags_to_remove, collect=True):
    '''
    Collecting the dataset and cleans the data

    Input: paths - path to where to save the collected tweets (type: list of strings)
           keywords - keywords to be used for collecting tweets (type: list of strings)
           nr_of_tweets - number of tweets to be collected for each collecting process (type: list of ints)
           hashtags_to_remove - hashtags stripped during cleaning (type: list of strings)
           collect - specifying if to collect tweets or not (type: boolean)

    Output: dataset - cleaned dataset of the tweet texts and their labels (type: list of lists)
            keys - keys of the cleaned dataset
    '''
    # Normalize extensions: scrape output is .json, merged output is .csv.
    json_paths = [os.path.splitext(path)[0] + '.json' for path in paths]
    csv_path = os.path.splitext(config.save_path)[0] + '.csv'

    if collect:
        for idx, json_path in enumerate(json_paths):
            twint_scraping.collect_tweets(keywords=keywords[idx],
                                          nr_tweets=nr_of_tweets[idx],
                                          output_file=json_path)

    dataset, keys = data_cleaning.datacleaning(paths=json_paths, labels=config.labels,
                                               hashtags_to_remove=hashtags_to_remove,
                                               save_path=csv_path)
    return dataset, keys
class DocumentDataset(Dataset):
    '''
    Minimal Dataset pairing encoded documents with their labels, item by item.
    '''
    def __init__(self, X, Y):
        self.X = X
        self.Y = Y

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        # Return the (document, label) pair at position idx.
        return self.X[idx], self.Y[idx]
class DocumentBatcher:
    '''
    Collate callable for DataLoader: converts a batch of (document, label)
    pairs into torch tensors, right-padding shorter documents with the
    vocabulary's pad index so the batch is rectangular.
    '''
    def __init__(self, voc):
        # Index used to pad shorter documents up to the batch maximum.
        self.pad = voc.get_pad_idx()

    def __call__(self, XY):
        longest = max(len(doc) for doc, _ in XY)
        padded = [doc + [self.pad] * (longest - len(doc)) for doc, _ in XY]
        Xpadded = torch.as_tensor(padded)
        Y = torch.as_tensor([label for _, label in XY])
        return Xpadded, Y
class Vocab:
    '''
    Word-level vocabulary: maps tokens to integer codes and back. Index 0 is
    the padding token, index 1 the unknown token; remaining tokens are ordered
    by descending corpus frequency.
    '''
    def __init__(self):
        # Plain whitespace split as the tokenizer.
        self.tokenizer = lambda s: s.split()

    def build_vocab(self, docs):
        '''
        Building the vocabulary from the documents, i.e creating the
        word-to-encoding and encoding-to-word dicts

        Input: docs - list of all the lines in the corpus
        '''
        counts = Counter(tok for doc in docs for tok in self.tokenizer(doc))
        # Sort by (frequency, word) descending so ordering is deterministic.
        by_freq = sorted(((n, w) for w, n in counts.items()), reverse=True)
        self.enc_to_word = [config.PAD, config.UNKNOWN] + [w for _, w in by_freq]
        self.word_to_enc = {w: i for i, w in enumerate(self.enc_to_word)}

    def encode(self, docs):
        '''
        Encoding the documents; unseen tokens map to the UNKNOWN index.

        Input: docs - list of all the lines in the corpus
        '''
        unkn_index = self.word_to_enc[config.UNKNOWN]
        return [[self.word_to_enc.get(tok, unkn_index) for tok in self.tokenizer(doc)]
                for doc in docs]

    def get_unknown_idx(self):
        # Integer code reserved for out-of-vocabulary tokens.
        return self.word_to_enc[config.UNKNOWN]

    def get_pad_idx(self):
        # Integer code reserved for padding.
        return self.word_to_enc[config.PAD]

    def __len__(self):
        return len(self.enc_to_word)
def preprocess(batch_size=64, collect=True):
    '''
    Function for preprocessing the data which splits the data into train/val, builds the vocabulary, fits
    the label encoder and creates the dataloaders for the train and validation set

    Input: batch_size - batch size to be used in the data loaders (type: int)
           collect - specifying if to collect data or not (type: boolean)

    Output: dataloaders - the created data loaders for training and validation set (type: list of data loaders)
            vocab_size - size of the built vocabulary (type: int)
            n_classes - number of classes/labels in the dataset
    '''
    data, keys = collect_dataset(paths=config.paths, keywords=config.keywords,
                                 nr_of_tweets=config.nr_of_tweets,
                                 hashtags_to_remove=config.hashtags_to_remove,
                                 collect=collect)
    X, Y = data
    x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size=0.2, shuffle=True, random_state=1)

    # Vocabulary and label encoder are fitted on the training split only.
    vocab = Vocab()
    vocab.build_vocab(x_train)
    encoder = LabelEncoder()
    encoder.fit(y_train)

    # Cache the fitted artefacts globally so predict.py can reuse them.
    config.vocab = vocab
    config.encoder = encoder
    config.vocab_size = len(vocab)
    config.n_classes = len(encoder.classes_)

    batcher = DocumentBatcher(vocab)
    train_loader = DataLoader(DocumentDataset(vocab.encode(x_train), encoder.transform(y_train)),
                              batch_size, shuffle=True, collate_fn=batcher, drop_last=True)
    val_loader = DataLoader(DocumentDataset(vocab.encode(x_val), encoder.transform(y_val)),
                            batch_size, shuffle=True, collate_fn=batcher, drop_last=True)
    return [train_loader, val_loader], config.vocab_size, config.n_classes
| {"/main.py": ["/train.py", "/preprocessing.py"], "/train.py": ["/models.py", "/preprocessing.py"], "/models.py": ["/preprocessing.py"], "/preprocessing.py": ["/data_cleaning.py", "/twint_scraping.py"], "/predict.py": ["/models.py", "/train.py", "/preprocessing.py", "/data_cleaning.py", "/twint_scraping.py"]} |
558 | olof98johansson/SentimentAnalysisNLP | refs/heads/main | /predict.py | import models
import train
import preprocessing
import data_cleaning
import os
import torch
import twint_scraping
import numpy as np
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style('darkgrid')
import pandas_alive
class Config:
    '''
    Configuration class to store and tune global variables
    '''
    # NOTE(review): this class body calls twint_scraping.get_weeks at import
    # time, so importing this module performs real work.
    test_set_keywords = []
    # Tweets to collect per location entry below.
    test_set_nr_of_tweets = [5000]
    # Coordinates spread out in UK to cover as wide geographical range as possible
    test_set_locations = ["54.251186,-4.463196,550km"]
    len_locations = len(test_set_locations)
    time_to = twint_scraping.get_weeks([2019, 12, 24], [2020, 3, 17]) # UK lockdown and 3 months back
    time_from = twint_scraping.get_weeks([2020, 3, 24], [2020, 6, 24]) # UK lockdown and 3 months forward
    # All weekly [start, end] spans: pre-lockdown weeks followed by lockdown weeks.
    test_set_time_spans = []
    for tt in time_to:
        test_set_time_spans.append(tt)
    for tf in time_from:
        test_set_time_spans.append(tf)
    len_timespan = len(test_set_time_spans)
    # One json output path per (timespan, location) pair.
    test_set_json_paths = []
    for t_idx in range(len_timespan):
        time_spec_path = []
        for l_idx in range(len_locations):
            time_spec_path.append(f'./forecast_data/testdata_{l_idx}_{t_idx}.json')
        test_set_json_paths.append(time_spec_path)
    # One merged csv per timespan (all locations combined).
    test_set_csv_paths = [f'./forecast_data/all_loc_{t_idx}.csv' for t_idx in range(len_timespan)]
    # Trained weights produced by train.train_rnn / main.py.
    path_to_weights = './weights/lstm_model_2.pth'
class TestDataset(Dataset):
    '''
    Thin Dataset wrapper exposing a sequence of pre-encoded, unlabeled test
    documents one item at a time.
    '''
    def __init__(self, X):
        self.X = X

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx]
def get_testdata(paths, save_path, timespans, collect_test_data = False):
    '''
    Builds vocabulary and encoder based on the training data and collects, clean and builds data loaders
    for the test data

    Input: paths - path to store the collected test data with json extension (type: list of strings)
           save_path - path to where to save the cleaned and final test dataset with csv
                       extension (type: list of strings)
           timespans - timespans of when the collected test tweets where tweeted (type: list of lists of strings)
           collect_test_data - specifying if to collect test data or not (type: boolean)

    Output: test_loader - data loader for the collected test data (type: DataLoader)
            encoder - encoder trained on the training labels (type: LabelEncoder)
            vocab_size - size of the vocabulary built from the training data (type: int)
            n_classes: number of classes/labels from the training data (type: int)
    '''
    # Normalize file extensions for the scrape outputs and the merged csv.
    json_paths = [os.path.splitext(path)[0] + '.json' for path in paths]
    save_root, save_exts = os.path.splitext(save_path)
    csv_path = save_root + '.csv'

    # Re-run preprocessing (without collecting) so the training vocabulary and
    # label encoder are fitted and cached on preprocessing.config.
    rnn_params = train.rnn_params()
    _, vocab_size, n_classes = preprocessing.preprocess(rnn_params.batch_size, collect=False)
    encoder = preprocessing.config.encoder
    vocab = preprocessing.config.vocab

    if collect_test_data:
        for idx, json_path in enumerate(json_paths):
            twint_scraping.collect_tweets(nr_tweets=Config.test_set_nr_of_tweets[idx],
                                          output_file=json_path,
                                          coord=Config.test_set_locations[idx],
                                          timespan=timespans)

    testdata, keys = data_cleaning.datacleaning(paths=json_paths, labels=[],
                                                hashtags_to_remove=[],
                                                save_path=csv_path, train=False)
    # Persist the cleaned documents alongside the merged csv.
    pd.DataFrame(data={"test docs": testdata}).to_csv(save_root + '_cleaned.csv', sep=',', index=False)

    pad = vocab.get_pad_idx()
    # NOTE(review): max_len is measured on the raw documents *before* encoding,
    # so it reflects character counts rather than token counts — verify intent.
    max_len = max(len(x) for x in testdata)
    encoded = vocab.encode(testdata)
    testdata_padded = torch.as_tensor([doc + [pad] * (max_len - len(doc)) for doc in encoded])
    test_loader = DataLoader(TestDataset(testdata_padded), batch_size=1)
    return test_loader, encoder, vocab_size, n_classes
def predict(testdata, path_to_weights, vocab_size, n_classes):
    '''
    Creates, loads and initiates the model and making predictions on the test data

    Input: testdata - data loader of the test data (type: DataLoader)
           path_to_weights - relative path and file name of the saved model weights with .pth extension (type:string)
           vocab_size - size of the vocabulary (type: int)
           n_classes - number of labels/classes that can be predicted (type: int)

    Output: preds_prob_list - list of all the probabilities of which the model predicted
                              the corresponding label (type: list of floats)
            preds_status_list - list of all the reencoded labels that were predicted (type: list of strings)

    Fixes vs. the previous revision:
      * model.eval() is called so dropout is disabled at inference,
      * inference runs under torch.no_grad(),
      * the reported probability is derived from the raw sigmoid output
        instead of the rounded 0/1 prediction (previously every "probability"
        was exactly 0.0 or 1.0).
    '''
    rnn_params = train.rnn_params
    model = models.RNNModel(rnn_type=rnn_params.rnn_type, nr_layers=rnn_params.nr_layers,
                            voc_size=vocab_size, emb_dim=rnn_params.emb_dim, rnn_size=rnn_params.rnn_size,
                            dropout=rnn_params.dropout, n_classes=n_classes)
    models.ModelUtils.load_model(path_to_weights, model)
    model.to(rnn_params.device)
    model.eval()

    batch_size = 1
    h = model.init_hidden(batch_size, device=rnn_params.device)
    preds_prob_list, preds_status_list = [], []
    with torch.no_grad():
        for x_test in testdata:
            x_test = x_test.to(rnn_params.device)
            # Detach the carried hidden state (tuple for LSTM, tensor otherwise).
            h = tuple(each.data for each in h) if isinstance(h, tuple) else h.data
            out, h = model(x_test, h)
            # Raw sigmoid score: < 0.5 is classified as "depressive".
            raw = out.squeeze().item()
            pred_status = "depressive" if raw < 0.5 else "non-depressive"
            # Certainty of the chosen class, taken from the raw score.
            prob = (1 - raw) if pred_status == "depressive" else raw
            preds_status_list.append(pred_status)
            preds_prob_list.append(prob)
    return preds_prob_list, preds_status_list
def run_predictions(collect_test_data=False):
    '''
    Collect, preprocess and predicts the test data

    Input: collect_test_data - whether or not to collect test data (type: boolean)

    Output: status_results - all the predicted labels (type: dictionary of lists of strings)
            preds_results - all the predicted values, i.e the certainties of
                            the predictions (type: dictionary of lists of strings)
            Returns None as soon as any timespan fails.
    '''
    status_results, preds_results = {}, {}
    for idx, ind_paths in enumerate(Config.test_set_json_paths):
        try:
            loader, encoder, vocab_size, n_classes = get_testdata(
                ind_paths,
                Config.test_set_csv_paths[idx],
                timespans=Config.test_set_time_spans[idx],
                collect_test_data=collect_test_data)
            probs, statuses = predict(loader, Config.path_to_weights,
                                      vocab_size, n_classes)
            status_results[f'timespan_{idx}'] = statuses
            preds_results[f'timespan_{idx}'] = probs
        except Exception as e:
            print(f'Unable to get test data!')
            print(f'Exception:\n{e}')
            return None
    return status_results, preds_results
def plot_predictions(status_results, preds_results, save_name='./predictions_forecast.png', color=None):
    '''
    Plot the predictions in time order, i.e a time-based forecast of the predictions

    Input: status_results - all the predicted labels (type: dictionary of lists of strings)
           preds_results - all the predicted values, i.e the certainties of
                           the predictions (type: dictionary of lists of strings)
           save_name - path and filename to where to save the forecasting plot
           color - optional bar color (defaults to pink)
    '''
    keys = list(status_results.keys())
    # Per-timespan count and fraction of tweets classified as depressive.
    dep_counts = [(np.array(status_results[k]) == 'depressive').sum() for k in keys]
    dep_fraction = [((np.array(status_results[k]) == 'depressive').sum()) / len(status_results[k]) for k in keys]
    dep_labels = [format(frac * 100, '.2f') for frac in dep_fraction]
    # Mean prediction certainty per timespan (currently only used for labels).
    mean_probs = [np.mean(np.array(preds_results[k])) for k in keys]
    mean_prob_labels = [format(p * 100, '.2f') for p in mean_probs]

    weeks = Config.test_set_time_spans
    tick_labels = [f'{w[0].split()[0]}\n{w[1].split()[0]}' for w in weeks]
    color_bar = color if color else "#ff3399"
    if len(tick_labels) != len(dep_fraction):
        print('Time indexes does not equal number of values')
        tick_labels = keys

    fig = plt.figure(figsize=(28, 12))
    plt.bar(tick_labels, dep_fraction, color=color_bar, width=0.55, alpha=0.35)
    plt.plot(tick_labels, dep_fraction, color="#cc99ff", alpha=0.5)
    # Annotate each bar with its depressive percentage.
    for i, frac in enumerate(dep_fraction):
        plt.text(tick_labels[i], frac + 0.02, f'{dep_labels[i]}%', verticalalignment='center', color='black',
                 horizontalalignment='center', fontweight='bold', fontsize=8)
    plt.xlabel('Time period', fontsize=16)
    plt.ylabel('Percentage %', fontsize=16)
    plt.ylim(-0.05, 0.5)
    plt.xticks(fontsize=7.4)
    plt.yticks(fontsize=11)
    plt.title(f'Percentage of depressive tweets weekly from {tick_labels[0].split()[0]} to {tick_labels[len(tick_labels)-1].split()[1]}', fontsize=20)
    if save_name:
        save_name = os.path.splitext(save_name)[0] + '.png'
        plt.savefig(save_name, bbox_inches='tight')
    plt.show()
def plot_all_predictions(status_results1, status_results2, status_results3, weeks, save_name='./predictions_forecast.png', colors=None):
    '''
    Grouped bar chart comparing the weekly depressive-tweet percentage across
    three prediction periods.

    Input: status_results1/2/3 - predicted labels per timespan for each period
           weeks - triple of weekly [start, end] span lists, one per period
           save_name - where to save the figure (forced to .png)
           colors - three bar colors, one per period
    '''
    # Fraction of depressive predictions per timespan, for each period.
    fractions = []
    for status in (status_results1, status_results2, status_results3):
        keys = list(status.keys())
        fractions.append([((np.array(status[k]) == 'depressive').sum()) / len(status[k]) for k in keys])
    percentage_dep1, percentage_dep2, percentage_dep3 = fractions

    weeks1, weeks2, weeks3 = weeks
    indexes1 = [f'{w[0].split()[0]}\n{w[1].split()[0]}' for w in weeks1]
    indexes2 = [f'{w[0].split()[0]}\n{w[1].split()[0]}' for w in weeks2]
    indexes3 = [f'{w[0].split()[0]}\n{w[1].split()[0]}' for w in weeks3]

    x = np.arange(len(indexes1))
    lengths = [len(indexes1), len(indexes2), len(indexes3)]
    if not all(l == lengths[0] for l in lengths):
        # Periods of unequal length: truncate all series to the shortest one.
        shortest = np.min(lengths)
        percentage_dep1 = percentage_dep1[:shortest]
        percentage_dep2 = percentage_dep2[:shortest]
        percentage_dep3 = percentage_dep3[:shortest]
        x = np.arange(shortest)

    fig = plt.figure(figsize=(28, 12))
    series = ((-0.2, percentage_dep1, indexes1),
              (0.0, percentage_dep2, indexes2),
              (0.2, percentage_dep3, indexes3))
    for i, (offset, vals, idx) in enumerate(series):
        plt.bar(x + offset, vals, color=colors[i], width=0.2, alpha=0.4,
                label=f'{idx[0].split()[0]} to {idx[len(idx) - 1].split()[1]}')
    plt.xlabel('Time periods', fontsize=16)
    plt.ylabel('Percentage %', fontsize=16)
    plt.ylim(-0.05, 0.5)
    plt.yticks(fontsize=12)
    plt.tick_params(
        axis='x',           # changes apply to the x-axis
        which='both',       # both major and minor ticks are affected
        bottom=False,       # ticks along the bottom edge are off
        top=False,          # ticks along the top edge are off
        labelbottom=False)
    plt.legend(fontsize=21)
    plt.title(f'Comparison of the percentage of depressive tweets weekly from for different time periods', fontsize=20)
    if save_name:
        save_name = os.path.splitext(save_name)[0] + '.png'
        plt.savefig(save_name, bbox_inches='tight')
    plt.show()
def forecast_bar_race(status_results, preds_results, save_name='./plots/forecast_bar_race.mp4'):
    '''
    Render an animated pie chart (via pandas_alive) of the weekly ratio between
    depressive and non-depressive predicted tweets in the UK.

    Input: status_results - predicted labels per timespan (dict of lists of strings)
           preds_results - prediction certainties per timespan (dict of lists of floats)
           save_name - output path; its extension is replaced with .gif
    '''
    timespans = list(status_results.keys())
    # Per-timespan counts; NOTE(review): these and the *_probs values below are
    # computed but never used in the animation — presumably kept for debugging.
    nr_depressive = [(np.array(status_results[timespans[t_idx]]) == 'depressive').sum() for t_idx in
                     range(len(timespans))]
    nr_nondepressive = [(np.array(status_results[timespans[t_idx]]) == 'non-depressive').sum() for t_idx in
                        range(len(timespans))]
    # Fraction of depressive predictions per timespan.
    percentage_dep = [
        ((np.array(status_results[timespans[t_idx]]) == 'depressive').sum()) / len(status_results[timespans[t_idx]]) for
        t_idx in range(len(timespans))]
    text_perc_dep = [format(percentage_dep[i] * 100, '.2f') for i in range(len(percentage_dep))]
    ave_probs = [np.mean(np.array(preds_results[timespans[t_idx]])) for t_idx in range(len(timespans))]
    text_ave_probs = [format(ave_probs[i] * 100, '.2f') for i in range(len(ave_probs))]
    percentage_antidep = [1 - percentage_dep[i] for i in range(len(percentage_dep))]
    # One row per week: complementary depressive/non-depressive fractions.
    df_dict = {'depressive': percentage_dep,
               'non-depressive': percentage_antidep}
    weeks = Config.test_set_time_spans
    # Week start dates used as the DatetimeIndex of the animated frame order.
    indexes = [f'{w[0].split()[0]}' for w in weeks]
    predictions_df = pd.DataFrame(df_dict, index=pd.DatetimeIndex(indexes))
    predictions_df.index.rename('date', inplace=True)
    root, ext = os.path.splitext(save_name)
    # NOTE(review): save_name and save_name_pie are identical (both root+'.gif');
    # the default argument suggests an .mp4 bar race was once intended — confirm.
    save_name = root + '.gif'
    save_name_pie = root + '.gif'
    #predictions_df.plot_animated(filename=save_name, period_fmt="%Y-%m-%d")
    predictions_df.plot_animated(filename=save_name_pie, period_fmt="%Y-%m-%d", period_label={'x': 0, 'y': 0.05},
                                 title= f'Weekly ratio between non-depressive and depressive tweets in the UK',
                                 kind="pie", rotatelabels=True)
def _configure_period(start, mid, end, json_tag, csv_tag):
    """Point Config at a new evaluation window and return its weekly time spans.

    start/mid/end: [year, month, day] triples; the window is split at *mid*
                   into Config.time_to and Config.time_from, matching the
                   original two-list construction.
    json_tag/csv_tag: file-name stems for the per-location json dumps and the
                   per-week combined csv files under ./forecast_data/.
    """
    Config.time_to = twint_scraping.get_weeks(start, mid)
    Config.time_from = twint_scraping.get_weeks(mid, end)
    spans = list(Config.time_to) + list(Config.time_from)
    Config.test_set_time_spans = spans
    Config.len_timespan = len(spans)
    Config.test_set_json_paths = [
        [f'./forecast_data/{json_tag}_{l_idx}_{t_idx}.json' for l_idx in range(Config.len_locations)]
        for t_idx in range(len(spans))
    ]
    Config.test_set_csv_paths = [f'./forecast_data/{csv_tag}_{t_idx}.csv' for t_idx in range(len(spans))]
    return spans
def run():
    '''
    Predict function to run the prediction process after specifying parameters
    for all three time periods (current, same period a year before, and the
    months after lockdown up to recent), then plot the comparison.

    Refactor vs. original: the two duplicated blocks that rebuilt
    Config.test_set_time_spans / json / csv paths are now one helper
    (_configure_period); the twelve training paths and labels are generated
    instead of listed literally. Values are identical.
    '''
    # Training data: depressive1..6.json then non-depressive1..6.json.
    preprocessing.config.paths = (
        [f'./training_data/depressive{i}.json' for i in range(1, 7)]
        + [f'./training_data/non-depressive{i}.json' for i in range(1, 7)]
    )
    # NOTE: the original used the label string 'not-depressive' (not
    # 'non-depressive'); preserved as-is.
    preprocessing.config.labels = ['depressive'] * 6 + ['not-depressive'] * 6
    preprocessing.config.save_path = './training_data/all_training_data.csv'
    # Period 1: whatever Config currently holds.
    status_results, preds_results = run_predictions(collect_test_data=True)  # collect_test_data=False if already collected
    plot_predictions(status_results, preds_results, save_name='./plots/forecast_orig.png')
    forecast_bar_race(status_results, preds_results, save_name='./plots/forecast_bar_race_orig.gif')
    week1 = Config.test_set_time_spans
    # Period 2: same period one year before.
    week2 = _configure_period([2018, 12, 24], [2019, 3, 24], [2019, 6, 24],
                              'testdata_yearbefore', 'all_loc_year_before')
    status_results_before, preds_results_before = run_predictions(collect_test_data=True)  # collect_test_data=False if already collected
    plot_predictions(status_results_before, preds_results_before, save_name='./plots/forecast_year_before.png', color="#3366ff")
    forecast_bar_race(status_results_before, preds_results_before, save_name='./plots/forecast_bar_race_last_year.gif')
    # Period 3: from 3 months after lockdown up to recent.
    week3 = _configure_period([2020, 6, 24], [2020, 9, 24], [2020, 12, 17],
                              'testdata_uptorecent', 'all_loc_up_to_recent')
    status_results_uptonow, preds_results_uptonow = run_predictions(collect_test_data=True)  # collect_test_data=False if already collected
    plot_predictions(status_results_uptonow, preds_results_uptonow, save_name='./plots/forecast_up_to_now.png', color="#00cc66")
    forecast_bar_race(status_results_uptonow, preds_results_uptonow, save_name='./plots/forecast_bar_race_up_to_now.gif')
    ##### COMPARISON #####
    weeks = [week1, week2, week3]
    colors = ["#ff3399", "#3366ff", "#00cc66"]
    plot_all_predictions(status_results, status_results_before, status_results_uptonow, weeks,
                         save_name='./plots/comparison.png', colors=colors)
run()
| {"/main.py": ["/train.py", "/preprocessing.py"], "/train.py": ["/models.py", "/preprocessing.py"], "/models.py": ["/preprocessing.py"], "/preprocessing.py": ["/data_cleaning.py", "/twint_scraping.py"], "/predict.py": ["/models.py", "/train.py", "/preprocessing.py", "/data_cleaning.py", "/twint_scraping.py"]} |
559 | olof98johansson/SentimentAnalysisNLP | refs/heads/main | /data_cleaning.py |
import json
import csv
import re
def load_json(path):
    '''
    Read a line-delimited JSON collection and sanity-check its records.
    Input:  path - path and file name of the collected json data (type: string)
    Output: (keys, df_list) with the record field names and the parsed
            records, or None when the path/contents are invalid
    '''
    if not path.endswith('.json'):
        print('File path not JSON file...')
        return None
    with open(path, 'r', encoding='utf8') as handle:
        records = [json.loads(line) for line in handle]
    # Every record must carry the same number of fields.
    field_counts = [len(record.keys()) for record in records]
    if any(count != field_counts[0] for count in field_counts):
        print('Some features missing, review the data!')
        return None
    return records[0].keys(), records
def combine_and_label(paths, labels, train=True):
    '''
    Merge several collected JSON data files into one record list, tagging each
    record with its class label (depressive / non-depressive) when training.
    The labels list must be aligned with the paths list.
    Input:  paths  - paths to the json files to combine (type: list of strings)
            labels - label for each corresponding json file (type: list of strings)
            train  - when True, write a 'label' field into every record
    Output: df_list - combined list of record dicts, or None on any error
    '''
    if type(paths) is not list:
        print('"paths" argument is not of type list! Please pass list of the paths to the collected data to be combined!')
        return None
    if train and not len(paths) == len(labels):
        print(f'Number of datafile paths of {len(paths)} is not the same as number of labels of {len(labels)}!')
        return None
    combined = []
    for idx, path in enumerate(paths):
        try:
            _, records = load_json(path)
        except Exception as e:
            print(f'Unable to load data from path "{path}", check path name and file!')
            print(f'Exception:\n{e}')
            return None
        for record in records:
            if train:
                record['label'] = labels[idx]
            combined.append(record)
    return combined
def datacleaning(paths, labels, hashtags_to_remove=None, save_path=None, train=True):
    '''
    Cleans the data: removes tweets carrying unwanted hashtags, duplicate
    tweets (same id), mentions, URLs, non-alphabetic tokens, and tweets that
    end up empty after cleaning.
    Input:  paths  - paths to the json files (type: list of strings)
            labels - label for each corresponding json file (type: list of strings)
            hashtags_to_remove - hashtags whose tweets are dropped (type: list of strings or None)
            save_path - where to save the cleaned dataset as csv (type: string or None)
            train - training mode, i.e. whether labels are attached (type: boolean)
    Output: dataset_docs (and labels when train) plus the record field names,
            or None when loading failed.

    Bug fixes vs. original:
      * the original popped from df_list while iterating it with enumerate,
        which skips the element following every removal;
      * duplicate detection checked the raw id against str() keys, so it
        never matched when ids are not strings;
      * the empty-tweet check compared a string against 0, which is never
        true, so empty tweets were never dropped;
      * mutable default argument [] replaced with None.
    '''
    hashtags_to_remove = hashtags_to_remove or []
    if len(labels) > 0:
        train = True
    df_list = combine_and_label(paths, labels, train=train)
    if df_list is None:  # combine_and_label already reported the problem
        return None
    # Remove tweets with specific hashtags (filter into a new list — never
    # pop while iterating).
    banned = set(hashtags_to_remove)
    kept = []
    nr_removed_tweets = 0
    for idx, df in enumerate(df_list):
        if any(h in banned for h in df['hashtags']):
            print(f'Tweet nr {idx} removed!')
            nr_removed_tweets += 1
        else:
            kept.append(df)
    print(f'Removed total of {nr_removed_tweets} tweets')
    df_list = kept
    # Remove duplicate tweets (same id), keeping the first occurrence.
    unique_ids = set()
    deduped = []
    for df in df_list:
        tweet_id = str(df['id'])
        if tweet_id in unique_ids:
            print('Found douplicate of tweet id, removing the duplicate!')
        else:
            unique_ids.add(tweet_id)
            deduped.append(df)
    df_list = deduped
    # Clean the tweet texts; drop tweets that end up empty.
    cleaned = []
    for df in df_list:
        tweet = df['tweet']
        # Removing URLs
        tweet = re.sub(r"http\S+", " ", tweet)
        tweet = re.sub(r"\S+\.com\S", " ", tweet)
        # Remove mentions
        tweet = re.sub(r'\@\w+', ' ', tweet)
        # Remove non-alphabetic tokens
        tweet = re.sub('[^A-Za-z]', ' ', tweet.lower())
        # Remove double spacings
        tweet = re.sub(' +', ' ', tweet)
        # Remove from dataset if tweet empty after cleaning
        if tweet.strip():
            df['tweet'] = tweet
            cleaned.append(df)
    df_list = cleaned
    print('Successfully cleaned data!')
    # Saving list of tweet dicts to csv format
    if save_path:
        print(f'Saving data...')
        if not save_path.endswith('.csv'):
            print('Save path is missing .csv format extension!')
            save_path = save_path + '.csv'
        try:
            with open(save_path, 'w', encoding='utf8', newline='') as output_file:
                csv_file = csv.DictWriter(output_file,
                                          fieldnames=df_list[0].keys(),
                                          )
                csv_file.writeheader()
                csv_file.writerows(df_list)
            print(f'Data succesfully saved to "{save_path}"')
        except Exception as e:
            print(f'Unable to save data to "{save_path}", check the path and data!')
            print(f'Exception:\n{e}')
    dataset_docs = [df['tweet'] for df in df_list]
    keys = df_list[0].keys()
    if train:
        dataset_labels = [df['label'] for df in df_list]
        return [dataset_docs, dataset_labels], keys
    else:
        return dataset_docs, keys
| {"/main.py": ["/train.py", "/preprocessing.py"], "/train.py": ["/models.py", "/preprocessing.py"], "/models.py": ["/preprocessing.py"], "/preprocessing.py": ["/data_cleaning.py", "/twint_scraping.py"], "/predict.py": ["/models.py", "/train.py", "/preprocessing.py", "/data_cleaning.py", "/twint_scraping.py"]} |
566 | DiegoArcelli/BlocksWorld | refs/heads/main | /cnn.py | import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.layers import Conv2D
from keras.layers import MaxPool2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
from keras import Sequential
# Trains and saves the neural network that performs digit recognition.
# The model is trained on the MNIST dataset.
BATCH_SIZE = 64
EPOCHS = 10
# Fetch the MNIST train/test splits.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Add the channel dimension and normalise pixel values into [0, 1].
x_train = np.expand_dims(x_train, -1)
x_train = x_train / 255
x_test = np.expand_dims(x_test, -1)
x_test = x_test / 255
# Model definition: two conv/pool/dropout stages followed by a dense head
# with a 10-way softmax (one class per digit).
model = Sequential()
model.add(Conv2D(filters=24, kernel_size=(3, 3), activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(filters=36, kernel_size=(3, 3)))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dense(10, activation="softmax"))
# NOTE(review): this dummy forward pass builds the model's weights so that
# summary() can run before compile/fit.
model.predict(x_train[[0]])
model.summary()
model.compile(optimizer="adam",
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train the model.
history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(x_test, y_test))
# Accuracy and loss on the validation set.
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test loss', test_loss)
print('Test accuracy:', test_acc)
# Plot the accuracy and loss curves over the epochs.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
model.save("./model/model.h5")
| {"/launch.py": ["/load_state.py", "/utils.py", "/blocks_world.py", "/search_algs.py"], "/search_algs.py": ["/utils.py", "/blocks_world.py"], "/blocks_world.py": ["/utils.py"], "/main.py": ["/load_state.py", "/utils.py", "/blocks_world.py", "/search_algs.py"]} |
567 | DiegoArcelli/BlocksWorld | refs/heads/main | /launch.py | import tkinter as tk
from tkinter.filedialog import askopenfilename
from PIL import Image, ImageTk
from load_state import prepare_image
from utils import draw_state
from blocks_world import BlocksWorld
from search_algs import *
# Implements the graphical interface (tkinter) used to run the program.
class Window(tk.Frame):
    """Tkinter GUI: pick an initial and a goal board image, choose a search
    algorithm, and launch the blocks-world search."""
    def __init__(self, master=None):
        """Build the widget tree and show the two placeholder images."""
        super().__init__(master)
        self.master = master
        self.pack()
        self.initial_state = None
        self.goal_state = None
        self.create_widgets()
        self.create_images("insert_image.png", "insert_image.png")
    def create_widgets(self):
        """Lay out the labels, file-picker buttons and algorithm selector."""
        initial_label = tk.Label(self, text = "Seleziona stato iniziale:")
        goal_label = tk.Label(self, text = "Seleziona stato finale:")
        initial_label.grid(row = 0, column = 0, padx = 10, pady = 10)
        goal_label.grid(row = 0, column = 2, padx = 10, pady = 10)
        initial_button = tk.Button(self, text="Seleziona file", command=self.open_initial)
        goal_button = tk.Button(self, text="Seleziona file", command=self.open_goal)
        initial_button.grid(row = 1, column = 0, padx = 10, pady = 10)
        goal_button.grid(row = 1, column = 2, padx = 10, pady = 10)
        alg_label = tk.Label(self, text = "Seleziona algoritmo di ricerca:")
        alg_label.grid(row = 0, column = 1, padx = 10, pady = 10)
        frame = tk.Frame(self)
        frame.grid(row = 1, column = 1, padx = 10, pady = 10)
        self.selected = tk.StringVar(self)
        self.selected.set("BFS")
        # NOTE(review): .pack() returns None, so these two names are always
        # None; the widgets are still created and packed correctly.
        select_alg_menu = tk.OptionMenu(frame, self.selected, "BFS", "DFS", "IDS", "UCS", "A*", "RBFS", command=self.read_algorithm).pack()
        start_button = tk.Button(frame, text="Start search", command=self.start_search).pack()
    def create_images(self, initial, goal):
        """Load and display both preview images (paths relative to ./images/).

        The PhotoImage objects are kept on self so tkinter does not
        garbage-collect them while they are displayed.
        """
        self.initial_image_path = initial
        self.initial_image = ImageTk.PhotoImage(Image.open("./images/" + initial).resize((300, 300)))
        initial_image_label = tk.Label(self, image=self.initial_image)
        initial_image_label.grid(row = 2, column = 0, padx = 10, pady = 10)
        self.goal_image_path = goal
        self.goal_image = ImageTk.PhotoImage(Image.open("./images/" + goal).resize((300, 300)))
        goal_image_label = tk.Label(self, image=self.goal_image)
        goal_image_label.grid(row = 2, column = 2, padx = 10, pady = 10)
    def open_initial(self):
        """Ask for the initial-state image, parse it and refresh the preview."""
        self.initial_file = askopenfilename()
        if self.initial_file == ():
            return
        self.initial_state = prepare_image(self.initial_file, False)
        print(self.initial_state)
        draw_state(self.initial_state, "initial")
        # NOTE(review): yields "./images//temp/initial.jpg" inside
        # create_images — the double slash is tolerated by the filesystem.
        self.create_images("/temp/initial.jpg", self.goal_image_path)
    def read_algorithm(self, alg):
        """OptionMenu callback; simply echoes the chosen algorithm name."""
        return alg
    def open_goal(self):
        """Ask for the goal-state image, parse it and refresh the preview."""
        self.goal_file = askopenfilename()
        if self.goal_file == ():
            return
        self.goal_state = prepare_image(self.goal_file, False)
        print(self.goal_state)
        draw_state(self.goal_state, "goal")
        self.create_images(self.initial_image_path, "/temp/goal.jpg")
    def start_search(self):
        """Run the selected search algorithm on the loaded states and print
        the resulting action sequence."""
        if self.goal_state is None and self.initial_state is None:
            return
        alg = self.selected.get()
        problem = BlocksWorld(self.initial_state, self.goal_state)
        print("Inizio ricerca:")
        if alg == "BFS":
            problem.solution(graph_bfs(problem).solution())
        if alg == "A*":
            problem.solution(a_star(problem, lambda n: problem.misplaced_blocks(n)).solution())
        if alg == "DFS":
            problem.solution(graph_dfs(problem).solution())
        if alg == "IDS":
            problem.solution(ids(problem).solution())
        if alg == "RBFS":
            problem.solution(rbfs(problem, lambda n: problem.misplaced_blocks(n)).solution())
        if alg == "UCS":
            problem.solution(a_star(problem, lambda n: problem.depth(n)).solution())
root = tk.Tk()
root.title("Blocks World")
root.resizable(0, 0)
app = Window(master=root)
app.mainloop() | {"/launch.py": ["/load_state.py", "/utils.py", "/blocks_world.py", "/search_algs.py"], "/search_algs.py": ["/utils.py", "/blocks_world.py"], "/blocks_world.py": ["/utils.py"], "/main.py": ["/load_state.py", "/utils.py", "/blocks_world.py", "/search_algs.py"]} |
568 | DiegoArcelli/BlocksWorld | refs/heads/main | /utils.py | import heapq
import functools
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
class PriorityQueue:
    """A heap-backed queue that always pops the item with the best f-score.

    With order='min' the item with the smallest f(x) is returned first; with
    order='max' the largest. Also supports ``item in q``, ``q[item]`` (the
    stored score) and ``del q[item]``.
    """
    def __init__(self, order='min', f=lambda x: x):
        self.heap = []
        if order == 'min':
            self.f = f
        elif order == 'max':
            # Negate the score so the max-f item sits on top of the min-heap.
            self.f = lambda x: -f(x)
        else:
            raise ValueError("Order must be either 'min' or 'max'.")
    def append(self, item):
        """Push *item*, keyed by its score."""
        heapq.heappush(self.heap, (self.f(item), item))
    def extend(self, items):
        """Push every element of *items*."""
        for each in items:
            heapq.heappush(self.heap, (self.f(each), each))
    def pop(self):
        """Remove and return the best-scoring item (min or max per order)."""
        if not self.heap:
            raise Exception('Trying to pop from empty PriorityQueue.')
        return heapq.heappop(self.heap)[1]
    def __len__(self):
        """Current number of queued items."""
        return len(self.heap)
    def __contains__(self, key):
        """True when some queued item equals *key*."""
        return any(entry == key for _, entry in self.heap)
    def __getitem__(self, key):
        """Score of the first queued item equal to *key*.
        Raises KeyError if key is not present."""
        for score, entry in self.heap:
            if entry == key:
                return score
        raise KeyError(str(key) + " is not in the priority queue")
    def __delitem__(self, key):
        """Delete the first queued item equal to *key* and restore the heap.
        Raises KeyError if key is not present."""
        for position, (_, entry) in enumerate(self.heap):
            if entry == key:
                del self.heap[position]
                heapq.heapify(self.heap)
                return
        raise KeyError(str(key) + " is not in the priority queue")
    def get_item(self, key):
        """First queued item equal to *key*.
        Raises KeyError if key is not present."""
        for _, entry in self.heap:
            if entry == key:
                return entry
        raise KeyError(str(key) + " is not in the priority queue")
def is_in(elt, seq):
    """Like ``elt in seq`` but compares with identity ('is'), not equality."""
    for candidate in seq:
        if candidate is elt:
            return True
    return False
def memoize(fn, slot=None, maxsize=32):
    """Wrap *fn* so repeated calls with the same arguments reuse the result.

    If *slot* is given, the value is stored once on the first positional
    argument as attribute *slot*; otherwise an LRU cache keyed on the
    argument tuple is used.
    """
    if slot:
        def memoized_fn(obj, *args):
            # Compute once, then keep the answer on the object itself.
            if not hasattr(obj, slot):
                setattr(obj, slot, fn(obj, *args))
            return getattr(obj, slot)
        return memoized_fn
    return functools.lru_cache(maxsize=maxsize)(fn)
def draw_state(state, file_path):
    """Render *state* as an image and write it to ./images/temp/<file_path>.jpg.

    state:     tuple of (digit, row, col) blocks followed by the table width.
    file_path: base name (without extension) for the output file.

    Fix vs. original: a duplicated bottom-border assignment was removed
    (the same slice was set to 255 twice); output is unchanged.
    """
    blocks = [*state[0:-1]]
    w = state[-1]
    # Tallest stack first, so blocks[0][1] is the maximum occupied height.
    blocks.sort(key=lambda l: l[1], reverse=True)
    h = blocks[0][1]
    image = np.zeros(((h+1)*100, w*100), np.uint8)
    for block in blocks:
        n, i, j = block
        i = h - i  # flip: row 0 is the table, but image row 0 is the top
        digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
        digit = cv.resize(digit, (100, 100))
        image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
    # Pad the picture to the full (block-count) height.
    size = (len(state) - 1)*100
    padded = np.zeros((size, w*100), np.uint8)
    padded[size - (h+1)*100 : size, :] = image
    h = len(state) - 1
    # White frame around the board: top, bottom, left, right bands.
    bg = np.zeros((h*100 + 40, w*100 + 40), np.uint8)
    bg[20: h*100 + 20, 20: w*100 + 20] = padded
    bg[0:10, :] = 255
    bg[h*100 + 30 : h*100 + 40, :] = 255
    bg[:, 0:10] = 255
    bg[:,w*100 + 30 : w*100 + 40] = 255
    # Letterbox onto a square canvas and write inverted (black on white).
    w, h = (w*100 + 40, h*100 + 40)
    l = max(w, h)
    adjust = np.zeros((l, l), np.uint8)
    d_w = (l - w) // 2
    d_h = (l - h) // 2
    adjust[d_h: d_h + h, d_w: d_w + w] = bg
    cv.imwrite("./images/temp/" + str(file_path) + ".jpg", ~adjust)
569 | DiegoArcelli/BlocksWorld | refs/heads/main | /search_algs.py | from aima3.search import *
from utils import *
from collections import deque
from blocks_world import BlocksWorld
import sys
# Contains the implementations of the search algorithms.
node_expanded = 0 # number of nodes expanded during the search
max_node = 0 # maximum number of nodes held in the frontier during the search
f_dim = 0 # current size of the frontier at a given moment
total_node = 0 # running sum of frontier sizes (used to compute the average)
def init_param():
    """Reset all module-level search instrumentation counters to zero."""
    global node_expanded, total_node, max_node, f_dim
    node_expanded = total_node = max_node = f_dim = 0
def print_param():
    """Print the statistics gathered during the last search run.

    Reads the module-level counters; assumes node_expanded > 0 (a search
    expanded at least one node before calling this).
    """
    average_frontier = int(total_node / node_expanded)
    print(f"Nodi espansi: {node_expanded}")
    print(f"Max dimensione della frontiera: {max_node}")
    print(f"Dim media della frontiera: {average_frontier}")
# def get_item(queue, key):
# """Returns the first node associated with key in PriorityQueue.
# Raises KeyError if key is not present."""
# for _, item in queue.heap:
# if item == key:
# return item
# raise KeyError(str(key) + " is not in the priority queue")
def show_solution(name_algo, node):
    """Print the solution of *node* under the *name_algo* label.

    Handles a Node (prints its action list), a plain string such as
    'cutoff' (prints it as-is), and anything else, e.g. None (prints
    'No solution found').
    """
    try:
        print(name_algo + ":", node.solution())
    except Exception:
        # BUG FIX: the original tested ``type(Node)`` (the imported aima3
        # class), which is never str; test the actual argument instead.
        if type(node) == str:
            print(name_algo + ":", node)
        else:
            print(name_algo + ":", "No solution found")
# Graph Breadth First Search
def graph_bfs(problem):
    """Graph-search BFS over *problem*; returns the goal Node or None.

    Side effects: resets and updates the module-level instrumentation
    counters (node_expanded, total_node, max_node, f_dim) and prints the
    statistics via print_param() when a goal is found.
    """
    global node_expanded, total_node, max_node, f_dim
    init_param()
    frontier = deque([Node(problem.initial)])
    f_dim += 1
    explored = set()
    while frontier:
        node_expanded += 1
        total_node += f_dim
        node = frontier.popleft()
        f_dim -= 1
        explored.add(node.state)
        if problem.goal_test(node.state):
            # print(node_expanded)
            print_param()
            return node
        for child_node in node.expand(problem):
            # Skip states already expanded or already queued.
            if child_node.state not in explored and child_node not in frontier:
                f_dim += 1
                max_node = f_dim if f_dim > max_node else max_node
                frontier.append(child_node)
# Graph Depth First Search
def graph_dfs(problem):
    """Graph-search DFS over *problem*; returns the goal Node or None.

    Same instrumentation side effects as graph_bfs (module-level counters,
    statistics printed on success). The only structural difference is
    frontier.pop() (LIFO) instead of popleft().
    """
    global node_expanded, total_node, max_node, f_dim
    init_param()
    frontier = deque([Node(problem.initial)])
    f_dim += 1
    explored = set()
    while frontier:
        total_node += f_dim
        node = frontier.pop()
        node_expanded += 1
        f_dim -= 1
        if problem.goal_test(node.state):
            print_param()
            return node
        explored.add(node.state)
        for child_node in node.expand(problem):
            # Skip states already expanded or already queued.
            if child_node.state not in explored and child_node not in frontier:
                f_dim += 1
                max_node = f_dim if f_dim > max_node else max_node
                frontier.append(child_node)
# Uniform Cost Search
def ucs(problem, f):
    """Best-first graph search ordered by evaluation function *f*.

    Returns the goal Node or None. NOTE(review): despite the name this is
    generic best-first search — a_star reuses it with f = g + h; it is UCS
    only when f is the path cost. Same counter side effects as graph_bfs.
    """
    global node_expanded, total_node, max_node, f_dim
    init_param()
    if problem.goal_test(problem.initial):
        return Node(problem.initial)
    # Cache f on each node under attribute 'f'.
    f = memoize(f, 'f')
    node_expanded += 1
    frontier = PriorityQueue('min', f)
    frontier.append(Node(problem.initial))
    f_dim += 1
    explored = set()
    while frontier:
        total_node += f_dim
        node_expanded += 1
        node = frontier.pop()
        f_dim -= 1
        # print(node, f(node))
        if problem.goal_test(node.state):
            print_param()
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                f_dim += 1
                frontier.append(child)
                max_node = f_dim if f_dim > max_node else max_node
            elif child in frontier:
                # Re-queue the state when a cheaper path to it is found.
                next_node = frontier.get_item(child)
                if f(child) < f(next_node):
                    del frontier[next_node]
                    frontier.append(child)
# Depth Limited Search
def dls(problem, limit):
    """Depth-first search cut off at depth *limit*.

    Returns the goal Node, the string 'cutoff' when the limit was hit
    somewhere, or None when the space was exhausted without a goal.
    Updates the module-level counters (reset is left to the caller, ids()).
    """
    def recursive_dls(problem, node, limit):
        global node_expanded, total_node, max_node, f_dim
        node_expanded += 1
        total_node += f_dim
        if problem.goal_test(node.state):
            return node
        elif limit == 0:
            return 'cutoff'
        cutoff_occurred = False
        for child_node in node.expand(problem):
            # f_dim tracks the current recursion branch as a frontier proxy.
            f_dim+=1
            max_node = f_dim if f_dim > max_node else max_node
            result = recursive_dls(problem, child_node, limit-1)
            f_dim -= 1
            if result == 'cutoff':
                cutoff_occurred = True
            elif result is not None:
                return result
        return 'cutoff' if cutoff_occurred else None
    return recursive_dls(problem, Node(problem.initial), limit)
# Iterative Deepening Search
def ids(problem):
    """Iterative deepening: repeated dls() with a growing depth limit.

    Returns the goal Node, or None if dls exhausts the space. Counters are
    reset once here and accumulate across iterations.
    """
    global node_expanded, total_node, max_node, f_dim
    init_param()
    prevexp = 0
    for depth in range(sys.maxsize):
        f_dim += 1
        result = dls(problem, depth)
        # NOTE(review): prints the nodes expanded by this iteration alone —
        # looks like leftover debug output.
        print(node_expanded - prevexp)
        prevexp = node_expanded
        f_dim = 0
        if result != 'cutoff':
            print_param()
            return result
    return None
# A*
def a_star(problem: BlocksWorld, h=None):
    """A*: best-first search on f(n) = g(n) + h(n), delegating to ucs().

    h defaults to the problem's own heuristic when not supplied.
    """
    global node_expanded
    heuristic = memoize(h or problem.h)
    return ucs(problem, lambda node: problem.depth(node) + heuristic(node))
# Recursive Best First Search
def rbfs(problem, h):
    """Recursive best-first search using heuristic *h* (or problem.h).

    Returns the goal Node or None. Uses linear memory: each recursion level
    keeps only its successor list and backs up the best alternative f-value.
    Same counter side effects as the other searches.
    """
    global node_expanded, total_node, max_node, f_dim
    init_param()
    # Cache h, g and f on each node via the memoize slots.
    h = memoize(h or problem.h, 'h')
    g = memoize(lambda n: problem.depth(n), 'g')
    f = memoize(lambda n: g(n) + h(n), 'f')
    def rbfs_search(problem, node, f_limit=np.inf):
        # Returns (goal_node_or_None, backed-up best f-value).
        global node_expanded, total_node, max_node, f_dim
        node_expanded += 1
        if problem.goal_test(node.state):
            print_param()
            return node, 0
        successors = [*node.expand(problem)]
        f_dim += len(successors)
        total_node += f_dim
        max_node = f_dim if f_dim > max_node else max_node
        if len(successors) == 0:
            return None, np.inf
        for child in successors:
            # A child's f can never be below its parent's backed-up value.
            child.f = max(f(child), node.f)
        while True:
            successors.sort(key=lambda x: x.f)
            best = successors[0]
            if best.f > f_limit:
                f_dim -= len(successors)
                return None, best.f
            alt = successors[1].f if len(successors) > 1 else np.inf
            # important: overwrite best.f with the backed-up value
            result, best.f = rbfs_search(problem, best, min(f_limit, alt))
            # return result
            if result is not None:
                f_dim -= len(successors)
                return result, best.f
    node = Node(problem.initial)
    f(node)
    f_dim += 1
    return rbfs_search(problem, node)[0]
570 | DiegoArcelli/BlocksWorld | refs/heads/main | /blocks_world.py | from aima3.search import *
from utils import *
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# Contains the AIMA-based implementation of the blocks-world problem.
class BlocksWorld(Problem):
    """AIMA-based formulation of the blocks-world problem.

    A state is a tuple ``(b1, ..., bn, width)``: each ``bi = (digit, row, col)``
    places block *digit* at height *row* of column *col*, and the final
    element is the number of columns available on the table. An action is a
    triple ``(digit, new_row, new_col)`` meaning "move block *digit* there".
    """
    def __init__(self, initial, goal):
        super().__init__(initial, goal)
    # returns the length of the state tuple — number of blocks plus one for
    # the stored table width, despite the name
    def get_blocks_number(self):
        return len(self.initial)
    # returns the list of legal actions in *state*: every block on top of a
    # column may move on top of any other column (or to an empty one)
    def actions(self, state):
        blocks = [*state[0:-1]]
        size = state[-1]
        columns = {}
        tops = []
        # columns maps a column index to its current topmost block.
        for block in blocks:
            n, i, j = block
            if j not in columns:
                columns[j] = (n, i, j)
            else:
                if i > columns[j][1]:
                    columns[j] = (n, i, j)
        for col in columns:
            tops.append(columns[col])
        actions = []
        for block in tops:
            n, i, j = block
            for col in range(size):
                if col != j:
                    if col in columns:
                        actions.append((n, columns[col][1]+1, col))
                    else:
                        actions.append((n, 0, col))
        return actions
    # returns the successor state obtained by applying *actions* (a single
    # move triple) to *state*
    def result(self, state, actions):
        blocks = [*state[0:-1]]
        size = state[-1]
        to_delete = ()
        for block in blocks:
            if block[0] == actions[0]:
                to_delete = block
        blocks.remove(to_delete)
        blocks.append((actions))
        blocks.append(size)
        return tuple(blocks)
    # checks whether *state* matches the goal (same placements, any order)
    def goal_test(self, state):
        op_1 = [*state[0:-1]]
        op_2 = [*self.goal[0:-1]]
        op_1.sort(key=lambda l: l[0])
        op_2.sort(key=lambda l: l[0])
        return str(op_1) == str(op_2)
    # returns the blocks that can currently be moved (the top of each column)
    def get_movable(self, state):
        blocks = [*state[0:-1]]
        size = state[-1]
        columns = {}
        tops = []
        for block in blocks:
            n, i, j = block
            if j not in columns:
                columns[j] = (n, i, j)
            else:
                if i > columns[j][1]:
                    columns[j] = (n, i, j)
        for col in columns:
            tops.append(columns[col])
        return tops
    # heuristic: number of blocks not in their goal position
    def misplaced_blocks(self, node):
        blocks = [*node.state[0:-1]]
        target = [*self.goal[0:-1]]
        target.sort(key=lambda l: l[0])
        value = 0
        for block in blocks:
            n, i, j = block
            if target[n-1][1:3] != (i, j):
                value += 1
        # if block not in self.get_movable(node.state):
        #     value += 1
        return value
    # returns the depth of a node in the search tree
    def depth(self, node):
        return node.depth
    # prints (and optionally draws) the action sequence leading from the
    # initial state to the goal state
    def solution(self, actions, output=True):
        # BUG FIX: the original guard was ``if len(actions) is None:`` which
        # is never true (and len(None) raises); guard the None case instead.
        if actions is None:
            return
        state = self.initial
        successor = None
        n = 1
        print("Lunghezza soluzione: " + str(len(actions)))
        for action in actions:
            print(action)
            successor = self.result(state, action)
            if output:
                # Show the before/after states side by side, full screen.
                figue_1 = self.draw_state(state)
                figue_2 = self.draw_state(successor)
                _, axarr = plt.subplots(1, 2)
                axarr[0].imshow(figue_1, cmap=plt.cm.binary)
                axarr[0].set_xticks([])
                axarr[0].set_yticks([])
                axarr[0].set_xlabel(f"\nStato {n}")
                axarr[1].imshow(figue_2, cmap=plt.cm.binary)
                axarr[1].set_xticks([])
                axarr[1].set_yticks([])
                axarr[1].set_xlabel(f"\nStato {n+1}")
                figManager = plt.get_current_fig_manager()
                figManager.full_screen_toggle()
                plt.show()
            state = successor
            n += 1
    # renders *state* as a grayscale image (used by solution's plots)
    def draw_state(self, state):
        blocks = [*state[0:-1]]
        w = state[-1]
        blocks.sort(key=lambda l: l[1], reverse=True)
        h = blocks[0][1]
        image = np.zeros(((h+1)*100, w*100), np.uint8)
        for block in blocks:
            n, i, j = block
            i = h - i
            digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
            digit = cv.resize(digit, (100, 100))
            image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
        size = (len(state) - 1)*100
        adjust = np.zeros((size, w*100), np.uint8)
        adjust[size - (h+1)*100 : size, :] = image
        return adjust
571 | DiegoArcelli/BlocksWorld | refs/heads/main | /main.py | from PIL import Image, ImageTk
from load_state import prepare_image
from utils import draw_state
from blocks_world import BlocksWorld
from search_algs import *
import argparse
from inspect import getfullargspec
# Defines the command-line script used to run the blocks-world solver.
if __name__ == "__main__":
    # Maps CLI algorithm names to the search functions in search_algs.
    search_algs = {
        "astar": a_star,
        "ucs": ucs,
        "rbfs": rbfs,
        "bfs": graph_bfs,
        "dfs": graph_dfs,
        "ids": ids
    }
    parser = argparse.ArgumentParser(description="Blocks World")
    parser.add_argument("--initial", "-i", type=str, default=None, required=True, help="The image representing the initial state")
    parser.add_argument("--goal", "-g", type=str, default=None, required=True, help="The image representing the goal state")
    parser.add_argument("--algorithm", "-a", type=str, default=None, required=True, help="The search algorithm used")
    parser.add_argument("--debug", "-d", default=False, required=False, action='store_true', help="Shows the steps of the image processing")
    parser.add_argument("--output", "-o", default=False, required=False, action='store_true', help="The solution is printed graphically")
    args = vars(parser.parse_args())
    initial_state_path = args["initial"]
    goal_state_path = args["goal"]
    search_alg = args["algorithm"]
    debug = args["debug"]
    output = args["output"]
    # Recognize both board images and turn them into state tuples.
    initial_state = prepare_image(initial_state_path, debug)
    goal_state = prepare_image(goal_state_path, debug)
    print(initial_state)
    print(goal_state)
    # Evaluation functions for the algorithms that take one. The lambdas
    # close over `problem`, which is assigned below before they are ever
    # called — late binding makes this safe.
    functions = {
        "ucs": lambda n: problem.depth(n),
        "astar": lambda n: problem.misplaced_blocks(n),
        "rbfs": lambda n: problem.misplaced_blocks(n)
    }
    problem = BlocksWorld(initial_state, goal_state)
    # Algorithms with a (problem, f) signature receive their evaluation
    # function; the others are called with the problem alone.
    if len(getfullargspec(search_algs[search_alg]).args) == 2:
        problem.solution(search_algs[search_alg](problem, functions[search_alg]).solution(), output)
    else:
        problem.solution(search_algs[search_alg](problem).solution(), output)
572 | DiegoArcelli/BlocksWorld | refs/heads/main | /load_state.py | import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import glob
from tensorflow import keras
from math import ceil
# NOTE(review): "deteced"/"poisitions" are typos for detected/positions; kept
# as-is because every function in this module references these exact names.
# Per-digit crops of the recognised digits (index = digit value - 1).
deteced = [np.array([]) for x in range(6)]
# Bounding box (x1, y1, x2, y2) of each digit inside the image, or None.
poisitions = [None for x in range(6)]
# When True, intermediate images and predictions are shown/printed.
debug_mode = False
# CNN classifier trained on the MNIST dataset.
model = keras.models.load_model("./model/model.h5")
# Classify the digit contained in `image` (a grayscale crop).
# Returns the predicted digit minus one (the block label), or -1 when the
# model predicts class 0 (no usable digit).
def predict(image):
    height, width = image.shape
    side = int(max(image.shape) * 1.2)
    pad_y = int((side - height) / 2)
    pad_x = int((side - width) / 2)
    # Centre the crop on a square black canvas so resizing keeps the aspect.
    canvas = np.zeros((side, side), np.uint8)
    canvas[pad_y:pad_y + height, pad_x:pad_x + width] = image
    canvas = (canvas / 255).astype('float64')
    canvas = cv.resize(canvas, (28, 28), interpolation=cv.INTER_AREA)
    # Shape the sample as a (1, 28, 28, 1) batch for the MNIST model.
    batch = np.expand_dims(np.array([canvas]), -1)
    digit = np.argmax(model.predict(batch))
    if debug_mode:
        print(digit)
        show(canvas)
    return digit - 1 if digit > 0 else -1
# Display `img` full-screen with the axis ticks hidden.
def show(img):
    manager = plt.get_current_fig_manager()
    manager.full_screen_toggle()
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img)
    plt.show()
# First smoothing pass: median + Gaussian blur, then invert the image.
def preprocess(image):
    blurred = cv.medianBlur(image, 3)
    blurred = cv.GaussianBlur(blurred, (3, 3), 0)
    return 255 - blurred
# Clean up the thresholded image: despeckle twice, morphologically open,
# then thin the strokes with two erosion passes.
def postprocess(image):
    for _ in range(2):
        image = cv.medianBlur(image, 5)
    opening_kernel = np.ones((3, 3), np.uint8)
    image = cv.morphologyEx(image, cv.MORPH_OPEN, opening_kernel)
    erosion_kernel = np.ones((3, 3), np.uint8)
    return cv.erode(image, erosion_kernel, iterations=2)
# Index grids covering the block of half-size `block_size` centred at `yx`,
# clipped to the image bounds. Returned as np.meshgrid(rows, cols).
def get_block_index(image_shape, yx, block_size):
    rows = np.arange(max(0, yx[0] - block_size),
                     min(image_shape[0], yx[0] + block_size))
    cols = np.arange(max(0, yx[1] - block_size),
                     min(image_shape[1], yx[1] + block_size))
    return np.meshgrid(rows, cols)
# Binarise a block relative to its median: pixels no more than 40 levels
# above the block median become white (255), clear outliers become black (0).
def adaptive_median_threshold(img_in):
    med = np.median(img_in)
    img_out = np.zeros_like(img_in)
    # Subtraction promotes to float, so unsigned dtypes cannot underflow.
    below_threshold = img_in - med < 40
    img_out[below_threshold] = 255
    return img_out
# Apply the adaptive median threshold block by block across the image.
def block_image_process(image, block_size):
    out_image = np.zeros_like(image)
    for top in range(0, image.shape[0], block_size):
        for left in range(0, image.shape[1], block_size):
            block_idx = get_block_index(image.shape, (top, left), block_size)
            out_image[block_idx] = adaptive_median_threshold(image[block_idx])
    return out_image
# Paint small noise blobs white so they disappear from the binary image.
def clean(image):
    outlines, _ = cv.findContours(
        image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    for outline in outlines:
        poly = cv.approxPolyDP(
            outline, 0.001 * cv.arcLength(outline, True), True)
        if search_noise(outline, poly, image.shape[::-1]):
            cv.drawContours(image, [poly], 0, 255, -1)
    return image
# Return True when a contour looks like noise: small in area (< 0.1% of the
# image) AND small in extent (< 1/50 of each dimension).
# NOTE(review): callers pass image.shape[::-1] == (width, height), but this
# unpacks it as (i_h, i_w), swapping the two. Harmless for square images and
# for the area product, but the per-dimension checks below compare width
# against height/50 and vice versa — confirm intent.
def search_noise(contour, approx, image_size):
    i_h, i_w = image_size
    x, y, w, h = cv.boundingRect(approx)
    image_area = i_w*i_h
    # Anything covering at least 0.1% of the image is real content.
    if cv.contourArea(contour) >= image_area/1000:
        return False
    # Anything spanning at least 1/50 of a dimension is real content.
    if w >= i_w/50 or h >= i_h/50:
        return False
    return True
# Run the classifier on every outermost contour and record, per digit found,
# its crop (in `deteced`) and its bounding box (in `poisitions`).
def find_digits(image, org_image, org):
    contours, hierarchy = cv.findContours(image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    for idx, contour in enumerate(contours):
        approx = cv.approxPolyDP(contour, 0.001 * cv.arcLength(contour, True), True)
        x, y, w, h = cv.boundingRect(approx)
        # hierarchy[...][3] == -1 means the contour has no parent (outermost).
        if hierarchy[0][idx][3] == -1:
            label = predict(org_image[y:y+h, x:x+w])
            if label != -1:
                deteced[label] = org[y:y+h, x:x+w]
                poisitions[label] = (x, y, x + w, y + h)
# Locate the box that contains the blocks and isolate the digit strokes
# inside it. Returns (result, box): a full-size binary image holding only
# the digits, and the box's (x1, y1, x2, y2) coordinates.
def find_box(image):
    o_h, o_w = image.shape[0:2]
    contours, hierarchy = cv.findContours(
        image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    # NOTE(review): sorting the findContours result in place requires
    # OpenCV 3.x (4.x returns an immutable tuple) — confirm the pinned
    # cv2 version.
    contours.sort(reverse=True, key=lambda c: cv.contourArea(c))
    # Second-largest contour: presumably the box outline (the largest being
    # the page/background) — TODO confirm against sample inputs.
    contour = contours[1]
    approx = cv.approxPolyDP(
        contour, 0.001 * cv.arcLength(contour, True), True)
    x, y, w, h = cv.boundingRect(approx)
    box = (x, y, x + w, y + h)
    img = image[y:y+h, x:x+w]
    sub = img.copy()
    # Paste the crop onto a white canvas with a 25px margin so that contours
    # touching the crop's edge remain closed.
    bg = ~np.zeros((h + 50, w + 50), np.uint8)
    bg[25: 25 + h, 25: 25 + w] = img
    img = bg
    i = 0
    i_h, i_w = img.shape[0:2]
    tot = np.zeros(shape=(i_h, i_w))
    if debug_mode:
        print(image)
    contours, hierarchy = cv.findContours(img, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    # Fill depth-1 contours white and depth-2 contours black, producing a
    # mask of the solid shapes inside the box. `i` tracks the hierarchy row
    # for the current contour.
    for contour in contours:
        approx = cv.approxPolyDP(
            contour, 0.001 * cv.arcLength(contour, True), True)
        if hierarchy[0][i][3] == 0:
            cv.drawContours(tot, [approx], 0, 255, -1)
        if hierarchy[0][i][3] == 1:
            cv.drawContours(tot, [approx], 0, 0, -1)
        i += 1
    # Remove the margin again and thicken the mask.
    tot = tot[25: 25 + h, 25: 25 + w]
    kernel = np.ones((5, 5), np.uint8)
    tot = cv.dilate(tot, kernel, iterations=3)
    tot = tot.astype('uint32')
    sub = sub.astype('uint32')
    # A pixel belongs to a digit stroke only where both mask and crop are 0.
    res = sub + tot
    res = np.where(res == 0, 255, 0)
    # Re-embed the digits at their original position in a full-size image.
    result = np.zeros((o_h, o_w), np.uint8)
    result[y:y+h, x:x+w] = res
    if debug_mode:
        show(result)
    return (result, box)
# Grow a digit's bounding box outward until each side reaches the white
# border of the block that contains it. `dims` is (x1, y1, x2, y2);
# returns the grown (x1, y1, x2, y2).
def get_block_borders(dims, image):
    x_i, y_i, x_f, y_f = dims
    kernel = np.ones((5, 5), np.uint8)
    image = cv.erode(image, kernel, iterations=1)
    y_m = (y_f + y_i) // 2
    x_m = (x_f + x_i) // 2

    def scan(start, step, horizontal):
        # Walk from `start` in direction `step` until a white (255) pixel,
        # moving along the box's centre row (horizontal) or column.
        pos = start
        while (image[y_m, pos] if horizontal else image[pos, x_m]) != 255:
            pos += step
        return pos

    left = scan(x_i - 1, -1, True)
    right = scan(x_f + 1, 1, True)
    top = scan(y_i - 1, -1, False)
    bottom = scan(y_f + 1, 1, False)
    return (left, top, right, bottom)
# Full pipeline for one input photo: threshold it, locate the box and the
# digits, then grow each digit's bbox to the borders of its block. Resets
# and refills the module-level `deteced`/`poisitions` globals as a side
# effect; returns the box coordinates (x1, y1, x2, y2).
def process_image_file(filename):
    global deteced, poisitions, explored, debug_mode
    block_size = 50
    # Fresh per-image state for the module globals.
    deteced = [np.array([]) for x in range(6)]
    poisitions = [None for x in range(6)]
    explored = []
    image_in = cv.cvtColor(cv.imread(filename), cv.COLOR_BGR2GRAY)
    if debug_mode:
        show(image_in)
    image_in_pre = preprocess(image_in)
    image_out = block_image_process(image_in_pre, block_size)
    image_out = postprocess(image_out)
    image_out = clean(image_out)
    if debug_mode:
        show(image_out)
    digits, box = find_box(image_out)
    find_digits(digits, ~image_out, image_in)
    for i in range(6):
        if deteced[i].size > 0:
            image = deteced[i]
            # Expand the digit's bbox to the surrounding block's borders.
            x, y, w, h = get_block_borders(poisitions[i], ~image_out)
            poisitions[i] = (x, y, w, h)
            cv.rectangle(image_in, (x, y), (w, h), 255, 2)
    if debug_mode:
        show(image_in)
    return box
def check_intersection(values):
    """Return True when interval (v1_i, v1_f) strictly contains the
    midpoint of interval (v2_i, v2_f).

    `values` is the tuple (v1_i, v1_f, v2_i, v2_f); the midpoint uses
    integer division, matching the pixel-coordinate callers.
    """
    v1_i, v1_f, v2_i, v2_f = values
    v2_m = (v2_i + v2_f) // 2
    # Direct boolean result instead of the if/return-True/return-False idiom.
    return v1_i < v2_m < v1_f
# Translate the detected digit bounding boxes into a symbolic Blocks World
# state: a tuple of (block, row, column) triples followed by the total
# width of the world in block units.
# NOTE(review): mean_points is appended only for detected blocks but is
# indexed by block id (e-1) below — if any of the 6 positions is None the
# indices shift; confirm all six blocks are always detected.
def create_state(poisitions, box):
    cols = [[] for x in range(6)]
    mean_points = []
    for i in range(6):
        if poisitions[i] is not None:
            x1_i, y1_i, x1_f, y1_f = poisitions[i]
            mean_points.append(((x1_f + x1_i) // 2, ((y1_f + y1_i) // 2)))
            # Blocks whose horizontal spans overlap share a column.
            c = [i+1]
            for j in range(6):
                if poisitions[j] is not None and j != i:
                    x2_i, y2_i, x2_f, y2_f = poisitions[j]
                    if check_intersection((x1_i, x1_f, x2_i, x2_f)):
                        c.append(j+1)
            c.sort()
            cols[i] = tuple([*c])
        else:
            cols[i] = ()
    # Deduplicate: each column tuple was built once per member block.
    temp_cols = list(set(tuple(cols)))
    if () in temp_cols:
        temp_cols.remove(())
    cols = []
    for t_col in temp_cols:
        col = list(t_col)
        # Bottom-most block (largest y midpoint) first within a column.
        col.sort(reverse=True, key=lambda e: mean_points[e-1][1])
        cols.append(tuple(col))
    # Order columns left-to-right by the x midpoint of their bottom block.
    cols.sort(key=lambda e: mean_points[e[0]-1][0])
    bottoms = [col[0] for col in cols]
    # Gaps, measured in block widths: before the first column, between
    # consecutive columns, and after the last column.
    distances = []
    xb_i, _, xb_f, _ = box
    x_i, _, x_f, _ = poisitions[bottoms[0]-1]
    dist = abs(x_i - xb_i)
    dist = dist / (x_f - x_i)
    distances.append(dist)
    for i in range(len(bottoms)-1):
        x1_i, _, x1_f, _ = poisitions[bottoms[i]-1]
        x2_i, _, _, _ = poisitions[bottoms[i+1]-1]
        dist = abs(x2_i - x1_f)
        dist = dist / (x1_f - x1_i)
        distances.append(dist)
    x_i, _, x_f, _ = poisitions[bottoms[-1]-1]
    dist = abs(xb_f - x_f)
    dist = dist / (x_f - x_i)
    distances.append(dist)
    # Round each gap to the nearest whole number of block widths.
    for i in range(len(distances)):
        dist = distances[i]
        if dist - int(dist) >= 0.5:
            distances[i] = int(dist) + 1
        else:
            distances[i] = int(dist)
    # Total world width = gaps + one unit per column.
    n = sum(distances) + len(cols)
    i = distances[0]
    state = []
    pos = 1
    # Emit (block, height-in-column, column-x) triples, column by column.
    for col in cols:
        j = 0
        for block in col:
            state.append((block, j, i))
            j += 1
        i += distances[pos] + 1
        pos += 1
    state.append(n)
    return tuple(state)
def prepare_image(file_path, debug):
    """Process the image at `file_path` and return its symbolic state.

    `debug` (any truthy/falsy value) toggles the module-level debug
    display used throughout the pipeline.
    """
    global debug_mode
    # bool() replaces the redundant `True if debug else False` conditional.
    debug_mode = bool(debug)
    box = process_image_file(file_path)
    return create_state(poisitions, box)
| {"/launch.py": ["/load_state.py", "/utils.py", "/blocks_world.py", "/search_algs.py"], "/search_algs.py": ["/utils.py", "/blocks_world.py"], "/blocks_world.py": ["/utils.py"], "/main.py": ["/load_state.py", "/utils.py", "/blocks_world.py", "/search_algs.py"]} |
592 | digital-sustainability/swiss-procurement-classifier | refs/heads/master | /runOldIterations.py | from train import ModelTrainer
from collection import Collection
import pandas as pd
import logging
import traceback
import os
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# === THESIS ===
# Supplier names grouped by sector; each group is iterated as one batch of
# experiments by IterationRunner below.
anbieter_config = {
    'Construction': [
        'Alpiq AG',
        'Swisscom',
        'Kummler + Matter AG',
        'Siemens AG'
    ],
    'IT': [
        'G. Baumgartner AG',
        'ELCA Informatik AG',
        'Thermo Fisher Scientific (Schweiz) AG',
        'Arnold AG',
    ],
    'Other': [
        'Riget AG',
        'isolutions AG',
        'CSI Consulting AG',
        'Aebi & Co. AG Maschinenfabrik',
    ],
    'Divers': [
        'DB Schenker AG',
        'IT-Logix AG',
        'AVS Syteme AG',
        'Sajet SA'
    ]
}
# === TESTING ===
#anbieter = 'Marti AG' #456
#anbieter = 'Axpo AG' #40
#anbieter = 'Hewlett-Packard' #90
#anbieter = 'BG Ingénieurs Conseils' SA #116
#anbieter = 'Pricewaterhousecoopers' #42
#anbieter = 'Helbling Beratung + Bauplanung AG' #20
#anbieter = 'Ofrex SA' #52
#anbieter = 'PENTAG Informatik AG' #10
#anbieter = 'Wicki Forst AG' #12
#anbieter = 'T-Systems Schweiz' #18
#anbieter = 'Bafilco AG' #20
#anbieter = '4Video-Production GmbH' #3
#anbieter = 'Widmer Ingenieure AG' #6
#anbieter = 'hmb partners AG' #2
#anbieter = 'Planmeca' #4
#anbieter = 'K & M Installationen AG' #4
# SQL column list for the supplier (anbieter) side of the join in
# ModelTrainer's query.
select_anbieter = (
    "anbieter.anbieter_id, "
    "anbieter.institution as anbieter_institution, "
    "cpv_dokument.cpv_nummer as anbieter_cpv, "
    "ausschreibung.meldungsnummer"
)

# anbieter_CPV are all the CPVs the Anbieter ever won a procurement for. So all the CPVs they are interested in.
# SQL column list for the tender (ausschreibung) side of the join.
select_ausschreibung = (
    "anbieter.anbieter_id, "
    "auftraggeber.institution as beschaffungsstelle_institution, "
    "auftraggeber.beschaffungsstelle_plz, "
    "ausschreibung.gatt_wto, "
    "ausschreibung.sprache, "
    "ausschreibung.auftragsart_art, "
    "ausschreibung.lose, "
    "ausschreibung.teilangebote, "
    "ausschreibung.varianten, "
    "ausschreibung.projekt_id, "
    # "ausschreibung.titel, "
    "ausschreibung.bietergemeinschaft, "
    "cpv_dokument.cpv_nummer as ausschreibung_cpv, "
    "ausschreibung.meldungsnummer as meldungsnummer2"
)

# Feature columns used for training; see cleanData below for how each one
# is normalised/encoded.
attributes = ['ausschreibung_cpv', 'auftragsart_art','beschaffungsstelle_plz','gatt_wto','lose','teilangebote', 'varianten','sprache']
# attributes = ['auftragsart_art']

config = {
    # ratio that the positive and negative responses have to each other
    'positive_to_negative_ratio': 0.5,
    # Percentage of training set that is used for testing (Recommendation of at least 25%)
    'test_size': 0.25,
    # Number of differently-seeded negative samples / training runs.
    'runs': 100,
    #'enabled_algorithms': ['random_forest'],
    'enabled_algorithms': ['random_forest', 'decision_tree', 'gradient_boost'],
    'random_forest': {
        # Tune Random Forest Parameter
        'n_estimators': 100,
        'max_features': 'sqrt',
        'max_depth': None,
        'min_samples_split': 2
    },
    'decision_tree': {
        'max_depth': 15,
        'max_features': 'sqrt'
    },
    'gradient_boost': {
        'n_estimators': 100,
        'learning_rate': 0.1,
        'max_depth': 15,
        'max_features': 'sqrt'
    }
}
# Prepare Attributes
def cleanData(df, filters):
    """Normalise the columns named in `filters` for model training.

    Yes/no columns are mapped to 1/0, count columns to ints, and the
    categorical columns are one-hot encoded (source column dropped).
    Returns the transformed dataframe; as before, `df` may be modified in
    place for the non-dummy columns.
    """
    # if 'beschaffungsstelle_plz' in filters:
    #     df[['beschaffungsstelle_plz']] = df[['beschaffungsstelle_plz']].applymap(ModelTrainer.tonumeric)
    # Element-wise transforms, applied in the original branch order.
    # ModelTrainer is resolved lazily (getattr inside the branch) so the
    # function stays importable/testable without the train module.
    elementwise = [
        ('gatt_wto', 'unifyYesNo'),
        ('anzahl_angebote', 'tonumeric'),
        ('teilangebote', 'unifyYesNo'),
        ('lose', 'unifyYesNo'),
        ('varianten', 'unifyYesNo'),
    ]
    for column, func_name in elementwise:
        if column in filters:
            df[[column]] = df[[column]].applymap(getattr(ModelTrainer, func_name))
    # Categorical columns: one-hot encode (NaN gets its own column) and
    # drop the source column, in the original branch order.
    dummy_columns = [
        ('auftragsart_art', 'aftrgsrt'),
        ('sprache', 'lang'),
        ('auftragsart', 'auftr'),
        ('beschaffungsstelle_plz', 'beschaffung_plz'),
    ]
    for column, prefix in dummy_columns:
        if column in filters:
            encoded = pd.get_dummies(df[column], prefix=prefix, dummy_na=True)
            df = pd.concat([df, encoded], axis=1).drop([column], axis=1)
    return df
class IterationRunner():
    """Drives ModelTrainer over every configured supplier and attribute
    combination, appending each result to a Collection that is persisted
    to disk after every single run."""

    def __init__(self, anbieter_config, select_anbieter, select_ausschreibung, attributes, config, cleanData):
        self.anbieter_config = anbieter_config
        self.select_anbieter = select_anbieter
        self.select_ausschreibung = select_ausschreibung
        self.attributes = attributes
        self.config = config
        self.cleanData = cleanData
        # One shared trainer; anbieter/attributes are swapped in per run.
        self.trainer = ModelTrainer(select_anbieter, select_ausschreibung, '', config, cleanData, attributes)
        self.collection = Collection()

    def run(self):
        """Train with growing attribute prefixes ([a1], [a1,a2], ...) for
        every supplier in every group.

        NOTE(review): range(len(attributes)-1) means the full attribute
        list is never run here — possible off-by-one; confirm intent.
        """
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr_id in range(len(self.attributes)-1):
                    att_list = self.attributes[:attr_id+1]
                    self.singleRun(anbieter, att_list, label)
                # Drop the cached SQL data before the next supplier.
                self.trainer.resetSQLData()

    def runAttributesEachOne(self):
        """Train once per single attribute for every supplier."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr in self.attributes:
                    att_list = [attr]
                    self.singleRun(anbieter, att_list, label)
                self.trainer.resetSQLData()

    def runSimpleAttributeList(self):
        """Train once per supplier using the full attribute list."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                self.singleRun(anbieter, self.attributes, label)
                self.trainer.resetSQLData()

    def singleRun(self, anbieter, att_list, label):
        """One training run: configure the shared trainer, run it, label
        and collect the result, and persist the collection to the JSON
        file named by $DB_FILE (default dbs/auto.json). Exceptions are
        printed and swallowed so a batch keeps going."""
        logger.info('label: {}, anbieter: {}, attributes: {}'.format(label, anbieter, att_list))
        try:
            self.trainer.attributes = att_list
            self.trainer.anbieter = anbieter
            output = self.trainer.run()
            output['label'] = label
            self.collection.append(output)
            filename = os.getenv('DB_FILE', 'dbs/auto.json')
            self.collection.to_file(filename)
        except Exception as e:
            traceback.print_exc()
            print(e)
        print('one it done')
# Module-level runner so it can also be driven from an interactive session.
runner = IterationRunner(anbieter_config, select_anbieter, select_ausschreibung, attributes, config, cleanData)

if __name__ == '__main__':
    # runner.collection.import_file('dbs/auto.json')
    runner.run()
    runner.runAttributesEachOne()
    # label, anbieters = next(iter(runner.anbieter_config.items()))
    # print(label)
| {"/runOldIterations.py": ["/train.py", "/collection.py"], "/train.py": ["/db.py"], "/learn.py": ["/db.py"], "/helpers.py": ["/db.py"], "/runIterations.py": ["/learn.py", "/collection.py"]} |
593 | digital-sustainability/swiss-procurement-classifier | refs/heads/master | /train.py | import pandas as pd
import math
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, matthews_corrcoef
from db import connection, engine
import logging
# Module-level logger (INFO) used for progress messages below.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ModelTrainer():
    """Trains binary classifiers that predict whether a given supplier
    (anbieter) bids on a procurement, evaluating each enabled algorithm
    over many deterministically seeded negative samples."""

    def __init__(self, select_anbieter, select_ausschreibung, anbieter, config, cleanData, attributes=[]):
        # NOTE(review): mutable default `attributes=[]` is shared across
        # instances; safe only while callers never mutate it in place.
        self.anbieter = anbieter
        self.select_anbieter = select_anbieter
        self.select_ausschreibung = select_ausschreibung
        self.attributes = attributes
        self.config = config
        self.cleanData = cleanData

    def run(self):
        """Build samples, train every enabled algorithm, and return the
        evaluation result dict."""
        positive_sample, negative_samples = self.createSamples()
        positive_and_negative_samples = self.prepareForRun(
            positive_sample,
            negative_samples
        )
        # most certainly used to resolve the naming functions like getFalseProjectTitle
        # NOTE(review): merged_samples_for_names is computed but never used.
        merged_samples_for_names = self.prepareUnfilteredRun(
            positive_sample,
            negative_samples
        )
        result = self.trainSpecifiedModels(positive_and_negative_samples)
        return result
        # xTests, yTests = self.trainModel(positive_and_negative_samples)

    def resetSQLData(self):
        # Drop the cached query results so the next run re-queries the DB.
        # NOTE(review): the bare except deliberately covers the case where
        # nothing was cached yet; it also hides any other error here.
        try:
            del self.positives
            del self.negatives
        except:
            pass

    def createSamples(self):
        """Return (positives, [one negative sample per run]) with the Y
        label column set: 1 = the supplier's own awards, 0 = others."""
        if not hasattr(self, 'positives') or not hasattr(self, 'negatives'):
            self.queryData()
        negative_samples = []
        # NOTE(review): ratio 0.5 yields 1.5x as many negatives as
        # positives (ratio + 1) — confirm this is the intended meaning of
        # 'positive_to_negative_ratio'.
        negative_sample_size = math.ceil(len(self.positives) * (self.config['positive_to_negative_ratio'] + 1))
        for count in range(self.config['runs']):
            # Deterministic but distinct sample per run (seed = run index).
            negative_samples.append(self.negatives.sample(negative_sample_size, random_state=count))
        self.positives['Y'] = 1
        for negative_sample in negative_samples:
            negative_sample['Y']=0
        return (self.positives, negative_samples)

    def queryData(self):
        # Fetch and cache the positive and negative rows from the database.
        self.positives = self.__runSql(True)
        self.negatives = self.__runSql(False)
        logger.info('sql done')
        return self.positives, self.negatives

    def __runSql(self, response):
        # response=True selects the supplier's own rows (=); False selects
        # everyone else's (!=).
        resp = '='
        if (not response):
            resp = '!='
        # NOTE(review): the query is assembled via string formatting; this
        # is acceptable only because select strings and anbieter come from
        # local config, never from untrusted input.
        query = """SELECT * FROM (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag
        INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer)
        INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id)
        INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id)
        INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id)
        INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id)
        INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer)
        WHERE anbieter.institution {} "{}" ) anbieter
        JOIN (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag
        INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer)
        INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id)
        INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id)
        INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id)
        INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id)
        INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer)
        WHERE anbieter.institution {} "{}"
        ) ausschreibung ON ausschreibung.meldungsnummer2 = anbieter.meldungsnummer
        ORDER BY ausschreibung.meldungsnummer2;
        """.format(self.select_anbieter, resp, self.anbieter, self.select_ausschreibung, resp, self.anbieter)
        return pd.read_sql(query, engine)

    def prepareForRun(self, positive_sample, negative_samples):
        """Merge the positives with each negative sample, restricted to the
        selected attributes, and clean every merged frame."""
        # What attributes the model will be trained by
        filters = ['Y', 'projekt_id'] + self.attributes
        positive_and_negative_samples = []
        for negative_sample in negative_samples:
            # Merge positive and negative df into one, only use selected attributes
            # NOTE(review): DataFrame.append was removed in pandas 2.0 —
            # this module requires pandas < 2 (or a port to pd.concat).
            merged_samples = positive_sample.append(negative_sample, ignore_index=True)[filters].copy()
            # Clean the data of all selected attributes
            cleaned_merged_samples = self.cleanData(merged_samples, self.attributes)
            positive_and_negative_samples.append(cleaned_merged_samples)
        return positive_and_negative_samples

    def prepareUnfilteredRun(self, positive_sample, negative_samples):
        """Same merge as prepareForRun but without column filtering or
        cleaning (keeps all columns, e.g. for name lookups)."""
        merged_samples_for_names = []
        for negative_sample in negative_samples:
            # Merge positive and negative df into one
            merged_samples_for_names.append(positive_sample.append(negative_sample, ignore_index=True).copy())
        return merged_samples_for_names

    def trainSpecifiedModels(self, positive_and_negative_samples):
        """Train every algorithm in config['enabled_algorithms'] and return
        a dict of per-algorithm metrics, data and metadata."""
        result = {}
        for algorithm in self.config['enabled_algorithms']:
            if algorithm == 'random_forest':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                min_samples_split = self.config[algorithm]['min_samples_split']
                classifier = lambda randomState: RandomForestClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    min_samples_split=min_samples_split,
                    random_state=randomState,
                    n_jobs=-1
                )
            elif algorithm == 'gradient_boost':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                learning_rate = self.config[algorithm]['learning_rate']
                classifier = lambda randomState: GradientBoostingClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    learning_rate=learning_rate,
                    random_state=randomState
                )
            elif algorithm == 'decision_tree':
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                # NOTE(review): unlike the other two, this factory ignores
                # randomState (no random_state=), so decision-tree runs are
                # not seeded — confirm whether that is intended.
                classifier = lambda randomState: DecisionTreeClassifier(
                    max_depth=max_depth,
                    max_features=max_features
                )
            else:
                raise Exception('enabled algorithm: {} doesn\'t exist.'.format(algorithm))
            result[algorithm] = {}
            xTests, yTests = self.trainModel(positive_and_negative_samples, classifier, algorithm)
            result['attributes'] = self.attributes
            result['anbieter'] = self.anbieter
            result['timestamp'] = datetime.now().isoformat()
            #result[algorithm]['xTests'] = xTests
            #result[algorithm]['yTests'] = yTests
            result[algorithm]['metrics'] = self.config[algorithm]
            evaluation_dataframe =pd.concat([self.__getConfusionMatices(yTests), self.__getAccuracies(yTests)], axis=1, sort=False)
            result[algorithm]['data'] = evaluation_dataframe.to_dict()
            result[algorithm]['metadata'] = self.__getIterationMetadata(evaluation_dataframe)
        return result

    def trainModel(self, positive_and_negative_samples, classifier, algorithm):
        """Run one train/predict cycle per sample; returns (xTests, yTests)
        where each yTest frame carries run number, prediction and a
        per-row correctness flag."""
        xTests = []
        yTests = []
        for idx, df in enumerate(positive_and_negative_samples): # enum to get index
            x_and_y_test, x_and_y_train = self.unique_train_and_test_split(df, random_state=idx)
            # Select all attributes
            xtest = x_and_y_test.drop(['Y'], axis=1)
            xtrain = x_and_y_train.drop(['Y'], axis=1)
            # Only select the response result attributes
            ytest = x_and_y_test['Y']
            ytrain = x_and_y_train['Y']
            # Create the model
            clf = classifier(randomState=idx)
            # Compute cross validation (5-fold)
            scores = self.__cross_val_score(clf, xtest, ytest, cv=5)
            print(scores)
            print('Avg. CV Score | {} Run {}: {:.2f}'.format(algorithm, idx, round(sum(scores)/len(scores), 4)))
            # projekt_id is only needed for the grouped split, not training.
            xtest = xtest.drop(['projekt_id'], axis=1)
            xtrain = xtrain.drop(['projekt_id'], axis=1)
            # Train the model on training sets
            clf = clf.fit(xtrain, ytrain)
            # Predict on the test sets
            prediction = clf.predict(xtest)
            # Convert pandas.series to data frame
            df_ytest = ytest.to_frame()
            # Add run number to df
            df_ytest['run'] = idx
            xtest['run'] = idx
            # add prediction to df
            df_ytest['prediction']= prediction
            # add result of run to df
            df_ytest['correct'] = df_ytest['prediction']==df_ytest['Y']
            # add run to run arrays
            xTests.append(xtest)
            yTests.append(df_ytest)
        return xTests, yTests

    def __getAccuracies(self, dfys):
        """One row per run: accuracy (%), Matthews corr. coefficient and
        false-negative rate (%)."""
        res = pd.DataFrame(columns=['accuracy', 'MCC', 'fn_rate'])
        for dfy in dfys:
            acc = round(accuracy_score(dfy.Y, dfy.prediction), 4)
            # f1 = round(f1_score(dfy.Y, dfy.prediction), 4)
            mcc = matthews_corrcoef(dfy.Y, dfy.prediction)
            matrix = confusion_matrix(dfy.Y, dfy.prediction)
            # fn / (tp + fn)
            fnr = round(matrix[1][0] / (matrix[1][1] + matrix[1][0]), 4)
            # add row to end of df, *100 for better % readability
            res.loc[len(res)] = [ acc*100, mcc, fnr*100 ]
        return res

    def __getConfusionMatices(self, dfys):
        """One row per run with the confusion-matrix counts tn/tp/fp/fn."""
        res = pd.DataFrame(columns=['tn', 'tp', 'fp', 'fn'])
        for dfy in dfys:
            # ConfusionMatrix legende:
            # [tn, fp]
            # [fn, tp]
            matrix = confusion_matrix(dfy.Y, dfy.prediction)
            res.loc[len(res)] = [ matrix[0][0], matrix[1][1], matrix[0][1], matrix[1][0] ]
        # res.loc['sum'] = res.sum() # Summarize each column
        return res

    def __getIterationMetadata(self, df):
        """Summary statistics (mean/median/min/max/quartiles) over all runs
        for accuracy, MCC and false-negative rate, plus mean sample size."""
        res = {}
        res['acc_mean'] = df['accuracy'].mean()
        res['acc_median'] = df['accuracy'].median()
        res['acc_min'] = df['accuracy'].min()
        res['acc_max'] = df['accuracy'].max()
        res['acc_quantile_25'] = df['accuracy'].quantile(q=.25)
        res['acc_quantile_75'] = df['accuracy'].quantile(q=.75)
        res['mcc_mean'] = df['MCC'].mean()
        res['mcc_median'] = df['MCC'].median()
        res['mcc_min'] = df['MCC'].min()
        res['mcc_max'] = df['MCC'].max()
        res['mcc_quantile_25'] = df['MCC'].quantile(q=.25)
        res['mcc_quantile_75'] = df['MCC'].quantile(q=.75)
        res['fn_rate_mean'] = df['fn_rate'].mean()
        res['fn_rate_median'] = df['fn_rate'].median()
        res['fn_rate_min'] = df['fn_rate'].min()
        res['fn_rate_max'] = df['fn_rate'].max()
        res['fn_rate_quantile_25'] = df['fn_rate'].quantile(q=.25)
        res['fn_rate_quantile_75'] = df['fn_rate'].quantile(q=.75)
        res['sample_size_mean'] = (df['fp'] + df['fn'] + df['tn'] + df['tp']).mean()
        return res

    def __cross_val_score(self, clf, x_values, y_values, cv):
        """Hand-rolled cross validation that uses the project-aware split
        (unique_train_and_test_split) instead of sklearn's row-wise CV."""
        x_and_y_values = pd.concat([y_values, x_values], axis=1)
        cross_val_scores = []
        for validation_run_index in range(cv):
            x_and_y_test, x_and_y_train = self.unique_train_and_test_split(x_and_y_values, random_state=validation_run_index)
            # Select all attributes but projekt_id and the label
            xtest = x_and_y_test.drop(['projekt_id', 'Y'], axis=1)
            xtrain = x_and_y_train.drop(['projekt_id', 'Y'], axis=1)
            # Only select the response result attributes
            ytest = x_and_y_test['Y']
            ytrain = x_and_y_train['Y']
            clf = clf.fit(xtrain, ytrain)
            prediction = clf.predict(xtest)
            cross_val_scores.append(accuracy_score(ytest, prediction))
        return cross_val_scores

    def unique_train_and_test_split(self, df, random_state):
        """Split so that all rows of one projekt_id land entirely in either
        the test or the train set, avoiding leakage across the split."""
        run = shuffle(df, random_state=random_state) # run index as random state
        # Get each runs unique projekt_id values
        unique_mn = run.projekt_id.unique()
        # Split the projekt_ids between test and trainings set so there will be no bias in test set
        x_unique_test, x_unique_train = train_test_split(unique_mn, test_size=self.config['test_size'], random_state=random_state)
        # Add the remaining attributes back by projekt_id membership
        x_and_y_test = run[run['projekt_id'].isin(x_unique_test)].copy()
        x_and_y_train = run[run['projekt_id'].isin(x_unique_train)].copy()
        return x_and_y_test, x_and_y_train

    # Plain helper (no self); used via ModelTrainer.tonumeric in cleanData.
    # @param val: a value to be casted to numeric
    # @return the value cast to int, or 0 if the cast is not possible
    def tonumeric(val):
        try:
            return int(val)
        except:
            return 0

    # Plain helper (no self); used via ModelTrainer.unifyYesNo in cleanData.
    # @param val: a string value to be categorised
    # @return 1 for the yes-values below, 0 for everything else
    # NOTE(review): 'Nei' looks like a typo (German no is 'Nein', already
    # listed) — harmless, but confirm.
    def unifyYesNo(val):
        switcher = {
            'Ja': 1,
            'Sì': 1,
            'Oui': 1,
            'Nein': 0,
            'Nei': 0,
            'Non': 0,
        }
        return switcher.get(val, 0)
| {"/runOldIterations.py": ["/train.py", "/collection.py"], "/train.py": ["/db.py"], "/learn.py": ["/db.py"], "/helpers.py": ["/db.py"], "/runIterations.py": ["/learn.py", "/collection.py"]} |
594 | digital-sustainability/swiss-procurement-classifier | refs/heads/master | /learn.py | import pandas as pd
import numpy as np
import math
import re
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, matthews_corrcoef
from sklearn import tree
from db import connection, engine
import logging
# Module-level logger (INFO) used for progress messages below.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ModelTrainer():
    def __init__(self, select, anbieter, config, attributes=[]):
        """Set up a trainer for one supplier (anbieter) and feature set.

        NOTE(review): mutable default `attributes=[]` is shared across
        instances; safe only while it is never mutated in place.
        """
        self.anbieter = anbieter
        self.select = select
        self.attributes = attributes
        self.config = config
    def run(self):
        """Query, prepare and train all enabled algorithms; returns the
        result dict, including the duplicate rows found while preparing."""
        self.queryData()
        # prepare_data is defined further down in this class.
        prepared_positives, prepared_negatives, duplicates = self.prepare_data()
        result = self.trainAllModels(prepared_positives, prepared_negatives)
        result['duplicates'] = duplicates.to_dict()
        return result
    def resetSQLData(self):
        # Drop cached query results so the next run hits the database again.
        # NOTE(review): the bare except covers the "nothing cached yet"
        # case, but also hides any other error here.
        try:
            del self.positives
            del self.negatives
        except:
            pass
    def trainAllModels(self, positives, negatives):
        """Train every enabled algorithm on freshly drawn samples and merge
        the per-algorithm evaluations into one result dict."""
        result = {
            'attributes': self.attributes,
            'anbieter': self.anbieter,
            'timestamp': datetime.now().isoformat()
        }
        samples = self.createSamples(positives, negatives)
        result = {**result, **self.trainAllAlgorithms(samples)}
        return result
    def createSamples(self, positives, negatives):
        """Build one shuffled positive+negative dataframe per configured
        run; each run's negative draw and shuffle are seeded with the run
        index, so results are reproducible."""
        # NOTE(review): ratio 0.5 yields 1.5x negatives (ratio + 1) —
        # confirm the intended meaning of 'positive_to_negative_ratio'.
        negative_sample_size = math.ceil(len(positives) * (self.config['positive_to_negative_ratio'] + 1))
        samples = []
        for runIndex in range(self.config['runs']):
            negative_sample = negatives.sample(negative_sample_size, random_state=runIndex)
            # NOTE(review): DataFrame.append was removed in pandas 2.0 —
            # requires pandas < 2 (or a port to pd.concat).
            sample = positives.append(negative_sample, ignore_index=True)
            sample.reset_index(drop=True, inplace=True)
            sample.fillna(0, inplace=True)
            sample = shuffle(sample, random_state=runIndex)
            samples.append(sample)
        return samples
    def trainAllAlgorithms(self, samples):
        """Train every algorithm in config['enabled_algorithms'] over all
        samples; returns a dict of per-algorithm metrics/data/metadata."""
        result = {}
        for algorithm in self.config['enabled_algorithms']:
            if algorithm == 'random_forest':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                min_samples_split = self.config[algorithm]['min_samples_split']
                classifier = lambda randomState: RandomForestClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    min_samples_split=min_samples_split,
                    random_state=randomState,
                    n_jobs=-1
                )
            elif algorithm == 'gradient_boost':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                learning_rate = self.config[algorithm]['learning_rate']
                classifier = lambda randomState: GradientBoostingClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    learning_rate=learning_rate,
                    random_state=randomState
                )
            elif algorithm == 'decision_tree':
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                # NOTE(review): unlike the other factories, randomState is
                # ignored here (no random_state=) — confirm intent.
                classifier = lambda randomState: DecisionTreeClassifier(
                    max_depth=max_depth,
                    max_features=max_features
                )
            else:
                raise Exception('enabled algorithm: {} doesn\'t exist.'.format(algorithm))
            result[algorithm] = {}
            x_tests, y_tests = self.trainModel(samples, classifier, algorithm)
            result[algorithm]['metrics'] = self.config[algorithm]
            evaluation_dataframe = pd.concat([self.__getConfusionMatices(y_tests), self.__getAccuracies(y_tests)], axis=1, sort=False)
            result[algorithm]['data'] = evaluation_dataframe.to_dict()
            result[algorithm]['metadata'] = self.__getIterationMetadata(evaluation_dataframe)
        return result
    def trainModel(self, samples, get_classifier, algorithm):
        """Train/evaluate one freshly built classifier per sample; returns
        (x_tests, y_tests), each y_test carrying run number, prediction and
        a per-row correctness flag."""
        x_tests = []
        y_tests = []
        for runIndex, sample in enumerate(samples):
            classifier = get_classifier(runIndex)
            train, test = train_test_split(sample, random_state=runIndex)
            # Optional 5-fold CV, skipped when config['skip_cross_val'] is set.
            if 'skip_cross_val' not in self.config or not self.config['skip_cross_val']:
                # Compute cross validation (5-fold)
                scores = self.__cross_val_score(classifier, train, cv=5)
                print(scores)
                print('Avg. CV Score | {} Run {}: {:.2f}'.format(algorithm, runIndex, round(sum(scores)/len(scores), 4)))
            # Select all attributes
            x_test = test.drop(['Y'], axis=1)
            x_train = train.drop(['Y'], axis=1)
            # Only select the response result attributes
            y_test = test[['Y']].copy()
            y_train = train[['Y']]
            # Train the model on the training sets
            classifier = classifier.fit(x_train, y_train['Y'])
            # print the max_depths of all classifiers in a Random Forest
            # (dt_max_depth is defined elsewhere in this class)
            if algorithm == 'random_forest':
                print('Random Forest Depts:', [self.dt_max_depth(t.tree_) for t in classifier.estimators_])
            # Create a file displaying the tree
            if 'draw_tree' in self.config and self.config['draw_tree'] and algorithm == 'decision_tree' and runIndex == 0:
                tree.export_graphviz(classifier, out_file='tree.dot', feature_names=x_train.columns)
            # Predict on the test sets
            prediction = classifier.predict(x_test)
            # Add run number to df
            y_test['run'] = runIndex
            x_test['run'] = runIndex
            # add prediction to df
            y_test['prediction'] = prediction
            # add result of run to df
            y_test['correct'] = y_test['prediction'] == y_test['Y']
            # add run to run arrays
            x_tests.append(x_test)
            y_tests.append(y_test)
        return x_tests, y_tests
    def queryData(self):
        # Fetch and cache positives/negatives; no-op when already cached.
        if not hasattr(self, 'positives') or not hasattr(self, 'negatives'):
            self.positives = self.__runSql(True)
            self.negatives = self.__runSql(False)
            logger.info('sql done')
        return self.positives, self.negatives
def __runSql(self, response):
    """Fetch award records for (response=True) or excluding (response=False)
    the configured bidder `self.anbieter`.

    :param response: True -> rows where the bidder won; False -> all others
    :return: DataFrame of `self.select` columns read via the module's engine
    """
    # The only difference between the two queries is the comparison operator.
    resp = '='
    if (not response):
        resp = '!='
    # NOTE(review): self.anbieter is interpolated into the SQL string; safe
    # only because it comes from internal config, not user input.
    query = """SELECT {} from beruecksichtigteanbieter_zuschlag
        JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer
        JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id
        JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id
        JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id
        JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id
        JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer
        WHERE anbieter.institution {} "{}"
        ORDER BY ausschreibung.meldungsnummer;
    """.format(self.select, resp, self.anbieter)
    return pd.read_sql(query, engine)
def prepareUnfilteredRun(self, positive_sample, negative_samples):
    """Merge the positive sample with each negative sample.

    :param positive_sample: DataFrame of positive responses
    :param negative_samples: iterable of negative-response DataFrames
    :return: list of merged DataFrames, one per negative sample
    """
    merged_samples_for_names = []
    for negative_sample in negative_samples:
        # pd.concat replaces DataFrame.append (removed in pandas 2.0);
        # ignore_index renumbers rows of the combined frame, and concat
        # already returns a new frame, so no extra .copy() is needed.
        merged = pd.concat([positive_sample, negative_sample], ignore_index=True)
        merged_samples_for_names.append(merged)
    return merged_samples_for_names
def __getAccuracies(self, dfys):
    """Compute accuracy, MCC and false-negative rate for each run.

    :param dfys: per-run DataFrames with 'Y' (truth) and 'prediction' columns
    :return: DataFrame with one row per run; accuracy/fn_rate scaled to %
    """
    res = pd.DataFrame(columns=['accuracy', 'MCC', 'fn_rate'])
    for dfy in dfys:
        acc = round(accuracy_score(dfy.Y, dfy.prediction), 4)
        # f1 = round(f1_score(dfy.Y, dfy.prediction), 4)
        mcc = matthews_corrcoef(dfy.Y, dfy.prediction)
        matrix = confusion_matrix(dfy.Y, dfy.prediction)
        # fn / (tp + fn): share of actual positives predicted negative.
        # Raises ZeroDivisionError-like NaN only if there are no positives.
        fnr = round(matrix[1][0] / (matrix[1][1] + matrix[1][0]), 4)
        # add row to end of df, *100 for better % readability
        res.loc[len(res)] = [ acc*100, mcc, fnr*100 ]
    return res
def __getConfusionMatices(self, dfys):
    """Collect confusion-matrix cells (tn, tp, fp, fn) for each run.

    NOTE(review): the name is missing an 'r' ("Matrices") but is kept
    because it is called elsewhere in this class.

    :param dfys: per-run DataFrames with 'Y' and 'prediction' columns
    :return: DataFrame with one row of matrix cells per run
    """
    res = pd.DataFrame(columns=['tn', 'tp', 'fp', 'fn'])
    for dfy in dfys:
        # ConfusionMatrix legende:
        # [tn, fp]
        # [fn, tp]
        matrix = confusion_matrix(dfy.Y, dfy.prediction)
        res.loc[len(res)] = [ matrix[0][0], matrix[1][1], matrix[0][1], matrix[1][0] ]
    # res.loc['sum'] = res.sum() # Summarize each column
    return res
def __getIterationMetadata(self, df):
    """Summarise the per-run evaluation frame into aggregate statistics.

    For each metric column a mean/median/min/max and 25%/75% quantiles are
    produced under a short prefix, plus the mean total sample size derived
    from the confusion-matrix cell counts.

    :param df: DataFrame with columns accuracy, MCC, fn_rate, tp, tn, fp, fn
    :return: flat dict of aggregate statistics
    """
    res = {}
    # Same statistics for every metric column; prefix keeps the flat keys.
    for column, prefix in (('accuracy', 'acc'), ('MCC', 'mcc'), ('fn_rate', 'fn_rate')):
        series = df[column]
        res[prefix + '_mean'] = series.mean()
        res[prefix + '_median'] = series.median()
        res[prefix + '_min'] = series.min()
        res[prefix + '_max'] = series.max()
        res[prefix + '_quantile_25'] = series.quantile(q=.25)
        res[prefix + '_quantile_75'] = series.quantile(q=.75)
    # tp+tn+fp+fn per run is the size of that run's test set.
    res['sample_size_mean'] = (df['fp'] + df['fn'] + df['tn'] + df['tp']).mean()
    return res
def __cross_val_score(self, clf, sample, cv):
    """Approximate cross-validation via *cv* repeated shuffled splits.

    NOTE(review): this is not true k-fold cross-validation — each round
    draws an independent train_test_split with a different seed, so test
    sets may overlap between rounds. Confirm whether that is intended.

    :param clf: sklearn estimator (refitted in place each round)
    :param sample: DataFrame with a 'Y' response column
    :param cv: number of rounds
    :return: list of per-round accuracy scores
    """
    cross_val_scores = []
    for validation_run_index in range(cv):
        train, test = train_test_split(sample, random_state=validation_run_index)
        # Select all attributes but meldungsnummer
        xtest = test.drop(['Y'], axis=1)
        xtrain = train.drop(['Y'], axis=1)
        # Only select the response result attributes
        ytest = test[['Y']]
        ytrain = train[['Y']]
        clf = clf.fit(xtrain, ytrain['Y'])
        prediction = clf.predict(xtest)
        cross_val_scores.append(accuracy_score(ytest, prediction))
    return cross_val_scores
def prepare_data(self):
    """Label, merge and preprocess the cached positive/negative result sets.

    :return: (positives, negatives, duplicates) — preprocessed frames plus
             the rows flagged as duplicated meldungsnummer entries
    """
    filter_attributes = ['meldungsnummer'] + self.attributes
    # filter only specified attributes; copies so labels don't leak back
    positives = self.positives[filter_attributes].copy()
    negatives = self.negatives[filter_attributes].copy()
    positives['Y'] = 1
    negatives['Y'] = 0
    # pd.concat replaces DataFrame.append (removed in pandas 2.0).
    merged = pd.concat([positives, negatives], ignore_index=True)
    if hasattr(self, 'cleanData'):
        positives = self.cleanData(positives, self.attributes)
        negatives = self.cleanData(negatives, self.attributes)
        # BUG FIX: this branch previously left `duplicates` undefined, so the
        # final return raised NameError; report an empty frame instead.
        duplicates = pd.DataFrame()
    else:
        # positives = self.preprocess_data(positives, self.attributes)
        # negatives = self.preprocess_data(negatives, self.attributes)
        merged, duplicates = self.preprocess_data(merged, self.attributes)
        positives = merged[merged['Y'] == 1]
        negatives = merged[merged['Y'] == 0]
    return positives, negatives, duplicates
def preprocess_data(self, df, filters):
    """Encode the raw attribute columns into a numeric feature frame.

    CPV codes and postal codes are expanded into hierarchical one-hot
    columns; yes/no style columns are unified to 0/1; categorical columns
    are one-hot encoded. Finally duplicated meldungsnummer rows are split
    off and the identifier column is dropped.

    :param df: merged raw frame including a 'meldungsnummer' column
    :param filters: attribute names that are present and need encoding
    :return: (encoded df without meldungsnummer, duplicates frame)
    """
    df = df.copy()
    # drop duplicates before starting to preprocess
    df = df.drop_duplicates()
    if 'ausschreibung_cpv' in filters:
        # CPV codes are hierarchical 8-digit codes; integer division peels
        # off the division/group/class/category levels.
        split = {
            'division': lambda x: math.floor(x/1000000),
            'group': lambda x: math.floor(x/100000),
            'class': lambda x: math.floor(x/10000),
            'category': lambda x: math.floor(x/1000)
        }
        for key, applyFun in split.items():
            df['cpv_' + key ] = df['ausschreibung_cpv'].apply(applyFun)
        tmpdf = {}
        # One-hot encode each level, collapsing multiple CPV rows per
        # meldungsnummer into a single row via groupby(...).max().
        for key in split.keys():
            key = 'cpv_' + key
            tmpdf[key] = df[['meldungsnummer']].join(pd.get_dummies(df[key], prefix=key)).groupby('meldungsnummer').max()
        encoded_df = pd.concat([tmpdf['cpv_'+ key] for key in split.keys()], axis=1)
        # Drop the intermediate level columns and the raw code, then re-join
        # the aggregated one-hot columns by meldungsnummer.
        df = df.drop(['cpv_' + key for key, fun in split.items()], axis=1)
        df = df.drop(['ausschreibung_cpv'], axis=1)
        df = df.drop_duplicates()
        df = df.join(encoded_df, on='meldungsnummer')
    if 'gatt_wto' in filters:
        df[['gatt_wto']] = df[['gatt_wto']].applymap(ModelTrainer.unifyYesNo)
    if 'anzahl_angebote' in filters:
        df[['anzahl_angebote']] = df[['anzahl_angebote']].applymap(ModelTrainer.tonumeric)
    if 'teilangebote' in filters:
        df[['teilangebote']] = df[['teilangebote']].applymap(ModelTrainer.unifyYesNo)
    if 'lose' in filters:
        df[['lose']] = df[['lose']].applymap(ModelTrainer.unifyYesNoOrInt)
    if 'varianten' in filters:
        df[['varianten']] = df[['varianten']].applymap(ModelTrainer.unifyYesNo)
    if 'auftragsart_art' in filters:
        auftrags_art_df = pd.get_dummies(df['auftragsart_art'], prefix='aftrgsrt', dummy_na=True)
        df = pd.concat([df,auftrags_art_df],axis=1).drop(['auftragsart_art'], axis=1)
    if 'sprache' in filters:
        sprache_df = pd.get_dummies(df['sprache'], prefix='lang', dummy_na=True)
        df = pd.concat([df,sprache_df],axis=1).drop(['sprache'], axis=1)
    if 'auftragsart' in filters:
        auftragsart_df = pd.get_dummies(df['auftragsart'], prefix='auftr', dummy_na=True)
        df = pd.concat([df,auftragsart_df],axis=1).drop(['auftragsart'], axis=1)
    if 'beschaffungsstelle_plz' in filters:
        # plz_df = pd.get_dummies(df['beschaffungsstelle_plz'], prefix='beschaffung_plz', dummy_na=True)
        # df = pd.concat([df,plz_df],axis=1).drop(['beschaffungsstelle_plz'], axis=1)
        # Normalise free-text postal codes to a 4-digit int (or NaN) first.
        df['beschaffungsstelle_plz'] = df['beschaffungsstelle_plz'].apply(ModelTrainer.transformToSingleInt)
        # Swiss PLZ hierarchy: first digit block = district, two = area.
        split = {
            'district': lambda x: math.floor(x/1000) if not math.isnan(x) else x,
            'area': lambda x: math.floor(x/100) if not math.isnan(x) else x,
        }
        prefix = 'b_plz_'
        for key, applyFun in split.items():
            df[prefix + key] = df['beschaffungsstelle_plz'].apply(applyFun)
        df.rename(columns={'beschaffungsstelle_plz': prefix + 'ganz'}, inplace=True)
        # One-hot encode the full code plus each hierarchy level.
        for key in ['ganz'] + list(split.keys()):
            key = prefix + key
            df = pd.concat([df, pd.get_dummies(df[key], prefix=key, dummy_na=True)], axis=1).drop(key, axis=1)
    df.drop_duplicates(inplace=True)
    if any(df.duplicated(['meldungsnummer'])):
        logger.warning("duplicated meldungsnummer")
    # Duplicated ids are returned for inspection; the id itself is not a
    # feature, so it is dropped from the training frame.
    duplicates = df[df.duplicated(['meldungsnummer'])]
    df = df.drop(['meldungsnummer'], axis=1)
    return df, duplicates
def dt_max_depth(self, tree):
    """Return the depth (in nodes, root counts as 1) of a fitted sklearn
    tree structure (an estimator's `tree_` attribute).
    """
    n_nodes = tree.node_count  # read for parity with callers; not used below
    left = tree.children_left
    right = tree.children_right

    def depth_of(node_id):
        # In sklearn's tree arrays a leaf has identical (sentinel) child ids.
        if left[node_id] == right[node_id]:
            return 1
        return 1 + max(depth_of(left[node_id]), depth_of(right[node_id]))

    return depth_of(0)
# @param val: a value to be casted to numeric
# @return the value cast to int, or 0 if the cast was not possible
def tonumeric(val):
    """Cast *val* to int, returning 0 when the cast is impossible.

    NOTE(review): unlike the sibling helpers this is not decorated with
    @staticmethod; callers invoke it as ModelTrainer.tonumeric, which works
    as a plain function in Python 3.
    """
    try:
        return int(val)
    except (ValueError, TypeError):
        # BUG FIX: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt. int() raises ValueError for unparsable strings
        # and TypeError for non-numeric objects (e.g. None).
        return 0
# @param val: a string value to be categorised
# @return 1 for a recognised "yes" word, 0 for a "no" word or anything unknown
@staticmethod
def unifyYesNo(val):
    """Map multilingual yes/no strings onto 1/0; unknown values become 0."""
    # German/Italian/French/English spellings observed in the Simap data.
    switcher = {
        'Ja': 1,
        'Sì': 1,
        'Oui': 1,
        'YES': 1,
        'Nein': 0,
        'Nei': 0,
        'Non': 0,
        'NO': 0,
    }
    # Default 0: unrecognised answers are treated as "no".
    return switcher.get(val, 0)
@staticmethod
def unifyYesNoOrInt(val):
    """Cast to int when possible; otherwise map yes/no words to 1/0."""
    try:
        return int(val)
    except ValueError:
        # Non-numeric answers like 'Ja'/'Non' fall back to the word mapping.
        return ModelTrainer.unifyYesNo(val)
@staticmethod
def transformToSingleInt(plz):
try:
result = int(plz)
except ValueError:
try:
result = int(re.search(r"\d{4}", plz).group())
except AttributeError:
return np.nan
return result if result >= 1000 and result <= 9999 else np.nan
| {"/runOldIterations.py": ["/train.py", "/collection.py"], "/train.py": ["/db.py"], "/learn.py": ["/db.py"], "/helpers.py": ["/db.py"], "/runIterations.py": ["/learn.py", "/collection.py"]} |
595 | digital-sustainability/swiss-procurement-classifier | refs/heads/master | /helpers.py | from db import connection, engine
import math
import pandas as pd
import numpy as np
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, auc
# =====================
# SQL SELECT STATEMENTS
# =====================
# @param select: SELECT argument formatted as string
# @return a Pandas dataframe from the full Simap database depending on the SQL SELECT Query
def getFromSimap(select):
    """Run a SELECT over the fully-joined Simap award schema.

    :param select: SELECT clause (column list) as a string
    :return: DataFrame of the query result
    """
    query = """SELECT {} from (((((beruecksichtigteanbieter_zuschlag
        INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer)
        INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id)
        INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id)
        INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id)
        INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id
        INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer)
        INNER JOIN cpv ON cpv_dokument.cpv_nummer = cpv.cpv_nummer;
    """.format(select)
    return pd.read_sql(query, connection);
# @param bidder: anbieter.institution name formatted as string
# @return a Pandas dataframe showing the most important CPV codes per bidder. (Zuschläge pro CPV Code)
def getCpvCount(bidder):
    """Count awards per CPV code for one bidder, most frequent first.

    :param bidder: anbieter.institution name as a string
    :return: DataFrame of (cpv_nummer, cpv_deutsch, count)
    """
    # NOTE(review): the bidder name is interpolated into the SQL string —
    # only safe for trusted, internally-configured names.
    query = """SELECT cpv.cpv_nummer, cpv.cpv_deutsch, COUNT(cpv_dokument.cpv_nummer)
        FROM cpv, cpv_dokument, zuschlag, beruecksichtigteanbieter_zuschlag, anbieter WHERE
        cpv.cpv_nummer = cpv_dokument.cpv_nummer AND
        cpv_dokument.meldungsnummer = zuschlag.meldungsnummer AND
        zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer AND
        beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id AND
        anbieter.institution = "{}"
        GROUP BY cpv_nummer
        ORDER BY COUNT(cpv_dokument.cpv_nummer) DESC;
    """.format(bidder)
    return pd.read_sql(query, connection);
# @param bidder: anbieter.institution formatted as string of which you want to see the CPV code diversity
# @return a Pandas Dataframe that contains a the diversity of CPV codes per bidder
def getCpvDiversity(bidder):
    """Summarise a bidder's award volume and CPV-code diversity.

    :param bidder: anbieter.institution name as a string
    :return: single-row DataFrame with award count, unique CPV codes,
             approximate award volume and the first/last award dates
    """
    query = """SELECT anbieter.institution, COUNT(beruecksichtigteanbieter_zuschlag.anbieter_id)
        AS "Anzahl Zuschläge", COUNT(DISTINCT cpv_dokument.cpv_nummer) AS "Anzahl einzigartige CPV-Codes",
        SUM(IF(beruecksichtigteanbieter_zuschlag.preis_summieren = 1,beruecksichtigteanbieter_zuschlag.preis,0))
        AS "Ungefähres Zuschlagsvolumen", MIN(zuschlag.datum_publikation) AS "Von", MAX(zuschlag.datum_publikation) AS "Bis"
        FROM cpv, cpv_dokument, zuschlag, beruecksichtigteanbieter_zuschlag, anbieter
        WHERE cpv.cpv_nummer = cpv_dokument.cpv_nummer AND
        cpv_dokument.meldungsnummer = zuschlag.meldungsnummer AND
        zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer AND
        beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id
        AND anbieter.institution="{}"
        GROUP BY anbieter.institution
        ORDER BY `Anzahl einzigartige CPV-Codes` DESC
    """.format(bidder)
    return pd.read_sql(query, connection);
# @param select_anbieter: SQL SELECT for the bidder side. Backup:
'''
select_an = (
"anbieter.anbieter_id, "
"anbieter.anbieter_plz, "
"anbieter.institution as anbieter_insitution, "
"cpv_dokument.cpv_nummer as anbieter_cpv, "
"ausschreibung.meldungsnummer" )
'''
# @param select_aus: SQL SELECT for the open tenders. Backup:
'''
select_aus = (
"anbieter.anbieter_id, "
"auftraggeber.institution as beschaffungsstelle_institution, "
"auftraggeber.beschaffungsstelle_plz, "
"ausschreibung.gatt_wto, "
"cpv_dokument.cpv_nummer as ausschreibung_cpv, "
"ausschreibung.meldungsnummer" )
'''
# @param bidder: the bidder formatted as string you or do not want the corresponding responses from
# @param response: True if you want all the tenders of the bidder or False if you do not want any (the negative response)
# @return a dataframe containing negative or positive bidding cases of a chosen bidder
def getResponses(select_anbieter, select_ausschreibung, bidder, response):
    """Join bidder-side and tender-side subqueries for one bidder.

    :param select_anbieter: SELECT clause for the bidder subquery
    :param select_ausschreibung: SELECT clause for the tender subquery
    :param bidder: anbieter.institution name
    :param response: True -> rows for this bidder; False -> all other bidders
    :return: DataFrame of the joined result

    NOTE(review): the join condition uses `ausschreibung.meldungsnummer2`,
    so select_ausschreibung must alias its meldungsnummer as meldungsnummer2;
    the example select_aus shown above exposes plain `meldungsnummer` —
    confirm against the actual callers.
    """
    # Only the comparison operator differs between positive/negative pulls.
    resp = '=';
    if (not response):
        resp = '!='
    query = """SELECT * FROM (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag
        INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer)
        INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id)
        INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id)
        INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id)
        INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id)
        INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer)
        WHERE anbieter.institution {} "{}" ) anbieter
        JOIN (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag
        INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer)
        INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id)
        INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id)
        INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id)
        INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id)
        INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer)
        WHERE anbieter.institution {} "{}"
        ) ausschreibung ON ausschreibung.meldungsnummer2 = anbieter.meldungsnummer
        ORDER BY ausschreibung.meldungsnummer2;
    """.format(select_anbieter, resp, bidder, select_ausschreibung, resp, bidder)
    return pd.read_sql(query, connection);
# @return the complete CPV code register as a DataFrame
def getCpvRegister():
    """Fetch the full `cpv` lookup table."""
    return pd.read_sql("SELECT * FROM cpv", connection);
# @param select_an: SELECT clause for the bidder-side subquery
# @param select_aus: SELECT clause for the tender-side subquery
# @param anbieter: bidder institution name as a string
# @return copies of the positive- and negative-response dataframes
def createAnbieterDf(select_an, select_aus, anbieter):
    """Fetch positive and negative response frames for one bidder.

    Returns independent copies so callers can mutate them freely.
    """
    positives = getResponses(select_an, select_aus, anbieter, True).copy()
    negatives = getResponses(select_an, select_aus, anbieter, False).copy()
    return positives, negatives
# ========================
# MODEL CREATION FUNCTIONS
# ========================
# @param df_pos_full
# @param df_neg_full
# @param negSampleSize
# @return
def decisionTreeRun(df_pos_full, df_neg_full , neg_sample_size):
    """Train and score one decision tree on a pos/neg sample.

    :param df_pos_full: all positive responses
    :param df_neg_full: all negative responses (sampled down below)
    :param neg_sample_size: how many negative rows to draw
    :return: (pos/neg size ratio, accuracy, confusion matrix)
    """
    df_pos = df_pos_full
    # NOTE(review): df_pos aliases the caller's frame, so assigning 'Y'
    # below mutates df_pos_full in place — confirm callers expect that.
    # Create a random DF subset used to train the model on
    df_neg = df_neg_full.sample(neg_sample_size)
    # Assign pos/neg labels to both DFs
    df_pos['Y']=1
    df_neg['Y']=0
    # Merge the DFs into one
    df_appended = df_pos.append(df_neg, ignore_index=True)
    # Clean PLZ property
    df_appended[['anbieter_plz']] = df_appended[['anbieter_plz']].applymap(tonumeric)
    df_appended[['beschaffungsstelle_plz']] = df_appended[['beschaffungsstelle_plz']].applymap(tonumeric)
    # Shuffle the df
    df_tree = df_appended.sample(frac=1)
    # Put responses in one array and all desired properties in another
    # NOTE(review): positional column indices assume the exact layout of the
    # select_an/select_aus queries documented above — verify before reuse.
    y = df_tree.iloc[:,[11]]
    x = df_tree.iloc[:,[1,3,7,9]]
    # create sets
    xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25)
    # train the model on training sets
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(xtrain, ytrain)
    # predict on the test sets
    res = clf.predict(xtest)
    ytest["res"]= res
    ytest['richtig'] = ytest['res']==ytest['Y']
    # Confusion-matrix slices; currently computed but not returned.
    tp = ytest[(ytest['Y']==1) & (ytest['res']==1)]
    tn = ytest[(ytest['Y']==0) & (ytest['res']==0)]
    fp = ytest[(ytest['Y']==0) & (ytest['res']==1)]
    fn = ytest[(ytest['Y']==1) & (ytest['res']==0)]
    return len(df_pos.index) / neg_sample_size, accuracy_score(ytest.Y, res), confusion_matrix(ytest.Y, res);
# @param full_neg: dataframe containing all negative responses for that bidder
# @param df_pos_size: amount of data in the positive dataframe
# @param amount_neg_df: how many response_negative dataframes the function will produce
# @param pos_neg_ratio: what the ratio of positive to negative responses will be
# @return a list of negative response dataframes, each considered for one run
def createNegativeResponses(full_neg, pos_df_size, amount_neg_df, pos_neg_ratio):
all_negatives = [];
sample_size = math.ceil(pos_df_size * (pos_neg_ratio + 1));
for count in range(amount_neg_df):
all_negatives.append(full_neg.sample(sample_size, random_state=count));
return all_negatives;
# =======================
# DATA CLEANING FUNCTIONS
# =======================
# @param val: a value to be casted to numeric
# @return the value cast to int, or 0 if the cast was not possible
def tonumeric(val):
    """Cast *val* to int, returning 0 when the cast is impossible."""
    try:
        return int(val)
    except (ValueError, TypeError):
        # BUG FIX: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt. int() raises ValueError for unparsable strings
        # and TypeError for non-numeric objects (e.g. None).
        return 0
# @param val: a string value to be categorised
# @return 1 for a recognised "yes" word, 0 for a "no" word or anything unknown
def unifyYesNo(val):
    """Map German/Italian/French yes/no words to 1/0; unknown values -> 0."""
    if val in ('Ja', 'Sì', 'Oui'):
        return 1
    # Explicit "no" words and everything unrecognised both map to 0.
    return 0
# TODO: Kategorien mit Matthias absprechen
# @param v: the price of a procurement
# @return map prices to 16 categories
def createPriceCategory(val):
    """Map a procurement price to an ordinal category.

    :param val: a price (int-castable value)
    :return: 0 for exactly 0, 1-14 for increasing price bands, and -1 for
             negative values or values that cannot be cast to int
    """
    import bisect
    try:
        val = int(val)
    except (ValueError, TypeError):
        # Unparsable prices (and None) are the "unknown" category.
        return -1
    if val < 0:
        return -1
    if val == 0:
        return 0
    # Upper bounds (inclusive) of categories 1..13; bisect_left maps a price
    # to the first band whose bound is >= the price, and anything above the
    # last bound to category 14. Replaces the original 15-branch if-chain.
    bounds = [100000, 250000, 500000, 750000, 1000000,
              2500000, 5000000, 10000000, 25000000, 50000000,
              100000000, 200000000, 500000000]
    return bisect.bisect_left(bounds, val) + 1
| {"/runOldIterations.py": ["/train.py", "/collection.py"], "/train.py": ["/db.py"], "/learn.py": ["/db.py"], "/helpers.py": ["/db.py"], "/runIterations.py": ["/learn.py", "/collection.py"]} |
596 | digital-sustainability/swiss-procurement-classifier | refs/heads/master | /collection.py | import json
import pandas as pd
import warnings
class Collection():
    """In-memory list of iteration-result dicts with tabulation and
    JSON import/export helpers."""

    # Algorithms whose per-iteration metadata can be tabulated.
    algorithms = ['gradient_boost', 'decision_tree', 'random_forest']

    def __init__(self):
        self.list = []

    def append(self, item):
        """Add one iteration result (a JSON-serialisable dict)."""
        self.list.append(item)

    def __iter__(self):
        return iter(self.list)

    def get_all_as_df(self, algorithm):
        """Return per-iteration metadata for *algorithm* as a DataFrame
        indexed by each iteration's 'anbieter'; warn (and return None)
        when the key is missing."""
        try:
            rows = [iteration[algorithm]['metadata'] for iteration in self.list]
            index = [iteration['anbieter'] for iteration in self.list]
            return pd.DataFrame(rows, index=index)
        except KeyError:
            # BUG FIX: previously a bare `except:` swallowed *every* error
            # (including typos inside this method); only a missing dict key
            # indicates a wrong algorithm name / result shape.
            warnings.warn('Select an algorithm: "random_forest", "gradient_boost" or "decision_tree"')

    def df_row_per_algorithm(self):
        """Flatten results to one DataFrame row per (iteration, algorithm).

        NOTE(review): this mutates the stored metadata dicts in place
        (adds *_std / anbieter / label / algorithm / attributes keys),
        which is visible to later to_file()/to_json() calls.
        """
        rows = []
        for iteration in self.list:
            for algorithm in self.algorithms:
                output = iteration[algorithm]['metadata']
                evaluation_dataframe = pd.DataFrame.from_dict(iteration[algorithm]['data'])
                # Std-deviation metrics are not stored; derive them here.
                output['acc_std'] = evaluation_dataframe['accuracy'].std()
                # Scale MCC to % so its std is comparable to the others.
                evaluation_dataframe['MCC'] = evaluation_dataframe['MCC'] * 100
                output['mcc_std'] = evaluation_dataframe['MCC'].std()
                output['fn_std'] = evaluation_dataframe['fn_rate'].std()
                output['anbieter'] = iteration['anbieter']
                output['label'] = iteration['label']
                output['algorithm'] = algorithm
                output['attributes'] = ",".join(iteration['attributes'])
                rows.append(output)
        return pd.DataFrame(rows)

    def to_json(self, **kwargs):
        """Serialise the collection with json.dumps (kwargs forwarded)."""
        return json.dumps(self.list, **kwargs)

    def to_file(self, filename):
        """Write the collection to *filename* as pretty-printed JSON."""
        with open(filename, 'w') as fp:
            json.dump(self.list, fp, indent=4, sort_keys=True)

    def import_file(self, filename, force=False):
        """Load the collection from a JSON file; refuse to clobber a
        non-empty collection unless force=True."""
        if len(self.list) and not force:
            warnings.warn("Loaded Collection, pls add force=True")
        else:
            with open(filename, 'r') as fp:
                self.list = json.load(fp)
| {"/runOldIterations.py": ["/train.py", "/collection.py"], "/train.py": ["/db.py"], "/learn.py": ["/db.py"], "/helpers.py": ["/db.py"], "/runIterations.py": ["/learn.py", "/collection.py"]} |
597 | digital-sustainability/swiss-procurement-classifier | refs/heads/master | /db.py | import configparser
import sqlalchemy
# git update-index --skip-worktree config.ini
# Read database credentials from the local, git-ignored config.ini.
config = configparser.ConfigParser()
config.read("config.ini")
# SQLAlchemy URL: mysql+<connector>://<user>:<password>@<host>/<database>
connection_string = 'mysql+' + config['database']['connector'] + '://' + config['database']['user'] + ':' + config['database']['password'] + '@' + config['database']['host'] + '/' + config['database']['database']
if __name__ == "__main__":
    # Run directly: dump the configuration for debugging (includes password!).
    for item, element in config['database'].items():
        print('%s: %s' % (item, element))
    print(connection_string)
else:
    # Imported as a module: expose a shared engine and open connection.
    engine = sqlalchemy.create_engine(connection_string)
    connection = engine.connect()
| {"/runOldIterations.py": ["/train.py", "/collection.py"], "/train.py": ["/db.py"], "/learn.py": ["/db.py"], "/helpers.py": ["/db.py"], "/runIterations.py": ["/learn.py", "/collection.py"]} |
598 | digital-sustainability/swiss-procurement-classifier | refs/heads/master | /runIterations.py | from learn import ModelTrainer
from collection import Collection
import pandas as pd
import logging
import traceback
import os
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# === THESIS ===
# Sector label -> bidders evaluated for the thesis experiments.
anbieter_config = {
    'Construction': [
        'Alpiq AG',
        'KIBAG',
        'Egli AG',
    ],
    'IT': [
        'Swisscom',
        'ELCA Informatik AG',
        'Unisys',
    ],
    'Other': [
        'Kummler + Matter AG',
        'Thermo Fisher Scientific (Schweiz) AG',
        'AXA Versicherung AG',
    ],
    'Diverse': [
        'Siemens AG',
        'ABB',
        'Basler & Hofmann West AG',
    ]
}
# === TESTING ===
#anbieter = 'Marti AG' #456
#anbieter = 'Axpo AG' #40
#anbieter = 'Hewlett-Packard' #90
#anbieter = 'BG Ingénieurs Conseils' SA #116
#anbieter = 'Pricewaterhousecoopers' #42
#anbieter = 'Helbling Beratung + Bauplanung AG' #20
#anbieter = 'Ofrex SA' #52
#anbieter = 'PENTAG Informatik AG' #10
#anbieter = 'Wicki Forst AG' #12
#anbieter = 'T-Systems Schweiz' #18
#anbieter = 'Bafilco AG' #20
#anbieter = '4Video-Production GmbH' #3
#anbieter = 'Widmer Ingenieure AG' #6
#anbieter = 'hmb partners AG' #2
#anbieter = 'Planmeca' #4
#anbieter = 'K & M Installationen AG' #4
# SQL SELECT column list handed to ModelTrainer (one string, note the
# trailing comma+space inside each fragment).
select = (
    "ausschreibung.meldungsnummer, "
    "anbieter.institution as anbieter_institution, "
    "auftraggeber.beschaffungsstelle_plz, "
    "ausschreibung.gatt_wto, "
    "ausschreibung.sprache, "
    "ausschreibung.auftragsart, "
    "ausschreibung.auftragsart_art, "
    "ausschreibung.lose, "
    "ausschreibung.teilangebote, "
    "ausschreibung.varianten, "
    "ausschreibung.bietergemeinschaft, "
    "cpv_dokument.cpv_nummer as ausschreibung_cpv"
)
# Feature columns the trainer should encode; order matters for run(),
# which evaluates growing prefixes of this list.
attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache']
#attributes = ['auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'ausschreibung_cpv', 'gatt_wto','teilangebote', 'sprache']
#attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache']
# attributes = [
# [ 'ausschreibung_cpv', 'auftragsart_art' ],
# [ 'ausschreibung_cpv', 'beschaffungsstelle_plz' ],
# [ 'ausschreibung_cpv', 'auftragsart' ],
# [ 'ausschreibung_cpv', 'gatt_wto' ],
# [ 'ausschreibung_cpv', 'lose' ],
# [ 'ausschreibung_cpv', 'teilangebote' ],
# [ 'ausschreibung_cpv', 'varianten' ],
# [ 'ausschreibung_cpv', 'sprache' ]
# ]
# Training configuration passed straight into ModelTrainer.
config = {
    # ratio that the positive and negative responses have to each other
    'positive_to_negative_ratio': 0.5,
    # Percentage of training set that is used for testing (Recommendation of at least 25%)
    'test_size': 0.25,
    # Number of independent train/evaluate runs per (bidder, attributes) pair.
    'runs': 100,
    #'enabled_algorithms': ['random_forest'],
    'enabled_algorithms': ['random_forest', 'decision_tree', 'gradient_boost'],
    'random_forest': {
        # Tune Random Forest Parameter
        'n_estimators': 100,
        'max_features': 'sqrt',
        'max_depth': None,
        'min_samples_split': 4
    },
    'decision_tree': {
        'max_depth': 30,
        'max_features': 'sqrt',
        'min_samples_split': 4
    },
    'gradient_boost': {
        'n_estimators': 100,
        'learning_rate': 0.1,
        'max_depth': 30,
        'min_samples_split': 4,
        'max_features': 'sqrt'
    }
}
class IterationRunner():
    """Drive ModelTrainer across bidders and attribute subsets, appending
    every run's output to a Collection that is persisted after each run."""

    def __init__(self, anbieter_config, select, attributes, config):
        # Mapping of sector label -> list of bidder names to evaluate.
        self.anbieter_config = anbieter_config
        self.select = select
        self.attributes = attributes
        self.config = config
        # One trainer instance is reused; its anbieter/attributes are
        # swapped per run in singleRun().
        self.trainer = ModelTrainer(select, '', config, attributes)
        self.collection = Collection()

    def run(self):
        """Evaluate growing attribute prefixes ([a1], [a1,a2], ...) per bidder."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr_id in range(len(self.attributes)):
                    att_list = self.attributes[:attr_id+1]
                    self.singleRun(anbieter, att_list, label)
                # Drop cached SQL results before the next bidder.
                self.trainer.resetSQLData()

    def runAttributesEachOne(self):
        """Evaluate each attribute in isolation per bidder."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr in self.attributes:
                    att_list = [attr]
                    self.singleRun(anbieter, att_list, label)
                self.trainer.resetSQLData()

    def runAttributesList(self):
        """Evaluate pre-built attribute combinations per bidder.

        NOTE(review): expects self.attributes to be a list of lists —
        confirm callers set it accordingly before invoking this.
        """
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for att_list in self.attributes:
                    self.singleRun(anbieter, att_list, label)
                self.trainer.resetSQLData()

    def runSimpleAttributeList(self):
        """Evaluate the full attribute list once per bidder."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                self.singleRun(anbieter, self.attributes, label)
                self.trainer.resetSQLData()

    def singleRun(self, anbieter, att_list, label):
        """One training run; append the result and persist the collection."""
        logger.info('label: {}, anbieter: {}, attributes: {}'.format(label, anbieter, att_list))
        try:
            self.trainer.attributes = att_list
            self.trainer.anbieter = anbieter
            output = self.trainer.run()
            output['label'] = label
            self.collection.append(output)
            # Persist after every run so a crash loses at most one result.
            filename = os.getenv('DB_FILE', 'dbs/auto.json')
            self.collection.to_file(filename)
        except Exception as e:
            # Deliberate best-effort batch mode: log the failure, keep going.
            traceback.print_exc()
            print(e)
        print('one it done')
# Module-level runner so interactive sessions (notebooks) can reuse it.
runner = IterationRunner(anbieter_config, select, attributes, config)
if __name__ == '__main__':
    # runner.collection.import_file('dbs/auto.json')
    # NOTE(review): runAttributesList expects a list of lists, but the
    # module-level `attributes` is flat — confirm this call sequence.
    runner.run()
    runner.runAttributesEachOne()
    runner.runAttributesList()
    # label, anbieters = next(iter(runner.anbieter_config.items()))
    # print(label)
| {"/runOldIterations.py": ["/train.py", "/collection.py"], "/train.py": ["/db.py"], "/learn.py": ["/db.py"], "/helpers.py": ["/db.py"], "/runIterations.py": ["/learn.py", "/collection.py"]} |
610 | AmosGarner/PyLife | refs/heads/master | /pylife.py | import sys, argparse
import numpy as np
import matplotlib.pyplot as plot
import matplotlib.animation as animation
from helper import *
from displayTextSpawner import displayText
from inputValidator import validateInput
# Module-level simulation state shared with the animation callback.
paused = True   # wait for a keypress before simulating (after first frame)
iteration = 0   # number of animation frames processed so far
def update(frameNumber, image, grid, gridSize):
    """Advance Conway's Game of Life by one animation frame.

    :param frameNumber: frame index supplied by FuncAnimation (unused)
    :param image: matplotlib AxesImage updated in place via set_data
    :param grid: 2-D numpy array of ON/OFF cells, mutated in place
    :param gridSize: edge length of the square grid
    :return: the updated image artist
    """
    newGrid = grid.copy()
    global paused
    global iteration
    if paused is True and iteration > 0:
        # BUG FIX: raw_input() does not exist in Python 3 (the rest of the
        # file uses Python 3 print() calls); block on input() instead.
        input('Press any [Key] to start simulation:')
        image.set_data(newGrid)
        grid[:] = newGrid[:]
        paused = False
    else:
        for index in range(gridSize):
            for subIndex in range(gridSize):
                # Sum the 8 toroidal (wrap-around) neighbours, normalised
                # to a live-neighbour count by dividing by ON.
                total = int((grid[index, (subIndex-1)%gridSize] + grid[index, (subIndex+1)%gridSize] +
                             grid[(index-1)%gridSize, subIndex] + grid[(index+1)%gridSize, subIndex] +
                             grid[(index-1)%gridSize, (subIndex-1)%gridSize] + grid[(index-1)%gridSize, (subIndex+1)%gridSize] +
                             grid[(index+1)%gridSize, (subIndex-1)%gridSize] + grid[(index+1)%gridSize, (subIndex+1)%gridSize])/ON)
                # Skip the rules on the very first frame so the initial
                # pattern is displayed unmodified.
                if iteration > 0:
                    if grid[index, subIndex] == ON:
                        # Under- or over-population kills the cell.
                        if (total < 2) or (total > 3):
                            newGrid[index, subIndex] = OFF
                    else:
                        # Exactly three neighbours births a new cell.
                        if total == 3:
                            newGrid[index, subIndex] = ON
        image.set_data(newGrid)
        grid[:] = newGrid[:]
    iteration += 1
    return image
def main():
    """Parse CLI options, build the initial grid and run the animation."""
    parser = argparse.ArgumentParser(description="Runs Conway's Game of Life simulation.")
    parser.add_argument('--grid-size', dest='gridSize', required=False)
    parser.add_argument('--mov-file', dest='movfile', required=False)
    parser.add_argument('--interval', dest='interval', required=False)
    parser.add_argument('--glider', dest='glider', required=False)
    parser.add_argument('--gosper', dest='gosper', required=False)
    parser.add_argument('--display', dest='displayText', required=False)
    args = parser.parse_args()
    # Grid edge length; values <= 8 are rejected in favour of the default.
    gridSize = 100
    if args.gridSize and int(args.gridSize) > 8:
        gridSize = int(args.gridSize)
    # Animation frame interval in milliseconds.
    updateInterval = 50
    if args.interval:
        updateInterval = int(args.interval)
    # Initial pattern: glider, Gosper gun, rendered text, or random fill.
    grid = np.array([])
    if args.glider:
        grid = np.zeros(gridSize*gridSize).reshape(gridSize, gridSize)
        addGlider(1, 1, grid)
    elif args.gosper:
        grid = np.zeros(gridSize*gridSize).reshape(gridSize, gridSize)
        addGosperGliderGun(10, 10, grid)
    elif args.displayText and validateInput(args.displayText):
        # Two easter-egg keywords, otherwise render the literal text.
        if args.displayText == 'alphanumspec':
            grid = displayText('abcdefghijklmnopqrstuvwxyz_0123456789_', gridSize)
        elif args.displayText == 'david':
            grid = displayText('happy_birthday___david!!!!', gridSize)
        else:
            grid = displayText(args.displayText, gridSize)
    else:
        grid = randomGrid(gridSize)
    fig, ax = plot.subplots()
    img = ax.imshow(grid, interpolation='nearest')
    plot.title("PyLife V1.0")
    ani = animation.FuncAnimation(fig, update, fargs=(img, grid, gridSize),
                                  frames = 10,
                                  interval=updateInterval,
                                  save_count=50)
    # Optionally export the animation as an mp4 (requires ffmpeg).
    if args.movfile:
        ani.save(args.movfile, fps=30, extra_args=['-vcodec', 'libx264'])
    plot.show()
# Script entry point.
if __name__ == '__main__':
    main()
| {"/pylife.py": ["/helper.py", "/displayTextSpawner.py", "/inputValidator.py"]} |
611 | AmosGarner/PyLife | refs/heads/master | /inputValidator.py | from alphaNumLib import *
# Flat lookup of every character PyLife can render (letters, digits, specials).
alphaNumArray = alphaArray + numArray + specialArray
def validateInput(input):
    """Return True when every character of *input* can be rendered.

    NOTE(review): the parameter shadows the builtin `input`; the name is
    kept so callers using the keyword form keep working.
    """
    # The original wrapped the boolean in a redundant if/else; delegate directly.
    return checkInAlphaNumSpec(input)
def checkInAlphaNumSpec(input):
    """Return True iff every lower-cased character is in alphaNumArray."""
    return all(character in alphaNumArray for character in input.lower())
| {"/pylife.py": ["/helper.py", "/displayTextSpawner.py", "/inputValidator.py"]} |
612 | AmosGarner/PyLife | refs/heads/master | /displayTextSpawner.py | import numpy as np
ON = 255   # live cell value (renders white-ish in the default colormap)
OFF = 0    # dead cell value
vals = [ON, OFF]  # the two possible cell states
def displayText(input, gridSize):
    """Render *input* as 5x4 letter stamps on a blank Game-of-Life grid.

    :param input: string of characters supported by spawnValue
    :param gridSize: edge length of the square grid
    :return: grid with the text stamped as ON cells
    """
    grid = generateBlankGroup(gridSize)
    index = 1
    # BUG FIX: use integer division — `gridSize / 2` is a float in Python 3
    # and floats cannot be used as array indices inside spawnValue.
    x = gridSize // 2
    for value in list(input):
        # Debug output of the running column position.
        print(5 * index)
        print(gridSize)
        if 5 * index >= gridSize:
            # First row full: wrap to a second text row 6 cells lower.
            index = 1
            x = gridSize // 2 + 6
        grid = spawnValue(value, x, 5 * index, grid)
        index += 1
    return grid
def spawnValue(char, row, col, grid):
    """Stamp the 5x4 glyph for *char* onto *grid*, centred at (row, col)."""
    # Bug fix: previously an unsupported character (e.g. ',') left `value`
    # unbound and the slice assignment at the bottom raised
    # UnboundLocalError.  Default to a blank glyph so unknown characters
    # render as empty space instead.
    value = np.full((5, 4), OFF)
    # --- letters ---
    if char == 'a':
        value = np.array([[OFF, ON, ON, OFF],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON], ])
    elif char == 'b':
        value = np.array([[ON, ON, ON, OFF],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, OFF], ])
    elif char == 'c':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, OFF],
                          [ON, OFF, OFF, OFF],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, ON], ])
    elif char == 'd':
        value = np.array([[ON, ON, ON, OFF],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, OFF], ])
    elif char == 'e':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, OFF],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, ON], ])
    elif char == 'f':
        # NOTE(review): identical to the 'e' glyph in the original source --
        # possibly unfinished; kept as-is to preserve behavior.
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, OFF],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, ON], ])
    elif char == 'g':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, OFF],
                          [ON, OFF, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON], ])
    elif char == 'h':
        value = np.array([[ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON], ])
    elif char == 'i':
        value = np.array([[ON, ON, ON, ON],
                          [OFF, ON, ON, OFF],
                          [OFF, ON, ON, OFF],
                          [OFF, ON, ON, OFF],
                          [ON, ON, ON, ON], ])
    elif char == 'j':
        value = np.array([[OFF, ON, ON, ON],
                          [OFF, OFF, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON], ])
    elif char == 'k':
        value = np.array([[ON, OFF, OFF, ON],
                          [ON, OFF, ON, OFF],
                          [ON, ON, OFF, OFF],
                          [ON, OFF, ON, OFF],
                          [ON, OFF, OFF, ON], ])
    elif char == 'l':
        value = np.array([[ON, ON, OFF, OFF],
                          [ON, OFF, OFF, OFF],
                          [ON, OFF, OFF, OFF],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON], ])
    elif char == 'm':
        value = np.array([[ON, ON, ON, ON],
                          [ON, ON, ON, ON],
                          [ON, OFF, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON], ])
    elif char == 'n':
        value = np.array([[ON, ON, OFF, ON],
                          [ON, ON, OFF, ON],
                          [ON, OFF, ON, ON],
                          [ON, OFF, ON, ON],
                          [ON, OFF, OFF, ON], ])
    elif char == 'o':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON], ])
    elif char == 'p':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON],
                          [ON, OFF, OFF, OFF],
                          [ON, OFF, OFF, OFF], ])
    elif char == 'q':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, ON, ON],
                          [ON, ON, ON, ON], ])
    elif char == 'r':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, OFF],
                          [ON, OFF, ON, OFF],
                          [ON, OFF, OFF, ON], ])
    elif char == 's':
        value = np.array([[OFF, ON, ON, ON],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, OFF],
                          [OFF, OFF, OFF, ON],
                          [ON, ON, ON, OFF], ])
    elif char == 't':
        value = np.array([[ON, ON, ON, ON],
                          [ON, ON, ON, ON],
                          [OFF, OFF, ON, OFF],
                          [OFF, ON, ON, OFF],
                          [OFF, ON, ON, OFF], ])
    elif char == 'u':
        value = np.array([[ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, ON, ON],
                          [ON, ON, ON, ON], ])
    elif char == 'v':
        value = np.array([[ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [OFF, ON, ON, OFF], ])
    elif char == 'w':
        value = np.array([[ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, OFF, ON],
                          [ON, ON, ON, ON],
                          [ON, ON, OFF, ON], ])
    elif char == 'x':
        value = np.array([[ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [OFF, ON, ON, OFF],
                          [ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON], ])
    elif char == 'y':
        value = np.array([[ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [OFF, ON, ON, OFF],
                          [OFF, ON, OFF, OFF],
                          [OFF, ON, OFF, OFF], ])
    elif char == 'z':
        value = np.array([[ON, ON, ON, ON],
                          [OFF, OFF, ON, OFF],
                          [OFF, ON, OFF, OFF],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, ON], ])
    # --- digits ---
    elif char == '0':
        value = np.array([[ON, ON, ON, ON],
                          [ON, ON, OFF, ON],
                          [ON, ON, OFF, ON],
                          [ON, OFF, ON, ON],
                          [ON, ON, ON, ON], ])
    elif char == '1':
        value = np.array([[OFF, ON, ON, OFF],
                          [ON, ON, ON, OFF],
                          [OFF, ON, ON, OFF],
                          [OFF, ON, ON, OFF],
                          [OFF, ON, ON, OFF], ])
    elif char == '2':
        value = np.array([[ON, ON, ON, ON],
                          [OFF, OFF, OFF, ON],
                          [ON, ON, ON, ON],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, ON], ])
    elif char == '3':
        value = np.array([[ON, ON, ON, ON],
                          [OFF, OFF, OFF, ON],
                          [OFF, ON, ON, ON],
                          [OFF, OFF, OFF, ON],
                          [ON, ON, ON, ON], ])
    elif char == '4':
        value = np.array([[ON, OFF, OFF, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON],
                          [OFF, OFF, OFF, ON],
                          [OFF, OFF, OFF, ON], ])
    elif char == '5':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, OFF],
                          [OFF, OFF, OFF, ON],
                          [ON, ON, ON, OFF], ])
    elif char == '6':
        value = np.array([[ON, ON, OFF, OFF],
                          [ON, OFF, OFF, OFF],
                          [ON, ON, ON, OFF],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, OFF], ])
    elif char == '7':
        value = np.array([[ON, ON, ON, ON],
                          [OFF, OFF, OFF, ON],
                          [OFF, ON, ON, OFF],
                          [OFF, ON, OFF, OFF],
                          [OFF, ON, OFF, OFF], ])
    elif char == '8':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON], ])
    elif char == '9':
        value = np.array([[ON, ON, ON, ON],
                          [ON, OFF, OFF, ON],
                          [ON, ON, ON, ON],
                          [ON, ON, OFF, OFF],
                          [ON, ON, OFF, OFF], ])
    # --- punctuation ---
    elif char == '_':
        # Rendered as a blank glyph (word separator).
        value = np.array([[OFF, OFF, OFF, OFF],
                          [OFF, OFF, OFF, OFF],
                          [OFF, OFF, OFF, OFF],
                          [OFF, OFF, OFF, OFF],
                          [OFF, OFF, OFF, OFF], ])
    elif char == '!':
        value = np.array([[OFF, ON, ON, OFF],
                          [OFF, ON, ON, OFF],
                          [OFF, ON, ON, OFF],
                          [OFF, OFF, OFF, OFF],
                          [OFF, ON, ON, OFF], ])
    elif char == '?':
        value = np.array([[OFF, ON, ON, OFF],
                          [ON, OFF, OFF, ON],
                          [OFF, OFF, ON, OFF],
                          [OFF, OFF, OFF, OFF],
                          [OFF, OFF, ON, OFF], ])
    elif char == '.':
        value = np.array([[OFF, OFF, OFF, OFF],
                          [OFF, OFF, OFF, OFF],
                          [OFF, OFF, OFF, OFF],
                          [OFF, ON, ON, OFF],
                          [OFF, ON, ON, OFF], ])
    # The glyph occupies rows row-2..row+2 and columns col-2..col+1.
    grid[row-2:row+3, col-2:col+2] = value
    return grid
def generateBlankGroup(gridSize):
    """Return a gridSize x gridSize array of all-zero (OFF) cells."""
    # np.zeros accepts a shape tuple directly; the zeros(n*n).reshape(...)
    # round-trip was unnecessary.
    return np.zeros((gridSize, gridSize))
| {"/pylife.py": ["/helper.py", "/displayTextSpawner.py", "/inputValidator.py"]} |
613 | AmosGarner/PyLife | refs/heads/master | /helper.py | import numpy as np
import matplotlib.pyplot as plot
import matplotlib.animation as animation
# Cell states for the Game of Life grid.
ON = 255   # live cell
OFF = 0    # dead cell
vals = [ON, OFF]  # palette sampled by randomGrid()
def randomGrid(gridSize):
    """Return a gridSize x gridSize grid with roughly 20% ON / 80% OFF cells."""
    weights = [0.2, 0.8]  # probability of ON and OFF, respectively
    flat = np.random.choice(vals, gridSize * gridSize, p=weights)
    return flat.reshape(gridSize, gridSize)
def addGlider(row, col, grid):
    """Write the 3x3 glider pattern into *grid*, top-left corner at (row, col)."""
    pattern = [
        [OFF, OFF, ON],
        [ON, OFF, ON],
        [OFF, ON, ON],
    ]
    grid[row:row + 3, col:col + 3] = np.array(pattern)
def addGosperGliderGun(row, col, grid):
    """Write an 11x38 Gosper glider gun into *grid*, top-left at (row, col)."""
    gun = np.zeros(11*38).reshape(11, 38)
    # Left 2x2 block.
    gun[5][1] = gun[5][2] = ON
    gun[6][1] = gun[6][2] = ON
    # Left emitter structure.
    gun[3][13] = gun[3][14] = ON
    gun[4][12] = gun[4][16] = ON
    gun[5][11] = gun[5][17] = ON
    gun[6][11] = gun[6][15] = gun[6][17] = gun[6][18] = ON
    gun[7][11] = gun[7][17] = ON
    gun[8][12] = gun[8][16] = ON
    gun[9][13] = gun[9][14] = ON
    # Right emitter structure.
    gun[1][25] = ON
    gun[2][23] = gun[2][25] = ON
    gun[3][21] = gun[3][22] = ON
    gun[4][21] = gun[4][22] = ON
    gun[5][21] = gun[5][22] = ON
    gun[6][23] = gun[6][25] = ON
    gun[7][25] = ON
    # Right 2x2 block.
    gun[3][35] = gun[3][36] = ON
    gun[4][35] = gun[4][36] = ON
    grid[row:row+11, col:col+38] = gun
| {"/pylife.py": ["/helper.py", "/displayTextSpawner.py", "/inputValidator.py"]} |
624 | jettaponB/Practice | refs/heads/main | /Test07.py | import tkinter as tk
def show_output():
    """Show a 1..12 multiplication table for the number typed by the user."""
    try:
        number = int(number_input.get())
    except ValueError:
        # Bug fix: non-numeric (or empty) input used to raise and crash the
        # Tk callback; reuse the existing error text instead.
        output_label.configure(text='ผิด')
        return
    if number == 0:
        output_label.configure(text='ผิด')
        return
    output = ''
    for i in range(1, 13):
        output += str(number) + ' * ' + str(i)
        output += ' = ' + str(number * i) + '\n'
    output_label.configure(text=output)
# Build the window and widgets for the multiplication-table app.
window = tk.Tk()
window.title('JustDoIT')
window.minsize(width=400, height=400)
# Prompt label (Thai text).
title_label = tk.Label(master=window, text='สูตรคูณแม่')
title_label.pack(pady=20)
# Entry for the number whose table will be shown.
number_input = tk.Entry(master=window, width=15)
number_input.pack()
# Button triggers show_output() above.
ok_button = tk.Button(
    master=window, text='คือ', command=show_output,
    width=6, height=1
)
ok_button.pack()
# Filled in by show_output().
output_label = tk.Label(master=window)
output_label.pack(pady=20)
window.mainloop()
class Tank:
    """A named tank carrying an initial amount of ammunition."""

    def __init__(self, name, ammo) -> None:
        # Store the constructor arguments verbatim.
        self.name, self.ammo = name, ammo
# Instantiate two tanks and print their names.
first_tank = Tank('Serie1', 3)
print(first_tank.name)
second_tank = Tank('Serie2', 5)
print(second_tank.name)
class Tank:
    """A named tank whose ammunition store is capped at 10 rounds."""

    def __init__(self, name, ammo) -> None:
        self.name = name
        self.ammo = ammo

    def add_ammo(self, ammo):
        # A reload that would exceed the 10-round cap is ignored entirely.
        new_total = self.ammo + ammo
        if new_total <= 10:
            self.ammo = new_total

    def fire_ammo(self):
        # Firing with an empty store is a silent no-op.
        if self.ammo > 0:
            self.ammo -= 1
627 | jettaponB/Practice | refs/heads/main | /Test12.py | # message = 'วัชราวลี'
# result = len(message)
# print(result)
# message = 'วัชราวลี'
# result = 'วัช' in message
# print(result)
# message = '0982612325'
# result = message.isdigit()
# print(result)
# message = 'Just Python'
# result = message.replace('Python', 'Rabbit')
# print(result)
# Split a comma-separated list of (Thai) animal names, then re-join with '+'.
message = 'กระต่าย, กระรอก, หมี'
animals = message.split(', ')  # -> list of three names
new_message = '+'.join(animals)
print(new_message)
print(animals)
| {"/Test14.py": ["/class_tank.py"], "/Test05.py": ["/shape.py"]} |
628 | jettaponB/Practice | refs/heads/main | /Test10.py | # quests = ['ปลูกต้นมะม่วง', 'ล้างปลา', 'เผาถ่าน']
# if 'ล้างปลา' in quests:
# print('ทำงานเสร็จ')
#----------------------------------------------------
# quests = ['ปลูกต้นมะม่วง', 'ล้างปลา', 'เผาถ่าน']
# max_quests = 5
# if len(quests) < max_quests:
# quests.append('จับปลาดุก')
# print(quests)
#----------------------------------------------------
# quests = ['ปลูกต้นมะม่วง', 'ล้างปลา', 'เผาถ่าน']
# for quest in quests:
# print(quest)
#----------------------------------------------------
# Print a numbered quest list, e.g. "1. <quest>".
quests = ['ปลูกต้นมะม่วง', 'ล้างปลา', 'เผาถ่าน']
# enumerate(..., 1) replaces the non-idiomatic range(len(...)) index loop
# and produces the exact same output.
for number, quest in enumerate(quests, 1):
    print(str(number) + '. ' + quest)
| {"/Test14.py": ["/class_tank.py"], "/Test05.py": ["/shape.py"]} |
def get_circle_area(radius):
    """Return the circle's area, using the classic 22/7 approximation of pi."""
    pi_approx = 22 / 7  # kept (not math.pi) to preserve existing results
    return pi_approx * (radius ** 2)
def get_triangle_area(width, heigth):
    """Return half of width times height.

    The parameter keeps the original spelling ``heigth`` because callers
    pass it by keyword with that spelling.
    """
    half = 1 / 2
    return half * width * heigth
def get_rectangle_area(width, heigth):
    """Return width times height (original parameter spelling preserved)."""
    area = width * heigth
    return area
630 | jettaponB/Practice | refs/heads/main | /Test14.py | import class_tank as CT
# Exercise the Tank class: fire three rounds, then reload four.
first_tank = CT.Tank('Serie1', 3)
first_tank.fire_ammo()
print(first_tank.ammo)  # 2
first_tank.fire_ammo()
first_tank.fire_ammo()
print(first_tank.ammo)  # 0
first_tank.add_ammo(4)
print(first_tank.ammo)  # 4
631 | jettaponB/Practice | refs/heads/main | /test09.py | import tkinter as tk
def show_output():
    """Show the 1..12 multiplication table for the number typed by the user."""
    try:
        number = int(input_number.get())
    except ValueError:
        # Bug fix: non-numeric or empty input used to raise inside the Tk
        # callback; ignore it instead of crashing.
        return
    output = ''
    for i in range(1, 13):
        output += str(number) + ' * ' + str(i) + ' = ' + str(number * i) + '\n'
    output_label.configure(text=output)
# Build the window and widgets for the multiplication-table calculator.
window = tk.Tk()
window.title('โปรแกรมคำนวนสูตรคูณ')
window.minsize(width=500, height=400)
# Prompt label (Thai text).
title_label = tk.Label(master=window, text='กรุณาระบุแม่สูตรคูณ')
title_label.pack()
# Entry read by show_output().
input_number = tk.Entry(master=window)
input_number.pack()
cal_button = tk.Button(master=window, text='คำนวน', command=show_output)
cal_button.pack()
# Filled in by show_output().
output_label = tk.Label(master=window)
output_label.pack()
window.mainloop()
score = 55  # score under test

# Map the score onto a letter grade (highest band first).
if score >= 80:
    print('Grade A')
    print('dafdaf')  # NOTE(review): looks like leftover debug output -- confirm and remove
elif score >= 70:
    print('Grade B')
elif score >= 60:
    print('Grade C')
else:
    print('Grade F')
633 | jettaponB/Practice | refs/heads/main | /Test03.py | # number = 1
# double = number * 2
# print(number)
# for i in range(1, 7):
# double = i * 2
# print(double)
# for i in range(1, 7):
# if i % 3 == 0:
# continue
# print(i)
# Prints 1 and 2, then stops at the first multiple of 3.
for i in range(1, 7):
    if i % 3 == 0:
        break
    print(i)
634 | jettaponB/Practice | refs/heads/main | /Test01.py | # x = '4.5'
# y = str(12)
# z = x + y
# print(z)
# final_score = 15
#
# age = 25 # ตัวเลขจำนวนเต็ม (integer)
# weight = 66.6 # ตัวเลขทศนิยม (Float)
# first_name = 'ศักรินทร์' # ข้อความ (String)
# has_notebook = True # Boolean
# Demonstrate Python's arithmetic operators on two integers.
x = 5
y = 2
a1 = x + y   # 7
a2 = x - y   # 3
a3 = x * y   # 10
a4 = x / y   # 2.5 (true division always yields a float)
a5 = x % y   # 1  (remainder)
a6 = x ** y  # 25 (exponentiation)
a7 = x // y  # 2  (floor division)
a8 = (x + 1) * (y - 1)  # 6
x = x + 3  # x += 3
print(a8)
635 | jettaponB/Practice | refs/heads/main | /Test08.py | import tkinter as tk
def set_message():
    """Copy the entry widget's current text onto the title label."""
    title_label.configure(text=text_input.get())
# Build the window: an entry whose text is copied to the label on OK.
window = tk.Tk()
window.title('Desktop Application')
window.minsize(width=300, height=400)
# Prompt label (Thai text); also the target updated by set_message().
title_label = tk.Label(master=window, text='กรุณาระบุข้อความ')
title_label.pack()
text_input = tk.Entry(master=window)
text_input.pack()
ok_button = tk.Button(master=window, text='OK', command=set_message)
ok_button.pack()
window.mainloop()
| {"/Test14.py": ["/class_tank.py"], "/Test05.py": ["/shape.py"]} |
636 | jettaponB/Practice | refs/heads/main | /Test04.py | # def get_box_area(width, length, height):
# box_area = width * length * height
# print(box_area)
#
# get_box_area(4, 4, 2)
# get_box_area(width=1, length=1, height=2)
def get_box_area(width, length, height):
    """Return the box volume; any negative dimension yields 0."""
    if min(width, length, height) < 0:
        return 0
    return width * length * height
box1 = get_box_area(4, -4, 2)  # negative dimension -> 0
box2 = get_box_area(width=1, length=1, height=2)  # -> 2
print(box1, box2)
# A book record keyed by attribute name.
book = {
    'name': 'C++',
    'price': '299',
    'page': '414'
}
# # One variable per field ... too many variables.
# book_name = 'C++'
# book_price = 299
# book_page = 414
# # Stored in a list ... easy to forget which index holds which field.
# book_data = ['C++', 299, 414]
#book['place'] = 'MU Salaya'
book.pop('price')  # drop the price entry before printing
print(book)
638 | jettaponB/Practice | refs/heads/main | /Test05.py | import shape as sh
# Exercise the area helpers from the local shape module.
circle = sh.get_circle_area(10)
print(circle)
triangle = sh.get_triangle_area(width=6, heigth=7)  # note: module spells it 'heigth'
print(triangle)
640 | jerry5841314/Ensemble-Pytorch | refs/heads/master | /torchensemble/utils/logging.py | import os
import time
import logging
def set_logger(log_file=None, log_console_level="info", log_file_level=None):
    """Bind the root logger to a console handler and, optionally, a file handler.

    Parameters
    ----------
    log_file : str or None
        Basename for the log file under ./logs; when None no file handler
        is added.
    log_console_level : str
        Console verbosity: one of debug/info/warning/error/critical.
    log_file_level : str or None
        File verbosity; DEBUG when None.

    Returns
    -------
    logging.Logger
        The configured root logger (level forced to DEBUG).

    Raises
    ------
    ValueError
        If a level string is not one of the five recognised names.
    """
    def _get_level(level):
        # Translate a level name (case-insensitive) into a logging constant.
        if level.lower() == 'debug':
            return logging.DEBUG
        elif level.lower() == 'info':
            return logging.INFO
        elif level.lower() == 'warning':
            return logging.WARN
        elif level.lower() == 'error':
            return logging.ERROR
        elif level.lower() == 'critical':
            return logging.CRITICAL
        else:
            msg = (
                "`log_console_level` must be one of {{DEBUG, INFO,"
                " WARNING, ERROR, CRITICAL}}, but got {} instead."
            )
            raise ValueError(msg.format(level.upper()))
    _logger = logging.getLogger()
    # Reset any previously attached handlers.  Bug fix: iterate over a COPY
    # of the handler list -- removing from the live list while iterating it
    # silently skips every other handler.
    for h in list(_logger.handlers):
        _logger.removeHandler(h)
    rq = time.strftime('%Y_%m_%d_%H_%M', time.localtime(time.time()))
    log_path = os.path.join(os.getcwd(), 'logs')
    ch_formatter = logging.Formatter(
        "%(asctime)s - %(levelname)s: %(message)s"
    )
    ch = logging.StreamHandler()
    ch.setLevel(_get_level(log_console_level))
    ch.setFormatter(ch_formatter)
    _logger.addHandler(ch)
    if log_file is not None:
        print('Log will be saved in \'{}\'.'.format(log_path))
        if not os.path.exists(log_path):
            os.mkdir(log_path)
            print('Create folder \'logs/\'')
        # Log file name carries a minute-resolution timestamp.
        log_name = os.path.join(log_path, log_file + '-' + rq + '.log')
        print('Start logging into file {}...'.format(log_name))
        fh = logging.FileHandler(log_name, mode='w')
        fh.setLevel(
            logging.DEBUG
            if log_file_level is None
            else _get_level(log_file_level)
        )
        fh_formatter = logging.Formatter(
            "%(asctime)s - %(filename)s[line:%(lineno)d] - "
            "%(levelname)s: %(message)s"
        )
        fh.setFormatter(fh_formatter)
        _logger.addHandler(fh)
    # Root stays at DEBUG; handlers filter individually.
    _logger.setLevel("DEBUG")
    return _logger
| {"/torchensemble/tests/test_fast_geometric.py": ["/torchensemble/utils/logging.py"]} |
641 | jerry5841314/Ensemble-Pytorch | refs/heads/master | /torchensemble/tests/test_fast_geometric.py | import torch
import pytest
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from torchensemble import FastGeometricClassifier as clf
from torchensemble import FastGeometricRegressor as reg
from torchensemble.utils.logging import set_logger
# Configure the shared logger once for the whole test module.
set_logger("pytest_fast_geometric")
# Testing data: two 2-D points with classification / regression targets.
X_test = torch.Tensor(np.array(([0.5, 0.5], [0.6, 0.6])))
y_test_clf = torch.LongTensor(np.array(([1, 0])))
y_test_reg = torch.FloatTensor(np.array(([0.5, 0.6])))
y_test_reg = y_test_reg.view(-1, 1)  # column vector
# Base estimator
class MLP_clf(nn.Module):
    """Minimal classifier backbone: two linear layers, 2 -> 2 -> 2."""

    def __init__(self):
        super(MLP_clf, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X):
        # Flatten each sample, then apply the two layers in sequence.
        flat = X.view(X.size()[0], -1)
        hidden = self.linear1(flat)
        return self.linear2(hidden)
class MLP_reg(nn.Module):
    """Minimal regressor backbone: two linear layers, 2 -> 2 -> 1."""

    def __init__(self):
        super(MLP_reg, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 1)

    def forward(self, X):
        # Flatten each sample, then apply the two layers in sequence.
        flat = X.view(X.size()[0], -1)
        hidden = self.linear1(flat)
        return self.linear2(hidden)
def test_fast_geometric_workflow_clf():
    """`evaluate` must raise a helpful error before `ensemble` is called."""
    model = clf(estimator=MLP_clf, n_estimators=2, cuda=False)
    model.set_optimizer("Adam")
    # Wrap the shared test tensors in a loader.
    loader = DataLoader(
        TensorDataset(X_test, y_test_clf), batch_size=2, shuffle=False
    )
    with pytest.raises(RuntimeError) as excinfo:
        model.evaluate(loader)
    assert "Please call the `ensemble` method to build" in str(excinfo.value)
def test_fast_geometric_workflow_reg():
    """`evaluate` must raise a helpful error before `ensemble` is called."""
    model = reg(estimator=MLP_reg, n_estimators=2, cuda=False)
    model.set_optimizer("Adam")
    # Wrap the shared test tensors in a loader.
    loader = DataLoader(
        TensorDataset(X_test, y_test_reg), batch_size=2, shuffle=False
    )
    with pytest.raises(RuntimeError) as excinfo:
        model.evaluate(loader)
    assert "Please call the `ensemble` method to build" in str(excinfo.value)
| {"/torchensemble/tests/test_fast_geometric.py": ["/torchensemble/utils/logging.py"]} |
653 | AlenaPliusnina/Flask_API | refs/heads/main | /app/api.py | import json
from datetime import datetime
from flask import request, make_response
from flask_restful import Resource, Api
from flask import g
from app import app, db
from flask_httpauth import HTTPBasicAuth
from app.models import User, Post, Comment
from app.schemes import posts_schema, post_schema, comment_schema, comments_schema, users_schema, user_schema
# All resources are exposed under this common URL prefix.
api = Api(app, prefix="/api/v1")
# HTTP Basic authentication; the credential check is verify_password().
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(username, password):
    """HTTP Basic auth callback: check credentials and stash the user on g."""
    user = User.query.filter_by(username=username).first()
    if user is None or not user.verify_password(password):
        return False
    g.user = user
    return True
class UserListResource(Resource):
    """Collection endpoint for users: list (admin only) and register."""

    @auth.login_required
    def get(self):
        # Only the 'admin' account may enumerate all users.
        if g.user.username == 'admin':
            users = User.query.all()
            return users_schema.dump(users)
        else:
            data = {'error': 'HTTP 403: Forbidden',
                    'message': 'Only the superuser can access.'}
            resp = make_response(json.dumps(data), 403)
            return resp

    def post(self):
        """Register a new user; rejects duplicate email or username."""
        body = request.get_json()
        user = User(**body)
        exist_email = User.query.filter_by(email=user.email).first()
        exist_username = User.query.filter_by(username=user.username).first()
        if not exist_email and not exist_username:
            try:
                user.hash_password()  # store the bcrypt hash, not the raw password
                user.save()
                data = {'message': 'You registered successfully. Please log in.'}
                resp = make_response(json.dumps(data), 201)
                return resp
            except Exception as e:
                # Broad catch keeps the API responsive on any save failure.
                return {'message': str(e)}, 401
        else:
            # NOTE(review): 202 for an existing user is unusual (409 is the
            # convention) -- kept as-is since clients may depend on it.
            data = {'message': 'User already exists. Please login.'}
            resp = make_response(json.dumps(data), 202)
            return resp
class UserResource(Resource):
    """Single-user endpoint: fetch or delete one account."""

    @auth.login_required
    def get(self, user_id):
        # A user may read only their own record; admin may read any.
        if g.user.username == 'admin' or g.user.id == user_id:
            user = User.query.get_or_404(user_id)
            return user_schema.dump(user)
        else:
            data = {'error': 'HTTP 403: Forbidden',
                    'message': 'You can only access your registration information.'}
            resp = make_response(json.dumps(data), 403)
            return resp

    @auth.login_required
    def delete(self, user_id):
        # A user may delete only their own account; admin may delete any.
        user = User.query.get_or_404(user_id)
        if user.id == g.user.id or g.user.username == 'admin':
            db.session.delete(user)
            db.session.commit()
            data = {'message': 'The user was successfully deleted.'}
            resp = make_response(json.dumps(data), 200)
            return resp
        else:
            data = {'error': 'HTTP 403: Forbidden',
                    'message': 'You can only delete your account.'}
            resp = make_response(json.dumps(data), 403)
            return resp
class PostListResource(Resource):
    """Collection endpoint for posts: public list, authenticated create."""

    def get(self):
        posts = Post.query.all()
        return posts_schema.dump(posts)

    @auth.login_required
    def post(self):
        # The author is always the authenticated user, never taken from the body.
        new_post = Post(
            author_id=g.user.id,
            title=request.json['title'],
            content=request.json['content'],
            publication_datetime=datetime.now(),
        )
        db.session.add(new_post)
        db.session.commit()
        return post_schema.dump(new_post)
class PostResource(Resource):
    """Single-post endpoint: fetch, partial-update, or delete one post."""

    def get(self, post_id):
        post = Post.query.get_or_404(post_id)
        return post_schema.dump(post)

    @auth.login_required
    def patch(self, post_id):
        # Only the author may edit; fields absent from the body stay untouched.
        post = Post.query.get_or_404(post_id)
        if post.author_id == g.user.id:
            if 'title' in request.json:
                post.title = request.json['title']
            if 'content' in request.json:
                post.content = request.json['content']
            db.session.commit()
            return post_schema.dump(post)
        else:
            data = {'error': 'HTTP 403: Forbidden',
                    'message': 'You can only edit your posts.'}
            resp = make_response(json.dumps(data), 403)
            return resp

    @auth.login_required
    def delete(self, post_id):
        # Only the author may delete their post.
        post = Post.query.get_or_404(post_id)
        if post.author_id == g.user.id:
            db.session.delete(post)
            db.session.commit()
            data = {'message': 'The post was successfully deleted.'}
            resp = make_response(json.dumps(data), 200)
            return resp
        else:
            data = {'error': 'HTTP 403: Forbidden',
                    'message': 'You can only delete your posts.'}
            resp = make_response(json.dumps(data), 403)
            return resp
class CommentListResource(Resource):
    """Collection endpoint for comments: public list, authenticated create."""

    def get(self):
        comments = Comment.query.all()
        return comments_schema.dump(comments)

    @auth.login_required
    def post(self):
        # The author is always the authenticated user.
        new_comment = Comment(
            author_id=g.user.id,
            post_id=request.json['post_id'],
            title=request.json['title'],
            content=request.json['content'],
            publication_datetime=datetime.now()
        )
        # Refuse to attach a comment to a non-existent post.
        post = Post.query.filter_by(id=request.json['post_id']).first()
        if post:
            db.session.add(new_comment)
            db.session.commit()
            return comment_schema.dump(new_comment)
        else:
            data = {'error': 'HTTP 404: Not Found',
                    'message': 'Post with this id was not found.'}
            resp = make_response(json.dumps(data), 404)
            return resp
class CommentResource(Resource):
    """Single-comment endpoint: fetch, partial-update, or delete one comment."""

    def get(self, comment_id):
        comment = Comment.query.get_or_404(comment_id)
        return comment_schema.dump(comment)

    @auth.login_required
    def patch(self, comment_id):
        # Only the author may edit; fields absent from the body stay untouched.
        comment = Comment.query.get_or_404(comment_id)
        if comment.author_id == g.user.id:
            if 'title' in request.json:
                comment.title = request.json['title']
            if 'content' in request.json:
                comment.content = request.json['content']
            db.session.commit()
            return comment_schema.dump(comment)
        else:
            data = {'error': 'HTTP 403: Forbidden',
                    'message': 'You can only edit your comments.'}
            resp = make_response(json.dumps(data), 403)
            return resp

    @auth.login_required
    def delete(self, comment_id):
        # Only the author may delete their comment.
        comment = Comment.query.get_or_404(comment_id)
        if comment.author_id == g.user.id:
            db.session.delete(comment)
            db.session.commit()
            data = {'message': 'The comment was successfully deleted.'}
            resp = make_response(json.dumps(data), 200)
            return resp
        else:
            data = {'error': 'HTTP 403: Forbidden',
                    'message': 'You can only delete your comments.'}
            resp = make_response(json.dumps(data), 403)
            return resp
# Route table (all paths are prefixed with /api/v1 by the Api instance).
api.add_resource(UserListResource, '/users')
api.add_resource(UserResource, '/users/<int:user_id>')
api.add_resource(PostListResource, '/posts')
api.add_resource(PostResource, '/posts/<int:post_id>')
api.add_resource(CommentListResource, '/comments')
api.add_resource(CommentResource, '/comments/<int:comment_id>')
| {"/app/api.py": ["/app/__init__.py", "/app/models.py", "/app/schemes.py"], "/app/models.py": ["/app/__init__.py"], "/app/schemes.py": ["/app/__init__.py", "/app/models.py"]} |
654 | AlenaPliusnina/Flask_API | refs/heads/main | /app/models.py | from datetime import datetime
from flask_bcrypt import generate_password_hash, check_password_hash
from app import db
class User(db.Model):
    """Application user; owns posts and comments (cascade-deleted with it)."""

    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True, nullable=False)
    username = db.Column(db.String(80), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Holds the bcrypt hash once hash_password() has been called.
    password = db.Column(db.String(128), nullable=False)
    posts = db.relationship('Post', backref='user', lazy='dynamic', cascade="all,delete")
    comments = db.relationship('Comment', backref='user', lazy='dynamic', cascade="all,delete")

    def hash_password(self):
        # Replace the plain-text password with its bcrypt hash in place.
        self.password = generate_password_hash(self.password).decode('utf8')

    def verify_password(self, password):
        # True when *password* matches the stored bcrypt hash.
        return check_password_hash(self.password, password)

    def save(self):
        # Persist this instance through the current session.
        db.session.add(self)
        db.session.commit()

    def __repr__(self):
        return '<User %r>' % self.username
class Post(db.Model):
    """A user's post; its comments are cascade-deleted with it."""

    __tablename__ = 'posts'

    id = db.Column(db.Integer, primary_key=True, nullable=False)
    author_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
    title = db.Column(db.String(50), nullable=False)
    content = db.Column(db.String(256), nullable=False)
    # Bug fix: pass the callable, not its result.  `default=datetime.now()`
    # was evaluated once at import time, stamping every row with the same
    # datetime; with the bare callable each insert gets the current time.
    publication_datetime = db.Column(db.DateTime(), default=datetime.now, nullable=False)
    comments = db.relationship('Comment', backref='post', lazy='dynamic', cascade="all,delete")

    def __repr__(self):
        return '<Post %s>' % self.title
class Comment(db.Model):
    """A user's comment attached to a post."""

    __tablename__ = 'comments'

    id = db.Column(db.Integer, primary_key=True, nullable=False)
    post_id = db.Column(db.Integer, db.ForeignKey(Post.id), nullable=False)
    author_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
    title = db.Column(db.String(50), nullable=False)
    content = db.Column(db.String(256), nullable=False)
    # Bug fix: pass the callable, not its result.  `default=datetime.now()`
    # was evaluated once at import time, stamping every row with the same
    # datetime; with the bare callable each insert gets the current time.
    publication_datetime = db.Column(db.DateTime(), default=datetime.now, nullable=False)

    def __repr__(self):
        return '<Comment %s>' % self.title
655 | AlenaPliusnina/Flask_API | refs/heads/main | /app/__init__.py | from config import Config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
def create_app():
    """Create the Flask application configured from the Config object."""
    flask_app = Flask(__name__)
    flask_app.config.from_object(Config)
    flask_app.debug = True  # development mode
    return flask_app
# Application singletons shared across the package.
app = create_app()
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Imported at the bottom on purpose: api and models both import `app`/`db`
# from this package, so importing them earlier would be circular.
from app import api, models
# Create any missing tables at startup.
db.create_all()
656 | AlenaPliusnina/Flask_API | refs/heads/main | /app/schemes.py | from flask_marshmallow import Marshmallow
from app import app
from app.models import User, Post, Comment
# Single Marshmallow instance bound to the Flask app.
ma = Marshmallow(app)
class CommentSchema(ma.Schema):
    """Serialises Comment rows; `ordered` keeps fields in declared order."""

    class Meta:
        fields = ("id", "post_id", "author_id", "title", "content", "publication_datetime")
        model = Comment
        ordered = True
class PostSchema(ma.Schema):
    """Serialises Post rows together with their nested comments."""

    class Meta:
        fields = ("id", "title", "content", "author_id", "publication_datetime", "comments")
        model = Post
        ordered = True

    # The 'comments' field expands each related comment inline.
    comments = ma.Nested(CommentSchema, many=True)
class UserSchema(ma.Schema):
    """Serialises User rows with their nested posts and comments.

    NOTE(review): 'password' (the stored hash) is included in the output
    fields -- confirm this exposure is intended.
    """

    class Meta:
        fields = ("id", "username", "email", "password", "posts", "comments")
        model = User
        ordered = True

    # Bug fix: posts were nested with CommentSchema (copy-paste error),
    # which serialised a user's posts with comment fields.
    posts = ma.Nested(PostSchema, many=True)
    comments = ma.Nested(CommentSchema, many=True)
# Shared serializer instances used by the API resources.
post_schema = PostSchema()
posts_schema = PostSchema(many=True)
# Bug fix: the comment serializers were instantiated from PostSchema
# (copy-paste error), so comments were dumped with post fields.
comment_schema = CommentSchema()
comments_schema = CommentSchema(many=True)
user_schema = UserSchema()
users_schema = UserSchema(many=True)
657 | kstandvoss/TFCA | refs/heads/master | /run.py | from argparse import Namespace
import co2_dataset
import os
import time
# Settings
# --- paths --------------------------------------------------------------
data_path = 'CO2/monthly_in_situ_co2_mlo.csv'  # Mauna Loa CO2 measurements
save_path = 'reg_params/params3'               # where parameters are saved/loaded
# --- optimisation hyper-parameters --------------------------------------
epochs = 10000
minibatch_size = 100
mc_samples = 50  # presumably Monte-Carlo dropout samples -- confirm in co2_dataset
optimizer = 'adam'
learning_rate = 1e-1
momentum = 0.9
l2_weight = 1e-6  # L2 regularisation strength
drop_p = 0.1      # dropout probability
# --- LIF neuron parameters (consumed by co2_dataset's lif_params) -------
tau_rc = 0.07
tau_ref = 0.0005
amplitude = 0.01
# --- run control ---------------------------------------------------------
train = False              # when False, skip training
continue_training = True   # resume from saved parameters when training
spiking = True
plot = True
comment = 'test run'
# Bundle everything into a Namespace so co2_dataset.main can use attribute access.
args = Namespace(data_path=data_path, epochs=epochs, minibatch_size=minibatch_size,
                 optimizer=optimizer, learning_rate=learning_rate, l2_weight=l2_weight, momentum=momentum,
                 mc_samples=mc_samples, tau_ref=tau_ref, tau_rc=tau_rc, train=train, continue_training=continue_training,
                 save_path=save_path, amplitude=amplitude, drop_p=drop_p, spiking=spiking, plot=plot)
print('########################')
print(comment)  # a comment that will be printed in the log file
print(args)  # print all args in the log file so we know what we were running
print('########################')
start = time.time()
loss = co2_dataset.main(args)
print("The training took {:.1f} minutes with a loss of {:.3f}".format((time.time()-start)/60,loss))  # measure time
658 | kstandvoss/TFCA | refs/heads/master | /co2_dataset.py | # coding: utf-8
import nengo
import nengo_dl
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import signal
import argparse
import pdb
def main(args):
co2_data = pd.read_csv(args.data_path, usecols=[0,4,5,6,7,8,9])
co2_data.columns = ['Date', 'standard', 'season_adjust', 'smoothed', 'smoothed_season', 'standard_no_missing', 'season_no_missing']
detrended = signal.detrend(co2_data['standard_no_missing'][200:600])
detrended /= np.max(detrended)
detrended *= 2
#if args.plot:
# plt.plot(detrended)
# plt.axvline(x=300, c='black', lw='1')
# plt.ylim([-20,20])
# plt.xlim([0,500])
# # Training setup
# leaky integrate and fire parameters
lif_params = {
'tau_rc': args.tau_rc,
'tau_ref': args.tau_ref,
'amplitude': args.amplitude
}
# training parameters
drop_p = args.drop_p
minibatch_size = args.minibatch_size
n_epochs = args.epochs
learning_rate = args.learning_rate
momentum = args.momentum
l2_weight = args.l2_weight
# lif parameters
lif_neurons = nengo.LIF(**lif_params)
# softlif parameters (lif parameters + sigma)
softlif_neurons = nengo_dl.SoftLIFRate(**lif_params,sigma=0.002)
# ensemble parameters
ens_params = dict(max_rates=nengo.dists.Choice([100]), intercepts=nengo.dists.Choice([0]))
def build_network(neuron_type, drop_p, l2_weight, n_units=1024, num_layers=4, output_size=1):
with nengo.Network() as net:
use_dropout = False
if drop_p:
use_dropout = True
#net.config[nengo.Connection].synapse = None
#nengo_dl.configure_settings(trainable=False)
# input node
inp = nengo.Node([0])
shape_in = 1
x = inp
# the regularizer is a function, so why not reuse it
reg = tf.contrib.layers.l2_regularizer(l2_weight)
class DenseLayer(object):
i=0
def pre_build(self, shape_in, shape_out):
self.W = tf.get_variable(
"weights" + str(DenseLayer.i), shape=(shape_in[1], shape_out[1]),
regularizer=reg)
self.B = tf.get_variable(
"biases" + str(DenseLayer.i), shape=(1, shape_out[1]), regularizer=reg)
DenseLayer.i+=1
def __call__(self, t, x):
return x @ self.W + self.B
for n in range(num_layers):
# add a fully connected layer
a = nengo_dl.TensorNode(DenseLayer(), size_in=shape_in, size_out=n_units, label='dense{}'.format(n))
nengo.Connection(x, a, synapse=None)
shape_in = n_units
x = a
# apply an activation function
x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)
# add a dropout layer
x = nengo_dl.tensor_layer(x, tf.layers.dropout, rate=drop_p, training=use_dropout)
# add an output layer
a = nengo_dl.TensorNode(DenseLayer(), size_in=shape_in, size_out=output_size)
nengo.Connection(x, a, synapse=None)
return net, inp, a
do_train = args.train
continue_training = args.continue_training
param_path = args.save_path
trainset_size = len(detrended)
x = np.linspace(-2,2,trainset_size)
y = detrended
# # training on continuous soft leaky integrate and fire neurons
# construct the network
net, inp, out = build_network(softlif_neurons, drop_p, l2_weight)
with net:
in_p = nengo.Probe(inp, 'output')
out_p = nengo.Probe(out, 'output')
"""
# define training set etc.
"""
#pdb.set_trace()
#train_x = {inp: x.reshape((minibatch_size, trainset_size // minibatch_size))[..., None]}
#train_y = {out_p: y.reshape((minibatch_size, trainset_size // minibatch_size))[..., None]}
target = x[:,None,None]
train_x = {inp: target[:300]}
train_y = {out_p: y[:300,None,None]}
test_x = {inp: target[300:]}
test_y = {out_p: y[300:,None,None]}
# construct the simulator
with nengo_dl.Simulator(net, minibatch_size=minibatch_size, tensorboard='./tensorboard') as sim:
#, tensorboard='./tensorboard')
# define the loss function (We need to do this in the
# context of the simulator because it changes the
# tensorflow default graph to the nengo network.
# That is, tf.get_collection won't work otherwise.)
def mean_squared_error_L2_regularized(y, t):
if not y.shape.as_list() == t.shape.as_list():
raise ValueError("Output shape", y.shape, "differs from target shape", t.shape)
e = tf.reduce_mean((t - y)**2) + tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
return e
with tf.name_scope('sum_weights'):
first = 0
for node in net.nodes:
if type(node) == nengo_dl.tensor_node.TensorNode:
if 'Dense' in str(node.tensor_func):
if not first:
sum_weights = tf.linalg.norm(node.tensor_func.W)
first = 1
else:
sum_weights += tf.linalg.norm(node.tensor_func.W)
weight_summary = tf.summary.scalar('sum_weights', sum_weights)
starter_learning_rate = args.learning_rate
learning_rate = tf.train.exponential_decay(starter_learning_rate, sim.tensor_graph.training_step,
1000, 0.96, staircase=True)
# define optimiser
if args.optimizer=='rmsprop':
opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
elif args.optimizer=='sgd':
opt = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
elif args.optimizer=='adadelta':
opt = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
elif args.optimizer=='adam':
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
#pdb.set_trace()
loss = 0
# actual training loop
if do_train:
if continue_training:
sim.load_params(path=param_path)
loss = sim.loss(test_x, test_y, objective='mse')
print("error before training: ", loss)
sim.train(train_x, train_y, opt, n_epochs=n_epochs, shuffle=True, objective={out_p:mean_squared_error_L2_regularized}, summaries=['loss', weight_summary])
loss = sim.loss(test_x, test_y, objective='mse')
print("error after training:", loss)
sim.save_params(path=param_path)
else:
sim.load_params(path=param_path)
T = args.mc_samples
outputs = np.zeros((T,target.size))
for t in range(T):
for i in range(0,target.size,minibatch_size):
sim.run_steps(1,input_feeds={inp: target[i:i+minibatch_size]})
sim.soft_reset(include_trainable=False, include_probes=False)
outputs[t] = sim.data[out_p].transpose(1,0,2).reshape((len(target),))
sim.soft_reset(include_trainable=False, include_probes=True)
predictive_mean = np.mean(outputs, axis=0)
predictive_variance = np.var(outputs, axis=0)
tau = (1 - args.drop_p) / (2 * len(predictive_variance) * args.l2_weight)
predictive_variance += tau**-1
target = np.squeeze(target)
if args.plot:
plt.plot(target,predictive_mean,label='out')
plt.fill_between(target, predictive_mean-2*np.sqrt(predictive_variance), predictive_mean+2*np.sqrt(predictive_variance),
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848', linewidth=0, label='variance')
plt.plot(target,detrended,label='target', color='blue',alpha=0.5)
plt.axvline(x=x[300], c='black', lw='1')
plt.ylim([-10,10])
plt.xlim([-2,2])
plt.legend(loc='upper right')
if args.spiking:
# # test on LIF neurons
# timesteps
# MC dropout samples
MC_drop = T
T = 100
# we want to see if spiking neural networks
# need dropout at all, so we disable it
net, inp, out = build_network(lif_neurons, drop_p=0, l2_weight=l2_weight)
with net:
in_p = nengo.Probe(inp)
out_p = nengo.Probe(out)
# start a new simulator
# T is the amount of MC dropout samples
sim = nengo_dl.Simulator(net, minibatch_size=minibatch_size)#, unroll_simulation=10, tensorboard='./tensorboard')
# load parameters
sim.load_params(path=param_path)
# copy the input for each MC dropout sample
minibatched_target = np.tile(target[:, None], (1,T))[..., None]
# run for T timesteps
spiking_outputs = np.zeros((target.size,T))
spiking_inputs = np.zeros((target.size,T))
for i in range(0,target.size,minibatch_size):
sim.soft_reset(include_trainable=False, include_probes=True)
sim.run_steps(T,input_feeds={inp: minibatched_target[i:i+minibatch_size,:]})
spiking_outputs[i:i+minibatch_size] = sim.data[out_p][...,0]
spiking_inputs[i:i+minibatch_size] = sim.data[in_p][...,0]
if args.plot:
# plot
plt.figure()
plt.scatter(spiking_inputs.flatten(), spiking_outputs.flatten(), c='r', s=1, label="output")
plt.plot()
#plt.plot(target.flatten(), y(target).flatten(), label="target", linewidth=2.0)
plt.legend(loc='upper right');
plt.plot(x,y, label='train set')
plt.axvline(x=x[300], c='black', lw='1')
plt.ylim([-10,10])
plt.xlim([-2,2])
# print(sim.data[out_p].shape)
predictive_mean = np.mean(spiking_outputs[:,-MC_drop:],axis=1)
predictive_variance = np.var(spiking_outputs[:,-MC_drop:],axis=1)
tau = (1 - args.drop_p) / (2 * len(predictive_variance) * args.l2_weight)
predictive_variance += tau**-1
plt.figure()
plt.plot(target,predictive_mean,label='out')
#plt.plot(target,spiking_outputs[:,-1],label='out')
plt.fill_between(np.squeeze(target), predictive_mean-2*np.sqrt(predictive_variance), predictive_mean+2*np.sqrt(predictive_variance),
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848', linewidth=0, label='variance')
plt.plot(x, y, c='blue', alpha=0.5, label='dataset')
#plt.scatter(x,y, color='black', s=9, label='train set')
plt.axvline(x=x[300], c='black', lw='1')
plt.legend(loc='upper right',)
plt.ylim([-10,10])
plt.xlim([-2,2])
sim.close()
if args.plot:
plt.show()
return loss
# Command-line entry point: parse training hyper-parameters and run main().
if __name__=='__main__':
    parser = argparse.ArgumentParser(description='Train spiking neural network to perform variational inference on co2 dataset')
    parser.add_argument('data_path', action='store',
                        help='Path to data')
    parser.add_argument('-e', '--epochs', action='store', dest='epochs', type=int, default=100,
                        help='Number of training epochs')
    parser.add_argument('-mb', action='store', dest='minibatch_size', type=int, default=25,
                        help='Size of training mini batches')
    parser.add_argument('-t', action='store', dest='mc_samples', type=int, default=20,
                        help='Number of MC forwardpasses and timesteps for spiking network')
    parser.add_argument('-o', '--optimizer', action='store', dest='optimizer', default='rmsprop', choices=('sgd', 'adadelta', 'adam', 'rmsprop'),
                        help='Optimization function')
    parser.add_argument('-r', '--learning_rate', action='store', dest='learning_rate', type=float,
                        help='Learning rate', default=1e-4)
    parser.add_argument('-m', '--momentum', action='store', dest='momentum', type=float,
                        help='Momentum', default=0.9)
    parser.add_argument('-l', '--l2_weight', action='store', dest='l2_weight', type=float,
                        help='Weight of l2 regularization', default=1e-6)
    parser.add_argument('-d', '--dropout', action='store', dest='drop_p', type=float,
                        help='Dropout probability', default=0.1)
    # LIF neuron parameters (membrane time constant, refractory period, amplitude)
    parser.add_argument('-rc', '--tau_rc', action='store', dest='tau_rc', type=float,
                        help='LIF parameter', default=0.07)
    parser.add_argument('-ref', '--tau_ref', action='store', dest='tau_ref', type=float,
                        help='LIF parameter', default=0.0005)
    parser.add_argument('-a', '--amplitude', action='store', dest='amplitude', type=float,
                        help='LIF parameter', default=0.05)
    parser.add_argument('--save_path', action='store', default='./reg_params/params')
    # NOTE(review): action='store_true' combined with default=True means
    # args.train is ALWAYS True and the --train flag has no effect; the help
    # text suggests the default should be False — confirm intended behavior.
    parser.add_argument('--train', action='store_true', dest='train', default=True,
                        help='Train new network, else load parameters')
    parser.add_argument('--continue_training', action='store_true', dest='continue_training', default=False,
                        help='Continue training from previous parameters')
    parser.add_argument('--plot', action='store_true', dest='plot', default=False,
                        help='Plot results')
    parser.add_argument('--spiking', action='store_true', dest='spiking', default=False,
                        help='Test spiking model')
    args = parser.parse_args()
    main(args)
| {"/run.py": ["/co2_dataset.py"]} |
660 | gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | /pytorch_gsp/utils/gsp.py | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import scipy
from sklearn.metrics.pairwise import rbf_kernel
def complement(S, N):
    """Return the indices in {0, ..., N-1} that are not in `S` as a numpy array."""
    universe = set(np.arange(0, N, 1))
    return np.array(list(universe.difference(S)))
class Reconstruction(nn.Module):
    def __init__(self,V, sample, freqs, domain='vertex',use_original_set = False, device = 'cuda'):
        """
        GSP reconstruction of graph signals from their samples.

        Args:
            V (numpy array): eigenvector matrix of Laplacian or adjacency.
                This matrix is expected to be orthonormal.
            sample (list-like): list of indices of in-sample nodes.
            freqs (list): indices of the retained spectral components.
            domain (str, optional): domain of the input signal fed to forward():
                'vertex' (sampled vertex values) or 'spectral' (spectral
                coefficients). Defaults to 'vertex'.
            use_original_set (bool, optional): if True, keep the sample indices
                on the module (self.sample); otherwise self.sample is None.
                Defaults to False.
            device (str, optional): device the interpolation operator is moved to.
        """
        super(Reconstruction, self).__init__()
        assert(domain in ['vertex','spectral'])
        # Interpolator is defined later in this module; it returns the linear
        # operator mapping the (sampled or spectral) input back to all N nodes.
        if domain == 'vertex':
            interp = Interpolator(V, sample, freqs)
        elif domain == 'spectral':
            interp= Interpolator(V, sample, freqs, freq=True)
        self.Interp = torch.Tensor(interp).to(device)
        self.N = V.shape[0]  # total number of graph nodes
        if use_original_set:
            self.sample = sample
        else:
            self.sample = None
    def forward(self,x):
        """Apply the interpolation operator to a batch of signals.

        Accepts (batch, seq_len, n) or (batch, n) input with the node axis
        last; returns the same layout with n replaced by N (all nodes).
        """
        x0 = x  # NOTE(review): unused — kept for parity with the original code
        n_dim = len(x.size())
        if n_dim == 3:
            bz, seq_len, n = x.size()
            # NOTE(review): .T on a 3-D tensor reverses ALL dimensions — this
            # relies on legacy torch semantics (deprecated in newer releases);
            # confirm behavior on the torch version in use.
            x = x.T
            x = x.reshape((n, bz*seq_len))
            x = torch.matmul(self.Interp,x)
            x = x.reshape((self.N,seq_len,bz)).T
        else:
            bz, n = x.size()
            x = x.T
            x = x.reshape((n, bz))
            x = torch.matmul(self.Interp,x)
            x = x.reshape((self.N,bz)).T
        return x
def corrMatrix(A, x):
    """Reweight an adjacency matrix with RBF-kernel signal similarities.

    Args:
        A (2D numpy array): adjacency matrix.
        x (2D numpy array): signals (time in rows, nodes in columns) used to
            compute pairwise node similarities.

    Returns:
        2D numpy array: similarity-weighted adjacency, normalized by its
        largest eigenvalue, with a zero diagonal.
    """
    weighted = rbf_kernel(x.T / 10) * A
    eigvals, _ = np.linalg.eigh(weighted)
    weighted = weighted / np.max(eigvals)
    # remove self-loops
    return weighted - np.diag(weighted.diagonal())
def spectral_components(A, x, return_vectors = True,lap = True, norm = False):
    """Rank the graph's spectral components by their aggregate magnitude in `x`.

    Args:
        A (2d numpy array): adjacency matrix.
        x (2d numpy array): graph signals, time in rows and nodes in columns.
        return_vectors (bool, optional): also return the eigenvector matrix.
        lap (bool, optional): use the Laplacian spectrum (True) or the
            adjacency/normalized-adjacency spectrum (False).
        norm (bool, optional): symmetrically normalize with D^{-1/2} A D^{-1/2}.

    Returns:
        Component indices ordered from most to least dominant, and (optionally)
        the eigenvector matrix used for the transform.
    """
    N = A.shape[0]
    if lap:
        if norm:
            inv_sqrt_deg = 1 / np.sqrt(A.sum(axis=1))
            D = np.diag(inv_sqrt_deg)
            M = np.diag(np.ones(N)) - D @ A @ D
        else:
            M = np.diag(A.sum(axis=1)) - A
    else:
        if norm:
            inv_sqrt_deg = 1 / np.sqrt(A.sum(axis=1))
            D = np.diag(inv_sqrt_deg)
            M = D @ A @ D
        else:
            M = A
    _, V = np.linalg.eigh(M)
    # per-signal magnitude of each spectral coefficient
    energy = np.abs(V.T @ x.T).T
    # for each signal, components sorted from weakest to strongest
    rankings = [list(np.argsort(row)) for row in energy]
    # accumulate each component's rank across all signals
    totals = {comp: 0 for comp in range(x.shape[1])}
    for ranking in rankings:
        for comp in ranking:
            totals[comp] += ranking.index(comp)
    F_global = np.argsort([totals[comp] for comp in totals])[::-1]
    if return_vectors:
        return F_global, V
    return F_global
def Interpolator(V, sample, freqs, freq = False):
    """Build the linear operator reconstructing a bandlimited graph signal
    from its vertex samples.

    Args:
        V (2D numpy array): orthonormal GFT basis (eigenvector matrix).
        sample (list-like): indices of the sampled (known) vertices.
        freqs (list-like): indices of the retained spectral components.
        freq (bool, optional): if False (default), the returned operator maps
            sampled vertex values to all N vertices; if True, it maps spectral
            coefficients instead (the inverse Gram matrix is used directly).

    Returns:
        2D numpy array interpolation operator, or None if the sampling set is
        not admissible.
    """
    Vf = V[:, freqs]
    n = Vf.shape[0]
    # Transpose of the vertex-sampling operator \Psi (1 on sampled vertices).
    psi_diag = np.zeros(n)
    psi_diag[sample] = 1
    Psi = np.diag(psi_diag)
    I = np.identity(n)
    # Fail fast: run the admissibility check BEFORE inverting the Gram matrix.
    # (Previously the inversion came first, so an inadmissible sampling set
    # could raise a LinAlgError before the check ever ran.)
    Psi_bar = I - Psi
    s = np.linalg.svd(np.dot(Psi_bar, Vf), compute_uv=False)
    # NOTE(review): for orthonormal V, max(s) mathematically never exceeds 1;
    # a strict admissibility test would be np.max(s) >= 1 - tol — confirm.
    if np.max(s) > 1:
        # message typo fixed ("Samling ... admissable")
        print("Sampling is not admissible")
        return None
    inv = scipy.linalg.inv(Vf.T @ Psi @ Vf)
    if freq == False:
        pseudoi = inv @ Vf.T @ Psi[:, sample]
    else:
        pseudoi = inv
    interp = np.dot(Vf, pseudoi)
    return interp
class KNN(nn.Module):
    """Baseline that fills unobserved nodes with the mean of their one-hop
    neighbors.

    Unknown entries are first initialized with the global mean of the sampled
    columns of `matrix`, then overwritten node-by-node with neighbor means.
    """
    def __init__(self, A, sample, matrix):
        """
        Args:
            A (2D numpy array): adjacency matrix (N x N).
            sample (list-like): indices of observed (in-sample) nodes.
            matrix (DataFrame): signals used to compute the initial fill value.
        """
        super(KNN, self).__init__()
        N = A.shape[0]
        # Bug fix: keep A on the instance — forward() previously referenced
        # the free name `A`, which raised NameError at call time.
        self.A = A
        # Complement of the sample set (same elements as gsp.complement).
        self.unknown = np.setdiff1d(np.arange(N), sample)
        self.mask = np.mean(matrix.values[:, sample])
    def forward(self, input):
        """Interpolate unknown nodes in place; node axis must be last."""
        if len(input.size()) == 2:
            input[:, self.unknown] = self.mask
        elif len(input.size()) == 3:
            input[:, :, self.unknown] = self.mask
        elif len(input.size()) == 4:
            input[:, :, :, self.unknown] = self.mask
        x = input
        # NOTE(review): the loop below assumes a 3-D (batch, time, nodes)
        # input; the 2-D/4-D masking branches above are not covered — confirm.
        for node in self.unknown:
            neighbors = np.nonzero(self.A[node])[0]
            # Bug fix: keepdim=True so the (batch, time, 1) mean can be
            # broadcast-assigned into the (batch, time, 1) slice; without it
            # torch raises a shape-mismatch error on assignment.
            x[:, :, [node]] = torch.mean(x[:, :, neighbors], dim=-1, keepdim=True)
        return x
def greedy_e_opt(Uf, S):
    """
    code from https://github.com/georgosgeorgos/GraphSignalProcessing, please refer to this repository

    MIT License

    Copyright (c) 2018 Giorgio Giannone

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.

    greedy_e_opt: sample S nodes from a set of size N where N is the number of rows in matrix Uf
    Args:
        Uf (2D numpy array): truncated eigenvector matrix with N rows. Columns correspond to the selected eigenvectors
        S (int): sample size
    Returns:
        sample: list of indices of selected nodes
    """
    # E-optimal greedy design: at each round, add the node that minimizes the
    # largest singular value of (I - Ds) @ Uf, i.e. the node whose inclusion
    # best shrinks the signal energy invisible to the sampling set.
    index_set = set()
    sample=[]
    n = Uf.shape[0] - 1  # highest candidate node index
    k = 0  # NOTE(review): round counter, never read
    I = np.diag(np.ones(Uf.shape[0]))
    while len(index_set) < S:
        i = -1
        i_best = -1
        old_list = []  # NOTE(review): unused — kept from the original code
        sigma_best = np.inf
        # Scan every node not yet selected and score the candidate sample set.
        while i < n:
            i = i + 1
            if i in index_set:
                continue
            else:
                # Diagonal selection operator for the current sample plus candidate i.
                Ds_list = np.zeros(Uf.shape[0])
                ix = sample + [i]
                Ds_list[ix] = 1
                Ds = np.diag(Ds_list)
                Ds_bar = I - Ds  # projector onto the NON-sampled vertices
                DU = np.dot(Ds_bar, Uf)
                s = np.linalg.svd(DU, compute_uv=False)
                sigma_max = max(s)
                # Keep the candidate with the smallest worst-case residual.
                if sigma_max < sigma_best and sigma_max != -np.inf:
                    sigma_best = sigma_max
                    i_best = i
        k = k + 1
        index_set.add(i_best)
        sample.append(i_best)
    return sample
| {"/main/seattle_train_sggru_semisupervised.py": ["/data/Load_data.py", "/data/Dataloader.py", "/pytorch_gsp/train/train_rnn.py", "/pytorch_gsp/utils/gsp.py", "/pytorch_gsp/models/sggru.py"], "/data/Dataloader.py": ["/pytorch_gsp/utils/gsp.py"], "/pytorch_gsp/models/sggru.py": ["/pytorch_gsp/utils/gsp.py"]} |
661 | gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | /data/Load_data.py | import math
import sys
import time
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import rbf_kernel
def USA_data(directory ):
    """Load the USA temperature graph-signal dataset.

    TODO: include the GSOD dataset.

    Args:
        directory (str): directory containing 'Usa_temp.csv' and 'Adjk10_07-13.npy'.

    Returns:
        (signals DataFrame, adjacency ndarray)
    """
    adjacency = np.load( directory + 'Adjk10_07-13.npy')
    signals = pd.read_csv( directory + 'Usa_temp.csv')
    # Drop the spurious index column pandas sometimes round-trips through CSV.
    if "Unnamed: 0" in signals.columns:
        signals = signals.drop(columns="Unnamed: 0")
    return signals, adjacency
def Seattle_data(directory , binary=False):
    """Load the Seattle loop-detector dataset.

    Data layout follows
    https://github.com/zhiyongc/Graph_Convolutional_LSTM/blob/master/Code_V2/HGC_LSTM%20%26%20Experiments.ipynb

    Args:
        directory (str): directory of the Seattle loop detector dataset.
        binary (bool, optional): keep the binary adjacency (True) or reweight
            it with an RBF kernel of the first 1000 speed snapshots (False).

    Returns:
        speed_matrix: graph signals, time in rows and nodes in columns.
        A: adjacency matrix.
        FFR: list of free-flow reachability matrices (5 to 25 minutes).
    """
    speed_matrix = pd.read_pickle( directory + 'speed_matrix_2015',)
    A = np.load( directory + 'Loop_Seattle_2015_A.npy')
    if not binary:
        # RBF-similarity weighting, normalized by the largest eigenvalue,
        # with self-loops removed.
        A = rbf_kernel(speed_matrix[:1000].T/10) * A
        eigvals, _ = np.linalg.eigh(A)
        A = A / np.max(eigvals)
        A = A - np.diag(A.diagonal())
    horizons = ('5min', '10min', '15min', '20min', '25min')
    FFR = [np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_{}.npy'.format(h))
           for h in horizons]
    return speed_matrix, A, FFR
| {"/main/seattle_train_sggru_semisupervised.py": ["/data/Load_data.py", "/data/Dataloader.py", "/pytorch_gsp/train/train_rnn.py", "/pytorch_gsp/utils/gsp.py", "/pytorch_gsp/models/sggru.py"], "/data/Dataloader.py": ["/pytorch_gsp/utils/gsp.py"], "/pytorch_gsp/models/sggru.py": ["/pytorch_gsp/utils/gsp.py"]} |
662 | gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | /main/seattle_train_sggru_semisupervised.py | import os
import time
import torch
import argparse
import numpy as np
import pandas as pd
import time
from data.Load_data import Seattle_data
from data.Dataloader import *
from pytorch_gsp.train.train_rnn import Evaluate, Train
from pytorch_gsp.utils.gsp import ( greedy_e_opt, spectral_components)
from pytorch_gsp.models.sggru import *
def n_params(model):
    """Return the total number of scalar parameters of a torch module."""
    return np.sum([param.numel() for param in model.parameters()])
print(torch.__version__)
def training_routine(args):
    """Train and evaluate the SGGRU model on the Seattle loop dataset.

    Performs graph sampling (greedy e-optimal or uniform), builds the data
    pipeline, trains the model and saves it plus its losses under
    models_and_losses/.

    Args:
        args: parsed CLI namespace (see the __main__ block for fields).
    """
    # Bug fix: torch.cuda.is_available is a FUNCTION; without the call the
    # bound-method object is always truthy, so 'cuda' was selected even when
    # CUDA is absent and the run crashed later when moving tensors.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if args.device == 'cuda' and device == 'cpu':
        print("cuda is not available, device set to cpu")
    else:
        assert (args.device in ['cpu','cuda'])
        device = args.device
    lr = args.lr
    epochs = args.epochs
    seq_len = args.seq_len
    pred_len = args.pred_len
    patience = args.patience
    name = args.save_name
    speed_matrix, A, FFR = Seattle_data('data/Seattle_Loop_Dataset/') #put seattle Loop dataset in this directory
    N = speed_matrix.shape[1]
    S = int(args.sample_perc*N/100)
    if args.F_perc is None:
        F = int(S/3)
    else:
        F = int(args.F_perc*N/100)
    assert(S>F) # the sampling set must be larger than the spectral support
    #compute gft
    F_list, V = spectral_components(A,np.array(speed_matrix)[:1000] )
    if args.supervised:
        freqs = F_list[:F]
    else:
        freqs = np.arange(0,F,1)
    if args.e_opt:
        print("Using e-optimal greedy algorithm")
        # precomputed samples for the common percentages; greedy search otherwise
        if args.sample_perc == 25:
            sample = np.load( 'data/Seattle_Loop_Dataset/sample_opt25.npy')[0]
        elif args.sample_perc == 50:
            sample = np.load( 'data/Seattle_Loop_Dataset/sample_opt50.npy')[0]
        elif args.sample_perc == 75:
            sample = np.load( 'data/Seattle_Loop_Dataset/sample_opt75.npy')[0]
        else:
            # Bug fix: `Fs` was an undefined name (NameError at runtime); the
            # greedy search runs over the selected spectral support `freqs`.
            sample = greedy_e_opt(V[:,freqs],S)
    else: sample = np.sort(np.random.choice(np.arange(N), S, replace = False))
    S = len(sample)
    pre_time = time.time()
    train, valid, test,max_value = SplitData(speed_matrix.values, label = None, seq_len = 10,
                                             pred_len = 1, train_proportion = 0.7,
                                             valid_proportion = 0.2, shuffle = False)
    pipeline = DataPipeline(sample,V,freqs,seq_len,pred_len)
    train_dataloader = pipeline.fit(train)
    valid_dataloader = pipeline.transform(valid)
    test_dataloader = pipeline.transform(test,sample_label=False,batch_size = test.shape[0]-seq_len-pred_len,shuffle=False)
    print("Preprocessing time:", time.time()-pre_time)
    layer = SpectralGraphForecast(V, sample,freqs, rnn = 'gru')
    if args.supervised:
        sggru = model(V,sample,freqs, layer,l1=0,l2=0.0,supervised = True).to(device)
    else:
        sggru = model(V,sample,freqs, layer,l1=0,l2=0.0,supervised = False).to(device)
    pre_time = time.time()
    print("Total number of nodes: {}".format(N))
    print("Sample size: {}".format(S))
    print("Spectral sample size: {}".format(F))
    print("Initial learning rate: {}".format(lr))
    sggru,sggru_loss= Train(sggru ,train_dataloader, valid_dataloader, epochs = epochs,
                            learning_rate = lr,patience=patience ,sample = sample)
    print("Training time:", time.time()-pre_time)
    pre_time = time.time()
    sggru_test = Evaluate(sggru.to(device), test_dataloader, max_value )
    print("Test time:", time.time()-pre_time)
    # NOTE(review): this overwrites args.save_name read above, so the
    # --save-name option currently has no effect — confirm before changing.
    name = 'sggru'
    loss = (sggru_loss,sggru_test)
    os.makedirs("models_and_losses/", exist_ok=True)
    torch.save(sggru, "models_and_losses/{}.pt".format(name))
    np.save("models_and_losses/{}.npy".format(name),loss)
# CLI entry point: parse training options and launch training_routine().
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Semi-Supervised Prediction\n SeattleLoop dataset \n download link: https://github.com/zhiyongc/Seattle-Loop-Data ')
    parser.add_argument('--epochs', type=int, default = 100, help='maximum number of epochs before stopping training')
    parser.add_argument('--lr', type=float, default = 1e-4, help='starting learn rate' )
    parser.add_argument('--patience', type=int, default = 10, help='number of consecutive non-improving validation loss epochs before stop training')
    parser.add_argument('--sample-perc', type=int, default = 50, help='percentage of in-sample nodes')
    parser.add_argument('--F-perc', type=int, default = None, help='percentage of frequencies to keep in frequency set \mathcal{F}')
    # NOTE(review): --S-perc and --sample-seed are parsed but never read by
    # training_routine (it uses --sample-perc and np.random directly) — confirm.
    parser.add_argument('--S-perc', type=int, default = 50, help='percentage of samples')
    parser.add_argument('--e-opt', action='store_true',help='if sampling is performed by E-optmal greedy algorithm')
    parser.add_argument('--sample-seed',type=int,default=1, help='number of run with uniformely random samples. Only used if --e-opt is False')
    parser.add_argument('--seq-len', type=int,default=10, help='history length')
    parser.add_argument('--pred-len', type=int,default=1, help='prediction horizon')
    parser.add_argument('--save-name', type=str, default='sggru_S50_F53_opt_pred1', help='name of file')
    parser.add_argument('--supervised', action='store_true', help='if training is supervised or semi-supervised. Deafault is semi-supervised')
    parser.add_argument('--device', type=str, default='cuda', help='devices: cuda or cpu')
    args = parser.parse_args()
    training_routine(args)
| {"/main/seattle_train_sggru_semisupervised.py": ["/data/Load_data.py", "/data/Dataloader.py", "/pytorch_gsp/train/train_rnn.py", "/pytorch_gsp/utils/gsp.py", "/pytorch_gsp/models/sggru.py"], "/data/Dataloader.py": ["/pytorch_gsp/utils/gsp.py"], "/pytorch_gsp/models/sggru.py": ["/pytorch_gsp/utils/gsp.py"]} |
663 | gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | /data/Dataloader.py |
import time
import numpy as np
import pandas as pd
import torch
import torch.utils.data as utils
from pytorch_gsp.utils.gsp import complement
def PrepareSequence(data, seq_len = 10, pred_len = 1):
    """Slice a (time, nodes) array into sliding windows and their targets.

    Args:
        data: 2D array, time in rows.
        seq_len: length of each history window.
        pred_len: prediction horizon (steps ahead of the window).

    Returns:
        (X, y): X[i] = data[i:i+seq_len], y[i] is the single time step
        pred_len steps after window i (shape (n_windows, 1, nodes)).
    """
    n_windows = data.shape[0] - seq_len - pred_len
    windows = [data[start:start + seq_len] for start in range(n_windows)]
    targets = [data[start + seq_len + pred_len - 1:start + seq_len + pred_len]
               for start in range(n_windows)]
    return np.asarray(windows), np.asarray(targets)
def SplitData(data, label = None, seq_len = 10, pred_len = 1, train_proportion = 0.7,
              valid_proportion = 0.2, shuffle = False):
    """Normalize `data` by its global max and split it chronologically.

    Splits overlap by `seq_len` rows so sliding-window extraction can start
    exactly at each boundary.

    Args:
        data: 2D array, time in rows (normalized IN PLACE — the caller's
            array is divided by its maximum).
        label: optional aligned label array (NOT normalized).
        seq_len, pred_len: windowing parameters used to offset the splits.
        train_proportion, valid_proportion: chronological split fractions.
        shuffle: accepted for API compatibility but currently unused.

    Returns:
        (train, valid, test, max_value); each split is a (data, label) pair
        when `label` is given.
    """
    max_value = np.max(data)
    data /= max_value  # in-place normalization (see docstring)
    samp_size = data.shape[0]
    if label is not None:
        assert(label.shape[0] == samp_size)
    # (removed an unused `index = np.arange(...)` local)
    train_index = int(np.floor(samp_size * train_proportion))
    valid_index = int(np.floor(samp_size * ( train_proportion + valid_proportion)))
    if label is not None:
        train_data, train_label = data[:train_index+pred_len-1], label[:train_index+pred_len-1]
        valid_data, valid_label = data[train_index-seq_len:valid_index+pred_len-1], label[train_index-seq_len:valid_index+pred_len-1]
        test_data, test_label = data[valid_index-seq_len:], label[valid_index-seq_len:]
        return (train_data, train_label), (valid_data, valid_label), (test_data, test_label), max_value
    else:
        train_data = data[:train_index+pred_len-1]
        valid_data = data[train_index-seq_len:valid_index+pred_len-1]
        test_data = data[valid_index-seq_len:]
        return train_data ,valid_data, test_data, max_value
def Dataloader(data, label, batch_size = 40, suffle = False):
    """Wrap (data, label) arrays in a batched torch DataLoader.

    Note: the `suffle` parameter name (sic) is kept for backward
    compatibility. Incomplete trailing batches are dropped.
    """
    dataset = utils.TensorDataset(torch.Tensor(data), torch.Tensor(label))
    return utils.DataLoader(dataset, batch_size = batch_size, shuffle=suffle, drop_last = True)
def Preprocessing_hop_interp(matrix, A ,sample):
    """One-hop interpolation baseline: replace unknown-node signals with the
    mean of their graph neighbors.

    Args:
        matrix (DataFrame): graph signals, time in rows and nodes in columns.
        A (2D numpy array): adjacency matrix.
        sample (list-like): indices of observed (in-sample) nodes.

    Returns:
        2D numpy array with unknown columns filled in.
    """
    unknown = complement(sample,matrix.shape[1])
    features_unknown = np.copy(matrix.values)
    # Initialize unknown columns with the mean of the first 100 sampled rows.
    features_unknown[:,unknown] = np.mean(matrix.values[:100,sample])
    # NOTE(review): nodes are processed sequentially, so later unknown nodes
    # average over already-interpolated neighbors — order-dependent by design?
    for node in unknown:
        neighbors = np.nonzero(A[node])[0]
        for t in range(features_unknown.shape[0]):
            features_unknown[np.array([t]), np.array([node])] = np.mean(features_unknown[t, neighbors])
    return features_unknown
def MaxScaler(data):
    """Scale `data` by its global maximum; return (max_value, scaled_data)."""
    peak = np.max(data)
    scaled = data / peak
    return peak, scaled
def Preprocessing_GFT(matrix,sample, V , freqs ):
    """Project sampled vertex signals onto the retained GFT basis vectors.

    Args:
        matrix: (time, len(sample)) signals observed on the sampled vertices.
        sample: indices of the sampled vertices in the full graph.
        V: (N, N) GFT basis (eigenvector matrix).
        freqs: indices of the retained spectral components.

    Returns:
        (time, len(freqs)) array of spectral coefficients.
    """
    # Lift the samples back to the full vertex set, zero-padding unobserved nodes.
    lifted = np.zeros((V.shape[0], matrix.shape[0]))
    lifted[sample] = matrix.T
    Vf = V[:, freqs]
    return (Vf.T @ lifted).T
class DataPipeline:
    def __init__(self, sample, V , freqs ,seq_len, pred_len, gft = True):
        """
        DataPipeline: perform the sampling procedure on the graph signals and
        create the dataloader objects.

        Args:
            sample (np array): list of sampled graph node indices.
            V (2D np array): Laplacian eigenvector matrix (GFT basis).
            freqs (np array): list of retained frequency indices.
            seq_len (int): size of each historical window.
            pred_len (int): number of future samples to predict.
            gft (bool, optional): if True, append the GFT coefficients of the
                sampled signals to the input features. Defaults to True.
        """
        self.sample = sample
        self.V = V
        self.freqs = freqs
        self.seq_len = seq_len
        self.pred_len = pred_len
        self.gft = gft

    def _build_dataloader(self, data, sample_label, batch_size, shuffle):
        """Shared implementation for fit()/transform().

        (The two public methods previously carried byte-for-byte duplicated
        bodies; behavior is unchanged.)
        """
        X, y = PrepareSequence(data, seq_len = self.seq_len, pred_len = self.pred_len)
        if self.gft:
            # Spectral features of the sampled signals, windowed identically.
            data_freqs = Preprocessing_GFT(data[:,self.sample],self.sample, self.V , self.freqs )
            X_freqs, _ = PrepareSequence(data_freqs, seq_len = self.seq_len, pred_len = self.pred_len)
            X = np.concatenate((X[:,:,self.sample], X_freqs), axis=-1)
        if sample_label:
            # Keep only the sampled nodes' labels (semi-supervised training).
            y = y.T[self.sample]
            y = y.T
        return Dataloader(X, y, batch_size, shuffle)

    def fit(self,train_data,sample_label = True, batch_size=40, shuffle=True):
        """
        fit: build dataloader for training data.

        Args:
            train_data (numpy array): train data.
            sample_label (bool, optional): if labels should be sampled for
                semi-supervised learning. Defaults to True.
            batch_size (int, optional): batch size. Defaults to 40.
            shuffle (bool, optional): if samples should be shuffled. Defaults to True.

        Returns:
            pytorch Dataloader: train data prepared for training.
        """
        return self._build_dataloader(train_data, sample_label, batch_size, shuffle)

    def transform(self, data, sample_label = True, batch_size=40,shuffle=True):
        """
        transform: build dataloader for validation and test data.

        Args:
            data (numpy array): validation or test data.
            sample_label (bool, optional): if labels should be sampled for
                semi-supervised learning. Defaults to True.
            batch_size (int, optional): batch size. Defaults to 40.
            shuffle (bool, optional): if samples should be shuffled. Defaults to True.

        Returns:
            pytorch Dataloader: data prepared for evaluation.
        """
        return self._build_dataloader(data, sample_label, batch_size, shuffle)
| {"/main/seattle_train_sggru_semisupervised.py": ["/data/Load_data.py", "/data/Dataloader.py", "/pytorch_gsp/train/train_rnn.py", "/pytorch_gsp/utils/gsp.py", "/pytorch_gsp/models/sggru.py"], "/data/Dataloader.py": ["/pytorch_gsp/utils/gsp.py"], "/pytorch_gsp/models/sggru.py": ["/pytorch_gsp/utils/gsp.py"]} |
664 | gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | /main/__init.py | import os
import sys
# Append the repository-level 'data' directory (sibling of this package's
# parent) to sys.path so its modules resolve when scripts here run directly.
current_dir = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
sys.path.append(os.path.join(current_dir, 'data'))
print(sys.path) | {"/main/seattle_train_sggru_semisupervised.py": ["/data/Load_data.py", "/data/Dataloader.py", "/pytorch_gsp/train/train_rnn.py", "/pytorch_gsp/utils/gsp.py", "/pytorch_gsp/models/sggru.py"], "/data/Dataloader.py": ["/pytorch_gsp/utils/gsp.py"], "/pytorch_gsp/models/sggru.py": ["/pytorch_gsp/utils/gsp.py"]} |
665 | gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | /setup.py | from setuptools import setup, find_packages
setup(
    name='Joint-Forecasting-and-Interpolation-of-Graph-Signals-Using-Deep-Learning',
    version='0.1.0',
    author='Gabriela Lewenfus',
    author_email='gabriela.lewenfus@gmail.com',
    packages=find_packages(),
    # NOTE(review): 'numpy>=0.46' looks like a mistyped version pin, and torch
    # (used throughout pytorch_gsp) is absent from install_requires — confirm.
    install_requires = ['scipy>=1.4.1', 'pandas>=0.15', 'scikit-learn>=0.22', 'numpy>=0.46'],
    description='Code from the paper Joint Forecasting and Interpolation of Graph Signals Using Deep Learning',
) | {"/main/seattle_train_sggru_semisupervised.py": ["/data/Load_data.py", "/data/Dataloader.py", "/pytorch_gsp/train/train_rnn.py", "/pytorch_gsp/utils/gsp.py", "/pytorch_gsp/models/sggru.py"], "/data/Dataloader.py": ["/pytorch_gsp/utils/gsp.py"], "/pytorch_gsp/models/sggru.py": ["/pytorch_gsp/utils/gsp.py"]} |
666 | gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | /pytorch_gsp/train/train_rnn.py | ### training code ####
import sys
import time
import numpy as np
import torch
from torch.autograd import Variable
toolbar_width=20
def Train(model, train_dataloader, valid_dataloader, learning_rate = 1e-5, epochs = 300, patience = 10,
          verbose=1, gpu = True, sample = None, optimizer = 'rmsprop'):
    """Train `model` with early stopping on the validation loss.

    Args:
        model: torch module exposing .loss(outputs, y) and .schedule(optimizer).
        train_dataloader, valid_dataloader: batched (inputs, labels) loaders.
        learning_rate: initial learning rate.
        epochs: maximum number of epochs.
        patience: consecutive non-improving validation epochs before stopping.
        verbose: if truthy, print a per-epoch progress bar and losses.
        gpu: if True use 'cuda', else 'cpu'.
        sample: NOTE(review) — accepted but never used in this function.
        optimizer: 'rmsprop' or 'adam'.

    Returns:
        (best_model, [train_losses, valid_losses, epoch_times, valid_times]),
        or (None, None) if the validation loss diverges.
    """
    if optimizer == 'rmsprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr = learning_rate)
    elif optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate )
    # NOTE(review): loss_MSE / loss_L1 are created but never used; training
    # uses model.loss() instead.
    loss_MSE = torch.nn.MSELoss()
    loss_L1 = torch.nn.L1Loss()
    batch_size = train_dataloader.batch_size
    if gpu: device='cuda'
    else: device= 'cpu'
    losses_epochs_train = []
    losses_epochs_valid = []
    time_epochs = []
    time_epochs_val = []
    is_best_model = 0
    patient_epoch = 0
    # model-supplied learning-rate scheduler
    scheduler = model.schedule(optimizer)
    for epoch in range(epochs):
        pre_time = time.time()
        # Probe the dataset size from whichever attribute the dataset exposes.
        # NOTE(review): bare excepts — if the dataset has neither attribute,
        # data_size stays unbound and the next line raises NameError.
        try:
            data_size=train_dataloader.dataset.data_size
        except: pass
        try:
            data_size=train_dataloader.dataset.tensors[0].shape[0]
        except: pass
        n_iter=data_size/train_dataloader.batch_size
        if verbose:
            # draw the empty progress bar for this epoch
            count=0
            checkpoints=np.linspace(0,n_iter,toolbar_width).astype(np.int16)
            text='Epoch {:02d}: '.format(epoch)
            sys.stdout.write(text+"[%s]" % (" " * toolbar_width))
            sys.stdout.flush()
            sys.stdout.write("\b" * (toolbar_width+1))
        losses_train = []
        losses_valid = []  # NOTE(review): re-initialized again before validation below
        for data in train_dataloader:
            inputs, labels = data
            # skip incomplete trailing batches
            if inputs.shape[0] != batch_size:
                continue
            model.zero_grad()
            outputs = model(inputs.to(device))
            outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
            loss_train = model.loss(outputs,y)
            losses_train.append(loss_train.cpu().data.numpy())
            optimizer.zero_grad()
            loss_train.backward()
            optimizer.step()
            if verbose:
                # advance the progress bar at evenly spaced iteration checkpoints
                if count in checkpoints:
                    sys.stdout.write('=')
                    sys.stdout.flush()
                count+=1
        # read the current lr back from the optimizer; only decay above the floor
        for param_group in optimizer.param_groups:
            learning_rate = param_group['lr']
        if learning_rate >1e-5:
            scheduler.step()
        time_epochs.append(time.time()-pre_time)
        pre_time = time.time()
        # validation pass (no gradient updates)
        losses_valid = []
        for data in valid_dataloader:
            inputs, labels = data
            if inputs.shape[0] != batch_size:
                continue
            outputs= model(inputs.to(device))
            outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
            losses_valid.append(model.loss(outputs, y).cpu().data.numpy())
        time_epochs_val.append(time.time()-pre_time)
        losses_epochs_train.append(np.mean(losses_train))
        losses_epochs_valid.append(np.mean(losses_valid))
        avg_losses_epoch_train = losses_epochs_train[-1]
        avg_losses_epoch_valid = losses_epochs_valid[-1]
        # divergence guard
        if avg_losses_epoch_valid >100000000000:
            print("Diverged")
            return (None,None)
        # early-stopping bookkeeping: track the best model by validation loss
        # NOTE(review): best_model aliases model (no deep copy), so it reflects
        # later parameter updates — confirm this is intended.
        if epoch == 0:
            is_best_model = True
            best_model = model
            min_loss = avg_losses_epoch_valid
        else:
            if min_loss - avg_losses_epoch_valid > 1e-6:
                is_best_model = True
                best_model = model
                min_loss = avg_losses_epoch_valid
                patient_epoch = 0
            else:
                is_best_model = False
                patient_epoch += 1
                if patient_epoch >= patience:
                    print('Early Stopped at Epoch:', epoch)
                    break
        if verbose:
            sys.stdout.write("]")
            print(' train loss: {}, valid loss: {}, time: {}, lr: {}'.format( \
                np.around(avg_losses_epoch_train, 6),\
                np.around(avg_losses_epoch_valid, 6),\
                np.around([time_epochs[-1] ] , 2),\
                learning_rate) )
    return best_model, [losses_epochs_train ,
                        losses_epochs_valid ,
                        time_epochs ,
                        time_epochs_val ]
def Evaluate(model, dataloader, scale=1, pred_len = 1, gpu = True):
    """Evaluate a trained model on a dataloader and print/return metrics.

    Computes MAE, RMSE and MAPE over all full batches; ragged trailing
    batches are skipped, mirroring the training loop.

    Args:
        model: trained torch module, called as ``model(inputs)``.
        dataloader: torch DataLoader yielding ``(inputs, labels)`` pairs.
        scale: factor applied to undo input normalisation before metrics.
        pred_len: unused; kept for interface compatibility.
        gpu: request GPU evaluation; honoured only when CUDA is available.

    Returns:
        ``[losses_l1, losses_mse, mean_l1, mean_mape, elapsed_seconds]``
    """
    batch_size = dataloader.batch_size
    pre_time = time.time()
    # BUG FIX: the original overwrote the caller's `gpu` argument with
    # torch.cuda.is_available(), so gpu=False was silently ignored.
    gpu = gpu and torch.cuda.is_available()
    device = 'cuda' if gpu else 'cpu'
    losses_mse = []
    losses_l1 = []
    losses_mape = []
    for inputs, labels in dataloader:
        if inputs.shape[0] != batch_size:
            continue  # skip incomplete final batch
        outputs = model(inputs.to(device))
        outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
        loss_mse = torch.nn.MSELoss()(outputs * scale, y * scale).cpu().data
        loss_l1 = torch.nn.L1Loss()(outputs * scale, y * scale).cpu().data
        outputs = outputs.cpu().data.numpy() * scale
        y = y.cpu().data.numpy() * scale
        # MAPE only over targets with |y| > 1 to avoid exploding
        # percentages near zero targets.
        abs_diff = np.abs(outputs - y)
        abs_y = np.abs(y)
        mask = abs_y > 1
        if mask.any():
            loss_mape = np.mean(abs_diff[mask] / abs_y[mask]) * 100
        else:
            # BUG FIX: np.mean over an empty selection produced NaN (and a
            # RuntimeWarning); report 0 for batches with no valid targets.
            loss_mape = 0.0
        losses_mse.append(loss_mse)
        losses_l1.append(loss_l1)
        losses_mape.append(loss_mape)
    losses_l1 = np.array(losses_l1)
    losses_mse = np.array(losses_mse)
    mean_l1 = np.mean(losses_l1, axis = 0)
    rmse = np.mean(np.sqrt(losses_mse))
    print('Test: MAE: {}, RMSE : {}, MAPE : {}'.format(mean_l1, rmse, np.mean(losses_mape)))
    return [losses_l1, losses_mse, mean_l1, np.mean(losses_mape), time.time()-pre_time]
### modified from https://github.com/zhiyongc/Graph_Convolutional_LSTM/blob/master/Code_V2/HGC_LSTM%20%26%20Experiments.ipynb | {"/main/seattle_train_sggru_semisupervised.py": ["/data/Load_data.py", "/data/Dataloader.py", "/pytorch_gsp/train/train_rnn.py", "/pytorch_gsp/utils/gsp.py", "/pytorch_gsp/models/sggru.py"], "/data/Dataloader.py": ["/pytorch_gsp/utils/gsp.py"], "/pytorch_gsp/models/sggru.py": ["/pytorch_gsp/utils/gsp.py"]} |
667 | gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | /pytorch_gsp/models/sggru.py | import torch.utils.data as utils
import torch.nn.functional as F
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import numpy as np
import pandas as pd
import time
from pytorch_gsp.utils.gsp import (spectral_components, Reconstruction)
class SpectralGraphForecast(nn.Module):
    """
    SpectralGraphForecast

    Runs one recurrent predictor on the spectral coefficients and one on the
    sampled vertex signal, interpolates both back to the full graph when only
    a subset of nodes is sampled, and fuses the two estimates linearly.

    Args:
        V (numpy array): eigenvectors matrix of the graph signal processing
            model (i.e.: Laplacian matrix of the graph)
        sample (numpy array): indices of in-sample nodes
        freqs (numpy array): frequency components to be used in interpolation
        rnn (str, optional): predictive model: lstm, gru, 1dconv. Defaults to 'gru'.
    """

    def __init__(self, V, sample, freqs, rnn='gru'):
        super(SpectralGraphForecast, self).__init__()
        self.N = V.shape[0]   # number of nodes in the entire graph
        self.d = len(freqs)   # number of frequencies
        self.n = len(sample)  # number of sampled nodes
        self.sample = sample
        if rnn == 'gru':
            self.srnn = nn.GRU(self.d, self.d, 1, batch_first=True)
            self.rnn = nn.GRU(self.n, self.n, 1, batch_first=True)
        elif rnn == 'lstm':
            self.srnn = nn.LSTM(self.d, self.d, 1, batch_first=True)
            self.rnn = nn.LSTM(self.n, self.n, 1, batch_first=True)
        elif rnn == '1dconv':
            # BUG FIX: nn.Conv1d has no `batch_first` argument; passing it
            # raised TypeError, so this branch could never be constructed.
            # Conv1d signature is (in_channels, out_channels, kernel_size).
            # NOTE(review): forward() still indexes the output like an RNN
            # ([0][:, -1, :]); the conv path likely needs its own forward
            # handling -- confirm before relying on rnn='1dconv'.
            self.srnn = nn.Conv1d(self.d, self.d, 1)
            self.rnn = nn.Conv1d(self.n, self.n, 1)
        if self.n != self.N:
            # interpolators back to the full N-node graph
            self.interpolate = Reconstruction(V, sample, freqs, domain='spectral')
            self.interpolate2 = Reconstruction(V, sample, freqs, domain='vertex')
        self.linear = nn.Linear(self.N * 2, self.N)

    def forward(self, input):
        # input packs [sampled vertex signal | spectral coefficients]
        # along the feature axis: width n + d.
        x = input[:, :, :self.n]
        x_hat = input[:, :, self.n:]
        bz, seq_len, _ = x.size()
        x_hat = self.srnn(x_hat)[0][:, -1, :]  # last step of spectral RNN
        if self.n != self.N:
            xtilde = self.interpolate(x_hat).unsqueeze(1)
        else:
            xtilde = x_hat.unsqueeze(1)
        x = self.rnn(x)[0][:, -1, :]           # last step of vertex RNN
        if self.n != self.N:
            x1 = self.interpolate2(x)
            x1[:, self.sample] = x             # keep observed nodes exact
        else:
            x1 = x
        x1 = x1.unsqueeze(1)
        x1 = torch.cat((xtilde, x1), dim=1).reshape((bz, self.N * 2))
        return self.linear(x1)
class SpectralGraphForecast2(nn.Module):
    """
    SpectralGraphForecast2: combination of predictive models in both spectral
    and vertex domains, fused by a learned per-node gate.

    Args:
        V (numpy array): eigenvectors matrix of the graph signal processing
            model (i.e.: Laplacian matrix of the graph)
        sample (numpy array): indices of in-sample nodes
        freqs (numpy array): frequency components to be used in interpolation
        rnn (str, optional): predictive model: lstm, gru. Defaults to 'gru'.
    """

    def __init__(self, V, sample, freqs, rnn='gru'):
        super(SpectralGraphForecast2, self).__init__()
        self.N = V.shape[0]
        self.d = len(freqs)
        self.n = len(sample)
        self.sample = sample
        if rnn == 'gru':
            self.srnn = nn.GRU(self.d, self.d, 1, batch_first=True)
            self.rnn = nn.GRU(self.n, self.n, 1, batch_first=True)
        elif rnn == 'lstm':
            self.srnn = nn.LSTM(self.d, self.d, 1, batch_first=True)
            self.rnn = nn.LSTM(self.n, self.n, 1, batch_first=True)
        if self.n != self.N:
            # BUG FIX: domain was misspelled 'sprctral'; the sibling class
            # SpectralGraphForecast uses 'spectral' for the same interpolator.
            self.interpolate = Reconstruction(V, sample, freqs, domain='spectral')
            self.interpolate2 = Reconstruction(V, sample, freqs, domain='vertex')
        # learned gate blending the spectral and vertex estimates
        self.w = Parameter(torch.Tensor(self.N), requires_grad=True)
        self.w.data.fill_(0.01)

    def forward(self, input):
        x = input[:, :, :self.n]
        x_hat = input[:, :, self.n:]
        bz, seq_len, _ = x.size()
        x_hat = self.srnn(x_hat)[0][:, -1, :]
        if self.n != self.N:
            xtilde = self.interpolate(x_hat)
        else:
            xtilde = x_hat
        x = self.rnn(x)[0][:, -1, :]
        if self.n != self.N:
            x1 = self.interpolate2(x)
        else:
            # BUG FIX: x1 was undefined when all nodes are sampled
            # (n == N), raising NameError at the return below.
            x1 = x
        return torch.tanh(self.w) * xtilde + (1 - torch.tanh(self.w)) * x1
class model(nn.Module):
    def __init__(self, V, sample, freqs, layer, supervised=True, l1=0, l2=0, schedule_step=10):
        """
        model: wrapper that trains a SpectralGraphForecast-style layer with an
        optional semi-supervised loss and a step learning-rate schedule.

        Args:
            V (numpy array): eigenvector matrix of the graph signal processing
                model (i.e.: Laplacian matrix of the graph)
            sample (numpy array): indices of in-sample nodes
            freqs (numpy array): frequency components to be used in interpolation
            layer (nn.Module): SpectralGraphForecast layer
            supervised (bool): when False, targets are interpolated to the
                full graph before the MSE is computed.
            l1, l2 (float): weights of the L1/L2 penalties on sampled nodes;
                l1 + l2 must not exceed 1.
            schedule_step (int): epochs between learning-rate halvings.
        """
        super(model, self).__init__()
        self.N = V.shape[0]
        self.d = len(freqs)
        self.n = len(sample)
        self.supervised = supervised
        self.sample = sample
        self.layer = layer
        self.l1 = l1
        self.l2 = l2
        self.schedule_step = schedule_step
        if not supervised:
            self.interpolate = Reconstruction(V, sample, freqs, domain='vertex')

    def forward(self, input):
        return self.layer(input)

    def loss(self, out, y):
        """MSE over the (possibly interpolated) target plus optional
        L1/L2 penalties restricted to the sampled nodes."""
        assert (self.l1 + self.l2 <= 1)
        assert (self.l1 >= 0)
        assert (self.l2 >= 0)
        regularization_loss = 0
        if self.l1 != 0:
            regularization_loss += self.l1 * torch.nn.L1Loss()(y[:, self.sample], out[:, self.sample])
        if self.l2 != 0:
            regularization_loss += self.l2 * torch.norm(y[:, self.sample] - out[:, self.sample])
        if not self.supervised:
            # semi-supervised: build a full-graph target by interpolation,
            # keeping the observed nodes exact
            ys = y
            y = self.interpolate(ys)
            y[:, self.sample] = ys
        return torch.nn.MSELoss()(y, out) + regularization_loss

    def schedule(self, opt):
        """Build a MultiplicativeLR scheduler: halve the LR every
        `schedule_step` epochs until it drops to 1e-5, then hold."""
        for param_group in opt.param_groups:
            learning_rate = param_group['lr']
        if learning_rate > 1e-5:
            # BUG FIX: the interval was hard-coded to 10, silently ignoring
            # the schedule_step argument (model2.schedule honours it).
            lamb = lambda epoch: 0.5 if epoch % self.schedule_step == 0 else 1
        else:
            lamb = lambda epoch: 1  # hold the learning rate
        return torch.optim.lr_scheduler.MultiplicativeLR(opt, lr_lambda=[lamb])
class model2(nn.Module):
    """
    model2: runs a predictive layer on the sampled signal and interpolates
    the result back to the full graph.

    Args:
        V (numpy array): eigenvector matrix of the graph signal processing
            model (i.e.: Laplacian matrix of the graph)
        sample (numpy array): indices of in-sample nodes
        freqs (numpy array): frequency components to be used in interpolation
        layer (nn.Module): predictive layer
        l1, l2 (float): optional penalty weights on the sampled nodes
        schedule_step (int): epochs between learning-rate halvings
        supervised (bool): when False, targets are interpolated before MSE
        unsqueeze (bool): add a channel dimension before calling the layer
    """

    def __init__(self, V, sample, freqs, layer, l1=0, l2=0, schedule_step=10, supervised=True, unsqueeze=False):
        super(model2, self).__init__()
        self.N = V.shape[0]
        self.d = len(freqs)
        self.n = len(sample)
        self.supervised = supervised
        self.sample = sample
        self.unsqueeze = unsqueeze
        self.layer = layer
        self.l1 = l1
        self.l2 = l2
        self.schedule_step = schedule_step
        self.interpolate2 = Reconstruction(V, sample, freqs, domain='vertex')
        if not supervised:
            self.interpolate = Reconstruction(V, sample, freqs, domain='vertex')
        # NOTE(review): self.linear is created but never used in forward().
        self.linear = torch.nn.Linear(self.N, self.N)

    def forward(self, input):
        bz, seq_len, N = input.size()
        # BUG FIX: the unsqueezed tensor was computed and then discarded --
        # the layer was always invoked on the raw input regardless of
        # self.unsqueeze.
        x = input
        if self.unsqueeze:
            x = x.unsqueeze(dim=1)
        x = self.layer(x)
        if N < self.N:
            # interpolate to the full graph, keeping observed nodes exact
            x1 = self.interpolate2(x)
            x1[:, self.sample] = x
        else:
            x1 = x
        return x1

    def loss(self, out, y):
        """MSE over the (possibly interpolated) target plus optional
        L1/L2 penalties restricted to the sampled nodes."""
        # CONSISTENCY FIX: model.loss allows l1 + l2 == 1; use the same bound.
        assert (self.l1 + self.l2 <= 1)
        assert (self.l1 >= 0)
        assert (self.l2 >= 0)
        regularization_loss = 0
        if self.l1 != 0:
            regularization_loss += self.l1 * torch.nn.L1Loss()(y[:, self.sample], out[:, self.sample])
        if self.l2 != 0:
            regularization_loss += self.l2 * torch.norm(y[:, self.sample] - out[:, self.sample])
        if not self.supervised:
            ys = y
            y = self.interpolate(ys)
            y[:, self.sample] = ys
        return torch.nn.MSELoss()(y, out) + regularization_loss

    def schedule(self, opt):
        """Build a MultiplicativeLR scheduler: halve the LR every
        `schedule_step` epochs until it drops to 1e-5, then hold."""
        for param_group in opt.param_groups:
            learning_rate = param_group['lr']
        if learning_rate > 1e-5:
            lamb = lambda epoch: 1 / 2 if epoch % self.schedule_step == 0 else 1
        else:
            lamb = lambda epoch: 1  # hold the learning rate
        return torch.optim.lr_scheduler.MultiplicativeLR(opt, lr_lambda=[lamb])
| {"/main/seattle_train_sggru_semisupervised.py": ["/data/Load_data.py", "/data/Dataloader.py", "/pytorch_gsp/train/train_rnn.py", "/pytorch_gsp/utils/gsp.py", "/pytorch_gsp/models/sggru.py"], "/data/Dataloader.py": ["/pytorch_gsp/utils/gsp.py"], "/pytorch_gsp/models/sggru.py": ["/pytorch_gsp/utils/gsp.py"]} |
677 | AntLouiz/DatapathWay | refs/heads/master | /li.py | # Intruçoes que o programa reconhece
# Bit patterns mapped to the instruction mnemonics the simulator understands.
# For I-type instructions (sw, lw) the key is the 6-bit opcode field; for
# R-type instructions (add, sub, or, and) it is the 6-bit funct field
# (see MipsInstruction.get_func in instructions.py).
FUNCTIONS = {
    '101011': 'sw',
    '100011': 'lw',
    '100000': 'add',
    '100010': 'sub',
    '100101': 'or',
    '100100': 'and'
}
| {"/logic.py": ["/utils.py"], "/core.py": ["/memory.py", "/logic.py", "/instructions.py", "/control.py"], "/control.py": ["/utils.py"], "/instructions.py": ["/li.py", "/utils.py"], "/memory.py": ["/utils.py"], "/main.py": ["/core.py"]} |
def to_integer(binary_number):
    """Convert a binary string (e.g. "101") to its integer value.

    Raises:
        TypeError: if *binary_number* is not a string. (TypeError is a
            subclass of Exception, so existing ``except Exception``
            callers still work; previously a bare ``Exception()`` with
            no message was raised.)
    """
    if not isinstance(binary_number, str):
        raise TypeError("binary_number must be a str, got %r" % type(binary_number))
    return int(binary_number, 2)
def to_binary(number):
    """Convert a non-negative int to its binary string (no '0b' prefix).

    Raises:
        TypeError: if *number* is not an int. (TypeError is a subclass of
            Exception, so existing ``except Exception`` callers still work.)
    """
    if not isinstance(number, int):
        raise TypeError("number must be an int, got %r" % type(number))
    return "{:0b}".format(number)
def extend_to_bits(binary_number, bits = 32):
    """Left-pad a binary string with zeros up to *bits* characters.

    Returns None when the input is not a string (callers rely on this
    sentinel rather than an exception).
    """
    if not isinstance(binary_number, str):
        return None
    padding = "0" * (bits - len(binary_number))
    return "{}{}".format(padding, binary_number)
def to_binaryC2(number, bits = 32):
    """Encode an int as a two's-complement binary string of *bits* width."""
    if not isinstance(number, int):
        raise Exception()
    if number < 0:
        # two's complement of a negative value: represent 2**bits + number
        number = 2 ** bits + number
    return extend_to_bits(to_binary(number), bits)
def to_decimalC2(binary_number):
    """Decode a two's-complement binary string into a signed int.

    Returns None when the input is not a string.
    """
    if not isinstance(binary_number, str):
        return None
    value = int(binary_number, 2)
    if binary_number[0] == '0':
        return value  # sign bit clear: value is already non-negative
    # sign bit set: subtract 2**width to recover the negative value
    return value - 2 ** len(binary_number)
679 | AntLouiz/DatapathWay | refs/heads/master | /logic.py | from utils import (
extend_to_bits,
to_binary,
to_integer,
to_binaryC2,
to_decimalC2
)
class ALU:
    """Arithmetic/logic unit operating on two's-complement binary strings.

    All operands and results are 32-bit binary strings (makeNot preserves
    the operand's original width). Overflow is reported on stdout only;
    the wrapped result is still returned.
    """

    def makeSum(self, a, b):
        """Return a + b as a 32-bit two's-complement string; warn on overflow."""
        total = to_decimalC2(a) + to_decimalC2(b)
        if not (-(2 ** 31) <= total <= 2 ** 31 - 1):
            print("{}OVERFLOW OCURRENCE{}".format("-" * 20, "-" * 7))
        return to_binaryC2(total)

    def makeSub(self, a, b):
        """Return a - b as a 32-bit two's-complement string; warn on overflow."""
        difference = to_decimalC2(a) - to_decimalC2(b)
        if not (-(2 ** 31) <= difference <= 2 ** 31 - 1):
            print("{}OVERFLOW OCURRENCE".format("-" * 26))
        return to_binaryC2(difference)

    def makeAnd(self, a, b):
        """Bitwise AND of two binary strings, zero-extended to 32 bits."""
        anded = int(a, 2) & int(b, 2)
        return extend_to_bits(to_binary(anded))

    def makeOr(self, a, b):
        """Bitwise OR of two binary strings, zero-extended to 32 bits."""
        ored = int(a, 2) | int(b, 2)
        return extend_to_bits(to_binary(ored))

    def makeNot(self, a):
        """Bitwise NOT, keeping the operand's original bit width."""
        width = len(a)
        return to_binaryC2(~to_decimalC2(a), width)
| {"/logic.py": ["/utils.py"], "/core.py": ["/memory.py", "/logic.py", "/instructions.py", "/control.py"], "/control.py": ["/utils.py"], "/instructions.py": ["/li.py", "/utils.py"], "/memory.py": ["/utils.py"], "/main.py": ["/core.py"]} |
680 | AntLouiz/DatapathWay | refs/heads/master | /core.py | from memory import RegistersBank, Memory
from logic import ALU
from instructions import PC
from control import (
ControlSw,
ControlLw,
ControlAdd,
ControlSub,
ControlAnd,
ControlOr,
)
class CPU:
    """Wires together the datapath components (ALU, program counter,
    register bank, memory) and dispatches each decoded instruction to
    its control unit."""

    def __init__(self):
        self.alu = ALU()
        self.pc = PC()
        self.registers = RegistersBank()
        self.memory = Memory()
        # one control unit per supported mnemonic
        handlers = (
            ('add', ControlAdd),
            ('sub', ControlSub),
            ('and', ControlAnd),
            ('or', ControlOr),
            ('lw', ControlLw),
            ('sw', ControlSw),
        )
        self.control_types = {name: ctrl(self) for name, ctrl in handlers}

    def execute(self):
        """Run every instruction streamed by the program counter."""
        for instruction in self.pc.get_instructions():
            handler = self.control_types[instruction.get_func()]
            handler.execute()
| {"/logic.py": ["/utils.py"], "/core.py": ["/memory.py", "/logic.py", "/instructions.py", "/control.py"], "/control.py": ["/utils.py"], "/instructions.py": ["/li.py", "/utils.py"], "/memory.py": ["/utils.py"], "/main.py": ["/core.py"]} |
681 | AntLouiz/DatapathWay | refs/heads/master | /control.py | import abc
from utils import to_integer, to_decimalC2
class BaseControl(abc.ABC):
    """Abstract base for per-instruction control units.

    Holds a back reference to the CPU whose components (ALU, registers,
    memory, PC) the concrete ``execute`` implementations drive.
    """

    def __init__(self, cpu):
        self.cpu = cpu

    @abc.abstractmethod
    def execute(self):
        pass
class ControlAdd(BaseControl):
    """R-type ADD: reads rs and rt, sums them in the ALU and writes the
    result to rd, tracing every datapath step to stdout."""

    def execute(self):
        # the PC exposes the instruction currently being executed
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        print(instruction)
        rd = registers['rd']
        rs = registers['rs']
        print("Read the register 1: {}{}[{}]".format(rs, ' '*25, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2: {}{}[{}]".format(rt, ' '*25, to_integer(rt)))
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1, ))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2, ))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        alu_result = self.cpu.alu.makeSum(register_data1, register_data2)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        self.cpu.registers.set_value(rd, alu_result)
        print("Write data: {}".format(alu_result, ))
        print("Write register: {}{}[{}]".format(rd, ' '*30, to_integer(rd)))
        print("{}".format("-" * 64))
        print("\n\n")
class ControlSub(BaseControl):
    """R-type SUB: reads rs and rt, subtracts rt from rs in the ALU and
    writes the result to rd, tracing every datapath step to stdout."""

    def execute(self):
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        print(instruction)
        rd = registers['rd']
        rs = registers['rs']
        print("Read the register 1: {}{}[{}]".format(rs, ' '*25, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2: {}{}[{}]".format(rt, ' '*25, to_integer(rt)))
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        alu_result = self.cpu.alu.makeSub(register_data1, register_data2)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        self.cpu.registers.set_value(rd, alu_result)
        print("Write data: {}".format(alu_result))
        print("Write register: {}{}[{}]".format(rd, ' '*30, to_integer(rd)))
        print("{}".format("-" * 64))
        print("\n\n")
class ControlAnd(BaseControl):
    """R-type AND: bitwise-ANDs rs and rt in the ALU and writes the
    result to rd, tracing every datapath step to stdout."""

    def execute(self):
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        print(instruction)
        rd = registers['rd']
        rs = registers['rs']
        print("Read the register 1: {}{}[{}]".format(rs, ' '*25, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2: {}{}[{}]".format(rt, ' '*25, to_integer(rt)))
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        alu_result = self.cpu.alu.makeAnd(register_data1, register_data2)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        self.cpu.registers.set_value(rd, alu_result)
        print("Write data: {}".format(alu_result))
        print("Write register: {}{}[{}]".format(rd, ' '*30, to_integer(rd)))
        print("{}".format("-" * 64))
        print("\n\n")
class ControlOr(BaseControl):
    """R-type OR: bitwise-ORs rs and rt in the ALU and writes the
    result to rd, tracing every datapath step to stdout."""

    def execute(self):
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        print(instruction)
        rd = registers['rd']
        rs = registers['rs']
        print("Read the register 1: {}{}[{}]".format(rs, ' '*25, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2: {}{}[{}]".format(rt, ' '*25, to_integer(rt)))
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        alu_result = self.cpu.alu.makeOr(register_data1, register_data2)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        self.cpu.registers.set_value(rd, alu_result)
        print("Write data: {}".format(alu_result))
        print("Write register: {}{}[{}]".format(rd, ' '*30, to_integer(rd)))
        print("{}".format("-" * 64))
        print("\n\n")
class ControlLw(BaseControl):
    """I-type LW: computes the address rs + offset in the ALU, reads that
    memory word and writes it into rt, tracing every step to stdout."""

    def execute(self):
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        offset = instruction.get_offset()  # 16-bit offset zero-extended to 32 bits
        print(instruction)
        rt = registers['rt']
        rs = registers['rs']
        print("Read the register 1:{}{}{}[{}]".format(' '*20, rs, ' '*6, to_integer(rs)))
        register_data = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data))
        print("ALU-in-1: {}{}[{}]".format(register_data, ' '*6, to_decimalC2(register_data)))
        print("ALU-in-2: {}{}[{}]".format(offset, ' '*6, to_decimalC2(offset)))
        # effective address = base register + offset (32-bit binary string,
        # matching the keys used by Memory)
        alu_result = self.cpu.alu.makeSum(register_data, offset)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        print("Address: {}".format(alu_result))
        memory_data = self.cpu.memory.get_value(alu_result)
        print("Read data: {}".format(memory_data))
        self.cpu.registers.set_value(rt, memory_data)
        print("Write data: {}{}[{}]".format(memory_data, ' '*6, to_decimalC2(memory_data)))
        print("Write register:{}{}{}[{}]".format(' '*25, rt, ' '*6, to_integer(rt)))
        print("{}".format("-" * 64))
        print("\n\n")
class ControlSw(BaseControl):
    """I-type SW: computes the address rs + offset in the ALU and stores
    the contents of rt at that memory word, tracing every step to stdout."""

    def execute(self):
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        offset = instruction.get_offset()  # 16-bit offset zero-extended to 32 bits
        print(instruction)
        rs = registers['rs']
        print("Read the register 1:{}{}{}[{}]".format(' '*20, rs, ' '*6, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2:{}{}{}[{}]".format(' '*20, rt, ' '*6, to_integer(rt)))
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(offset, ' '*6, to_decimalC2(offset)))
        # effective address = base register + offset
        alu_result = self.cpu.alu.makeSum(register_data1, offset)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        print("Address: {}".format(alu_result))
        self.cpu.memory.set_value(alu_result, register_data2)
        print("Write data: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        print("{}".format("-" * 64))
        print("\n\n")
| {"/logic.py": ["/utils.py"], "/core.py": ["/memory.py", "/logic.py", "/instructions.py", "/control.py"], "/control.py": ["/utils.py"], "/instructions.py": ["/li.py", "/utils.py"], "/memory.py": ["/utils.py"], "/main.py": ["/core.py"]} |
682 | AntLouiz/DatapathWay | refs/heads/master | /instructions.py | from li import FUNCTIONS
from utils import extend_to_bits
class MipsInstruction:
    """Decodes one 32-bit MIPS machine word (a binary string) into fields.

    R-type (opcode 000000): op | rs | rt | rd | shamt | funct
    I-type (lw/sw):         op | rs | rt | 16-bit offset
    """

    op = None
    rs = None
    rt = None
    rd = None
    shamt = None
    func = None
    offset = None
    instruction_type = None
    instruction = None

    def __init__(self, instruction):
        # BUG FIX: the original guard was
        #   `not (isinstance(instruction, str) or len(instruction) == 32)`
        # which accepted any string of any length (and any non-string of
        # length 32). The intent is a 32-bit binary string, possibly
        # newline-terminated when read from a file.
        if not isinstance(instruction, str):
            raise Exception()
        self.instruction = instruction.replace('\n', '')
        if len(self.instruction) != 32:
            raise Exception()
        self.op = self.instruction[:6]
        # opcode 000000 marks an R-type instruction
        if self.op == '000000':
            self._configure_to_registers()
        else:
            self._configure_to_imediate()

    def _configure_to_imediate(self):
        # I-type layout: 6-bit op, 5-bit rs, 5-bit rt, 16-bit offset
        self.instruction_type = 'I'
        self.rs = self.instruction[6:11]
        self.rt = self.instruction[11:16]
        self.offset = self.instruction[16:32]
        return self.instruction

    def _configure_to_registers(self):
        # R-type layout: 6-bit op, 5-bit rs/rt/rd/shamt, 6-bit funct
        self.instruction_type = 'R'
        self.rs = self.instruction[6:11]
        self.rt = self.instruction[11:16]
        self.rd = self.instruction[16:21]
        self.shamt = self.instruction[21:26]
        self.func = self.instruction[26:32]
        return self.instruction

    def has_offset(self):
        """Only I-type instructions carry an offset field."""
        if self.instruction_type == 'R':
            return False
        return True

    def get_type(self):
        """Return 'R' or 'I'."""
        return self.instruction_type

    def get_function(self):
        """Return the raw 6-bit funct field (None for I-type)."""
        return self.func

    def get_registers(self):
        """Return the raw 5-bit register fields (rd is None for I-type)."""
        registers = {
            'rs': self.rs,
            'rt': self.rt,
            'rd': self.rd
        }
        return registers

    def get_offset(self):
        """Return the offset zero-extended to 32 bits, or None for R-type."""
        if not self.has_offset():
            return None
        return extend_to_bits(self.offset)

    def get_func(self):
        """Return the mnemonic: I-type keys FUNCTIONS by opcode, R-type by funct."""
        if self.op != '000000':
            return FUNCTIONS[self.op]
        return FUNCTIONS[self.func]

    def __repr__(self):
        representation = "-" * 64
        representation += \
            "\nInstruction: {}\nType: {}\nOperation: {}\n".format(
                self.instruction,
                self.instruction_type,
                self.get_func()
            )
        representation += "-" * 64
        return representation
class PC:
    """Program counter: streams MipsInstruction objects from a text file
    containing one 32-bit binary word per line."""

    def __init__(self, filename="instructions_file.txt"):
        # NOTE(review): the file handle is never closed explicitly; it
        # lives for the lifetime of the PC object.
        self.file = open(filename, 'r')
        # the instruction currently being executed (read by the control units)
        self.next_instruction = None

    def get_instructions(self):
        """
        Return a mips instruction object
        for each instruction in the file
        """
        for instruction in self.file.readlines():
            # BUG FIX (dead code): the original if/else performed the
            # identical assignment in both branches.
            self.next_instruction = MipsInstruction(instruction)
            yield self.next_instruction
| {"/logic.py": ["/utils.py"], "/core.py": ["/memory.py", "/logic.py", "/instructions.py", "/control.py"], "/control.py": ["/utils.py"], "/instructions.py": ["/li.py", "/utils.py"], "/memory.py": ["/utils.py"], "/main.py": ["/core.py"]} |
683 | AntLouiz/DatapathWay | refs/heads/master | /memory.py | import random
from utils import to_binary, extend_to_bits, to_binaryC2
class BaseMemory:
    """Dictionary-backed store mapping binary-string addresses to values."""

    def __init__(self):
        self.data = {}

    def set_value(self, address, value):
        """Store *value* under *address*; always returns True."""
        self.data[address] = value
        return True

    def get_value(self, address):
        """Look up the value stored under *address* (KeyError if absent)."""
        return self.data[address]
class RegistersBank(BaseMemory):
    # Class-level dict shared by every instance (Monostate/Borg pattern:
    # __new__ replaces each instance's __dict__ with this shared mapping,
    # so all RegistersBank objects see the same register state).
    data = {}

    def __new__(cls, *args, **kwargs):
        """
        Make the BaseMemory a Monostate class
        """
        obj = super(RegistersBank, cls).__new__(cls, *args, **kwargs)
        obj.__dict__ = cls.data
        return obj

    def __init__(self):
        # Initialise all 32 MIPS registers, keyed by their 5-bit binary id.
        total_registers = 2**5
        for i in range(total_registers):
            binary_number = to_binary(i)
            # left-pad the register id to exactly 5 bits
            if len(binary_number) < 5:
                zero_fill = 5 - len(binary_number)
                binary_number = "{}{}".format(
                    "0" * zero_fill,
                    binary_number
                )
            if i == 8:
                # register 8 ($t0) is pre-loaded with the value 16 --
                # presumably a fixture for the demo program; confirm.
                self.data[binary_number] = extend_to_bits(to_binary(16))
            else:
                # all other registers start uninitialised (False sentinel)
                self.data[binary_number] = False
class Memory(BaseMemory):
    # Shared class-level storage (Monostate pattern, same as RegistersBank).
    data = {}

    def __new__(cls, *args, **kwargs):
        """
        Make the BaseMemory a Monostate class
        """
        obj = super(Memory, cls).__new__(cls, *args, **kwargs)
        obj.__dict__ = cls.data
        return obj

    def __init__(self):
        # Fill 256 addresses (keyed by 32-bit binary strings) with random
        # 32-bit two's-complement values.
        total_data = 2**8
        for i in range(total_data):
            binary_number = to_binary(i)
            # NOTE(review): the assignment above is immediately overwritten
            # below and appears to be dead code.
            binary_number = extend_to_bits(to_binary(i))
            random_number = to_binaryC2(
                random.randint(-(2**31), (2**31) - 1)
            )
            self.data[binary_number] = random_number
| {"/logic.py": ["/utils.py"], "/core.py": ["/memory.py", "/logic.py", "/instructions.py", "/control.py"], "/control.py": ["/utils.py"], "/instructions.py": ["/li.py", "/utils.py"], "/memory.py": ["/utils.py"], "/main.py": ["/core.py"]} |
684 | AntLouiz/DatapathWay | refs/heads/master | /main.py | from core import CPU
# Entry point: build the datapath (registers, memory, ALU, PC) and run
# the instruction file end to end.
if __name__ == "__main__":
    CPU().execute()
| {"/logic.py": ["/utils.py"], "/core.py": ["/memory.py", "/logic.py", "/instructions.py", "/control.py"], "/control.py": ["/utils.py"], "/instructions.py": ["/li.py", "/utils.py"], "/memory.py": ["/utils.py"], "/main.py": ["/core.py"]} |
686 | yueyoum/smoke | refs/heads/master | /test/mail_exception_test.py | import sys
from wsgiref.simple_server import make_server
sys.path.append('..')
from app import App
from smoke.exceptions import EmailExceptionMiddleware
def exception_func_1():
    # First hop of a deliberately three-deep call chain so the emailed
    # traceback shows several frames.
    return exception_func_2()
def exception_func_2():
    # Middle hop of the call chain (see exception_func_1).
    return exception_func_3()
def exception_func_3():
    # Deliberately raises ZeroDivisionError to exercise the
    # exception-handling middleware.
    return 1 / 0
# Wrap the demo WSGI app (whose hook raises ZeroDivisionError) so the
# exception is reported by the middleware and rendered as a 500 page
# (smoke_html=True).
# NOTE(review): to_address is an empty list and smtp_server points at
# localhost -- the mail send is expected to no-op/fail in this manual test.
app = EmailExceptionMiddleware(
    App(exception_func_1),
    smoke_html=True,
    to_address=[],
    smtp_server='127.0.0.1'
)

# Serve forever on localhost:8000; hit it with a browser to trigger the test.
server = make_server('127.0.0.1', 8000, app)
server.serve_forever()
| {"/test/mail_exception_test.py": ["/smoke/exceptions.py"], "/smoke/exceptions.py": ["/smoke/functional/__init__.py"]} |
class App(object):
    """Tiny WSGI application that renders the request environ as an HTML
    table. An optional hook is invoked after the response has started
    (used to trigger exceptions in middleware tests)."""

    def __init__(self, hook_func=None):
        self.hook_func = hook_func

    def __call__(self, environ, start_response):
        template = """<html>
<body><table>{0}</table></body>
</html>"""
        row_template = """<tr><td>{0}</td><td>{1}</td></tr>"""
        # one table row per environ entry, sorted by key
        rows = ''.join(row_template.format(key, val)
                       for key, val in sorted(environ.items()))
        body = template.format(rows)
        start_response('200 OK', [
            ('Content-Type', 'text/html'),
            ('Content-Length', str(len(body)))
        ])
        if self.hook_func:
            self.hook_func()
        return [body]
if __name__ == '__main__':
    # Manual smoke test: serve exactly one request on localhost:8000, then exit.
    from wsgiref.simple_server import make_server
    server = make_server('127.0.0.1', 8000, App())
    server.handle_request()
| {"/test/mail_exception_test.py": ["/smoke/exceptions.py"], "/smoke/exceptions.py": ["/smoke/functional/__init__.py"]} |
688 | yueyoum/smoke | refs/heads/master | /smoke/exceptions.py | # -*- coding: utf-8 -*-
import sys
import traceback
class ExceptionMiddleware(object):
    """WSGI middleware that intercepts exceptions raised by the wrapped app,
    hands them to ``handle_exception``, and then either re-raises (default)
    or renders an HTML 500 page (when ``smoke_html`` is set)."""

    def __init__(self, wrap_app, smoke_html=False):
        # wrap_app: the inner WSGI application
        # smoke_html: when True, swallow the exception and return an HTML
        #   500 response instead of propagating to the server
        self.wrap_app = wrap_app
        self.smoke_html = smoke_html

    def __call__(self, environ, start_response):
        try:
            return self.wrap_app(environ, start_response)
        except:
            # NOTE(review): bare except also traps SystemExit and
            # KeyboardInterrupt -- confirm that is intended.
            tb_exc = traceback.format_exc()
            exc_info = sys.exc_info()
            self.handle_exception(tb_exc, exc_info)
            if not self.smoke_html:
                raise  # propagate after notification
            status = '500 Internal Server Error'
            # exc_info is passed so a server that has already started the
            # response can replace it (PEP 3333).
            start_response(
                status,
                [('Content-Type', 'text/html')],
                exc_info
            )
            # Render the traceback as HTML. NOTE(review): the second
            # replace argument is presumably meant to be a non-breaking
            # space entity -- verify the literal survived encoding.
            tb_exc = tb_exc.replace('\n', '<br/>').replace(' ', ' ')
            html = """<html>
<head><title>%s</title></head>
<body>
<h1>%s</h1>
<p>%s</p>
</body>
</html>
""" % (status, status, tb_exc)
            return [html]

    def handle_exception(self, tb_exc, exc_info):
        # Subclasses decide how to report the failure (e.g. email).
        raise NotImplementedError
class EmailExceptionMiddleware(ExceptionMiddleware):
    """This is an example. In production it is better not to send emails in
    sync mode: sending email may be slow and will block your web app.
    The best practice is to write your own EmailExceptionMiddleware whose
    handle_exception method does not send mail directly -- hand the report
    to an MQ or something similar instead.
    """

    def __init__(self,
                 wrap_app,
                 smoke_html=False,
                 from_address=None,
                 to_address=None,
                 smtp_server=None,
                 smtp_port=25,
                 smtp_username=None,
                 smtp_password=None,
                 mail_subject_prefix=None,
                 mail_template=None):
        # to_address must be a list/tuple of recipients and smtp_server is
        # mandatory; everything else is optional.
        assert isinstance(to_address, (list, tuple)) and smtp_server is not None, "Email Config Error"
        self.from_address = from_address
        self.to_address = to_address
        self.smtp_server = smtp_server
        self.smtp_port = smtp_port
        self.smtp_username = smtp_username
        self.smtp_password = smtp_password
        self.mail_subject_prefix = mail_subject_prefix
        # NOTE(review): mail_template is stored but never used in this class.
        self.mail_template = mail_template
        super(EmailExceptionMiddleware, self).__init__(wrap_app, smoke_html=smoke_html)

    def handle_exception(self, tb_exc, exc_info):
        # Imported lazily -- presumably to avoid a hard dependency at
        # module import time; confirm.
        from smoke.functional import send_mail
        # Sends the formatted traceback as an HTML-bodied email.
        send_mail(
            self.smtp_server,
            self.smtp_port,
            self.smtp_username,
            self.smtp_password,
            self.from_address,
            self.to_address,
            '{0} Error Occurred'.format(self.mail_subject_prefix if self.mail_subject_prefix else ''),
            tb_exc,
            'html'
        )
| {"/test/mail_exception_test.py": ["/smoke/exceptions.py"], "/smoke/exceptions.py": ["/smoke/functional/__init__.py"]} |
689 | yueyoum/smoke | refs/heads/master | /smoke/functional/__init__.py | from mail import send_mail
| {"/test/mail_exception_test.py": ["/smoke/exceptions.py"], "/smoke/exceptions.py": ["/smoke/functional/__init__.py"]} |
690 | Sprunth/TFO2ReelLogger | refs/heads/master | /db.py | import os.path
import sqlite3
import Scraper
import sys
def create_db():
    """Create reellog.db in the working directory with the reellog table.

    The uniqueness constraint spans every column, so identical catches
    are stored only once (duplicate inserts raise IntegrityError).
    """
    connection = sqlite3.connect('reellog.db')
    connection.execute('''CREATE TABLE reellog
(lure text, body text, location text, species text, level integer, weight real, class text,
unique(lure, body, location, species, level, weight, class))''')
    connection.commit()
    connection.close()
def sample_db_entry():
    """Insert one hard-coded example catch into reellog.db (dev helper)."""
    scrape_data = "'Culprit Worm', 'Amazon River', 'Baia de Santa Rosa', 'Matrincha', '6', '0.062', 'Wimpy III'"
    connection = sqlite3.connect('reellog.db')
    connection.execute("INSERT INTO reellog VALUES (%s)" % scrape_data)
    connection.commit()
    connection.close()
def parse_and_store(html_file_path):
    """Scrape catches from the exported HTML reel log and merge them into
    reellog.db, skipping rows already present (unique constraint)."""
    conn = sqlite3.connect('reellog.db')
    c = conn.cursor()
    c.execute("SELECT COUNT(*) from reellog")
    (old_entry_count, ) = c.fetchone()
    # Scraper.scrape returns one pre-quoted SQL value-list string per catch.
    to_write = Scraper.scrape(html_file_path)
    for row in to_write:
        # NOTE(review): the INSERT is built by string interpolation from
        # scraped values -- acceptable for trusted local exports, but not
        # safe against untrusted input (SQL injection / quoting breakage).
        command = "INSERT INTO reellog VALUES (%s)" % row
        try:
            c.execute(command)
            print('+ %s' % row)  # newly inserted
        except sqlite3.IntegrityError:
            print('= %s' % row)  # duplicate: unique constraint hit
    conn.commit()
    c.execute("SELECT COUNT(*) from reellog")
    (new_entry_count,) = c.fetchone()
    conn.close()
    print("%i new entries added" % (int(new_entry_count) - int(old_entry_count)))
# CLI entry: `python db.py <reel_log.html>` -- creates the database on
# first run, then merges the scraped catches into it.
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Need one argument: path to html_file", file=sys.stderr)
        sys.exit(1)
    if not os.path.isfile('reellog.db'):
        print('No reellog.db found, creating')
        create_db()
    parse_and_store(sys.argv[1])
    # sample_db_entry()
    print('Done')
| {"/db.py": ["/Scraper.py"]} |
691 | Sprunth/TFO2ReelLogger | refs/heads/master | /Scraper.py | from bs4 import BeautifulSoup
from pprint import pprint
from functools import reduce
import sys
def scrape(html_file_path):
    """Parse the TFO2 reel-log HTML export and return one pre-quoted SQL
    value-list string per catch: lure, body of water, location, species,
    level, weight (lb) and class."""
    soup = BeautifulSoup(open(html_file_path), 'html.parser')
    rows = soup.find_all('tr')
    commands = list()
    for row in rows[1:]:  # rows[0] is assumed to be the header row
        cols = row.find_all('td')
        lure_string = list(cols[0].descendants)[0]
        lure = lure_string.text
        body_of_water = cols[1].string
        location = cols[2].string
        fish_string = cols[3]
        fish_type = fish_string.font.string
        fish_level = fish_string.find('font').text
        # assumes cols[4] holds size/weight/class fragments as <font> tags
        # -- TODO confirm against a current export
        size_strings = list(map(lambda x: x.string, cols[4].find_all('font')))
        # locate the fragment carrying the weight ("<value> lb ...")
        weight_idx = -1
        for idx in range(len(size_strings)):
            if 'lb' in size_strings[idx]:
                weight_idx = idx
                break
        weight = size_strings[weight_idx].split()[0]
        # everything after the weight fragment is the class name
        fish_class = reduce(lambda x, y: "%s %s" % (x, y), size_strings[weight_idx+1:])
        # normalise the letter-spaced special class labels
        if 'L e g e n d a r y' in fish_class:
            fish_class = 'Legendary'
        elif 'B R U I S E R' in fish_class:
            fish_class = 'Bruiser'
        # size not stored for now
        # size = reduce(lambda x, y: "%s %s" % (x, y), size_strings[:-3])
        command = "'%s', '%s', '%s', '%s', '%s', '%s', '%s'" % (lure, body_of_water, location, fish_type, fish_level,
                                                                weight, fish_class)
        commands.append(command)
    return commands
# CLI entry: `python Scraper.py <reel_log.html>` -- print the parsed rows.
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Need one argument: path to html_file", file=sys.stderr)
        sys.exit(1)
    scrape_data = scrape(sys.argv[1])
    pprint(scrape_data)
| {"/db.py": ["/Scraper.py"]} |
702 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/customclass/estruturas/__init__.py | from .dimensao import Dimensao | {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
703 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0018_auto_20200611_1905.py | # Generated by Django 3.0.3 on 2020-06-11 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0017_auto_20200611_1859'),
]
operations = [
migrations.RenameField(
model_name='clientemodel',
old_name='numerocasa',
new_name='numero_casa',
),
migrations.AddField(
model_name='dimensaomodel',
name='status',
field=models.CharField(blank=True, choices=[('Em negociação', 'Em negociação'), ('Contrato', 'Contrato'), ('Encerrado', 'Encerrado')], default='Em negociação', help_text='Status do Orçamento', max_length=15),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
704 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0005_dimensaomodel_data.py | # Generated by Django 3.0.3 on 2020-03-17 17:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0004_auto_20200317_0933'),
]
operations = [
migrations.AddField(
model_name='dimensaomodel',
name='data',
field=models.DateTimeField(blank=True, null=True),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
705 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0012_auto_20200603_1916.py | # Generated by Django 3.0.3 on 2020-06-03 22:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0011_auto_20200516_1518'),
]
operations = [
migrations.AlterField(
model_name='clientemodel',
name='telefone',
field=models.IntegerField(blank=True),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
706 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0009_auto_20200504_1529.py | # Generated by Django 3.0.3 on 2020-05-04 18:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0008_remove_precificacaomodel_custo'),
]
operations = [
migrations.DeleteModel(
name='PrecificacaoModel',
),
migrations.AddField(
model_name='dimensaomodel',
name='preco',
field=models.CharField(default=0, max_length=25),
),
migrations.AddField(
model_name='dimensaomodel',
name='produto',
field=models.CharField(default=0, max_length=25),
),
migrations.AlterField(
model_name='dimensaomodel',
name='profundidade_media',
field=models.CharField(max_length=25),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
707 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0014_dimensaomodel_profundidade_media.py | # Generated by Django 3.0.3 on 2020-06-04 18:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0013_remove_dimensaomodel_profundidade_media'),
]
operations = [
migrations.AddField(
model_name='dimensaomodel',
name='profundidade_media',
field=models.CharField(default=0, max_length=25),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
708 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0010_auto_20200511_1521.py | # Generated by Django 3.0.3 on 2020-05-11 18:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0009_auto_20200504_1529'),
]
operations = [
migrations.AlterField(
model_name='dimensaomodel',
name='comprimento',
field=models.FloatField(),
),
migrations.AlterField(
model_name='dimensaomodel',
name='espessura',
field=models.CharField(max_length=3),
),
migrations.AlterField(
model_name='dimensaomodel',
name='fornecedor',
field=models.CharField(max_length=8),
),
migrations.AlterField(
model_name='dimensaomodel',
name='largura',
field=models.FloatField(),
),
migrations.AlterField(
model_name='dimensaomodel',
name='largura_calcada',
field=models.FloatField(),
),
migrations.AlterField(
model_name='dimensaomodel',
name='prof_final',
field=models.FloatField(),
),
migrations.AlterField(
model_name='dimensaomodel',
name='prof_inicial',
field=models.FloatField(),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
709 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0017_auto_20200611_1859.py | # Generated by Django 3.0.3 on 2020-06-11 21:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0016_auto_20200611_1852'),
]
operations = [
migrations.RenameField(
model_name='clientemodel',
old_name='numero_casa',
new_name='numerocasa',
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
710 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0013_remove_dimensaomodel_profundidade_media.py | # Generated by Django 3.0.3 on 2020-06-04 18:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0012_auto_20200603_1916'),
]
operations = [
migrations.RemoveField(
model_name='dimensaomodel',
name='profundidade_media',
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
711 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0001_initial.py | # Generated by Django 3.0.3 on 2020-03-16 18:43
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ClienteModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=30)),
('sobrenome', models.CharField(max_length=30)),
('cidade', models.CharField(blank=True, max_length=20)),
('estado', models.CharField(blank=True, max_length=15)),
('rua', models.CharField(blank=True, max_length=100)),
('numero_casa', models.CharField(blank=True, max_length=6)),
('cep', models.CharField(blank=True, max_length=20)),
('telefone', models.CharField(blank=True, max_length=15)),
('email', models.EmailField(blank=True, help_text='Ex. clinte@gmail.com', max_length=50)),
],
options={
'ordering': ['nome', 'sobrenome'],
},
),
migrations.CreateModel(
name='DimensaoModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comprimento', models.FloatField(help_text='Ex. 8.00', max_length=3)),
('largura', models.FloatField(help_text='Ex. 4.00', max_length=3)),
('prof_inicial', models.FloatField(help_text='Ex. 1.20', max_length=3)),
('prof_final', models.FloatField(help_text='Ex. 1.40', max_length=3)),
('largura_calcada', models.FloatField(blank=True, default=1, help_text='Ex. 1.00', max_length=3)),
('espessura', models.CharField(choices=[['0.6', '0.6 mm'], ['0.7', '0.7 mm'], ['0.8', '0.8 mm']], help_text='Espessura do vinil', max_length=3)),
('fornecedor', models.CharField(choices=[['sodramar', 'Sodramar'], ['viniplas', 'Viniplas']], help_text='Fornecedor do vinil', max_length=8)),
('profundidade_media', models.FloatField(max_length=5)),
('area_calcada', models.FloatField(max_length=5)),
('perimetro', models.FloatField(max_length=5)),
('m2_facial', models.FloatField(max_length=5)),
('m2_parede', models.FloatField(max_length=5)),
('m2_total', models.FloatField(max_length=5)),
('m3_total', models.FloatField(max_length=5)),
('m3_real', models.FloatField(max_length=5)),
('filtro', models.CharField(max_length=30)),
('motobomba', models.CharField(max_length=30)),
('tampa_casa_maquinas', models.CharField(max_length=30)),
('sacos_areia', models.CharField(max_length=30)),
('vinil_m2', models.FloatField(max_length=5)),
('isomanta_m2', models.FloatField(max_length=5)),
('perfil_fixo_m', models.FloatField(max_length=5)),
('escavacao', models.CharField(max_length=30)),
('construcao', models.CharField(max_length=30)),
('contra_piso', models.CharField(max_length=30)),
('remocao_terra', models.CharField(max_length=30)),
('instalacao_vinil', models.CharField(max_length=30)),
('data', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(blank=True, choices=[('Em negociação', 'Em negociação'), ('Contrato', 'Contrato'), ('Encerrado', 'Encerrado')], default='Em negociação', help_text='Status do Orçamento', max_length=15)),
],
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
712 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0004_auto_20200317_0933.py | # Generated by Django 3.0.3 on 2020-03-17 12:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0003_remove_dimensaomodel_data'),
]
operations = [
migrations.AlterField(
model_name='dimensaomodel',
name='construcao',
field=models.CharField(default=0, max_length=30),
),
migrations.AlterField(
model_name='dimensaomodel',
name='contra_piso',
field=models.CharField(default=0, max_length=30),
),
migrations.AlterField(
model_name='dimensaomodel',
name='escavacao',
field=models.CharField(default=0, max_length=30),
),
migrations.AlterField(
model_name='dimensaomodel',
name='instalacao_vinil',
field=models.CharField(default=0, max_length=30),
),
migrations.AlterField(
model_name='dimensaomodel',
name='remocao_terra',
field=models.CharField(default=0, max_length=30),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
713 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0011_auto_20200516_1518.py | # Generated by Django 3.0.3 on 2020-05-16 18:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0010_auto_20200511_1521'),
]
operations = [
migrations.AlterField(
model_name='clientemodel',
name='telefone',
field=models.IntegerField(blank=True, max_length=15),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
714 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0006_auto_20200318_1831.py | # Generated by Django 3.0.3 on 2020-03-18 21:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0005_dimensaomodel_data'),
]
operations = [
migrations.AlterField(
model_name='dimensaomodel',
name='profundidade_media',
field=models.FloatField(default=0, max_length=5),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
715 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0008_remove_precificacaomodel_custo.py | # Generated by Django 3.0.3 on 2020-04-29 20:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0007_auto_20200408_1540'),
]
operations = [
migrations.RemoveField(
model_name='precificacaomodel',
name='custo',
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
716 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0003_remove_dimensaomodel_data.py | # Generated by Django 3.0.3 on 2020-03-16 21:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0002_auto_20200316_1609'),
]
operations = [
migrations.RemoveField(
model_name='dimensaomodel',
name='data',
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
717 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0016_auto_20200611_1852.py | # Generated by Django 3.0.3 on 2020-06-11 21:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0015_auto_20200604_1710'),
]
operations = [
migrations.RemoveField(
model_name='dimensaomodel',
name='status',
),
migrations.AlterField(
model_name='clientemodel',
name='telefone',
field=models.IntegerField(blank=True, default=0),
),
]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
718 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0007_auto_20200408_1540.py | # Generated by Django 3.0.3 on 2020-04-08 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Introduce PrecificacaoModel (pricing), add ClienteModel.bairro and
    convert most DimensaoModel measurement fields to CharField."""

    # Must be applied after migration 0006.
    dependencies = [
        ('dimensoes', '0006_auto_20200318_1831'),
    ]

    operations = [
        # New pricing table: one price column per budget item; the
        # *_preco fields with default=0 cover labor/installation costs.
        migrations.CreateModel(
            name='PrecificacaoModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('custo', models.CharField(max_length=30)),
                ('margem', models.CharField(max_length=30)),
                ('preco', models.CharField(max_length=30)),
                ('filtro_preco', models.CharField(max_length=30)),
                ('motobomba_preco', models.CharField(max_length=30)),
                ('tampa_casa_maquinas_preco', models.CharField(max_length=30)),
                ('sacos_areia_preco', models.CharField(max_length=30)),
                ('perfil_rigido_preco', models.CharField(max_length=30)),
                ('ralo_fundo_preco', models.CharField(max_length=30)),
                ('dispositivo_retorno_preco', models.CharField(max_length=30)),
                ('dispositivo_aspiracao_preco', models.CharField(max_length=30)),
                ('dispositivo_nivel_preco', models.CharField(max_length=30)),
                ('borda_preco', models.CharField(max_length=30)),
                ('skimmer_preco', models.CharField(max_length=30)),
                ('dispositivo_hidromassagem_preco', models.CharField(max_length=30)),
                ('escada_preco', models.CharField(max_length=30)),
                ('timer_preco', models.CharField(max_length=30)),
                ('capa_termica_preco', models.CharField(max_length=30)),
                ('capa_protecao_preco', models.CharField(max_length=30)),
                ('peneira_preco', models.CharField(max_length=30)),
                ('mangueira_preco', models.CharField(max_length=30)),
                ('ponteira_preco', models.CharField(max_length=30)),
                ('adaptador_giratorio_preco', models.CharField(max_length=30)),
                ('haste_aluminio_preco', models.CharField(max_length=30)),
                ('rodo_aspirador_preco', models.CharField(max_length=30)),
                ('escova_preco', models.CharField(max_length=30)),
                ('vinil_preco', models.CharField(max_length=25)),
                ('isomanta_preco', models.CharField(max_length=25)),
                ('perfil_fixo_preco', models.CharField(max_length=25)),
                ('escavacao_preco', models.CharField(default=0, max_length=30)),
                ('construcao_preco', models.CharField(default=0, max_length=30)),
                ('remocao_terra_preco', models.CharField(default=0, max_length=30)),
                ('colocacao_material_preco', models.CharField(default=0, max_length=30)),
                ('contra_piso_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_skimmer_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_borda_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_escada_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_capa_termica_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_capa_protecao_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_tampa_cm_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_vinil_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_filtro_preco', models.CharField(default=0, max_length=30)),
                ('instalacao_motobomba_preco', models.CharField(default=0, max_length=30)),
            ],
        ),
        # Optional neighborhood field on the client record.
        migrations.AddField(
            model_name='clientemodel',
            name='bairro',
            field=models.CharField(blank=True, max_length=20),
        ),
        # The remaining operations convert DimensaoModel measurement
        # columns to CharField (or add choices/help text).
        migrations.AlterField(
            model_name='dimensaomodel',
            name='area_calcada',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='comprimento',
            field=models.CharField(default=0, help_text='Ex. 8.00', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='espessura',
            field=models.CharField(choices=[['0.6', '0.6 mm'], ['0.7', '0.7 mm'], ['0.8', '0.8 mm']], max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='fornecedor',
            field=models.CharField(choices=[['sodramar', 'Sodramar'], ['viniplas', 'Viniplas']], max_length=8),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='isomanta_m2',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='largura',
            field=models.CharField(default=0, help_text='Ex. 4.00', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='largura_calcada',
            field=models.CharField(blank=True, default=1, help_text='Ex. 1.00', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m2_facial',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m2_parede',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m2_total',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m3_real',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='m3_total',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='perfil_fixo_m',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='perimetro',
            field=models.CharField(max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='prof_final',
            field=models.CharField(default=0, help_text='Ex. 1.40', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='prof_inicial',
            field=models.CharField(default=0, help_text='Ex. 1.20', max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='profundidade_media',
            field=models.FloatField(default=0, max_length=25),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='vinil_m2',
            field=models.CharField(max_length=25),
        ),
    ]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
719 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0015_auto_20200604_1710.py | # Generated by Django 3.0.3 on 2020-06-04 20:10
from django.db import migrations
class Migration(migrations.Migration):
    """Strip pricing/derived columns from DimensaoModel (they moved to the
    pricing side of the application)."""

    # Must be applied after migration 0014.
    dependencies = [
        ('dimensoes', '0014_dimensaomodel_profundidade_media'),
    ]

    # Pure column removals; no data is migrated.
    operations = [
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='construcao',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='contra_piso',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='escavacao',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='instalacao_vinil',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='isomanta_m2',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='perfil_fixo_m',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='preco',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='produto',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='remocao_terra',
        ),
        migrations.RemoveField(
            model_name='dimensaomodel',
            name='vinil_m2',
        ),
    ]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
720 | leopesi/pool_budget | refs/heads/master | /projeto/dimensoes/migrations/0019_auto_20200618_1520.py | # Generated by Django 3.0.3 on 2020-06-18 18:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ClienteModel.numero_casa an optional 10-char text field."""

    # Must be applied after migration 0018.
    dependencies = [
        ('dimensoes', '0018_auto_20200611_1905'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clientemodel',
            name='numero_casa',
            field=models.CharField(blank=True, max_length=10),
        ),
    ]
| {"/projeto/dimensoes/customclass/estruturas/__init__.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py"], "/projeto/dimensoes/models.py": ["/projeto/dimensoes/customclass/estruturas/dimensao.py", "/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/motor.py"], "/projeto/dimensoes/customclass/objetos/vinil.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/customclass/objetos/precificacao.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py", "/projeto/dimensoes/customclass/objetos/vinil.py", "/projeto/dimensoes/customclass/objetos/filtro.py"], "/projeto/dimensoes/customclass/objetos/filtro.py": ["/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/admin.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/actions.py"], "/projeto/dimensoes/customclass/objetos/motor.py": ["/projeto/dimensoes/customclass/objetos/filtro.py", "/projeto/dimensoes/customclass/objetos/dbs/database.py"], "/projeto/dimensoes/schema.py": ["/projeto/dimensoes/models.py"], "/projeto/dimensoes/views.py": ["/projeto/dimensoes/models.py", "/projeto/dimensoes/forms.py"], "/projeto/dimensoes/forms.py": ["/projeto/dimensoes/models.py"]} |
735 | moddevices/mod-devel-cli | refs/heads/master | /modcli/cli.py | import click
import crayons
from modcli import context, auth, __version__, bundle
# Confirmation text shown to the user before an SSO login starts
# (displayed by the login_sso command below unless -y is passed).
_sso_disclaimer = '''SSO login requires you have a valid account in MOD Forum (https://forum.moddevices.com).
If your browser has an active session the credentials will be used for this login. Confirm?'''
# Root command; accepts '-h' in addition to '--help'.
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.version_option(prog_name='modcli', version=__version__)
def main():
    """Top-level `modcli` entry point; sub-command groups are attached below."""
    pass


@click.group(name='auth', help='Authentication commands')
def auth_group():
    """Container for `modcli auth ...` sub-commands."""
    pass


@click.group(name='bundle', help='LV2 bundle commands')
def bundle_group():
    """Container for `modcli bundle ...` sub-commands."""
    pass


@click.group(name='config', help='Configuration commands')
def config_group():
    """Container for `modcli config ...` sub-commands."""
    pass
@click.command(help='Authenticate user with SSO (MOD Forum)')
@click.option('-s', '--show-token', type=bool, help='Print the JWT token obtained', is_flag=True)
@click.option('-o', '--one-time', type=bool, help='Only print token once (do not store it)', is_flag=True)
@click.option('-y', '--confirm-all', type=bool, help='Confirm all operations', is_flag=True)
@click.option('-d', '--detached-mode', type=bool, help='Run process without opening a local browser', is_flag=True)
@click.option('-e', '--env_name', type=str, help='Switch to environment before authenticating')
def login_sso(show_token: bool, one_time: bool, confirm_all: bool, detached_mode: bool, env_name: str):
    """Obtain a JWT token via MOD Forum single sign-on, optionally storing it."""
    if env_name:
        context.set_active_env(env_name)
    env = context.current_env()
    # Ask the user to acknowledge the SSO disclaimer unless -y was given.
    if not confirm_all and not click.confirm(_sso_disclaimer):
        exit(1)
    if not one_time:
        click.echo('Logging in to [{0}]...'.format(env.name))
    # Pick the SSO flow: manual copy/paste (detached) or local browser.
    sso_flow = auth.login_sso_detached if detached_mode else auth.login_sso
    try:
        token = sso_flow(env.api_url)
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
    if not one_time:
        # Persist the token so later commands can reuse it.
        env.set_token(token)
        context.save()
    if show_token or one_time:
        print(token.strip())
    else:
        click.echo(crayons.green('You\'re now logged in as [{0}] in [{1}].'.format(env.username, env.name)))
@click.command(help='Authenticate user')
@click.option('-u', '--username', type=str, prompt=True, help='User ID')
@click.option('-p', '--password', type=str, prompt=True, hide_input=True, help='User password')
@click.option('-s', '--show-token', type=bool, help='Print the JWT token obtained', is_flag=True)
@click.option('-o', '--one-time', type=bool, help='Only print token once (do not store it)', is_flag=True)
@click.option('-e', '--env_name', type=str, help='Switch to environment before authenticating')
def login(username: str, password: str, show_token: bool, one_time: bool, env_name: str):
    """Authenticate with username/password and (unless one-time) store the token."""
    if env_name:
        context.set_active_env(env_name)
    env = context.current_env()
    persist = not one_time
    if persist:
        click.echo('Logging in to [{0}]...'.format(env.name))
    try:
        token = auth.login(username, password, env.api_url)
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
    if persist:
        # Save the token for later commands.
        env.set_token(token)
        context.save()
    if show_token or one_time:
        print(token.strip())
    else:
        click.echo(crayons.green('You\'re now logged in as [{0}] in [{1}].'.format(username, env.name)))
@click.command(help='Remove all tokens and reset context data')
def clear_context():
    """Wipe every stored token and reset the CLI context."""
    try:
        context.clear()
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
    else:
        click.echo(crayons.green('Context cleared'))
@click.command(help='Show current active access JWT token')
@click.option('-e', '--env_name', type=str, help='Show current active token from a specific environment')
def active_token(env_name: str):
    """Print the stored JWT token of the active (or given) environment."""
    if env_name:
        context.set_active_env(env_name)
    token = context.active_token()
    if token:
        click.echo(token)
        return
    # No token stored: point the user at the login command and fail.
    click.echo(crayons.red('You must authenticate first.'), err=True)
    click.echo('Try:\n $ modcli auth login')
    exit(1)
@click.command(help='Set active environment, where ENV_NAME is the name')
@click.argument('env_name')
def set_active_env(env_name: str):
    """Make ENV_NAME the active environment and persist the choice."""
    try:
        context.set_active_env(env_name)
        context.save()
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
    else:
        click.echo(crayons.green('Current environment set to: {0}'.format(env_name)))
@click.command(help='Add new environment, where ENV_NAME is the name, API_URL '
                    'and BUNDLE_URL are the API entry points')
@click.argument('env_name')
@click.argument('api_url')
@click.argument('bundle_url')
def add_env(env_name: str, api_url: str, bundle_url: str):
    """Register a new environment, activate it, and persist the context."""
    try:
        context.add_env(env_name, api_url, bundle_url)
        context.set_active_env(env_name)
        context.save()
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
    else:
        click.echo(crayons.green('Environment [{0}] added and set as active'.format(env_name)))
@click.command(help='List current configuration', name='list')
def list_config():
    """Display the active environment, auth status and registered environments."""
    env = context.current_env()
    authenticated = 'Yes' if env.token else 'No'
    click.echo('Active environment: {0}'.format(env.name))
    click.echo('Authenticated in [{0}]: {1}'.format(env.name, authenticated))
    click.echo('Registered environments: {0}'.format(list(context.environments.keys())))
@click.command(help='Publish LV2 bundles, where PROJECT_FILE points to the buildroot project descriptor file (JSON)')
@click.argument('project_file')
@click.option('-p', '--packages-path', type=str, help='Path to buildroot package')
@click.option('-s', '--show-result', type=bool, help='Print pipeline process result', is_flag=True)
@click.option('-k', '--keep-environment', type=bool, help='Don\'t remove build environment after build', is_flag=True)
@click.option('-r', '--rebuild', type=bool, help='Don\'t increment release number, just rebuild', is_flag=True)
@click.option('-e', '--env', type=str, help='Environment where the bundles will be published')
@click.option('-f', '--force', type=bool, help='Don\'t ask for confirmation', is_flag=True)
def publish(project_file: str, packages_path: str, show_result: bool, keep_environment: bool,
            rebuild: bool, env: str, force: bool):
    """CLI wrapper around bundle.publish that renders errors and exits non-zero."""
    options = dict(show_result=show_result, keep_environment=keep_environment,
                   rebuild=rebuild, env_name=env, force=force)
    try:
        bundle.publish(project_file, packages_path, **options)
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
# Wire every leaf command into its group...
auth_group.add_command(active_token)
auth_group.add_command(login)
auth_group.add_command(login_sso)
bundle_group.add_command(publish)
config_group.add_command(add_env)
config_group.add_command(set_active_env)
config_group.add_command(list_config)
config_group.add_command(clear_context)
# ...and the groups into the root command.
main.add_command(auth_group)
main.add_command(bundle_group)
main.add_command(config_group)

# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| {"/modcli/cli.py": ["/modcli/__init__.py"], "/modcli/config.py": ["/modcli/__init__.py", "/modcli/utils.py"], "/modcli/auth.py": ["/modcli/__init__.py"], "/modcli/bundle.py": ["/modcli/__init__.py", "/modcli/utils.py"]} |
736 | moddevices/mod-devel-cli | refs/heads/master | /modcli/__init__.py | from modcli import config
# Package version; setup.py extracts this with a regex, so it must stay a
# plain string literal assignment.
__version__ = '1.1.3'

# Shared CLI context, loaded from the user's config directory at import time.
context = config.read_context()
| {"/modcli/cli.py": ["/modcli/__init__.py"], "/modcli/config.py": ["/modcli/__init__.py", "/modcli/utils.py"], "/modcli/auth.py": ["/modcli/__init__.py"], "/modcli/bundle.py": ["/modcli/__init__.py", "/modcli/utils.py"]} |
737 | moddevices/mod-devel-cli | refs/heads/master | /setup.py | import re
import sys
from setuptools import setup
# Read the package version straight out of modcli/__init__.py so it is
# declared in exactly one place.
with open('modcli/__init__.py', 'r') as fh:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fh.read(), re.MULTILINE).group(1)

# modcli is Python-3 only; fail fast on Python 2 interpreters.
if sys.version_info[0] < 3:
    raise Exception("Must be using Python 3")

setup(
    name='mod-devel-cli',
    python_requires='>=3',
    version=version,
    description='MOD Command Line Interface',
    author='Alexandre Cunha',
    author_email='alex@moddevices.com',
    license='Proprietary',
    install_requires=[
        'click==6.7',
        'crayons==0.1.2',
        'requests>=2.18.4',
    ],
    packages=[
        'modcli',
    ],
    entry_points={
        # Installs the `modcli` console script pointing at cli.main.
        'console_scripts': [
            'modcli = modcli.cli:main',
        ]
    },
    classifiers=[
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
    url='http://moddevices.com/',
)
| {"/modcli/cli.py": ["/modcli/__init__.py"], "/modcli/config.py": ["/modcli/__init__.py", "/modcli/utils.py"], "/modcli/auth.py": ["/modcli/__init__.py"], "/modcli/bundle.py": ["/modcli/__init__.py", "/modcli/utils.py"]} |
738 | moddevices/mod-devel-cli | refs/heads/master | /modcli/settings.py | import os
# Directory where modcli persists its context and token files.
CONFIG_DIR = os.path.expanduser('~/.config/modcli')

# Environment name -> (API entry point, bundle pipeline entry point).
URLS = {
    'labs': ('https://api-labs.moddevices.com/v2', 'https://pipeline-labs.moddevices.com/bundle/'),
    'dev': ('https://api-dev.moddevices.com/v2', 'https://pipeline-dev.moddevices.com/bundle/'),
}

# Environment selected on first run, before the user picks one.
DEFAULT_ENV = 'labs'
| {"/modcli/cli.py": ["/modcli/__init__.py"], "/modcli/config.py": ["/modcli/__init__.py", "/modcli/utils.py"], "/modcli/auth.py": ["/modcli/__init__.py"], "/modcli/bundle.py": ["/modcli/__init__.py", "/modcli/utils.py"]} |
739 | moddevices/mod-devel-cli | refs/heads/master | /modcli/config.py | import base64
import json
import os
import stat
import re
from modcli import settings
from modcli.utils import read_json_file
def read_context():
    """Load the persisted CLI context, seeding default environments on first run."""
    ctx = CliContext.read(settings.CONFIG_DIR)
    if not ctx.environments:
        # First run (or cleared context): register the built-in environments
        # and persist them immediately.
        for name, (api_url, bundle_url) in settings.URLS.items():
            ctx.add_env(name, api_url, bundle_url)
        ctx.set_active_env(settings.DEFAULT_ENV)
        ctx.save()
    return ctx
def clear_context():
    """Delete all persisted context files from the config directory.

    Fix: CliContext.clear is an *instance* method, so a CliContext bound to
    the config directory must be built first.  The original code called
    ``CliContext.clear(settings.CONFIG_DIR)``, passing the path string as
    ``self``, which raised AttributeError on ``self._path``.
    """
    CliContext(settings.CONFIG_DIR).clear()
def _write_file(path: str, data: str, remove_existing: bool=True):
# create dir if doesn't exist
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname, exist_ok=True)
# remove previous file
if remove_existing:
if os.path.isfile(path):
os.remove(path)
# write json file
with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR), 'w') as fh:
fh.write(data)
fh.writelines(os.linesep)
def _write_json_file(path: str, data: dict, remove_existing: bool=True):
    """Serialize *data* as indented JSON and write it via _write_file."""
    serialized = json.dumps(data, indent=4)
    _write_file(path, serialized, remove_existing)
def _remove_file(path: str):
if os.path.isfile(path):
os.remove(path)
class CliContext(object):
    """Persisted modcli state: registered environments plus the active one.

    Serialized to <path>/context.json; the active token is also mirrored to
    <path>/access_token so external tools can read it directly.
    """

    _filename = 'context.json'
    _access_token_filename = 'access_token'

    @staticmethod
    def read(path: str):
        """Load a CliContext from *path*; returns an empty context if no file."""
        context = CliContext(path)
        data = read_json_file(os.path.join(path, CliContext._filename))
        if not data:
            return context
        for env_data in data['environments']:
            context.add_env(env_data['name'], env_data['api_url'], env_data['bundle_url'])
            env = context.environments[env_data['name']]
            env.username = env_data['username']
            env.token = env_data['token']
            env.exp = env_data['exp']
        context.set_active_env(data['active_env'])
        return context

    def __init__(self, path: str):
        self._path = path        # directory holding context.json / access_token
        self._active_env = ''    # name of the active environment ('' = none)
        self.environments = {}   # name -> EnvSettings

    def _ensure_env(self, env_name: str):
        """Raise if *env_name* has not been registered."""
        if env_name not in self.environments:
            # fix: message typo ("doen't" -> "doesn't")
            raise Exception('Environment {0} doesn\'t exist'.format(env_name))

    def set_active_env(self, env_name: str):
        """Select *env_name* as the active environment (falsy clears selection)."""
        if not env_name:
            self._active_env = ''
        else:
            self._ensure_env(env_name)
            self._active_env = env_name

    def add_env(self, env_name: str, api_url: str, bundle_url: str):
        """Register a new environment after validating its name and URLs."""
        if not env_name:
            raise Exception('Environment name is invalid')
        if env_name in self.environments:
            raise Exception('Environment {0} already exists'.format(env_name))
        if not re.match('https?://.*', api_url):
            raise Exception('Invalid api_url: {0}'.format(api_url))
        if not re.match('https?://.*', bundle_url):
            # fix: this branch wrongly reported 'Invalid api_url' for a bad bundle_url
            raise Exception('Invalid bundle_url: {0}'.format(bundle_url))
        self.environments[env_name] = EnvSettings(env_name, api_url, bundle_url)

    def remove_env(self, env_name: str):
        """Unregister *env_name*; raises when unknown."""
        self._ensure_env(env_name)
        del self.environments[env_name]

    def active_token(self):
        """Return the JWT token of the active environment ('' when not logged in)."""
        return self.current_env().token

    def current_env(self):
        """Return the active EnvSettings; raises when nothing is active."""
        if not self._active_env:
            # fix: message grammar ("Not environment" -> "No environment")
            raise Exception('No environment has been set')
        return self.environments[self._active_env]

    def get_env(self, env_name: str=None):
        """Return the environment named *env_name*, or the active one if falsy."""
        if not env_name:
            return self.current_env()
        self._ensure_env(env_name)
        return self.environments[env_name]

    def save(self):
        """Write context.json and mirror (or remove) the access_token file."""
        data = {
            'active_env': self._active_env,
            'environments': list({
                'name': e.name,
                'api_url': e.api_url,
                'bundle_url': e.bundle_url,
                'username': e.username,
                'token': e.token,
                'exp': e.exp,
            } for e in self.environments.values())
        }
        _write_json_file(os.path.join(self._path, CliContext._filename), data)
        active_token = self.active_token()
        if active_token:
            _write_file(os.path.join(self._path, CliContext._access_token_filename), active_token)
        else:
            _remove_file(os.path.join(self._path, CliContext._access_token_filename))

    def clear(self):
        """Delete both persisted files and drop all in-memory environments."""
        _remove_file(os.path.join(self._path, CliContext._filename))
        _remove_file(os.path.join(self._path, CliContext._access_token_filename))
        self.environments.clear()
class EnvSettings(object):
    """Connection and authentication settings for a single named environment."""

    def __init__(self, name: str, api_url: str, bundle_url: str):
        self.name = name
        # Normalize URLs so callers can safely append '/...' paths.
        self.api_url = api_url.rstrip('/')
        self.bundle_url = bundle_url.rstrip('/')
        self.username = ''   # user id extracted from the last JWT payload
        self.token = ''      # raw JWT string ('' = not authenticated)
        self.exp = ''        # token expiry claim from the JWT, if present

    def set_token(self, token: str):
        """Store *token* and extract username/expiry from its JWT payload.

        Raises ValueError when the token is not of the ``header.payload.sig``
        form, and KeyError when the payload lacks ``user_id``.
        """
        _, payload, _ = token.split('.')
        # JWT payloads are base64url-encoded without padding (RFC 7519):
        # restore the exact padding and decode with the urlsafe alphabet.
        # fix: the original used standard b64decode plus a blanket '===',
        # which silently corrupts payloads containing '-' or '_'.
        padded = payload + '=' * (-len(payload) % 4)
        payload_data = json.loads(base64.urlsafe_b64decode(padded).decode())
        self.username = payload_data['user_id']
        self.token = token
        self.exp = payload_data.get('exp', None)
| {"/modcli/cli.py": ["/modcli/__init__.py"], "/modcli/config.py": ["/modcli/__init__.py", "/modcli/utils.py"], "/modcli/auth.py": ["/modcli/__init__.py"], "/modcli/bundle.py": ["/modcli/__init__.py", "/modcli/utils.py"]} |
740 | moddevices/mod-devel-cli | refs/heads/master | /modcli/auth.py | import socket
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib import parse
import click
import requests
from click import Abort
from modcli import __version__
def login(username: str, password: str, api_url: str):
    """Exchange username/password for a JWT token at the tokens endpoint."""
    payload = {
        'user_id': username,
        'password': password,
        'agent': 'modcli:{0}'.format(__version__),
    }
    result = requests.post('{0}/users/tokens'.format(api_url), json=payload)
    if result.status_code != 200:
        # The API reports failures in the 'error-message' field.
        raise Exception('Error: {0}'.format(result.json()['error-message']))
    return result.json()['message'].strip()
def get_open_port():
    """Ask the OS for a currently-free TCP port number and return it."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(("", 0))  # port 0 = let the kernel pick any free port
        probe.listen(1)
        return probe.getsockname()[1]
    finally:
        probe.close()
def login_sso_detached(api_url: str):
    """Guide the user through a manual (no local browser) SSO login.

    Returns the JWT token pasted by the user; exits on prompt abort.
    """
    sso_url = '{0}/users/tokens_sso'.format(api_url)
    click.echo('Running in detached mode...')
    click.echo('1) Open this url in any browser: {0}'.format(sso_url))
    click.echo('2) The URL will automatically redirect to MOD Forum (https://forum.moddevices.com)')
    click.echo('3) Once MOD Forum page loads, if asked, enter your credentials or register a new user')
    click.echo('4) A JWT token will be displayed in your browser')
    try:
        return click.prompt('Copy the token value and paste it here, then press ENTER').strip()
    except Abort:
        exit(1)
def login_sso(api_url: str):
    """Perform a browser-based SSO login and return the captured JWT token.

    Spins up a throwaway local HTTP server, opens the SSO URL in the default
    browser with a redirect back to that server, and waits (up to 30s) for a
    single request carrying the token as a ``token`` query parameter.

    Raises Exception when no token is received.
    """
    server_host = 'localhost'
    server_port = get_open_port()
    local_server = 'http://{0}:{1}'.format(server_host, server_port)

    class SSORequestHandler(BaseHTTPRequestHandler):
        # Class attribute used to hand the captured token back to the caller.
        token = ''

        def do_HEAD(self):
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()

        def do_GET(self):
            response = self.handle_http(200)
            _, _, _, query, _ = parse.urlsplit(self.path)
            result = parse.parse_qs(query)
            tokens = result.get('token', None)
            # fix: guard against a redirect without a 'token' query parameter;
            # the original called len(None) and crashed the request handler.
            SSORequestHandler.token = tokens[0] if tokens else None
            self.wfile.write(response)

        def handle_http(self, status_code):
            """Send headers for *status_code* and return the static HTML body."""
            self.send_response(status_code)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            content = '''
            <html><head><title>modcli - success</title></head>
            <body>Authentication successful! This browser window can be closed.</body></html>
            '''
            return bytes(content, 'UTF-8')

        def log_message(self, format, *args):
            # Silence the default per-request stderr logging.
            pass

    httpd = HTTPServer((server_host, server_port), SSORequestHandler)
    httpd.timeout = 30  # give up if the browser never redirects back
    webbrowser.open('{0}/users/tokens_sso?local_url={1}'.format(api_url, local_server))
    try:
        # Serve exactly one request: the SSO redirect carrying the token.
        httpd.handle_request()
    except KeyboardInterrupt:
        pass
    token = SSORequestHandler.token
    if not token:
        raise Exception('Authentication failed!')
    return token
| {"/modcli/cli.py": ["/modcli/__init__.py"], "/modcli/config.py": ["/modcli/__init__.py", "/modcli/utils.py"], "/modcli/auth.py": ["/modcli/__init__.py"], "/modcli/bundle.py": ["/modcli/__init__.py", "/modcli/utils.py"]} |
741 | moddevices/mod-devel-cli | refs/heads/master | /modcli/bundle.py | import os
import shutil
import subprocess
import tempfile
from hashlib import md5
import click
import crayons
import requests
from modcli import context
from modcli.utils import read_json_file
def publish(project_file: str, packages_path: str, keep_environment: bool=False, bundles: list=None,
            show_result: bool=False, rebuild: bool=False, env_name: str=None, force: bool=False):
    """Package a buildroot project and submit it to the bundle pipeline.

    Reads the JSON project descriptor, tars the buildroot package it names,
    creates a release process on the pipeline API and uploads the tarball,
    verifying the upload with an md5 checksum.

    Parameters:
        project_file: path to the buildroot project descriptor (JSON).
        packages_path: directory searched for the package's .mk file;
            defaults to the project file's directory.
        keep_environment: ask the pipeline to keep the build environment.
        bundles: optional list of bundle names to restrict the build to.
        show_result: fetch and print the full release process afterwards.
        rebuild: rebuild without incrementing the release number.
        env_name: environment to publish to (default: the active one).
        force: skip the interactive confirmation prompt.

    Raises Exception on any validation, authentication or pipeline error.
    """
    project_file = os.path.realpath(project_file)
    packages_path = os.path.realpath(packages_path) if packages_path else None
    env = context.get_env(env_name)
    if not env.token:
        raise Exception('You must authenticate first')
    if not os.path.isfile(project_file):
        raise Exception('File {0} not found or not a valid file'.format(project_file))
    if packages_path:
        if not os.path.isdir(packages_path):
            raise Exception('Packages path {0} not found'.format(packages_path))
    else:
        # Default: look for the package next to the project descriptor.
        packages_path = os.path.dirname(project_file)
    project = os.path.split(project_file)[1]
    if not force and not click.confirm('Project {0} will be compiled and published in [{1}], '
                                       'do you confirm?'.format(crayons.green(project), crayons.green(env.name))):
        raise Exception('Cancelled')
    process = read_json_file(project_file)
    # setting up process data
    if keep_environment:
        process['keep_environment'] = True
    process['rebuild'] = rebuild
    buildroot_pkg = process.pop('buildroot_pkg', None)
    mk_filename = '{0}.mk'.format(buildroot_pkg)
    if not buildroot_pkg:
        raise Exception('Missing buildroot_pkg in project file')
    if bundles:
        # Restrict the build to the requested bundle names.
        process['bundles'] = [b for b in process['bundles'] if b['name'] in bundles]
        if not process['bundles']:
            raise Exception('Could not match any bundle from: {0}'.format(bundles))
    # find buildroot_pkg under packages_path
    mk_path = next((i[0] for i in os.walk(packages_path) if mk_filename in i[2]), None)
    if not mk_path:
        raise Exception('Could not find buildroot mk file for package {0} in {1}'.format(buildroot_pkg, packages_path))
    basename = os.path.basename(mk_path)
    if basename != buildroot_pkg:
        raise Exception('The package folder containing the .mk file has to be named {0}'.format(buildroot_pkg))
    pkg_path = os.path.dirname(mk_path)
    work_dir = tempfile.mkdtemp()
    try:
        package = '{0}.tar.gz'.format(buildroot_pkg)
        source_path = os.path.join(work_dir, package)
        # 'z' gzip, 'h' follow symlinks, 'c' create, 'f' file name.
        try:
            subprocess.check_output(
                ['tar', 'zhcf', source_path, buildroot_pkg], stderr=subprocess.STDOUT, cwd=os.path.join(pkg_path)
            )
        except subprocess.CalledProcessError as ex:
            raise Exception(ex.output.decode())
        click.echo('Submitting release process for project {0} using file {1}'.format(project_file, package))
        click.echo('URL: {0}'.format(env.bundle_url))
        headers = {'Authorization': 'MOD {0}'.format(env.token)}
        result = requests.post('{0}/'.format(env.bundle_url), json=process, headers=headers)
        if result.status_code == 401:
            raise Exception('Invalid token - please authenticate (see \'modcli auth\')')
        elif result.status_code != 200:
            raise Exception('Error: {0}'.format(result.text))
        release_process = result.json()
        click.echo('Release process created: {0}'.format(release_process['id']))
        click.echo('Uploading buildroot package {0} ...'.format(package))
        with open(source_path, 'rb') as fh:
            data = fh.read()
        headers = {'Content-Type': 'application/octet-stream'}
        result = requests.post(release_process['source-href'], data=data, headers=headers)
        if result.status_code == 401:
            raise Exception('Invalid token - please authenticate (see \'modcli auth\')')
        elif result.status_code != 201:
            raise Exception('Error: {0}'.format(result.text))
        # The pipeline echoes back an md5 of what it received (JSON-quoted).
        checksum = result.text.lstrip('"').rstrip('"')
        result_checksum = md5(data).hexdigest()
        if checksum == result_checksum:
            click.echo('Checksum match ok!')
        else:
            raise Exception('Checksum mismatch: {0} <> {1}'.format(checksum, result_checksum))
    finally:
        # Always remove the temporary tarball directory.
        click.echo('Cleaning up...')
        shutil.rmtree(work_dir, ignore_errors=True)
    release_process_url = release_process['href']
    click.echo(crayons.blue('Process url: {0}?pretty=true'.format(release_process_url)))
    click.echo(crayons.green('Done'))
    if show_result:
        click.echo('Retrieving release process from {0} ...'.format(release_process_url))
        release_process_full = requests.get('{0}?pretty=true'.format(release_process_url)).text
        click.echo(crayons.blue('================ Release Process {0} ================'.format(release_process['id'])))
        click.echo(release_process_full)
        click.echo(crayons.blue('================ End Release Process ================'))
| {"/modcli/cli.py": ["/modcli/__init__.py"], "/modcli/config.py": ["/modcli/__init__.py", "/modcli/utils.py"], "/modcli/auth.py": ["/modcli/__init__.py"], "/modcli/bundle.py": ["/modcli/__init__.py", "/modcli/utils.py"]} |
742 | moddevices/mod-devel-cli | refs/heads/master | /modcli/utils.py | import json
import os
def read_json_file(path: str):
    """Parse *path* as JSON, returning {} when the file does not exist."""
    if not os.path.isfile(path):
        return {}
    with open(path, 'r') as file:
        return json.loads(file.read())
| {"/modcli/cli.py": ["/modcli/__init__.py"], "/modcli/config.py": ["/modcli/__init__.py", "/modcli/utils.py"], "/modcli/auth.py": ["/modcli/__init__.py"], "/modcli/bundle.py": ["/modcli/__init__.py", "/modcli/utils.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.