code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
This script generates the label and comparison results by training on both diagnostic label data (RSD or 13 experts' absolute labels) and pairwise comparison data
Parameters :
-------------
dataType: 'auto' or 'manual'
'auto' would load the automatic segmentation data. 'manual' would load the manual segmentation data.
lambdWeight: list
contains the weights on L1 penalty parameter
alphaWeight: list
contains the weights on label data. The weights on comparison data would be (1-alphaWeight). 0 - train label data only, 1 - train comparison data only.
penaltyTimes: float
the number times on the expert bias avoid the penalty on these biases. Larger number would have less penalty on bias.
Return :
-------------
.mat file
Two types for classification. Plus: Plus vs Not Plus. PreP : Not Normal vs Normal. All of the models below incorporate comparison data.
aucLExp132Exp13 : Using expert absolute labels with bias to predict expert absolute labels with bias.
aucLExp132RSD : Using expert absolute labels with bias to predict the consensus RSD label.
aucLExp132Exp13NoB : Using expert absolute labels with bias to train, but testing without using bias.
aucLRSD2Exp13 : Using the consensus RSD label to train and testing on experts' absolute labels.
aucLRSD2RSD : Using the consensus RSD label to train and test on RSD labels.
aucCExp13 : Using expert absolute labels with bias to train and test on the comparison labels.
aucCRSD : Using the consensus RSD label to train and test on comparison labels.
Author: <NAME>
Date: September 2016
"""
from sklearn import metrics
import scipy.io as sio
import numpy as np
from scipy.io import loadmat
from cvxOpt import SVM_Log
import sys
def TrainSinleExp(alpha, labelSin, labelAbs, labelCmp, labelTrainPartition,
                  labelTestPartition, cmpTrainPartition, cmpTestPartition,
                  num_iters=10000):
    """Train an L1-penalized SVM-Log model on one label source plus pairwise
    comparison data, using repeated K-fold cross-validation.

    Relies on module-level globals: N, M, d, K, repeatTimes, numOfExpts4Lbl,
    numOfExpts4Cmp, labelFeat, cmpFeat, lamdaWeights, Yc.

    Parameters
    ----------
    alpha : float
        Weight on the label data; (1 - alpha) goes to the comparison data.
    labelSin : array
        Single (e.g. consensus RSD) label vector used for training.
    labelAbs : array
        Per-expert absolute labels; column 13 (the last) is the RSD label.
    labelCmp : array
        Per-expert pairwise comparison labels, one column per expert.
    labelTrainPartition, labelTestPartition : nested arrays
        CV train/test index sets for the label data, indexed [repeat][fold].
    cmpTrainPartition, cmpTestPartition : nested arrays
        CV train/test index sets for the comparison data, [repeat][fold].
    num_iters : int, optional
        Unused; retained for backward compatibility with existing callers.

    Returns
    -------
    tuple
        (aucSin2Abs, aucSin2RSD, aucSin2Cmp, betaMat, constMat, scoreSin2RSD)
    """
    # Expert absolute labels stacked column-wise into one long vector.
    Yl = np.reshape(labelAbs[:, :-1], [-1, ], order='F')
    YlSin = 1 * labelSin
    YlRSD = labelAbs[:, 13]
    # Score matrices: rows index (expert, sample) pairs, columns index repeats
    # (or repeat*K + fold for the comparison scores).
    scoreSin2Abs = np.zeros([N * numOfExpts4Lbl, repeatTimes])
    scoreSin2RSD = np.zeros([N, repeatTimes])
    scoreSin2Cmp = np.zeros([M * numOfExpts4Cmp, repeatTimes * K])
    # Mask of comparison entries that actually received an out-of-fold score.
    locSin2Cmp = np.zeros([M * numOfExpts4Cmp, repeatTimes * K])
    aucSin2Abs = np.zeros([1, repeatTimes])
    aucSin2RSD = np.zeros([1, repeatTimes])
    aucSin2Cmp = np.zeros([1, repeatTimes])
    betaMat = np.zeros([repeatTimes * K, d])
    constMat = np.zeros([repeatTimes * K, 1])
    for repeatCount in range(repeatTimes):
        for countFold in range(K):
            trainLIndex = labelTrainPartition[repeatCount][countFold].copy()
            testLIndex = labelTestPartition[repeatCount][countFold].copy()
            trainCIndex = cmpTrainPartition[repeatCount][countFold].copy()
            testCIndex = cmpTestPartition[repeatCount][countFold].copy()
            # Flatten the index arrays (they arrive 2-D from the .mat file).
            trainLIndex = np.reshape(trainLIndex, [-1, ])
            testLIndex = np.reshape(testLIndex, [-1, ])
            trainCIndex = np.reshape(trainCIndex, [-1, ])
            testCIndex = np.reshape(testCIndex, [-1, ])
            trainFeatC = cmpFeat[trainCIndex, :]
            testFeatC = cmpFeat[testCIndex, :]
            trainFeatL = labelFeat[trainLIndex, :]
            testFeatL = labelFeat[testLIndex, :]
            # Stack each expert's comparison labels into one training vector.
            YtrainExp13C = np.array([])
            for eC in range(numOfExpts4Cmp):
                YtrainExp13C = np.append(YtrainExp13C, labelCmp[trainCIndex, eC])
            # Prepare training and testing feats/labels; comparison feats are
            # tiled once per expert to match the stacked labels.
            XtrainSinL = 1 * trainFeatL
            XtrainSinC = np.tile(trainFeatC, [numOfExpts4Cmp, 1])
            YtrainSinL = 1 * YlSin[trainLIndex]
            YtrainSinC = 1 * YtrainExp13C
            countLamda = 0
            for lamda in lamdaWeights:
                # Fit the joint label/comparison model.
                betaSin, constSin = SVM_Log(XtrainSinL, YtrainSinL,
                                            XtrainSinC, YtrainSinC,
                                            alpha, lamda)
                betaSin = np.array(betaSin)
                constSin = np.array(constSin)
                # Save the parameter values for this fold.
                # NOTE(review): with len(lamdaWeights) > 1 every lamda writes
                # the same fold slot; the script always passes one lamda.
                betaMat[countFold + K * repeatCount, :] = np.array(betaSin.T)
                constMat[countFold + K * repeatCount, :] = constSin
                # Score the held-out label data, replicated per expert.
                for eLT in range(numOfExpts4Lbl):
                    scoreSin2Abs[eLT * N + testLIndex, repeatCount] = np.reshape(
                        np.dot(testFeatL, betaSin) + constSin, [-1, ])
                for eCT in range(numOfExpts4Cmp):
                    scoreSin2Cmp[eCT * M + testCIndex, K * repeatCount + countFold] = np.reshape(
                        np.dot(testFeatC, betaSin) + constSin, [-1, ])
                    locSin2Cmp[eCT * M + testCIndex, K * repeatCount + countFold] = 1
                # Score against the RSD label (no intercept here, as before).
                scoreSin2RSD[testLIndex, repeatCount] = np.reshape(
                    np.dot(testFeatL, betaSin), [-1, ])
                countLamda += 1
        # Compute the AUCs over the complete out-of-fold scores per repeat.
        aucSin2Abs[0, repeatCount] = metrics.roc_auc_score(Yl, scoreSin2Abs[:, repeatCount])
        aucSin2RSD[0, repeatCount] = metrics.roc_auc_score(YlRSD, scoreSin2RSD[:, repeatCount])
        indexCmpValidTe = np.where(np.reshape(locSin2Cmp[:, repeatCount], [-1, ]) != 0)[0]
        aucSin2Cmp[0, repeatCount] = metrics.roc_auc_score(
            Yc[indexCmpValidTe], scoreSin2Cmp[indexCmpValidTe, repeatCount])
    return aucSin2Abs, aucSin2RSD, aucSin2Cmp, betaMat, constMat, scoreSin2RSD
# --- Script configuration and data loading -------------------------------
dataType = 'auto'
nameBase = '../../../Data/Result/MS_RSD_SVMLog_L1_CV1' + '_' + dataType
dataFile = loadmat('../../../Data/ProbalisticModel/iROP_6DD_1st100_Partition.mat')
aveLabelFile = loadmat('../../../Data/ProbalisticModel/AveLabels.mat')
# alphaWeights = [1.0]
# lamdaWeights = [1e-10]
# Command line: argv[1] = alpha (weight on label data), argv[2] = L1 lambda.
alphaWeights = [float(sys.argv[1])]
lamdaWeights = [float(sys.argv[2])]
numOfExpts4Lbl = 13
numOfExpts4Cmp = 5
penaltyTimes = 100
lenAlpha = len(alphaWeights)
lenLamda = len(lamdaWeights)
labelPlusSet = dataFile['labelPlus']
labelPrePSet = dataFile['labelPreP']
cmpLabel = dataFile['cmpLabel']
Yc = dataFile['cmpLabel1Column'][0, :]
repeatTimes = int(dataFile['repeatTimes'][0, :])
K = int(dataFile['numOfFolds'][0, :])
# Cross-validation partitions for both classification tasks.
RSDTrainPlusPartition = dataFile['RSDTrainPlusPartition']
RSDTestPlusPartition = dataFile['RSDTestPlusPartition']
cmpTrainPlusPartition = dataFile['cmpTrainPlusPartition']
cmpTestPlusPartition = dataFile['cmpTestPlusPartition']
RSDTrainPrePPartition = dataFile['RSDTrainPrePPartition']
RSDTestPrePPartition = dataFile['RSDTestPrePPartition']
cmpTrainPrePPartition = dataFile['cmpTrainPrePPartition']
cmpTestPrePPartition = dataFile['cmpTestPrePPartition']
# Last column holds the consensus RSD label.
labelRSDPlus = labelPlusSet[:, -1]
labelRSDPreP = labelPrePSet[:, -1]
labelAvePlus = aveLabelFile['labelAvePlus']
labelAvePreP = aveLabelFile['labelAvePreP']
if dataType == 'manual':
    labelFeatOrigin = dataFile['labelFeatManual']
    cmpFeatOrigin = dataFile['cmpFeatManual']
elif dataType == 'auto':
    labelFeatOrigin = dataFile['labelFeatAuto']
    cmpFeatOrigin = dataFile['cmpFeatAuto']
else:
    # BUG FIX: assert('...') on a non-empty string is always true and never
    # fires; raise an explicit error for an unknown dataType instead.
    raise ValueError("dataType should be 'manual' or 'auto'")
N, d = labelFeatOrigin.shape
M, _ = cmpFeatOrigin.shape
labelFeat = 1 * labelFeatOrigin
cmpFeat = 1 * cmpFeatOrigin
# --- Main script: train/test each label source on both tasks -------------
aucRSD2AbsPlus, aucRSD2RSDPlus, aucRSD2CmpPlus, betaRSDPlus, constRSDPlus, scoreRSD2RSDPlus = TrainSinleExp(
    alphaWeights[0], labelRSDPlus, labelPlusSet, cmpLabel,
    RSDTrainPlusPartition, RSDTestPlusPartition,
    cmpTrainPlusPartition, cmpTestPlusPartition)
aucRSD2AbsPreP, aucRSD2RSDPreP, aucRSD2CmpPreP, betaRSDPreP, constRSDPreP, scoreRSD2RSDPreP = TrainSinleExp(
    alphaWeights[0], labelRSDPreP, labelPrePSet, cmpLabel,
    RSDTrainPrePPartition, RSDTestPrePPartition,
    cmpTrainPrePPartition, cmpTestPrePPartition)
aucAve2AbsPlus, aucAve2RSDPlus, aucAve2CmpPlus, betaAvePlus, constAvePlus, scoreAve2RSDPlus = TrainSinleExp(
    alphaWeights[0], labelAvePlus[:, 0], labelPlusSet, cmpLabel,
    RSDTrainPlusPartition, RSDTestPlusPartition,
    cmpTrainPlusPartition, cmpTestPlusPartition)
aucAve2AbsPreP, aucAve2RSDPreP, aucAve2CmpPreP, betaAvePreP, constAvePreP, scoreAve2RSDPreP = TrainSinleExp(
    alphaWeights[0], labelAvePreP[:, 0], labelPrePSet, cmpLabel,
    RSDTrainPrePPartition, RSDTestPrePPartition,
    cmpTrainPrePPartition, cmpTestPrePPartition)
# Bundle every result into one dict and write it to a .mat file.
outputDict = {'aucRSD2AbsPlus': aucRSD2AbsPlus, 'aucRSD2RSDPlus': aucRSD2RSDPlus, 'aucRSD2CmpPlus': aucRSD2CmpPlus,
              'betaRSDPlus': betaRSDPlus, 'constRSDPlus': constRSDPlus, 'scoreRSD2RSDPlus': scoreRSD2RSDPlus,
              'aucRSD2AbsPreP': aucRSD2AbsPreP, 'aucRSD2RSDPreP': aucRSD2RSDPreP, 'aucRSD2CmpPreP': aucRSD2CmpPreP,
              'betaRSDPreP': betaRSDPreP, 'constRSDPreP': constRSDPreP, 'scoreRSD2RSDPreP': scoreRSD2RSDPreP,
              'aucAve2AbsPlus': aucAve2AbsPlus, 'aucAve2RSDPlus': aucAve2RSDPlus, 'aucAve2CmpPlus': aucAve2CmpPlus,
              'betaAvePlus': betaAvePlus, 'constAvePlus': constAvePlus, 'scoreAve2RSDPlus': scoreAve2RSDPlus,
              'aucAve2AbsPreP': aucAve2AbsPreP, 'aucAve2RSDPreP': aucAve2RSDPreP, 'aucAve2CmpPreP': aucAve2CmpPreP,
              'betaAvePreP': betaAvePreP, 'constAvePreP': constAvePreP, 'scoreAve2RSDPreP': scoreAve2RSDPreP}
sio.savemat(nameBase + '_' + str(alphaWeights[0]) + '_' + str(lamdaWeights[0]) + '.mat', outputDict)
"scipy.io.loadmat",
"cvxOpt.SVM_Log",
"numpy.zeros",
"sklearn.metrics.roc_auc_score",
"numpy.append",
"numpy.array",
"numpy.reshape",
"numpy.tile",
"numpy.dot"
] | [((5949, 6020), 'scipy.io.loadmat', 'loadmat', (['"""../../../Data/ProbalisticModel/iROP_6DD_1st100_Partition.mat"""'], {}), "('../../../Data/ProbalisticModel/iROP_6DD_1st100_Partition.mat')\n", (5956, 6020), False, 'from scipy.io import loadmat\n'), ((6036, 6091), 'scipy.io.loadmat', 'loadmat', (['"""../../../Data/ProbalisticModel/AveLabels.mat"""'], {}), "('../../../Data/ProbalisticModel/AveLabels.mat')\n", (6043, 6091), False, 'from scipy.io import loadmat\n'), ((1930, 1975), 'numpy.reshape', 'np.reshape', (['labelAbs[:, :-1]', '[-1]'], {'order': '"""F"""'}), "(labelAbs[:, :-1], [-1], order='F')\n", (1940, 1975), True, 'import numpy as np\n'), ((2176, 2219), 'numpy.zeros', 'np.zeros', (['[N * numOfExpts4Lbl, repeatTimes]'], {}), '([N * numOfExpts4Lbl, repeatTimes])\n', (2184, 2219), True, 'import numpy as np\n'), ((2279, 2305), 'numpy.zeros', 'np.zeros', (['[N, repeatTimes]'], {}), '([N, repeatTimes])\n', (2287, 2305), True, 'import numpy as np\n'), ((2367, 2414), 'numpy.zeros', 'np.zeros', (['[M * numOfExpts4Cmp, repeatTimes * K]'], {}), '([M * numOfExpts4Cmp, repeatTimes * K])\n', (2375, 2414), True, 'import numpy as np\n'), ((2452, 2499), 'numpy.zeros', 'np.zeros', (['[M * numOfExpts4Cmp, repeatTimes * K]'], {}), '([M * numOfExpts4Cmp, repeatTimes * K])\n', (2460, 2499), True, 'import numpy as np\n'), ((2536, 2562), 'numpy.zeros', 'np.zeros', (['[1, repeatTimes]'], {}), '([1, repeatTimes])\n', (2544, 2562), True, 'import numpy as np\n'), ((2580, 2606), 'numpy.zeros', 'np.zeros', (['[1, repeatTimes]'], {}), '([1, repeatTimes])\n', (2588, 2606), True, 'import numpy as np\n'), ((2624, 2650), 'numpy.zeros', 'np.zeros', (['[1, repeatTimes]'], {}), '([1, repeatTimes])\n', (2632, 2650), True, 'import numpy as np\n'), ((2665, 2695), 'numpy.zeros', 'np.zeros', (['[repeatTimes * K, d]'], {}), '([repeatTimes * K, d])\n', (2673, 2695), True, 'import numpy as np\n'), ((2711, 2741), 'numpy.zeros', 'np.zeros', (['[repeatTimes * K, 1]'], {}), '([repeatTimes * K, 1])\n', 
(2719, 2741), True, 'import numpy as np\n'), ((5340, 5395), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['Yl', 'scoreSin2Abs[:, repeatCount]'], {}), '(Yl, scoreSin2Abs[:, repeatCount])\n', (5361, 5395), False, 'from sklearn import metrics\n'), ((5433, 5491), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['YlRSD', 'scoreSin2RSD[:, repeatCount]'], {}), '(YlRSD, scoreSin2RSD[:, repeatCount])\n', (5454, 5491), False, 'from sklearn import metrics\n'), ((5620, 5710), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['Yc[indexCmpValidTe]', 'scoreSin2Cmp[indexCmpValidTe, repeatCount]'], {}), '(Yc[indexCmpValidTe], scoreSin2Cmp[indexCmpValidTe,\n repeatCount])\n', (5641, 5710), False, 'from sklearn import metrics\n'), ((3146, 3175), 'numpy.reshape', 'np.reshape', (['trainLIndex', '[-1]'], {}), '(trainLIndex, [-1])\n', (3156, 3175), True, 'import numpy as np\n'), ((3201, 3229), 'numpy.reshape', 'np.reshape', (['testLIndex', '[-1]'], {}), '(testLIndex, [-1])\n', (3211, 3229), True, 'import numpy as np\n'), ((3256, 3285), 'numpy.reshape', 'np.reshape', (['trainCIndex', '[-1]'], {}), '(trainCIndex, [-1])\n', (3266, 3285), True, 'import numpy as np\n'), ((3311, 3339), 'numpy.reshape', 'np.reshape', (['testCIndex', '[-1]'], {}), '(testCIndex, [-1])\n', (3321, 3339), True, 'import numpy as np\n'), ((3649, 3661), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3657, 3661), True, 'import numpy as np\n'), ((3915, 3955), 'numpy.tile', 'np.tile', (['trainFeatC', '[numOfExpts4Cmp, 1]'], {}), '(trainFeatC, [numOfExpts4Cmp, 1])\n', (3922, 3955), True, 'import numpy as np\n'), ((3738, 3788), 'numpy.append', 'np.append', (['YtrainExp13C', 'labelCmp[trainCIndex, eC]'], {}), '(YtrainExp13C, labelCmp[trainCIndex, eC])\n', (3747, 3788), True, 'import numpy as np\n'), ((4182, 4251), 'cvxOpt.SVM_Log', 'SVM_Log', (['XtrainSinL', 'YtrainSinL', 'XtrainSinC', 'YtrainSinC', 'alpha', 'lamda'], {}), '(XtrainSinL, YtrainSinL, XtrainSinC, YtrainSinC, alpha, 
lamda)\n', (4189, 4251), False, 'from cvxOpt import SVM_Log\n'), ((4274, 4291), 'numpy.array', 'np.array', (['betaSin'], {}), '(betaSin)\n', (4282, 4291), True, 'import numpy as np\n'), ((4319, 4337), 'numpy.array', 'np.array', (['constSin'], {}), '(constSin)\n', (4327, 4337), True, 'import numpy as np\n'), ((4438, 4457), 'numpy.array', 'np.array', (['betaSin.T'], {}), '(betaSin.T)\n', (4446, 4457), True, 'import numpy as np\n'), ((5174, 5200), 'numpy.dot', 'np.dot', (['testFeatL', 'betaSin'], {}), '(testFeatL, betaSin)\n', (5180, 5200), True, 'import numpy as np\n'), ((5527, 5571), 'numpy.reshape', 'np.reshape', (['locSin2Cmp[:, repeatCount]', '[-1]'], {}), '(locSin2Cmp[:, repeatCount], [-1])\n', (5537, 5571), True, 'import numpy as np\n'), ((4721, 4747), 'numpy.dot', 'np.dot', (['testFeatL', 'betaSin'], {}), '(testFeatL, betaSin)\n', (4727, 4747), True, 'import numpy as np\n'), ((4940, 4966), 'numpy.dot', 'np.dot', (['testFeatC', 'betaSin'], {}), '(testFeatC, betaSin)\n', (4946, 4966), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from numpy import array, arange
from pyNastran.gui.qt_version import qt_version
from qtpy import QtCore, QtGui
from qtpy.QtWidgets import (
QDialog, QPushButton, QApplication,
QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem,
)
# qtpy QString shim: PyQt4 exposes a real QString; Qt5 bindings use native str.
if qt_version == 4:
    QString = QtCore.QString
elif qt_version == 5:
    QString = str
else:
    raise NotImplementedError('qt_version = %r' % qt_version)
from pyNastran.gui.menus.groups_modify import Group
class GroupsPostView(QDialog):
    """Dialog for posting/showing/hiding/deleting element groups.

    +------------------------+
    |  Groups : Post/Delete  |
    +------------------------+
    |                        |
    |  check1      Name1     |
    |  check2      Name2     |
    |  check3      Name3     |
    |                        |
    |        SetAsMain       |
    |    Apply  OK  Close    |
    +------------------------+
    """
    def __init__(self, data, win_parent=None):
        """Build the dialog from ``data`` ('groups', 'inames', 'imain', 'shown')."""
        self.win_parent = win_parent
        groups = data['groups']
        inames = data['inames']
        self.imain = data['imain']
        self.names = [group.name for group in groups]
        self.white = (255, 255, 255)
        self.light_grey = (211, 211, 211)
        self.inames = inames
        self.shown_set = data['shown']
        self.deleted_groups = set()
        anames = array(self.names)
        for iname, name in enumerate(anames[self.inames]):
            print('name[%s] = %r' % (iname, name))
        self.out_data = data

        QDialog.__init__(self, win_parent)
        self.setWindowTitle('Groups: Post/View')
        self.create_widgets()
        self.create_layout()
        self.set_connections()

    def create_widgets(self):
        """Create the buttons and the per-group checkbox/name table items."""
        # main/delete/supergroup
        self.set_as_main_button = QPushButton("Set As Main")
        self.create_super_group_button = QPushButton("Create Super Group")
        self.delete_groups_button = QPushButton("Delete Groups")
        self.revert_groups_button = QPushButton("Revert Groups")
        self.show_groups_button = QPushButton("Show Groups")
        self.hide_groups_button = QPushButton("Hide Groups")
        # closing
        self.apply_button = QPushButton("Apply")
        self.ok_button = QPushButton("OK")
        self.cancel_button = QPushButton("Cancel")
        # table
        self.table = QTableWidget()
        self.checks = []
        self.names_text = []
        # bold/italic font marks the main group's row
        bold = QtGui.QFont()
        bold.setBold(True)
        bold.setItalic(True)
        bold.setWeight(75)
        anames = array(self.names)
        for iname, name in enumerate(anames[self.inames]):
            check = QTableWidgetItem()
            check.setCheckState(False)
            # TODO: create right click menu ???
            name_text = QTableWidgetItem(str(name))
            if iname == self.imain:
                # the main group is always shown and checked
                name_text.setFont(bold)
                self.shown_set.add(iname)
                check.setCheckState(2)
                name_text.setBackground(QtGui.QColor(*self.light_grey))
            elif iname in self.shown_set:
                name_text.setBackground(QtGui.QColor(*self.light_grey))
            self.checks.append(check)
            self.names_text.append(name_text)

    def create_layout(self):
        """Lay out the table, the action buttons, and the closing buttons."""
        nrows = len(self.names)
        table = self.table
        table.setRowCount(nrows)
        table.setColumnCount(2)
        headers = [QString('Operate On'), QString('Name')]
        table.setHorizontalHeaderLabels(headers)
        header = table.horizontalHeader()
        header.setStretchLastSection(True)
        for iname in self.inames:
            check = self.checks[iname]
            name_text = self.names_text[iname]
            # row, col, value
            table.setItem(iname, 0, check)
            table.setItem(iname, 1, name_text)
        table.resizeRowsToContents()

        ok_cancel_box = QHBoxLayout()
        ok_cancel_box.addWidget(self.apply_button)
        ok_cancel_box.addWidget(self.ok_button)
        ok_cancel_box.addWidget(self.cancel_button)

        vbox = QVBoxLayout()
        vbox.addWidget(table)
        vbox.addWidget(self.set_as_main_button)
        #vbox.addWidget(self.create_super_group_button)
        vbox.addStretch()
        vbox.addWidget(self.show_groups_button)
        vbox.addWidget(self.hide_groups_button)
        vbox.addStretch()
        vbox.addWidget(self.delete_groups_button)
        vbox.addWidget(self.revert_groups_button)
        vbox.addStretch()
        vbox.addStretch()
        vbox.addLayout(ok_cancel_box)
        self.setLayout(vbox)

    def set_connections(self):
        """creates the actions for the menu"""
        self.set_as_main_button.clicked.connect(self.on_set_as_main)
        self.delete_groups_button.clicked.connect(self.on_delete_groups)
        self.revert_groups_button.clicked.connect(self.on_revert_groups)
        self.show_groups_button.clicked.connect(self.on_show_groups)
        self.hide_groups_button.clicked.connect(self.on_hide_groups)
        self.create_super_group_button.clicked.connect(self.on_create_super_group)
        self.apply_button.clicked.connect(self.on_apply)
        self.ok_button.clicked.connect(self.on_ok)
        self.cancel_button.clicked.connect(self.on_cancel)

    def closeEvent(self, event):
        event.accept()

    @property
    def nrows(self):
        """Number of rows in the groups table."""
        return self.table.rowCount()

    def on_hide_groups(self):
        """Clear the highlight of every checked group."""
        self._set_highlight(self.white)

    def on_show_groups(self):
        """Highlight every checked group."""
        self._set_highlight(self.light_grey)

    def _set_highlight(self, color):
        """Set the background ``color`` on every checked row."""
        for irow in range(self.nrows):
            check = self.checks[irow]
            is_checked = check.checkState()
            # 0 - unchecked
            # 1 - partially checked (invalid)
            # 2 - checked
            if is_checked:
                name_text = self.names_text[irow]
                name_text.setBackground(QtGui.QColor(*color))

    def on_delete_groups(self):
        """Hide the checked rows and remember them as deleted."""
        for irow in range(self.nrows):
            check = self.checks[irow]
            is_checked = check.checkState()
            # 0 - unchecked
            # 1 - partially checked (invalid)
            # 2 - checked
            if irow == 0 and is_checked:
                # TODO: change this to a log
                print('error deleting group ALL...change this to a log')
                #self.window_parent.log
                return
            if is_checked:
                self.table.hideRow(irow)
                self.deleted_groups.add(irow)
                check.setCheckState(0)
        # if the main group was deleted, fall back to group 0 (ALL)
        if self.imain > 0 and self.shown_set == set([0]):
            bold = QtGui.QFont()
            bold.setBold(True)
            bold.setItalic(True)
            self.imain = 0
            irow = 0
            check = self.checks[irow]
            # BUG FIX: was self.names_texts (AttributeError); the list is
            # self.names_text.
            name_text = self.names_text[irow]
            name_text.setFont(bold)
            name_text.setBackground(QtGui.QColor(*self.light_grey))

    def on_revert_groups(self):
        """Unhide every row and forget all pending deletions."""
        for irow in range(self.nrows):
            self.table.showRow(irow)
        self.deleted_groups = set()

    def on_create_super_group(self):
        """Merge the checked groups into a new 'SuperGroup' row."""
        inames = [iname for iname, check in enumerate(self.checks)
                  if bool(check.checkState())]
        if not len(inames):
            # TODO: add logging
            print('nothing is checked...')
            return
        if inames[0] == 0:
            # TODO: add logging
            print("cannot include 'ALL' in supergroup...")
            return
        name = 'SuperGroup'
        # popup gui and get a name
        irow = self.table.rowCount()
        self.table.insertRow(irow)
        check = QTableWidgetItem()
        check.setCheckState(False)
        name_text = QTableWidgetItem(str(name))
        # BUG FIX: extend(name) added the string character-by-character;
        # append the whole name instead.
        self.names.append(name)
        self.names_text.append(name_text)
        self.checks.append(check)
        self.table.setItem(irow, 0, check)
        self.table.setItem(irow, 1, name_text)

    def on_set_as_main(self):
        """Make the first checked group the main group and restyle rows."""
        bold = QtGui.QFont()
        bold.setBold(True)
        bold.setItalic(True)
        normal = QtGui.QFont()
        normal.setBold(False)
        normal.setItalic(False)
        imain = None
        imain_set = False
        for irow in range(self.nrows):
            check = self.checks[irow]
            name_text = self.names_text[irow]
            is_checked = check.checkState()
            # 0 - unchecked
            # 1 - partially checked (invalid)
            # 2 - checked
            if is_checked and not imain_set:
                # the first checked row becomes the new main group
                # TODO: change this to a log
                #self.window_parent.log
                imain_set = True
                imain = irow
                name_text.setFont(bold)
                name_text.setBackground(QtGui.QColor(*self.light_grey))
                self.shown_set.add(irow)
            elif irow == self.imain:
                # un-style the previous main group
                name_text.setFont(normal)
                if irow == 0:
                    name_text.setBackground(QtGui.QColor(*self.white))
                    if irow in self.shown_set:
                        self.shown_set.remove(irow)
                elif imain == 0:
                    name_text.setBackground(QtGui.QColor(*self.white))
                    self.shown_set.remove(imain)
        self.imain = imain

    def get_main_group(self):
        """Return the index of the main group."""
        return self.imain

    def get_shown_group(self):
        """Return the set of shown group indices."""
        return self.shown_set

    def get_deleted_groups(self):
        """Return the set of deleted group indices."""
        return self.deleted_groups

    def on_validate(self):
        """Collect the dialog state into out_data; returns True on success."""
        flag0 = flag1 = flag2 = True
        main_group_id = self.get_main_group()
        shown_groups_ids = self.get_shown_group()
        deleted_group_ids = self.get_deleted_groups()
        if flag0 and flag1 and flag2:
            self.out_data['imain'] = main_group_id
            self.out_data['shown'] = shown_groups_ids
            self.out_data['remove'] = deleted_group_ids
            self.out_data['clicked_ok'] = True
            return True
        return False

    def on_apply(self):
        """Validate and push the result to the parent window."""
        passed = self.on_validate()
        if passed:
            self.win_parent.on_post_group(self.out_data)

    def on_ok(self):
        """Validate and close the dialog."""
        passed = self.on_validate()
        if passed:
            self.close()

    def on_cancel(self):
        self.close()
def on_post_group(data):
    """Debug stand-in for a parent window's on_post_group callback."""
    print('hi')
def main():  # pragma: no cover
    """Standalone demo: open the Groups Post/View dialog with sample data."""
    import signal
    import sys
    # Let Ctrl+C from the command line kill the program immediately
    # (no state is saved, since presumably something went wrong).
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    # Someone is launching this directly; create the QApplication.
    app = QApplication(sys.argv)
    app.on_post_group = on_post_group

    long_group = Group('this is a really long name', [1, 2, 3], 4)
    frog_group = Group('frog', [1, 3], 4)
    dog_group = Group('dog', [1, 2, 3, 5], 4)
    all_group = Group('ALL', [1, 2, 3, 34], 4)
    print(dog_group)
    groups = [all_group, long_group, frog_group, dog_group] * 3

    # the main window
    data = {
        'groups': groups,
        'inames': arange(len(groups)),
        'imain': 0,
        'shown': set([1, 2, 3]),
        'remove': None,
    }
    window = GroupsPostView(data, win_parent=None)
    window.show()
    # enter the main loop
    app.exec_()


if __name__ == '__main__':  # pragma: no cover
    main()
| [
"qtpy.QtGui.QFont",
"qtpy.QtWidgets.QHBoxLayout",
"pyNastran.gui.menus.groups_modify.Group",
"qtpy.QtWidgets.QTableWidget",
"qtpy.QtWidgets.QVBoxLayout",
"qtpy.QtGui.QColor",
"numpy.array",
"qtpy.QtWidgets.QPushButton",
"qtpy.QtWidgets.QTableWidgetItem",
"signal.signal",
"qtpy.QtWidgets.QApplica... | [((11589, 11633), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_DFL'], {}), '(signal.SIGINT, signal.SIG_DFL)\n', (11602, 11633), False, 'import signal\n'), ((11731, 11753), 'qtpy.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (11743, 11753), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((11806, 11855), 'pyNastran.gui.menus.groups_modify.Group', 'Group', (['"""this is a really long name"""', '[1, 2, 3]', '(4)'], {}), "('this is a really long name', [1, 2, 3], 4)\n", (11811, 11855), False, 'from pyNastran.gui.menus.groups_modify import Group\n'), ((11869, 11893), 'pyNastran.gui.menus.groups_modify.Group', 'Group', (['"""frog"""', '[1, 3]', '(4)'], {}), "('frog', [1, 3], 4)\n", (11874, 11893), False, 'from pyNastran.gui.menus.groups_modify import Group\n'), ((11907, 11936), 'pyNastran.gui.menus.groups_modify.Group', 'Group', (['"""dog"""', '[1, 2, 3, 5]', '(4)'], {}), "('dog', [1, 2, 3, 5], 4)\n", (11912, 11936), False, 'from pyNastran.gui.menus.groups_modify import Group\n'), ((11953, 11983), 'pyNastran.gui.menus.groups_modify.Group', 'Group', (['"""ALL"""', '[1, 2, 3, 34]', '(4)'], {}), "('ALL', [1, 2, 3, 34], 4)\n", (11958, 11983), False, 'from pyNastran.gui.menus.groups_modify import Group\n'), ((1417, 1434), 'numpy.array', 'array', (['self.names'], {}), '(self.names)\n', (1422, 1434), False, 'from numpy import array, arange\n'), ((1960, 1994), 'qtpy.QtWidgets.QDialog.__init__', 'QDialog.__init__', (['self', 'win_parent'], {}), '(self, win_parent)\n', (1976, 1994), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2281, 2307), 'qtpy.QtWidgets.QPushButton', 'QPushButton', (['"""Set As Main"""'], {}), "('Set As Main')\n", (2292, 2307), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, 
QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2349, 2382), 'qtpy.QtWidgets.QPushButton', 'QPushButton', (['"""Create Super Group"""'], {}), "('Create Super Group')\n", (2360, 2382), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2419, 2447), 'qtpy.QtWidgets.QPushButton', 'QPushButton', (['"""Delete Groups"""'], {}), "('Delete Groups')\n", (2430, 2447), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2484, 2512), 'qtpy.QtWidgets.QPushButton', 'QPushButton', (['"""Revert Groups"""'], {}), "('Revert Groups')\n", (2495, 2512), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2548, 2574), 'qtpy.QtWidgets.QPushButton', 'QPushButton', (['"""Show Groups"""'], {}), "('Show Groups')\n", (2559, 2574), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2609, 2635), 'qtpy.QtWidgets.QPushButton', 'QPushButton', (['"""Hide Groups"""'], {}), "('Hide Groups')\n", (2620, 2635), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2683, 2703), 'qtpy.QtWidgets.QPushButton', 'QPushButton', (['"""Apply"""'], {}), "('Apply')\n", (2694, 2703), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2729, 2746), 'qtpy.QtWidgets.QPushButton', 'QPushButton', (['"""OK"""'], {}), "('OK')\n", (2740, 2746), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2776, 2797), 'qtpy.QtWidgets.QPushButton', 'QPushButton', (['"""Cancel"""'], {}), "('Cancel')\n", (2787, 2797), False, 'from qtpy.QtWidgets import QDialog, 
QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2835, 2849), 'qtpy.QtWidgets.QTableWidget', 'QTableWidget', ([], {}), '()\n', (2847, 2849), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((2920, 2933), 'qtpy.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2931, 2933), False, 'from qtpy import QtCore, QtGui\n'), ((3034, 3051), 'numpy.array', 'array', (['self.names'], {}), '(self.names)\n', (3039, 3051), False, 'from numpy import array, arange\n'), ((4914, 4927), 'qtpy.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (4925, 4927), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((5095, 5108), 'qtpy.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (5106, 5108), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((8710, 8728), 'qtpy.QtWidgets.QTableWidgetItem', 'QTableWidgetItem', ([], {}), '()\n', (8726, 8728), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((9059, 9072), 'qtpy.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (9070, 9072), False, 'from qtpy import QtCore, QtGui\n'), ((9147, 9160), 'qtpy.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (9158, 9160), False, 'from qtpy import QtCore, QtGui\n'), ((3131, 3149), 'qtpy.QtWidgets.QTableWidgetItem', 'QTableWidgetItem', ([], {}), '()\n', (3147, 3149), False, 'from qtpy.QtWidgets import QDialog, QPushButton, QApplication, QHBoxLayout, QVBoxLayout, QTableWidget, QTableWidgetItem\n'), ((7682, 7695), 'qtpy.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (7693, 7695), False, 'from qtpy import QtCore, QtGui\n'), ((7966, 7996), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['*self.light_grey'], {}), '(*self.light_grey)\n', (7978, 7996), 
False, 'from qtpy import QtCore, QtGui\n'), ((3487, 3517), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['*self.light_grey'], {}), '(*self.light_grey)\n', (3499, 3517), False, 'from qtpy import QtCore, QtGui\n'), ((6952, 6972), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['*color'], {}), '(*color)\n', (6964, 6972), False, 'from qtpy import QtCore, QtGui\n'), ((9811, 9841), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['*self.light_grey'], {}), '(*self.light_grey)\n', (9823, 9841), False, 'from qtpy import QtCore, QtGui\n'), ((3601, 3631), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['*self.light_grey'], {}), '(*self.light_grey)\n', (3613, 3631), False, 'from qtpy import QtCore, QtGui\n'), ((10037, 10062), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['*self.white'], {}), '(*self.white)\n', (10049, 10062), False, 'from qtpy import QtCore, QtGui\n'), ((10240, 10265), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['*self.white'], {}), '(*self.white)\n', (10252, 10265), False, 'from qtpy import QtCore, QtGui\n')] |
#!/usr/bin/env python
VERSION_STR = 'TGSA v0.99 18-Apr-2013'
import os
from math import sin,cos,atan,pi,log
import numpy as np
from scipy.ndimage.interpolation import rotate
from scipy.signal import medfilt
import pyfits
import ConfigParser
import wx
import wx.html as html
import wx.grid as grid
import wx.aui as aui
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
import matplotlib.pyplot as plt
import matplotlib.font_manager
import matplotlib.figure
import matplotlib.gridspec as gridspec
# MKS Conversions
cm = 1.e-2
mm = 1.e-3
micron = 1.e-6
nm = 1.e-9
deg = pi/180.  # degrees -> radians
arcmin = deg/60.
arcsec = deg/3600.
# User-configurable program settings
DEFAULT_APP_WIDTH=900 # default application window width
DEFAULT_APP_HEIGHT=650 # default application window height
PIXEL_START = 400 # start of spectrum from zeroth order
PIXEL_END = 1200 # end of spectrum from zeroth order
PIXEL_SCALE = 1.5 # nm to pixel scale
DEFAULT_WIDTH = 30 # default spectrum width in pixels
DEFAULT_WMIN = 350 # lower wavelength limit to plot
DEFAULT_WMAX = 750 # upper wavelength limit to plot
DEFAULT_WSPLIT = '483.3, 616.7' # default wavelengths to stitch
STITCH_WIDTH = 3 # pixel window to use for scaling when stitching
REDSHIFT_Z = 0.0 # default redshift to apply
MEDAVG_WIDTH = 1 # default median averaging width
ZOOM_MAX = 4.0 # max zoom multiplier (power of two)
ZOOM_MIN = 0.03125 # min zoom multiplier (power of two)
TILT_INC = 0.25*deg # spectrum tilt increment, in radians (0.25 degrees)
# Which spectral-line overlays are enabled by default on plots
PLOT_BALMER = True
PLOT_HELIUM = False
PLOT_METALLIC = False
PLOT_TELLURIC = False
# Telescope and grating parameters
f_ratio = 14 # Telescope focal ratio (f-number); focal length is Diam*f_ratio
Diam = 37*cm # Telescope diameter
L = 38.8*mm # Distance from grating to CCD sensor
lpmm = 600/mm # Grating lines per mm
npixel = 2048 # Number of pixels along dispersion direction
pixel = 18*micron # Pixel size
# Derived quantities
d_g = 1/lpmm # grating line spacing [m]
FL = Diam*f_ratio # telescope focal length [m]
x_ctr = npixel/2 # central pixel along the dispersion direction
# Custom wx event type/binder used to push status-bar text to MainWindow
myEVT_BROADCAST = wx.NewEventType()
EVT_BROADCAST = wx.PyEventBinder(myEVT_BROADCAST, 1)
class MsgEvent(wx.PyCommandEvent):
    """Custom command event carrying a text payload.

    Posted with the myEVT_BROADCAST event type so child windows can push
    status-bar messages up to the main frame.
    """

    # Default payload when setMsg() has never been called.
    msg = None

    def __init__(self, evtType, id):
        wx.PyCommandEvent.__init__(self, evtType, id)

    def setMsg(self, msg):
        """Attach *msg* to this event instance."""
        self.msg = msg

    def getMsg(self):
        """Return the payload previously stored with setMsg() (or None)."""
        return self.msg
class FigPanel(wx.Panel):
    """wx panel hosting a matplotlib figure of an extracted spectrum.

    Stores the last-plotted wavelength/intensity arrays on the instance so
    the plot can be refreshed after changing overlays, redshift, averaging
    width, or wavelength range.
    """
    def __init__(self, *args, **kwargs):
        super(FigPanel, self).__init__(*args, **kwargs)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.fig = plt.Figure()
        self.canvas = FigureCanvas(self, -1, self.fig)
        self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.EXPAND)
        self.SetSizer(self.sizer)
        # Toolbar is kept hidden; it exists only so MainWindow.onAdjust can
        # open its configure_subplots dialog.
        self.toolbar = NavigationToolbar2Wx(self.canvas)
        self.toolbar.Hide()
        self.wmin = DEFAULT_WMIN
        self.wmax = DEFAULT_WMAX
    def plot(self, wave=[], intensity_wt=[]):
        """Draw (or redraw) the spectrum plot.

        wave, intensity_wt -- new data to plot; when *wave* is empty the
        previously stored self.wave / self.intensity_wt are re-plotted.
        NOTE(review): the mutable default arguments are only read, never
        mutated, so the shared-default pitfall does not bite here.
        """
        self.fig.clear()
        if len(wave)>0:
            self.wave = wave
            self.intensity_wt = intensity_wt
        # run Median filter if requested
        if MEDAVG_WIDTH != 1:
            ampl = medfilt(self.intensity_wt,MEDAVG_WIDTH)
        else:
            ampl = self.intensity_wt
        # Two stacked axes (spectrum + strip image) when a strip is attached
        if hasattr(self, 'spec'):
            gs = gridspec.GridSpec(2, 1,height_ratios=[3,1])
        else:
            gs = gridspec.GridSpec(1,1)
        ax1 = self.fig.add_subplot(gs[0])
        ax1.plot(self.wave,ampl, 'r-')
        ax1.set_xlabel('Wavelength [nm]')
        ax1.set_ylabel('Normalized intensity')
        # Set plot limits
        ax1.set_xlim(self.wmin, self.wmax)
        # y-limits are taken from the unfiltered data, with 20% headroom
        ymin = np.min(self.intensity_wt); ymax = 1.2*np.max(self.intensity_wt)
        ax1.set_ylim(ymin,ymax)
        # Plot reference lines if requested
        if PLOT_BALMER: plt_Balmer(ax1, REDSHIFT_Z)
        if PLOT_HELIUM: plt_Helium(ax1, REDSHIFT_Z)
        if PLOT_METALLIC: plt_Metals(ax1, REDSHIFT_Z)
        if PLOT_TELLURIC: plt_Telluric(ax1, REDSHIFT_Z)
        if PLOT_BALMER or PLOT_HELIUM or PLOT_METALLIC or PLOT_TELLURIC:
            ax1.legend(loc='upper right',prop={'size':10})
        # Add title
        if not hasattr(self, 'title'):
            title = 'Rigel TGS Spectrum Plot'
            ax1.set_title(title, fontsize=12)
        else:
            ax1.set_title(self.title, fontsize=12)
        ax1.grid(True)
        # Strip Spectrum
        if hasattr(self, 'spec'):
            ax2 = self.fig.add_subplot(gs[1])
            ax2.imshow(self.spec)
            ax2.set_xlabel("Pixel offset from zeroth order (+%d)" % (PIXEL_START))
            ax2.get_yaxis().set_visible(False)
        # Add version info in lower right corner
        ax1.text(0.85,-0.5,VERSION_STR,ha ='left',fontsize=8, transform = ax1.transAxes)
        self.canvas.draw()
        # Mark as dirty so MainWindow.onClose prompts to save
        self.is_saved = False
class ImgViewer(wx.SplitterWindow):
    """Viewer for a FITS image: scrollable bitmap plus a (hidden) FITS-header
    grid, with an interactive selection box marking the spectrum strip.

    The selection is defined by specZero (zeroth-order point), specWidth
    (strip width in pixels) and alignDegrees (strip tilt); extract() pulls
    the background-subtracted spectrum out of that strip.
    """
    def __init__(self, path, data, hdr, *args, **kwargs):
        """path -- FITS file path (basename kept for titles/exports);
        data -- 2-D image array; hdr -- FITS header object."""
        super(ImgViewer, self).__init__(*args, **kwargs)
        self.scroll = wx.ScrolledWindow(self)
        self.info = wx.grid.Grid(self)
        self.info.CreateGrid(1,3)
        self.info.SetColLabelSize(0)
        self.info.SetRowLabelSize(0)
        # Split then immediately unsplit: header pane starts hidden but can
        # be toggled back (see MainWindow.onShowFITS).
        self.SplitVertically(self.info, self.scroll, 200)
        self.Unsplit(self.info)
        # Load the FITS image
        self.fname = os.path.basename(path)
        self.data = data
        self.hdr = hdr
        (w, h) = self.data.shape
        # Initial display range: min .. min + 16 sigma
        self.imin = np.min(self.data)
        self.imax = self.imin + 16.0*np.std(self.data)
        b = np.clip( 255.99*(self.data-self.imin)/(self.imax-self.imin),0,255.99 )
        self.img = wx.EmptyImage(w, h)
        # Replicate the scaled intensities into R,G,B for a greyscale bitmap
        self.img.SetData( np.dstack((b,b,b)).astype('uint8').tostring() )
        self.bmp = wx.BitmapFromImage(self.img)
        i=0
        # Populate the header grid with keyword / value / comment columns
        for k in hdr.keys():
            if hdr.get(k,False):
                while i>=self.info.GetNumberRows():
                    self.info.AppendRows()
                self.info.SetCellValue(i,0,"%s" % k)
                self.info.SetCellValue(i,1,"%s" % (hdr.get(k)) )
                if hdr.comments[k] != '':
                    self.info.SetCellValue(i,2,"%s" % (hdr.comments[k]) )
                for j in range(3): self.info.SetReadOnly(i,j,True)
                i+=1
        self.info.AutoSize()
        # Object data
        self.zoomLevel = 1.0
        self.was_zoomed=False
        self.alignDegrees = 0.0
        # Start the selection at the brightest pixel (assumed zeroth order);
        # argmax gives (row, col) so x/y are swapped into the wx.Point.
        xymax = np.unravel_index(np.argmax(self.data),self.data.shape)
        self.specZero = wx.Point(xymax[1],xymax[0])
        self.specWidth = DEFAULT_WIDTH
        # Event bindings
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.onLeftDown)
        self.scroll.Bind(wx.EVT_PAINT, self.onPaint)
        self.scroll.Bind(wx.EVT_MOTION, self.onMotion)
        self.scroll.Bind(wx.EVT_LEAVE_WINDOW, self.onLeave)
        self.scroll.Bind(wx.EVT_IDLE, self.zoom_redraw)
        self.scroll.SetVirtualSize((w,h))
        self.scroll.SetScrollRate(20,20)
        # Scroll so the zeroth-order point is near the top-left of the view
        x,y=self.scroll.CalcScrolledPosition(xymax[1]-20,xymax[0]-20)
        dx,dy=self.scroll.GetScrollPixelsPerUnit()
        self.scroll.Scroll(x/dx,y/dy)
    def onMotion(self, event):
        """Broadcast the (unzoomed) cursor position and zoom % to the status bar."""
        pos = event.GetPosition()
        (x, y) = self.scroll.CalcUnscrolledPosition(pos.x, pos.y)
        info = "(%d, %d) %d%%" % (x/self.zoomLevel, y/self.zoomLevel, int(self.zoomLevel*100))
        event = MsgEvent(myEVT_BROADCAST, self.GetId())
        event.setMsg(info)
        self.GetEventHandler().ProcessEvent(event)
    def onLeave(self, event):
        """Clear the status bar when the cursor leaves the image."""
        event = MsgEvent(myEVT_BROADCAST, self.GetId())
        event.setMsg('')
        self.GetEventHandler().ProcessEvent(event)
    def onLeftDown(self, event):
        """Move the zeroth-order point to the clicked (unzoomed) pixel."""
        pos = event.GetPosition()
        (x, y) = self.scroll.CalcUnscrolledPosition(pos.x, pos.y)
        self.specZero = wx.Point(x/self.zoomLevel, y/self.zoomLevel)
        self.scroll.Refresh()
    def onPaint(self, event):
        """Repaint the bitmap and selection overlay."""
        dc = wx.PaintDC(self.scroll)
        self.scroll.DoPrepareDC(dc)
        self.draw(dc)
    def adjustImage(self, min=-1, max=-1):
        """Rescale display brightness to the [min, max] intensity range.

        Negative arguments (the defaults) fall back to the stored
        self.imin / self.imax.  Rebuilds the bitmap at the current zoom.
        """
        if min<0: min=self.imin
        if max<0: max=self.imax
        if max==min:max+=1.0
        b = np.clip( 256.0*(self.data-min)/(max-min),0,255.99 )
        self.img.SetData( np.dstack((b,b,b)).astype('uint8').tostring() )
        self.zoom(mult=1)
    def draw(self, dc):
        """Draw the bitmap, zeroth-order crosshair and tilted selection box."""
        dc.DrawBitmap(self.bmp,0,0,False)
        dc.SetBrush(wx.Brush('#000000', wx.TRANSPARENT))
        dc.SetPen(wx.Pen('RED', 1, wx.SOLID))
        x = self.specZero.x*self.zoomLevel
        y = self.specZero.y*self.zoomLevel
        cosa = cos(-self.alignDegrees)
        sina = sin(-self.alignDegrees)
        dw = 0.5*self.specWidth*self.zoomLevel
        p0 = PIXEL_START*self.zoomLevel
        pf = PIXEL_END*self.zoomLevel
        # Crosshair at the zeroth-order point
        dc.DrawLine(x-10,y,x+10,y)
        dc.DrawLine(x,y-10,x,y+10)
        # Four corners of the rotated selection rectangle (closed polyline)
        sel = [ wx.Point( x + p0*cosa + dw*sina, y + p0*sina - dw*cosa ) ]
        sel += [ wx.Point( x + p0*cosa - dw*sina, y + p0*sina + dw*cosa ) ]
        sel += [ wx.Point( x + pf*cosa - dw*sina, y + pf*sina + dw*cosa ) ]
        sel += [ wx.Point( x + pf*cosa + dw*sina, y + pf*sina - dw*cosa ) ]
        sel += [ wx.Point( x + p0*cosa + dw*sina, y + p0*sina - dw*cosa ) ]
        dc.DrawLines(sel)
    def zoom(self, mult=1.0, zoom=1.0):
        """Rescale the displayed bitmap.

        mult -- multiply the current zoom (snapped to a power of two);
        zoom -- absolute zoom level, used only when mult == 0.
        """
        if mult != 0:
            zoom = 2**(int(log(self.zoomLevel*mult,2)))
        if zoom>ZOOM_MAX:
            zoom=ZOOM_MAX
        if zoom<ZOOM_MIN:
            zoom=ZOOM_MIN
        w,h = self.img.GetWidth(), self.img.GetHeight()
        w *= zoom
        h *= zoom
        self.bmp = wx.BitmapFromImage(self.img.Scale(w, h))
        self.scroll.SetVirtualSize((w,h))
        self.scroll.SetScrollRate(20,20)
        self.scroll.Refresh(True)
        # NOTE: status text still shows the pre-change zoomLevel here
        info = "Zoom level %d%%" % (int(self.zoomLevel*100))
        event = MsgEvent(myEVT_BROADCAST, self.GetId())
        event.setMsg(info)
        self.GetEventHandler().ProcessEvent(event)
        self.zoomLevel=zoom
        self.was_zoomed=True
    def zoom_redraw(self, event):
        # This is a hack for Mac OS X - selection box shows up in
        # old spot - maybe scrolledwindow virtual size isn't updating
        # until idle?
        if self.was_zoomed:
            self.Refresh(True)
            self.was_zoomed = False
    def zoom_fit(self):
        """Zoom so the whole image fits in the visible client area (max 100%)."""
        vw,vh = self.scroll.GetClientSize()
        w,h = self.img.GetWidth(), self.img.GetHeight()
        z = 1.*vw/w
        if 1.*vh/h<z: z=1.*vh/h
        if z>1.0: z=1.0
        self.zoom(0, z)
    def sel_width(self, inc):
        """Grow/shrink the selection strip width by *inc* pixels (min 1)."""
        self.specWidth += inc
        if self.specWidth<1: self.specWidth=1
        self.Refresh()
    def sel_tilt(self, inc):
        """Tilt the selection strip by *inc* (radians; callers pass TILT_INC).

        NOTE(review): alignDegrees is actually accumulated in radians
        (TILT_INC = 0.25*deg and extract() divides by `deg`), so the +/-60
        clamp below bounds radians, not degrees -- confirm intended limit.
        """
        self.alignDegrees += inc
        if self.alignDegrees<-60: self.alignDegrees=-60
        if self.alignDegrees> 60: self.alignDegrees= 60
        self.Refresh()
    def sel_nudge(self, dir):
        """Move the zeroth-order point by (dx, dy) pixels, clamped to the image."""
        (dx, dy) = dir
        self.specZero.x += dx
        self.specZero.y += dy
        w,h = self.img.GetWidth(), self.img.GetHeight()
        if self.specZero.x > w: self.specZero.x = w
        if self.specZero.x < 0: self.specZero.x = 0
        if self.specZero.y > h: self.specZero.y = h
        if self.specZero.y < 0: self.specZero.y = 0
        self.Refresh()
    def extract(self):
        """Extract the spectrum from the current selection.

        Returns (wave, intensity_wt, spec): wavelengths [nm], normalized
        sensitivity-corrected intensities, and the 2-D strip image.
        Returns None when the selection starts beyond the image edge.
        """
        # Define subimage containing dispersed spectrum
        xmin = PIXEL_START + self.specZero.x
        xmax = PIXEL_END + self.specZero.x
        ymin = self.specZero.y - self.specWidth/2.
        ymax = self.specZero.y + self.specWidth/2.
        w, h = self.data.shape
        if xmin>=w: return
        if xmax>=w: xmax=w-1
        if ymin<0: ymin=0
        if ymax>=h: ymax=h-1
        # De-rotate the whole frame when the selection is tilted
        if self.alignDegrees != 0.0: im = rotate(self.data, -self.alignDegrees/deg)
        else: im = np.copy(self.data)
        spec = im[ymin:ymax,xmin:xmax]
        # Get 'off source' spectrum to find background
        yoff = self.specWidth
        spec_off = im[ymin+yoff:ymax+yoff,xmin:xmax]
        # Subtract median background from each pixel
        xmed = np.median(spec_off,axis=0)
        spec -= xmed
        spec_sum = np.sum(spec,axis=0)
        # Fill arrays for plotting, using efficiency curves to correct for sensitivity vs wavelength
        npts = xmax - xmin
        wave = np.zeros(npts); intensity = np.zeros(npts)
        intensity_wt = np.zeros(npts)
        # f_lambda / ccd_sens / grating_sens are defined elsewhere in this file
        for n in range(npts):
            wave[n] = f_lambda(xmin-self.specZero.x+n,self.specZero.x)
            wt = ccd_sens(wave[n])*grating_sens(wave[n])
            intensity[n] = float(spec_sum[n])
            intensity_wt[n] = intensity[n]/wt
        # Normalize to unit peak
        intensity_wt /= max(intensity_wt)
        return (wave, intensity_wt, spec)
class MainWindow(wx.Frame):
    """Top-level application frame.

    Hosts an AUI notebook of ImgViewer (FITS image) and FigPanel (plot)
    tabs, builds the menus/toolbar, and owns the stitch and help dialogs.
    Most handlers dispatch on the type of the currently selected tab.
    """
    def __init__(self, filename='noname.txt'):
        super(MainWindow, self).__init__(None)
        super(MainWindow, self).SetTitle('TGS Analyzer')
        self.SetSize(size=wx.Size(DEFAULT_APP_WIDTH,DEFAULT_APP_HEIGHT))
        # _icon = wx.Icon('tgsa.ico', wx.BITMAP_TYPE_ICO)
        # self.SetIcon(_icon)
        # Make interior window components
        self.tabs = aui.AuiNotebook(self)
        self.CreateStatusBar()
        # Event Bindings
        self.Bind(EVT_BROADCAST, self.onBroadcast)
        self.Bind(wx.EVT_CLOSE, self.onExit)
        self.Bind(wx.EVT_CHAR_HOOK, self.onKeyPress)
        self.tabs.Bind(aui.EVT_AUINOTEBOOK_BUTTON, self.onClose)
        # Create Menus
        fileMenu = wx.Menu()
        tmp = fileMenu.Append(wx.ID_ANY,'X')
        tmp.SetBitmap(wx.EmptyBitmap(1,1))
        item = fileMenu.Append(wx.ID_OPEN, '&Open\tCtrl+O', 'Open FITS spectrum file(s)')
        self.Bind(wx.EVT_MENU, self.onOpen, item)
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN))
        fileMenu.Remove(tmp.GetId()) # deals with wxPython bug where first menu item with a bitmap doesn't show up
        item = fileMenu.Append(wx.ID_SAVEAS, '&Save Plot\tCtrl+S', 'Save spectrum plot')
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE))
        self.Bind(wx.EVT_MENU, self.onSave, item)
        item = fileMenu.Append(wx.ID_ANY, '&Close\tCtrl+W', 'Close current window')
        self.Bind(wx.EVT_MENU, self.onClose, item)
        item = fileMenu.Append(wx.ID_ANY, '&Import Data', 'Load spectrum data from a text file')
        self.Bind(wx.EVT_MENU, self.onImport, item)
        item = fileMenu.Append(wx.ID_ANY, 'Export &Data', 'Export spectrum data as text')
        self.Bind(wx.EVT_MENU, self.onExport, item)
        fileMenu.AppendSeparator()
        item = fileMenu.Append(wx.ID_EXIT, 'E&xit\tCtrl+Q', 'Terminate the program')
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_QUIT))
        self.Bind(wx.EVT_MENU, self.onExit, item)
        selMenu = wx.Menu()
        tmp = selMenu.Append(wx.ID_ANY,'X')
        tmp.SetBitmap(wx.EmptyBitmap(1,1))
        item = selMenu.Append(wx.ID_ANY, '&Decrease width\tCtrl+Left', 'Decrease selection width')
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_GO_BACK))
        selMenu.Remove(tmp.GetId())
        self.Bind(wx.EVT_MENU, lambda x: self.onWidth(x, -1), item)
        item = selMenu.Append(wx.ID_ANY, '&Increase width\tCtrl+Right', 'Increase selection width')
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_GO_FORWARD))
        self.Bind(wx.EVT_MENU, lambda x: self.onWidth(x, 1), item)
        item = selMenu.Append(wx.ID_ANY, 'Tilt &up\tCtrl+Up', 'Tilt selection up')
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_GO_UP))
        self.Bind(wx.EVT_MENU, lambda x: self.onTilt(x, TILT_INC), item)
        item = selMenu.Append(wx.ID_ANY, 'Tilt dow&n\tCtrl+Down', 'Tilt selection down')
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_GO_DOWN))
        self.Bind(wx.EVT_MENU, lambda x: self.onTilt(x, -TILT_INC), item)
        item = selMenu.Append(wx.ID_ANY, '&Set Position', 'Set zeroth order diffraction point')
        self.Bind(wx.EVT_MENU, self.onSetPosition, item)
        imgMenu = wx.Menu()
        tmp = imgMenu.Append(wx.ID_ANY, 'X')
        tmp.SetBitmap(wx.EmptyBitmap(1,1))
        item = imgMenu.Append(wx.ID_ZOOM_IN, 'Zoom &in\tCtrl++', 'Zoom in')
        self.Bind(wx.EVT_MENU, lambda x: self.onZoom(x, 2.0), item)
        item = imgMenu.Append(wx.ID_ZOOM_OUT, 'Zoom &out\tCtrl+-', 'Zoom out')
        self.Bind(wx.EVT_MENU, lambda x: self.onZoom(x, 0.5), item)
        item = imgMenu.Append(wx.ID_ZOOM_100, 'Zoo&m 100%\tCtrl+0', 'Zoom to original size')
        self.Bind(wx.EVT_MENU, lambda x: self.onZoom(x, 0), item)
        item = imgMenu.Append(wx.ID_ANY, 'Zoom &Fit', 'Zoom to fit in window')
        self.Bind(wx.EVT_MENU, self.onZoomFit, item)
        imgMenu.AppendSeparator()
        imgMenu.AppendSubMenu(selMenu, '&Selection', 'Modify selection')
        item = imgMenu.Append(wx.ID_ANY, '&Adjust Image Levels...', 'Adjust brightness ranges in image')
        self.Bind(wx.EVT_MENU, self.onAdjImage, item)
        item = imgMenu.Append(wx.ID_ANY, 'Show/Hide FITS &Header\tCtrl+F', 'Toggle visibility of the FITS header data')
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_REPORT_VIEW))
        imgMenu.Remove(tmp.GetId())
        self.Bind(wx.EVT_MENU, self.onShowFITS, item)
        linesMenu = wx.Menu()
        self.Balmer = linesMenu.Append(wx.ID_ANY, 'Balmer Series', 'Show hydrogen Balmer lines on plot', kind=wx.ITEM_CHECK)
        self.Helium = linesMenu.Append(wx.ID_ANY, 'Helium Lines', 'Show helium lines on plot', kind=wx.ITEM_CHECK)
        self.Metallic = linesMenu.Append(wx.ID_ANY, 'Metallic', 'Show metal lines on plot', kind=wx.ITEM_CHECK)
        self.Telluric = linesMenu.Append(wx.ID_ANY, 'Telluric', 'Show atmospheric absorption lines on plot', kind=wx.ITEM_CHECK)
        if PLOT_BALMER: self.Balmer.Check(True)
        if PLOT_HELIUM: self.Helium.Check(True)
        if PLOT_METALLIC: self.Metallic.Check(True)
        if PLOT_TELLURIC: self.Telluric.Check(True)
        modMenu = wx.Menu()
        tmp = modMenu.Append(wx.ID_ANY, 'X')
        tmp.SetBitmap(wx.EmptyBitmap(1,1))
        item = modMenu.Append(wx.ID_ANY, 'Change &Title...', 'Change title of current plot')
        self.Bind(wx.EVT_MENU, self.onSetTitle, item)
        item = modMenu.Append(wx.ID_ANY, 'Change &Range...', 'Change wavelength range of current plot')
        self.Bind(wx.EVT_MENU, self.onSetRange, item)
        item = modMenu.Append(wx.ID_ANY, '&Adjust Plot', 'Adjust size/margins of current plot')
        self.Bind(wx.EVT_MENU, self.onAdjust, item)
        modMenu.Remove(tmp.GetId())
        plotMenu = wx.Menu()
        tmp = plotMenu.Append(wx.ID_ANY, 'X')
        tmp.SetBitmap(wx.EmptyBitmap(1,1))
        item = plotMenu.Append(wx.ID_ANY, 'Create/Redo &Plot', 'Plot spectrum from image or refresh current plot')
        self.Bind(wx.EVT_MENU, self.onPlot, item)
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_NEW))
        plotMenu.Remove(tmp.GetId())
        item = plotMenu.Append(wx.ID_ANY, '&Stitch Spectra...', 'Stitch multiple spectra together')
        self.Bind(wx.EVT_MENU, self.onStitch, item)
        plotMenu.AppendSubMenu(linesMenu, '&Lines', 'Select spectral lines to show')
        item = plotMenu.Append(wx.ID_ANY, 'Set Redshift(&z)...', 'Set redshift of lines')
        self.Bind(wx.EVT_MENU, self.onSetRedshift, item)
        item = plotMenu.Append(wx.ID_ANY, 'Set A&veraging...', 'Set median average width')
        self.Bind(wx.EVT_MENU, self.onSetAverage, item)
        plotMenu.AppendSubMenu(modMenu, '&Modify Plot', 'Modify the current plot')
        helpMenu = wx.Menu()
        tmp = helpMenu.Append(wx.ID_ANY,'X')
        tmp.SetBitmap(wx.EmptyBitmap(1,1))
        item = helpMenu.Append(wx.ID_HELP, '&Help\tF1', 'Get help with this program')
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_QUESTION, size=(16,16)))
        helpMenu.Remove(tmp.GetId())
        self.Bind(wx.EVT_MENU, self.onHelp, item)
        item = helpMenu.Append(wx.ID_ABOUT, '&About', 'Information about this program')
        item.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, size=(16,16)))
        self.Bind(wx.EVT_MENU, self.onAbout, item)
        menuBar = wx.MenuBar()
        menuBar.Append(fileMenu, '&File')
        menuBar.Append(imgMenu, '&Image')
        menuBar.Append(plotMenu, '&Plot')
        menuBar.Append(helpMenu, '&Help')
        self.SetMenuBar(menuBar)
        # Make Toolbars
        fileTool = self.CreateToolBar()
        tool = fileTool.AddLabelTool(wx.ID_OPEN, 'Open', wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN), shortHelp='Open', longHelp='Open a file')
        self.Bind(wx.EVT_TOOL, self.onOpen, tool)
        tool = fileTool.AddLabelTool(wx.ID_ANY, 'Save', wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE), shortHelp='Save', longHelp='Save plot')
        self.Bind(wx.EVT_TOOL, self.onSave, tool)
        fileTool.AddSeparator()
        b = wx.ArtProvider.GetBitmap(wx.ART_GO_BACK)
        tool = fileTool.AddLabelTool(wx.ID_ANY, 'Decrease', b, shortHelp='Decrease width', longHelp='Decrease selection width')
        self.Bind(wx.EVT_TOOL, lambda x: self.onWidth(x, -1), tool)
        b = wx.ArtProvider.GetBitmap(wx.ART_GO_FORWARD)
        tool = fileTool.AddLabelTool(wx.ID_ANY, 'Increase', b, shortHelp='Increase width', longHelp='Increase selection width')
        self.Bind(wx.EVT_TOOL, lambda x: self.onWidth(x, 1), tool)
        b = wx.ArtProvider.GetBitmap(wx.ART_GO_UP)
        tool = fileTool.AddLabelTool(wx.ID_ANY, 'Tilt Up', b, shortHelp='Tilt Up', longHelp='Tilt selection up')
        self.Bind(wx.EVT_TOOL, lambda x: self.onTilt(x, TILT_INC), tool)
        b = wx.ArtProvider.GetBitmap(wx.ART_GO_DOWN)
        tool = fileTool.AddLabelTool(wx.ID_ANY, 'Tilt Down', b, shortHelp='Tilt Down', longHelp='Tilt selection down')
        self.Bind(wx.EVT_TOOL, lambda x: self.onTilt(x, -TILT_INC), tool)
        b = wx.ArtProvider.GetBitmap(wx.ART_NEW)
        tool = fileTool.AddLabelTool(wx.ID_APPLY, 'Plot', b, shortHelp='Plot', longHelp='Plot spectrum from image or refresh current plot')
        self.Bind(wx.EVT_TOOL, self.onPlot, tool)
        fileTool.Realize()
        # Create dialog boxes
        self.stitch = StitchDialog(self)
        self.stitch.Show(False)
        self.stitch.Bind(wx.EVT_BUTTON, self.do_stitch, id=wx.ID_OK)
        self.help = HelpDialog(self)
        self.help.Show(False)
    def readCheckItems(self):
        """Copy the Lines-menu check states into the module-level PLOT_* flags."""
        global PLOT_BALMER, PLOT_HELIUM, PLOT_METALLIC, PLOT_TELLURIC
        if self.Balmer.IsChecked(): PLOT_BALMER=True
        else: PLOT_BALMER=False
        if self.Helium.IsChecked(): PLOT_HELIUM=True
        else: PLOT_HELIUM=False
        if self.Metallic.IsChecked(): PLOT_METALLIC=True
        else: PLOT_METALLIC=False
        if self.Telluric.IsChecked(): PLOT_TELLURIC=True
        else: PLOT_TELLURIC=False
    def onKeyPress(self, event):
        """Arrow keys (no modifiers) nudge the selection in the active ImgViewer."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if isinstance(cur, ImgViewer) and event.GetModifiers()<=0:
            if event.GetKeyCode() == wx.WXK_UP:
                cur.sel_nudge(( 0,-1))
                return
            if event.GetKeyCode() == wx.WXK_DOWN:
                cur.sel_nudge(( 0, 1))
                return
            if event.GetKeyCode() == wx.WXK_LEFT:
                cur.sel_nudge((-1, 0))
                return
            if event.GetKeyCode() == wx.WXK_RIGHT:
                cur.sel_nudge(( 1, 0))
                return
        event.Skip()
    def onOpen(self, event):
        """Open one or more FITS files, each in a new ImgViewer tab."""
        wildcard = "FITS image files (*.fts,*.fits,*.fit)|*.fts;*.fits;*.fit"
        dialog = wx.FileDialog(None, "Choose a file", wildcard=wildcard, style=wx.FD_OPEN|wx.FD_MULTIPLE)
        if dialog.ShowModal() == wx.ID_OK:
            for path in dialog.GetPaths():
                fname = os.path.basename(path)
                try:
                    data,hdr = pyfits.getdata(path,0,header=True)
                except:
                    msg = 'Error opening %s' % (fname)
                    errmsg = wx.MessageDialog(self, msg ,'File Error', style=wx.OK|wx.ICON_ERROR)
                    errmsg.ShowModal()
                    continue
                newim = ImgViewer(parent=self.tabs, path=path, data=data, hdr=hdr)
                self.tabs.AddPage(newim, fname, select=True)
        dialog.Destroy()
    def onImport(self, event):
        """Load previously exported spectrum data (CSV) into a new plot tab."""
        wildcard = "Text file (*.csv)|*.csv"
        dialog = wx.FileDialog(None, "Choose a file", wildcard=wildcard, style=wx.FD_OPEN)
        if dialog.ShowModal() == wx.ID_OK:
            path = dialog.GetPath()
            fname = os.path.basename(path)
            newplot = FigPanel(self.tabs)
            newplot.fname = fname
            self.tabs.AddPage(newplot, fname, select=True)
            fn = open(path,'r')
            lines = fn.readlines()
            wave = []; ampl = []
            for line in lines:
                if line[0] == '#': continue  # skip header/comment lines
                s = [float(t) for t in line.split()]
                wave.append(s[0]); ampl.append(s[1])
            fn.close()
            newplot.title = 'Rigel TGS Spectrum from %s' % (fname)
            newplot.plot(wave, ampl)
        dialog.Destroy()
    def onSave(self, event):
        """Save the current plot tab as a PDF; returns wx.ID_OK/wx.ID_CANCEL."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if not isinstance(cur, FigPanel): return
        if not hasattr(cur, 'fname'): fname = '.pdf'
        else: fname = os.path.splitext(cur.fname)[0] + '-spec.pdf'
        wildcard = "PDF File (*.pdf)|*.pdf"
        dialog = wx.FileDialog(None, "Save Plot", defaultFile=fname, wildcard=wildcard, style=wx.FD_SAVE)
        if dialog.ShowModal() == wx.ID_OK:
            path = dialog.GetPath()
            cur.canvas.print_figure(path)
            cur.is_saved=True
            dialog.Destroy()
            return wx.ID_OK
        else:
            dialog.Destroy()
            return wx.ID_CANCEL
    def onExport(self, event):
        """Export the current plot's wavelength/intensity data as CSV text."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if not isinstance(cur, FigPanel): return
        if not hasattr(cur, 'fname'):
            # BUG FIX: this branch previously set `fname = '.pdf'`, leaving
            # `csvname` undefined (NameError in the FileDialog call below)
            # and using the wrong extension for a CSV export.
            csvname = 'spectrum-data.csv'
            fromfile = 'unknown'
        else:
            csvname = os.path.splitext(cur.fname)[0] + '-data.csv'
            fromfile = cur.fname
        wildcard = "Text file (*.csv)|*.csv"
        dialog = wx.FileDialog(None, "Export Spectral Data", defaultFile=csvname, wildcard=wildcard, style=wx.FD_SAVE)
        if dialog.ShowModal() == wx.ID_OK:
            path = dialog.GetPath()
            fn = open(path,'w')
            fn.write('# File %s\n' % fromfile)
            fn.write('# Wavelength [nm]   Normalized Amplitude\n')
            for n in range(len(cur.wave)):
                fn.write('  %5.2f    %6.4f\n' % (cur.wave[n], cur.intensity_wt[n]))
            fn.close()
        dialog.Destroy()
    def onClose(self, event):
        """Close the current tab, prompting to save unsaved plots.

        Returns wx.ID_CANCEL when the user aborts, wx.ID_OK otherwise.
        """
        n = self.tabs.GetSelection()
        if n<0: return
        p = self.tabs.GetPage(n)
        if isinstance(p, FigPanel) and not p.is_saved:
            name = self.tabs.GetPageText(n)
            dialog = wx.MessageDialog(self, 'Save %s before closing?' % (name), 'Close', wx.YES_NO|wx.CANCEL)
            response = dialog.ShowModal()
            dialog.Destroy()
            if response == wx.ID_CANCEL:
                return wx.ID_CANCEL
            if response == wx.ID_YES:
                saveok = self.onSave(None)
                if saveok == wx.ID_CANCEL:
                    return wx.ID_CANCEL
            self.tabs.DeletePage(n)
        else:
            self.tabs.DeletePage(n)
        return wx.ID_OK
    def onStitch(self, event):
        """Show the stitch dialog, populated with all open plot tabs."""
        if self.stitch.IsShown():
            self.stitch.Raise()
            return
        plotnames = {}
        for k in range(self.tabs.GetPageCount()):
            if isinstance(self.tabs.GetPage(k), FigPanel):
                id = self.tabs.GetPage(k).GetId()
                plotnames[id] = self.tabs.GetPageText(k)
        if len(plotnames.keys())<2: return  # nothing to stitch
        self.stitch.repopulate(plotnames)
        self.stitch.Show(True)
    def do_stitch(self, event):
        """Stitch the selected plots at the chosen split wavelengths into a new tab.

        Adjacent segments are scaled to match using the median intensity in a
        STITCH_WIDTH-pixel window on each side of every split point.
        """
        ids, wsplits = self.stitch.get_stitched()
        ids = ids[:len(wsplits)+1] # in case user didn't specify enough splits
        for id in ids:
            plot = wx.FindWindowById(id)
            if plot is None: return
        waves = [wx.FindWindowById(id).wave for id in ids]
        vals = [wx.FindWindowById(id).intensity_wt for id in ids]
        # Segment boundaries: first wavelength, the splits, last wavelength
        wb = [waves[0][0]] + wsplits[0:] + [waves[-1][-2]]
        nf = [j for j in range(len(waves[0])) if waves[0][j]>wb[1]][0]
        wave_stitch = waves[0][0:nf]
        intensity_stitch = vals[0][0:nf]
        for i in range(len(wb)-2):
            # get indices of array segments to stitch
            nf = [j for j in range(len(waves[i])) if waves[i][j]>wb[i+1]][0]
            mi = [j for j in range(len(waves[i+1])) if waves[i+1][j]>wb[i+1]][0]
            mf = [j for j in range(len(waves[i+1])) if waves[i+1][j]>wb[i+2]][0]
            # get median value a few pixels on each side of stitch for scaling
            scale = np.median(vals[i][nf-STITCH_WIDTH:nf])/np.median(vals[i+1][mi:mi+STITCH_WIDTH])
            vals[i+1] = [x * scale for x in vals[i+1]]
            wave_stitch = np.concatenate((wave_stitch, waves[i+1][mi:mf]))
            intensity_stitch = np.concatenate((intensity_stitch, vals[i+1][mi:mf]))
        # done! now make the plot
        newplot = FigPanel(self.tabs)
        newplot.fname = 'stitched'
        newplot.title = 'Rigel TGS Stitched Spectrum'
        newplot.plot(wave_stitch, intensity_stitch)
        # Pick a unique "Stitched - N" tab name
        curnames = [self.tabs.GetPageText(i) for i in range(self.tabs.GetPageCount())]
        n = 1
        while "Stitched - %d" % (n) in curnames:
            n+=1
        self.tabs.AddPage(newplot, "Stitched - %d" % (n), select=True)
        self.stitch.Show(False)
    def onExit(self, event):
        """Close every tab (honoring save prompts), then destroy the frame."""
        while self.tabs.GetSelection()>=0:
            if self.onClose(None) == wx.ID_CANCEL: return
        self.Destroy()
    def onZoom(self, event, mult):
        """Zoom the active image viewer by *mult* (0 means absolute 100%)."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if isinstance(cur, ImgViewer): cur.zoom(mult)
    def onZoomFit(self, event):
        """Zoom the active image viewer to fit its window."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if isinstance(cur, ImgViewer): cur.zoom_fit()
    def onSetPosition(self, event):
        """Prompt for explicit x,y pixel coordinates of the zeroth-order point."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if not isinstance(cur, ImgViewer): return
        dialog = wx.TextEntryDialog(None, "Enter x,y coordinates (pixels)", "Set Position", "%d,%d" % (cur.specZero.x, cur.specZero.y))
        if dialog.ShowModal() == wx.ID_OK:
            pt=dialog.GetValue().split(',')
            cur.specZero=wx.Point(int(pt[0]),int(pt[1]))
            cur.Refresh()
        dialog.Destroy()
    def onWidth(self, event, inc):
        """Change the selection-strip width of the active image viewer by *inc*."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if isinstance(cur, ImgViewer):
            cur.sel_width(inc)
    def onTilt(self, event, inc):
        """Tilt the selection strip of the active image viewer by *inc*."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if isinstance(cur, ImgViewer):
            cur.sel_tilt(inc)
    def onShowFITS(self, event):
        """Toggle the FITS-header pane of the active image viewer."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if isinstance(cur, ImgViewer):
            if cur.IsSplit():
                cur.Unsplit(cur.info)
            else:
                cur.SplitVertically(cur.info, cur.scroll, 200)
    def onPlot(self, event):
        """Extract and plot a spectrum from an image tab, or refresh a plot tab."""
        self.readCheckItems()
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if isinstance(cur, ImgViewer):
            name=self.tabs.GetPageText(self.tabs.GetSelection())
            (wave, intensity_wt, spec) = cur.extract()
            newplot = FigPanel(self.tabs)
            # Pick a unique "<image> - Plot N" tab name
            curnames = [self.tabs.GetPageText(i) for i in range(self.tabs.GetPageCount())]
            n = 1
            while "%s - Plot %d" % (name, n) in curnames:
                n+=1
            self.tabs.AddPage(newplot, "%s - Plot %d" % (name, n), select=True)
            newplot.spec = spec
            newplot.fname = cur.fname
            newplot.title = 'Rigel TGS Spectrum from %s' % (cur.fname)
            newplot.plot(wave, intensity_wt)
        elif isinstance(cur, FigPanel):
            cur.plot()
    def onSetTitle(self, event):
        """Prompt for a new title for the current plot and redraw it."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if not isinstance(cur, FigPanel): return
        if not hasattr(cur, 'title'): cur.title=''
        dialog = wx.TextEntryDialog(None, "Enter plot title", "Set title", cur.title)
        if dialog.ShowModal() == wx.ID_OK:
            cur.title=dialog.GetValue()
            self.readCheckItems()
            cur.plot()
        dialog.Destroy()
    def onAdjImage(self, event):
        """Open the brightness-levels dialog for the active image viewer."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if not isinstance(cur, ImgViewer): return
        dialog = AdjustImageDialog(parent=None, imgv=cur)
        if dialog.ShowModal() == wx.ID_OK:
            cur.imin, cur.imax = dialog.get_minmax()
            cur.adjustImage()
        else:
            # Cancelled: redraw with the stored (unchanged) levels
            cur.adjustImage()
        dialog.Destroy()
    def onSetRange(self, event):
        """Open the wavelength-range dialog for the current plot."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if not isinstance(cur, FigPanel): return
        dialog = SetRangeDialog(parent=None, wmin=cur.wmin, wmax=cur.wmax)
        if dialog.ShowModal() == wx.ID_OK:
            cur.wmin, cur.wmax = dialog.get_range()
            cur.plot()
        dialog.Destroy()
    def onAdjust(self, event):
        """Open matplotlib's subplot-configuration dialog for the current plot."""
        n = self.tabs.GetSelection()
        if n<0: return
        cur = self.tabs.GetPage(n)
        if not isinstance(cur, FigPanel):
            return
        cur.toolbar.configure_subplots(None)
    def onSetAverage(self, event):
        """Prompt for the median-filter width (forced odd, minimum 1)."""
        global MEDAVG_WIDTH
        val = "%s" % MEDAVG_WIDTH
        dialog = wx.TextEntryDialog(None, "Enter median average width (in pixels)", "Set Averaging", val)
        if dialog.ShowModal() == wx.ID_OK:
            a = int(dialog.GetValue())
            if np.mod(a,2) == 0: a+=1  # medfilt requires an odd kernel size
            if a<1: a=1
            MEDAVG_WIDTH=a
        dialog.Destroy()
    def onSetRedshift(self, event):
        """Prompt for the redshift z applied to the spectral-line overlays."""
        global REDSHIFT_Z
        val = "%s" % REDSHIFT_Z
        dialog = wx.TextEntryDialog(None, "Enter redshift value (z)", "Set redshift", val)
        if dialog.ShowModal() == wx.ID_OK:
            REDSHIFT_Z=float(dialog.GetValue())
        dialog.Destroy()
    def onHelp(self, event):
        """Show (and raise) the help dialog."""
        self.help.Show(True)
        self.help.Raise()
    def onAbout(self, event):
        """Show the About box."""
        description = """Extracts spectra from FITS images,
applies site-specific corrections, and
produces plots and calibrated spectral data.
Provides options to median-smooth, apply
redshift, and overlay common spectral
lines. Code can be easily modified to
apply corrections for different sites.
"""
        info = wx.AboutDialogInfo()
        info.SetName('TGS Analyzer')
        info.SetVersion(VERSION_STR)
        info.SetDescription(description)
        info.SetCopyright('(C) 2013 University of Iowa Physics and Astronomy')
        info.SetWebSite('http://astro.physics.uiowa.edu/rigel')
        info.AddDeveloper('<NAME> (<EMAIL>)')
        info.AddDeveloper('<NAME> (<EMAIL>)')
        wx.AboutBox(info)
        return
    def onBroadcast(self, event):
        """Route broadcast messages from child windows to the status bar."""
        msg = event.getMsg()
        self.SetStatusText(msg)
class StitchDialog(wx.Dialog):
    """Dialog for choosing which plot tabs to stitch and where to split them.

    Maintains two list boxes (available plots / plots to stitch) whose rows
    map to FigPanel window ids via plots_dict / parts_dict.
    """
    def __init__(self, *args, **kw):
        super(StitchDialog, self).__init__(style=wx.RESIZE_BORDER|wx.DEFAULT_DIALOG_STYLE, *args, **kw)
        self.SetSize((550, 450))
        self.SetTitle("TGS Stitch")
        ctrl = wx.FlexGridSizer(2, 3, vgap=8, hgap=8)
        ctrl2 = wx.BoxSizer(wx.VERTICAL)
        hbox = wx.BoxSizer(wx.VERTICAL)
        self.plots = wx.ListBox(self, style=wx.LB_EXTENDED)
        self.parts = wx.ListBox(self, style=wx.LB_EXTENDED)
        addButton = wx.Button(self, label='Add >>')
        remButton = wx.Button(self, label='<< Remove')
        rb1 = self.useSplits = wx.RadioButton(self, label='Split spectra equally by wavelength', style=wx.RB_GROUP)
        rb2 = self.pickSplits = wx.RadioButton(self, label='Specify wavelengths at which to split (nm):')
        self.splits = wx.TextCtrl(self, size=wx.Size(350,22))
        # Split-wavelength entry enabled only when 'specify' is selected
        self.splits.Enable(False)
        ctrl.Add(wx.StaticText(self, label='Available Plots:'), flag=wx.ALL|wx.ALIGN_LEFT, border=5)
        ctrl.Add(wx.StaticText(self, label=''))
        ctrl.Add(wx.StaticText(self, label='Plots to Stitch:'), flag=wx.ALL|wx.ALIGN_LEFT, border=5)
        ctrl.Add(self.plots, border=5, flag=wx.ALL|wx.EXPAND)
        ctrl.Add(ctrl2, flag=wx.ALIGN_CENTER)
        ctrl.Add(self.parts, border=5, flag=wx.ALL|wx.EXPAND)
        ctrl.AddGrowableCol(0,1)
        ctrl.AddGrowableCol(2,1)
        ctrl.AddGrowableRow(1,1)
        ctrl2.Add(addButton, border=5, flag=wx.ALL|wx.ALIGN_CENTER)
        ctrl2.Add(remButton, border=5, flag=wx.ALL|wx.ALIGN_CENTER)
        hbox.Add(self.useSplits, border=5, flag=wx.ALIGN_LEFT|wx.ALL)
        hbox.Add(self.pickSplits, border=5, flag=wx.ALIGN_LEFT|wx.ALL)
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(ctrl, flag=wx.ALL|wx.EXPAND, border=5, proportion=1)
        vbox.Add(hbox, flag=wx.ALL|wx.ALIGN_CENTER, border=5)
        vbox.Add(self.splits, flag=wx.ALL|wx.ALIGN_CENTER, border=5)
        vbox.Add(self.CreateButtonSizer(wx.OK|wx.CANCEL), flag=wx.ALL|wx.ALIGN_CENTER, border=5)
        self.SetSizer(vbox)
        addButton.Bind(wx.EVT_BUTTON, self.add_sel, addButton)
        remButton.Bind(wx.EVT_BUTTON, self.rem_sel, remButton)
        self.useSplits.Bind(wx.EVT_RADIOBUTTON, self.toggle_entry, rb1)
        self.pickSplits.Bind(wx.EVT_RADIOBUTTON, self.toggle_entry, rb2)
        self.Centre()
    def repopulate(self, name_dict):
        # takes figpanel object ids and tab names from main window
        self.name_dict = name_dict
        self.plots_dict = {}
        self.parts_dict = {}
        self.plots.Clear()
        self.parts.Clear()
        for id in self.name_dict.keys():
            pos = self.plots.GetCount()
            self.plots.Insert(name_dict[id], pos)
            self.plots_dict[pos] = id
    def add_sel(self, event):
        """Move the selected 'available' plots into the 'to stitch' list."""
        for k in self.plots.GetSelections():
            id = self.plots_dict[k]
            pos = self.parts.GetCount()
            self.parts.Insert(self.name_dict[id], pos)
            self.parts_dict[pos] = id
            del self.plots_dict[k]
        # Rebuild the source list so row indices stay contiguous
        plots = self.plots_dict.values()
        self.plots.Clear()
        self.plots_dict = {}
        for id in plots:
            pos = self.plots.GetCount()
            self.plots.Insert(self.name_dict[id], pos)
            self.plots_dict[pos] = id
    def rem_sel(self, event):
        """Move the selected 'to stitch' plots back to the 'available' list."""
        for k in self.parts.GetSelections():
            id = self.parts_dict[k]
            pos = self.plots.GetCount()
            self.plots.Insert(self.name_dict[id], pos)
            self.plots_dict[pos] = id
            del self.parts_dict[k]
        # Rebuild the target list so row indices stay contiguous
        parts = self.parts_dict.values()
        self.parts.Clear()
        self.parts_dict = {}
        for id in parts:
            pos = self.parts.GetCount()
            self.parts.Insert(self.name_dict[id], pos)
            self.parts_dict[pos] = id
    def toggle_entry(self, event):
        """Enable the split-wavelength text field only for 'specify' mode."""
        if self.pickSplits.GetValue():
            self.splits.Enable(True)
        else:
            self.splits.Enable(False)
    def get_stitched(self):
        """Return (ids, wsplits): chosen plot window ids and split wavelengths.

        In 'equal' mode the splits divide [DEFAULT_WMIN, DEFAULT_WMAX]
        evenly among the selected plots.
        """
        ids = self.parts_dict.values()
        if self.pickSplits.GetValue():
            wsplits = [float(x) for x in self.splits.GetValue().split(',')]
        else:
            wsplits = [DEFAULT_WMIN + (DEFAULT_WMAX - DEFAULT_WMIN)*(x+1.)/len(ids) for x in range(len(ids)-1)]
        return ids, wsplits
class SetRangeDialog(wx.Dialog):
    '''Modal dialog that lets the user enter a min/max plot range in nm.'''
    def __init__(self, wmin, wmax, *args, **kw):
        super(SetRangeDialog, self).__init__(*args, **kw)
        self.SetTitle("Set Plot Range")
        self.SetSize((350,150))
        # entry fields pre-filled with the current range
        self.min = wx.TextCtrl(self, style=wx.TE_RIGHT, size=wx.Size(150,22), value="%s" % wmin)
        self.max = wx.TextCtrl(self, style=wx.TE_RIGHT, size=wx.Size(150,22), value="%s" % wmax)
        grid = wx.FlexGridSizer(2, 2, vgap=8, hgap=8)
        label_flags = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL
        grid.Add(wx.StaticText(self, label='Minimum (nm):'), flag=label_flags)
        grid.Add(self.min, flag=label_flags)
        grid.Add(wx.StaticText(self, label='Maximum (nm):'), flag=label_flags)
        grid.Add(self.max, flag=label_flags)
        grid.AddGrowableCol(0,0)
        outer = wx.BoxSizer(wx.VERTICAL)
        outer.Add(grid, border=5, flag=wx.ALL|wx.EXPAND, proportion=1)
        outer.Add(self.CreateButtonSizer(wx.OK|wx.CANCEL), flag=wx.ALL|wx.ALIGN_CENTER, border=5)
        self.SetSizer(outer)
        self.Centre()
    def get_range(self):
        '''Return the user-entered (wmin, wmax) as floats.'''
        return float(self.min.GetValue()), float(self.max.GetValue())
class AdjustImageDialog(wx.Dialog):
    '''Dialog with min/max intensity sliders for re-levelling an image view.'''
    def __init__(self, imgv, *args, **kw):
        super(AdjustImageDialog, self).__init__(style=wx.RESIZE_BORDER|wx.DEFAULT_DIALOG_STYLE, *args, **kw)
        self.SetTitle("Set Image Levels")
        self.SetSize((450,250))
        self.imgv = imgv
        # slider limits span the image's full data range
        lo = np.min(imgv.data)
        hi = np.max(imgv.data)
        self.min = wx.Slider(self, style=wx.SL_LABELS, minValue=lo, maxValue=hi, value=imgv.imin)
        self.max = wx.Slider(self, style=wx.SL_LABELS, minValue=lo, maxValue=hi, value=imgv.imax)
        grid = wx.FlexGridSizer(2, 2, vgap=8, hgap=8)
        grid.Add(wx.StaticText(self, label='Minimum Intensity:'), flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)
        grid.Add(self.min, flag=wx.EXPAND)
        grid.Add(wx.StaticText(self, label='Maximum Intensity:'), flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)
        grid.Add(self.max, flag=wx.EXPAND)
        grid.AddGrowableCol(1,1)
        outer = wx.BoxSizer(wx.VERTICAL)
        outer.Add(grid, border=5, flag=wx.ALL|wx.EXPAND, proportion=1)
        # add a Preview button alongside OK/Cancel so levels can be tried live
        buttons = self.CreateButtonSizer(wx.OK|wx.CANCEL)
        preview = wx.Button(self, label='Preview')
        buttons.Add(preview)
        outer.Add(buttons, flag=wx.ALL|wx.ALIGN_CENTER, border=5)
        self.SetSizer(outer)
        self.Bind(wx.EVT_BUTTON, self.onApply, preview)
        self.Centre()
    def onApply(self, event):
        '''Apply the current slider levels to the image without closing.'''
        lo, hi = self.get_minmax()
        self.imgv.adjustImage(lo, hi)
    def get_minmax(self):
        '''Return the (min, max) intensity values currently selected.'''
        return (self.min.GetValue(), self.max.GetValue())
class HelpDialog(wx.Dialog):
    '''Dialog that displays the bundled HTML help page.'''
    def __init__(self, *args, **kw):
        super(HelpDialog, self).__init__(*args, **kw)
        self.SetTitle("Help")
        self.SetSize((400,500))
        viewer = html.HtmlWindow(self)
        viewer.LoadPage('tgsa_help.html')
        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(viewer, border=5, flag=wx.ALL|wx.EXPAND, proportion=1)
        layout.Add(self.CreateButtonSizer(wx.OK), flag=wx.ALL|wx.ALIGN_CENTER, border=5)
        self.SetSizer(layout)
        self.Centre()
### Nuts and Bolts
def f_lambda(x,x_star):
    '''
    Convert a pixel displacement from the zeroth-order image to a wavelength.

    x      -- displacement from the zeroth order image (pixels)
    x_star -- star position (pixels); kept for interface compatibility.
              NOTE(review): the original computed dx = (x_ctr - x_star)*pixel
              here but never used the result, so that dead statement was
              removed. Confirm x_star was not meant to correct x.
    Returns: wavelength in nanometers.
    '''
    global L, d_g, FL, x_ctr
    x *= pixel  # convert pixels to physical distance on the sensor
    # cubic distortion correction, determined by best-fit to actual spectra
    a0 = 0.004; a1 = 2.2; a2 = -40
    x2 = x + a1*(x**2) + a2 * (x**3)
    # grating equation: lambda = d_g * sin(atan(x2/L)), plus small offset a0
    lam = (d_g/nm) * ( sin( atan(x2/L) ) + a0 )
    return lam
def grating_sens(lam):
    '''
    Model the efficiency (0.0-1.0) of the Edmunds 600 lpmm grating,
    fit to the manufacturer's published efficiency curve.

    lam -- wavelength in nm
    '''
    sigma, a, lam0 = 120, 110, 250
    falloff = np.abs(float(lam) - lam0) / sigma
    peak_term = ((lam - lam0) / a) ** 2
    # quadratic rise damped by an exponential tail, scaled to 0-1
    return peak_term * np.exp(-falloff) / 100.
def ccd_sens(lam):
    '''
    Model the FLI CCD quantum efficiency (0.0-1.0) versus wavelength (nm);
    an approximate fit to the published QE curve.
    '''
    sigma, a, lam0 = 130, 125, 260
    falloff = np.abs(float(lam) - lam0) / sigma
    peak_term = ((lam - lam0) / a) ** 2
    # quadratic rise damped by an exponential tail, scaled to 0-1
    return peak_term * np.exp(-falloff) / 100.
def plt_Balmer(axis,z):
    '''Overlay the hydrogen Balmer-series lines, redshifted by z, on *axis*.'''
    shift = 1 + z
    (y1,y2) = axis.get_ylim()
    lines = [
        (656.3, 'r', r'H${\alpha}$ 656.3'),
        (486.1, 'g', r'H${\beta}$ 486.1'),
        (434.3, 'b', r'H${\gamma}$ 434.3'),
        (410.2, 'm', r'H${\delta}$ 410.2'),
        (397.0, 'm', r'H${\epsilon}$ 397.0'),
    ]
    for wav, col, lab in lines:
        axis.vlines(wav*shift, y1, y2, linestyle='solid', linewidth='1', color=col, label=lab)
def plt_Helium(axis,z):
    '''Overlay the neutral-helium (HeI) lines, redshifted by z, on *axis*.'''
    shift = 1 + z
    (y1,y2) = axis.get_ylim()
    lines = [
        (501.5, 'r', r'HeI 501.5'),
        (587.6, 'g', r'HeI 587.6'),
        (667.8, 'b', r'HeI 667.8'),
        (706.5, 'm', r'HeI 706.5'),
    ]
    for wav, col, lab in lines:
        axis.vlines(wav*shift, y1, y2, linestyle='dashdot', linewidth='1', color=col, label=lab)
def plt_Metals(axis,z):
    '''Overlay metal/ion lines (TiO, HeII, NIII, CIII, CIV), redshifted by z.'''
    shift = 1 + z
    (y1,y2) = axis.get_ylim()
    # (wavelength nm, color, label); the 'NIII 464.0 ' trailing space is
    # preserved from the original legend text
    lines = [
        (715.0, 'k', 'TiO 715.0'),
        (410.0, 'b', 'HeII 410.0'),
        (464.0, 'g', 'NIII 464.0 '),
        (465.0, 'r', 'CIV 465.0'),
        (468.6, 'b', 'HeII 468.6'),
        (541.1, 'b', 'HeII 541.1'),
        (569.6, 'r', 'CIII 569.6'),
        (580.5, 'r', 'CIV 580.5'),
    ]
    for wav, col, lab in lines:
        axis.vlines(wav*shift, y1, y2, linestyle='dotted', linewidth='2', color=col, label=lab)
def plt_Telluric(axis,z):
    '''
    Overlay the telluric O2 absorption band (759.0 nm) on *axis*.

    Telluric lines originate in Earth's atmosphere, so they are not
    redshifted; *z* is accepted only to match the other plt_* helpers.
    (The original computed z1 = 1 + z and never used it; that dead
    statement was removed.)
    '''
    (y1,y2) = axis.get_ylim()
    axis.vlines(759.0,y1,y2,linestyle='solid', linewidth='1',color ='m',label='O$_{2}$ (telluric)')
def mkconfigfile():
    '''
    Write a fresh 'tgsa_cfg.ini' in the current directory, recording the
    current module-level settings so they can be reloaded on next start.
    Physical quantities are stored in human units (cm, mm, um, deg).
    '''
    layout = [
        ('Defaults', [
            ('WindowWidth', DEFAULT_APP_WIDTH),
            ('WindowHeight', DEFAULT_APP_HEIGHT),
            ('SelWidth', DEFAULT_WIDTH),
            ('WavelengthMin', DEFAULT_WMIN),
            ('WavelengthMax', DEFAULT_WMAX),
            ('RedshiftZ', REDSHIFT_Z),
            ('PlotBalmer', PLOT_BALMER),
            ('PlotHelium', PLOT_HELIUM),
            ('PlotMetallic', PLOT_METALLIC),
            ('PlotTelluric', PLOT_TELLURIC),
        ]),
        ('Telescope', [
            ('FocalRatio', f_ratio),
            ('Diameter_cm', Diam/cm),
            ('SensorDistance_mm', L/mm),
            ('GratingLines_mm', lpmm*mm),
            ('NPixels', npixel),
            ('PixelSize_um', pixel/micron),
            ('PixelStart', PIXEL_START),
            ('PixelEnd', PIXEL_END),
            ('PixelScale', PIXEL_SCALE),
        ]),
        ('Advanced', [
            ('StitchWidth', STITCH_WIDTH),
            ('MedAvgWidth', MEDAVG_WIDTH),
            ('ZoomMax', ZOOM_MAX),
            ('ZoomMin', ZOOM_MIN),
            ('TiltInc_deg', TILT_INC/deg),
        ]),
    ]
    newcfg = ConfigParser.ConfigParser()
    cfgfile = open('tgsa_cfg.ini', 'w')
    for section, options in layout:
        newcfg.add_section(section)
        for key, value in options:
            newcfg.set(section, key, value)
    newcfg.write(cfgfile)
    cfgfile.close()
def envpath(envvar, *paths):
    '''
    Join *paths* onto the value of environment variable *envvar*.

    Returns the joined path, or '' when the variable is not set.
    '''
    base = os.getenv(envvar)
    # `is None` (not `== None`) is the correct test for a missing variable
    if base is None:
        return ''
    return os.path.join(base, *paths)
### MAIN
# hunt for configuration files in various locations
# (search order: current directory, Windows per-user AppData,
#  Mac ~/Library, then Unix-style ~/.tgsarc)
cfgfiles = [ os.path.join( os.getcwd(), 'tgsa_cfg.ini'),
             envpath('USERPROFILE', 'AppData', 'Local', 'TGS Analyzer', 'tgsa_cfg.ini'),
             envpath('HOME', 'Library', 'tgsa_cfg.ini'),
             envpath('HOME', '.tgsarc')
             ]
cfg = ConfigParser.ConfigParser()
if cfg.read(cfgfiles) == []:
    # no configuration file found anywhere: create one with the defaults
    mkconfigfile()
else:
    try:
        # override the module-level defaults with the stored settings
        DEFAULT_APP_WIDTH=cfg.getint('Defaults','WindowWidth')
        DEFAULT_APP_HEIGHT=cfg.getint('Defaults','WindowHeight')
        DEFAULT_WIDTH=cfg.getint('Defaults','SelWidth')
        DEFAULT_WMIN=cfg.getfloat('Defaults','WavelengthMin')
        DEFAULT_WMAX=cfg.getfloat('Defaults','WavelengthMax')
        REDSHIFT_Z=cfg.getfloat('Defaults','RedshiftZ')
        PLOT_BALMER=cfg.getboolean('Defaults','PlotBalmer')
        PLOT_HELIUM=cfg.getboolean('Defaults','PlotHelium')
        PLOT_METALLIC=cfg.getboolean('Defaults','PlotMetallic')
        PLOT_TELLURIC=cfg.getboolean('Defaults','PlotTelluric')
        # telescope geometry: stored in human units and converted back to
        # internal units via the cm/mm/micron/deg scale factors
        f_ratio=cfg.getfloat('Telescope','FocalRatio')
        Diam=cfg.getfloat('Telescope','Diameter_cm')*cm
        L=cfg.getfloat('Telescope','SensorDistance_mm')*mm
        lpmm=cfg.getfloat('Telescope','GratingLines_mm')/mm
        npixel=cfg.getint('Telescope','NPixels')
        pixel=cfg.getfloat('Telescope','PixelSize_um')*micron
        PIXEL_START=cfg.getint('Telescope','PixelStart')
        PIXEL_END=cfg.getint('Telescope','PixelEnd')
        PIXEL_SCALE=cfg.getfloat('Telescope','PixelScale')
        STITCH_WIDTH=cfg.getint('Advanced','StitchWidth')
        MEDAVG_WIDTH=cfg.getint('Advanced','MedAvgWidth')
        ZOOM_MAX=cfg.getfloat('Advanced','ZoomMax')
        ZOOM_MIN=cfg.getfloat('Advanced','ZoomMin')
        TILT_INC=cfg.getfloat('Advanced','TiltInc_deg')*deg
    except ConfigParser.NoOptionError:
        # make a new config file if one doesn't exist
        # or if a token is missing
        mkconfigfile()
# start the wx application and enter the GUI event loop
app = wx.App(redirect=False)
frame = MainWindow()
frame.Show()
app.MainLoop()
| [
"wx.Menu",
"numpy.sum",
"numpy.argmax",
"numpy.clip",
"matplotlib.backends.backend_wxagg.FigureCanvasWxAgg",
"numpy.exp",
"ConfigParser.ConfigParser",
"wx.Size",
"os.path.join",
"wx.PyEventBinder",
"wx.EmptyBitmap",
"numpy.copy",
"wx.html.HtmlWindow",
"numpy.std",
"wx.AboutBox",
"wx.Fl... | [((340, 363), 'matplotlib.use', 'matplotlib.use', (['"""WXAgg"""'], {}), "('WXAgg')\n", (354, 363), False, 'import matplotlib\n'), ((2091, 2108), 'wx.NewEventType', 'wx.NewEventType', ([], {}), '()\n', (2106, 2108), False, 'import wx\n'), ((2125, 2161), 'wx.PyEventBinder', 'wx.PyEventBinder', (['myEVT_BROADCAST', '(1)'], {}), '(myEVT_BROADCAST, 1)\n', (2141, 2161), False, 'import wx\n'), ((42556, 42583), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (42581, 42583), False, 'import ConfigParser\n'), ((44040, 44062), 'wx.App', 'wx.App', ([], {'redirect': '(False)'}), '(redirect=False)\n', (44046, 44062), False, 'import wx\n'), ((40772, 40799), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (40797, 40799), False, 'import ConfigParser\n'), ((42206, 42223), 'os.getenv', 'os.getenv', (['envvar'], {}), '(envvar)\n', (42215, 42223), False, 'import os\n'), ((42257, 42280), 'os.path.join', 'os.path.join', (['a', '*paths'], {}), '(a, *paths)\n', (42269, 42280), False, 'import os\n'), ((2243, 2288), 'wx.PyCommandEvent.__init__', 'wx.PyCommandEvent.__init__', (['self', 'evtType', 'id'], {}), '(self, evtType, id)\n', (2269, 2288), False, 'import wx\n'), ((2538, 2562), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (2549, 2562), False, 'import wx\n'), ((2576, 2588), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (2586, 2588), True, 'import matplotlib.pyplot as plt\n'), ((2605, 2637), 'matplotlib.backends.backend_wxagg.FigureCanvasWxAgg', 'FigureCanvas', (['self', '(-1)', 'self.fig'], {}), '(self, -1, self.fig)\n', (2617, 2637), True, 'from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\n'), ((2746, 2779), 'matplotlib.backends.backend_wx.NavigationToolbar2Wx', 'NavigationToolbar2Wx', (['self.canvas'], {}), '(self.canvas)\n', (2766, 2779), False, 'from matplotlib.backends.backend_wx import NavigationToolbar2Wx\n'), ((3477, 3502), 'numpy.min', 
'np.min', (['self.intensity_wt'], {}), '(self.intensity_wt)\n', (3483, 3502), True, 'import numpy as np\n'), ((4669, 4692), 'wx.ScrolledWindow', 'wx.ScrolledWindow', (['self'], {}), '(self)\n', (4686, 4692), False, 'import wx\n'), ((4707, 4725), 'wx.grid.Grid', 'wx.grid.Grid', (['self'], {}), '(self)\n', (4719, 4725), False, 'import wx\n'), ((4935, 4957), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4951, 4957), False, 'import os\n'), ((5035, 5052), 'numpy.min', 'np.min', (['self.data'], {}), '(self.data)\n', (5041, 5052), True, 'import numpy as np\n'), ((5108, 5186), 'numpy.clip', 'np.clip', (['(255.99 * (self.data - self.imin) / (self.imax - self.imin))', '(0)', '(255.99)'], {}), '(255.99 * (self.data - self.imin) / (self.imax - self.imin), 0, 255.99)\n', (5115, 5186), True, 'import numpy as np\n'), ((5192, 5211), 'wx.EmptyImage', 'wx.EmptyImage', (['w', 'h'], {}), '(w, h)\n', (5205, 5211), False, 'import wx\n'), ((5293, 5321), 'wx.BitmapFromImage', 'wx.BitmapFromImage', (['self.img'], {}), '(self.img)\n', (5311, 5321), False, 'import wx\n'), ((5886, 5914), 'wx.Point', 'wx.Point', (['xymax[1]', 'xymax[0]'], {}), '(xymax[1], xymax[0])\n', (5894, 5914), False, 'import wx\n'), ((7043, 7091), 'wx.Point', 'wx.Point', (['(x / self.zoomLevel)', '(y / self.zoomLevel)'], {}), '(x / self.zoomLevel, y / self.zoomLevel)\n', (7051, 7091), False, 'import wx\n'), ((7147, 7170), 'wx.PaintDC', 'wx.PaintDC', (['self.scroll'], {}), '(self.scroll)\n', (7157, 7170), False, 'import wx\n'), ((7341, 7400), 'numpy.clip', 'np.clip', (['(256.0 * (self.data - min) / (max - min))', '(0)', '(255.99)'], {}), '(256.0 * (self.data - min) / (max - min), 0, 255.99)\n', (7348, 7400), True, 'import numpy as np\n'), ((7713, 7736), 'math.cos', 'cos', (['(-self.alignDegrees)'], {}), '(-self.alignDegrees)\n', (7716, 7736), False, 'from math import sin, cos, atan, pi, log\n'), ((7746, 7769), 'math.sin', 'sin', (['(-self.alignDegrees)'], {}), '(-self.alignDegrees)\n', (7749, 7769), 
False, 'from math import sin, cos, atan, pi, log\n'), ((10653, 10680), 'numpy.median', 'np.median', (['spec_off'], {'axis': '(0)'}), '(spec_off, axis=0)\n', (10662, 10680), True, 'import numpy as np\n'), ((10708, 10728), 'numpy.sum', 'np.sum', (['spec'], {'axis': '(0)'}), '(spec, axis=0)\n', (10714, 10728), True, 'import numpy as np\n'), ((10854, 10868), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (10862, 10868), True, 'import numpy as np\n'), ((10882, 10896), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (10890, 10896), True, 'import numpy as np\n'), ((10914, 10928), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (10922, 10928), True, 'import numpy as np\n'), ((11569, 11590), 'wx.aui.AuiNotebook', 'aui.AuiNotebook', (['self'], {}), '(self)\n', (11584, 11590), True, 'import wx.aui as aui\n'), ((11859, 11868), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (11866, 11868), False, 'import wx\n'), ((13042, 13051), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (13049, 13051), False, 'import wx\n'), ((14153, 14162), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (14160, 14162), False, 'import wx\n'), ((15285, 15294), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (15292, 15294), False, 'import wx\n'), ((15945, 15954), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (15952, 15954), False, 'import wx\n'), ((16494, 16503), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (16501, 16503), False, 'import wx\n'), ((17402, 17411), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (17409, 17411), False, 'import wx\n'), ((17934, 17946), 'wx.MenuBar', 'wx.MenuBar', ([], {}), '()\n', (17944, 17946), False, 'import wx\n'), ((18562, 18602), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_GO_BACK'], {}), '(wx.ART_GO_BACK)\n', (18586, 18602), False, 'import wx\n'), ((18793, 18836), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_GO_FORWARD'], {}), '(wx.ART_GO_FORWARD)\n', (18817, 18836), False, 'import wx\n'), ((19026, 19064), 'wx.ArtProvider.GetBitmap', 
'wx.ArtProvider.GetBitmap', (['wx.ART_GO_UP'], {}), '(wx.ART_GO_UP)\n', (19050, 19064), False, 'import wx\n'), ((19245, 19285), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_GO_DOWN'], {}), '(wx.ART_GO_DOWN)\n', (19269, 19285), False, 'import wx\n'), ((19473, 19509), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_NEW'], {}), '(wx.ART_NEW)\n', (19497, 19509), False, 'import wx\n'), ((20933, 21027), 'wx.FileDialog', 'wx.FileDialog', (['None', '"""Choose a file"""'], {'wildcard': 'wildcard', 'style': '(wx.FD_OPEN | wx.FD_MULTIPLE)'}), "(None, 'Choose a file', wildcard=wildcard, style=wx.FD_OPEN |\n wx.FD_MULTIPLE)\n", (20946, 21027), False, 'import wx\n'), ((21579, 21652), 'wx.FileDialog', 'wx.FileDialog', (['None', '"""Choose a file"""'], {'wildcard': 'wildcard', 'style': 'wx.FD_OPEN'}), "(None, 'Choose a file', wildcard=wildcard, style=wx.FD_OPEN)\n", (21592, 21652), False, 'import wx\n'), ((22491, 22583), 'wx.FileDialog', 'wx.FileDialog', (['None', '"""Save Plot"""'], {'defaultFile': 'fname', 'wildcard': 'wildcard', 'style': 'wx.FD_SAVE'}), "(None, 'Save Plot', defaultFile=fname, wildcard=wildcard,\n style=wx.FD_SAVE)\n", (22504, 22583), False, 'import wx\n'), ((23151, 23257), 'wx.FileDialog', 'wx.FileDialog', (['None', '"""Export Spectral Data"""'], {'defaultFile': 'csvname', 'wildcard': 'wildcard', 'style': 'wx.FD_SAVE'}), "(None, 'Export Spectral Data', defaultFile=csvname, wildcard=\n wildcard, style=wx.FD_SAVE)\n", (23164, 23257), False, 'import wx\n'), ((26766, 26889), 'wx.TextEntryDialog', 'wx.TextEntryDialog', (['None', '"""Enter x,y coordinates (pixels)"""', '"""Set Position"""', "('%d,%d' % (cur.specZero.x, cur.specZero.y))"], {}), "(None, 'Enter x,y coordinates (pixels)', 'Set Position', \n '%d,%d' % (cur.specZero.x, cur.specZero.y))\n", (26784, 26889), False, 'import wx\n'), ((28548, 28616), 'wx.TextEntryDialog', 'wx.TextEntryDialog', (['None', '"""Enter plot title"""', '"""Set title"""', 'cur.title'], {}), "(None, 
'Enter plot title', 'Set title', cur.title)\n", (28566, 28616), False, 'import wx\n'), ((29720, 29812), 'wx.TextEntryDialog', 'wx.TextEntryDialog', (['None', '"""Enter median average width (in pixels)"""', '"""Set Averaging"""', 'val'], {}), "(None, 'Enter median average width (in pixels)',\n 'Set Averaging', val)\n", (29738, 29812), False, 'import wx\n'), ((30049, 30122), 'wx.TextEntryDialog', 'wx.TextEntryDialog', (['None', '"""Enter redshift value (z)"""', '"""Set redshift"""', 'val'], {}), "(None, 'Enter redshift value (z)', 'Set redshift', val)\n", (30067, 30122), False, 'import wx\n'), ((30624, 30644), 'wx.AboutDialogInfo', 'wx.AboutDialogInfo', ([], {}), '()\n', (30642, 30644), False, 'import wx\n'), ((30961, 30978), 'wx.AboutBox', 'wx.AboutBox', (['info'], {}), '(info)\n', (30972, 30978), False, 'import wx\n'), ((31305, 31343), 'wx.FlexGridSizer', 'wx.FlexGridSizer', (['(2)', '(3)'], {'vgap': '(8)', 'hgap': '(8)'}), '(2, 3, vgap=8, hgap=8)\n', (31321, 31343), False, 'import wx\n'), ((31354, 31378), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (31365, 31378), False, 'import wx\n'), ((31388, 31412), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (31399, 31412), False, 'import wx\n'), ((31431, 31469), 'wx.ListBox', 'wx.ListBox', (['self'], {'style': 'wx.LB_EXTENDED'}), '(self, style=wx.LB_EXTENDED)\n', (31441, 31469), False, 'import wx\n'), ((31485, 31523), 'wx.ListBox', 'wx.ListBox', (['self'], {'style': 'wx.LB_EXTENDED'}), '(self, style=wx.LB_EXTENDED)\n', (31495, 31523), False, 'import wx\n'), ((31538, 31569), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Add >>"""'}), "(self, label='Add >>')\n", (31547, 31569), False, 'import wx\n'), ((31584, 31618), 'wx.Button', 'wx.Button', (['self'], {'label': '"""<< Remove"""'}), "(self, label='<< Remove')\n", (31593, 31618), False, 'import wx\n'), ((31644, 31733), 'wx.RadioButton', 'wx.RadioButton', (['self'], {'label': '"""Split spectra equally by 
wavelength"""', 'style': 'wx.RB_GROUP'}), "(self, label='Split spectra equally by wavelength', style=wx.\n RB_GROUP)\n", (31658, 31733), False, 'import wx\n'), ((31755, 31828), 'wx.RadioButton', 'wx.RadioButton', (['self'], {'label': '"""Specify wavelengths at which to split (nm):"""'}), "(self, label='Specify wavelengths at which to split (nm):')\n", (31769, 31828), False, 'import wx\n'), ((32652, 32676), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (32663, 32676), False, 'import wx\n'), ((35307, 35345), 'wx.FlexGridSizer', 'wx.FlexGridSizer', (['(2)', '(2)'], {'vgap': '(8)', 'hgap': '(8)'}), '(2, 2, vgap=8, hgap=8)\n', (35323, 35345), False, 'import wx\n'), ((35355, 35379), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (35366, 35379), False, 'import wx\n'), ((36208, 36225), 'numpy.max', 'np.max', (['imgv.data'], {}), '(imgv.data)\n', (36214, 36225), True, 'import numpy as np\n'), ((36233, 36250), 'numpy.min', 'np.min', (['imgv.data'], {}), '(imgv.data)\n', (36239, 36250), True, 'import numpy as np\n'), ((36286, 36373), 'wx.Slider', 'wx.Slider', (['self'], {'style': 'wx.SL_LABELS', 'minValue': 'amin', 'maxValue': 'amax', 'value': 'imgv.imin'}), '(self, style=wx.SL_LABELS, minValue=amin, maxValue=amax, value=\n imgv.imin)\n', (36295, 36373), False, 'import wx\n'), ((36382, 36469), 'wx.Slider', 'wx.Slider', (['self'], {'style': 'wx.SL_LABELS', 'minValue': 'amin', 'maxValue': 'amax', 'value': 'imgv.imax'}), '(self, style=wx.SL_LABELS, minValue=amin, maxValue=amax, value=\n imgv.imax)\n', (36391, 36469), False, 'import wx\n'), ((36474, 36512), 'wx.FlexGridSizer', 'wx.FlexGridSizer', (['(2)', '(2)'], {'vgap': '(8)', 'hgap': '(8)'}), '(2, 2, vgap=8, hgap=8)\n', (36490, 36512), False, 'import wx\n'), ((36522, 36546), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (36533, 36546), False, 'import wx\n'), ((36979, 37011), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Preview"""'}), "(self, 
label='Preview')\n", (36988, 37011), False, 'import wx\n'), ((37514, 37535), 'wx.html.HtmlWindow', 'html.HtmlWindow', (['self'], {}), '(self)\n', (37529, 37535), True, 'import wx.html as html\n'), ((37582, 37606), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (37593, 37606), False, 'import wx\n'), ((38470, 38480), 'numpy.exp', 'np.exp', (['(-t)'], {}), '(-t)\n', (38476, 38480), True, 'import numpy as np\n'), ((38718, 38728), 'numpy.exp', 'np.exp', (['(-t)'], {}), '(-t)\n', (38724, 38728), True, 'import numpy as np\n'), ((42371, 42382), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (42380, 42382), False, 'import os\n'), ((3066, 3106), 'scipy.signal.medfilt', 'medfilt', (['self.intensity_wt', 'MEDAVG_WIDTH'], {}), '(self.intensity_wt, MEDAVG_WIDTH)\n', (3073, 3106), False, 'from scipy.signal import medfilt\n'), ((3181, 3226), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[3, 1]'}), '(2, 1, height_ratios=[3, 1])\n', (3198, 3226), True, 'import matplotlib.gridspec as gridspec\n'), ((3241, 3264), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(1)'], {}), '(1, 1)\n', (3258, 3264), True, 'import matplotlib.gridspec as gridspec\n'), ((3515, 3540), 'numpy.max', 'np.max', (['self.intensity_wt'], {}), '(self.intensity_wt)\n', (3521, 3540), True, 'import numpy as np\n'), ((5830, 5850), 'numpy.argmax', 'np.argmax', (['self.data'], {}), '(self.data)\n', (5839, 5850), True, 'import numpy as np\n'), ((7553, 7588), 'wx.Brush', 'wx.Brush', (['"""#000000"""', 'wx.TRANSPARENT'], {}), "('#000000', wx.TRANSPARENT)\n", (7561, 7588), False, 'import wx\n'), ((7602, 7628), 'wx.Pen', 'wx.Pen', (['"""RED"""', '(1)', 'wx.SOLID'], {}), "('RED', 1, wx.SOLID)\n", (7608, 7628), False, 'import wx\n'), ((7946, 8008), 'wx.Point', 'wx.Point', (['(x + p0 * cosa + dw * sina)', '(y + p0 * sina - dw * cosa)'], {}), '(x + p0 * cosa + dw * sina, y + p0 * sina - dw * cosa)\n', (7954, 8008), False, 'import wx\n'), ((8016, 
8078), 'wx.Point', 'wx.Point', (['(x + p0 * cosa - dw * sina)', '(y + p0 * sina + dw * cosa)'], {}), '(x + p0 * cosa - dw * sina, y + p0 * sina + dw * cosa)\n', (8024, 8078), False, 'import wx\n'), ((8086, 8148), 'wx.Point', 'wx.Point', (['(x + pf * cosa - dw * sina)', '(y + pf * sina + dw * cosa)'], {}), '(x + pf * cosa - dw * sina, y + pf * sina + dw * cosa)\n', (8094, 8148), False, 'import wx\n'), ((8156, 8218), 'wx.Point', 'wx.Point', (['(x + pf * cosa + dw * sina)', '(y + pf * sina - dw * cosa)'], {}), '(x + pf * cosa + dw * sina, y + pf * sina - dw * cosa)\n', (8164, 8218), False, 'import wx\n'), ((8226, 8288), 'wx.Point', 'wx.Point', (['(x + p0 * cosa + dw * sina)', '(y + p0 * sina - dw * cosa)'], {}), '(x + p0 * cosa + dw * sina, y + p0 * sina - dw * cosa)\n', (8234, 8288), False, 'import wx\n'), ((10368, 10411), 'scipy.ndimage.interpolation.rotate', 'rotate', (['self.data', '(-self.alignDegrees / deg)'], {}), '(self.data, -self.alignDegrees / deg)\n', (10374, 10411), False, 'from scipy.ndimage.interpolation import rotate\n'), ((10423, 10441), 'numpy.copy', 'np.copy', (['self.data'], {}), '(self.data)\n', (10430, 10441), True, 'import numpy as np\n'), ((11924, 11944), 'wx.EmptyBitmap', 'wx.EmptyBitmap', (['(1)', '(1)'], {}), '(1, 1)\n', (11938, 11944), False, 'import wx\n'), ((12090, 12132), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_FILE_OPEN'], {}), '(wx.ART_FILE_OPEN)\n', (12114, 12132), False, 'import wx\n'), ((12343, 12385), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_FILE_SAVE'], {}), '(wx.ART_FILE_SAVE)\n', (12367, 12385), False, 'import wx\n'), ((12946, 12983), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_QUIT'], {}), '(wx.ART_QUIT)\n', (12970, 12983), False, 'import wx\n'), ((13106, 13126), 'wx.EmptyBitmap', 'wx.EmptyBitmap', (['(1)', '(1)'], {}), '(1, 1)\n', (13120, 13126), False, 'import wx\n'), ((13237, 13277), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', 
(['wx.ART_GO_BACK'], {}), '(wx.ART_GO_BACK)\n', (13261, 13277), False, 'import wx\n'), ((13482, 13525), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_GO_FORWARD'], {}), '(wx.ART_GO_FORWARD)\n', (13506, 13525), False, 'import wx\n'), ((13682, 13720), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_GO_UP'], {}), '(wx.ART_GO_UP)\n', (13706, 13720), False, 'import wx\n'), ((13889, 13929), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_GO_DOWN'], {}), '(wx.ART_GO_DOWN)\n', (13913, 13929), False, 'import wx\n'), ((14218, 14238), 'wx.EmptyBitmap', 'wx.EmptyBitmap', (['(1)', '(1)'], {}), '(1, 1)\n', (14232, 14238), False, 'import wx\n'), ((15146, 15190), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_REPORT_VIEW'], {}), '(wx.ART_REPORT_VIEW)\n', (15170, 15190), False, 'import wx\n'), ((16010, 16030), 'wx.EmptyBitmap', 'wx.EmptyBitmap', (['(1)', '(1)'], {}), '(1, 1)\n', (16024, 16030), False, 'import wx\n'), ((16560, 16580), 'wx.EmptyBitmap', 'wx.EmptyBitmap', (['(1)', '(1)'], {}), '(1, 1)\n', (16574, 16580), False, 'import wx\n'), ((16751, 16787), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_NEW'], {}), '(wx.ART_NEW)\n', (16775, 16787), False, 'import wx\n'), ((17467, 17487), 'wx.EmptyBitmap', 'wx.EmptyBitmap', (['(1)', '(1)'], {}), '(1, 1)\n', (17481, 17487), False, 'import wx\n'), ((17585, 17641), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_QUESTION'], {'size': '(16, 16)'}), '(wx.ART_QUESTION, size=(16, 16))\n', (17609, 17641), False, 'import wx\n'), ((17816, 17875), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_INFORMATION'], {'size': '(16, 16)'}), '(wx.ART_INFORMATION, size=(16, 16))\n', (17840, 17875), False, 'import wx\n'), ((18222, 18264), 'wx.ArtProvider.GetBitmap', 'wx.ArtProvider.GetBitmap', (['wx.ART_FILE_OPEN'], {}), '(wx.ART_FILE_OPEN)\n', (18246, 18264), False, 'import wx\n'), ((18402, 18444), 'wx.ArtProvider.GetBitmap', 
'wx.ArtProvider.GetBitmap', (['wx.ART_FILE_SAVE'], {}), '(wx.ART_FILE_SAVE)\n', (18426, 18444), False, 'import wx\n'), ((21728, 21750), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (21744, 21750), False, 'import os\n'), ((23781, 23873), 'wx.MessageDialog', 'wx.MessageDialog', (['self', "('Save %s before closing?' % name)", '"""Close"""', '(wx.YES_NO | wx.CANCEL)'], {}), "(self, 'Save %s before closing?' % name, 'Close', wx.YES_NO |\n wx.CANCEL)\n", (23797, 23873), False, 'import wx\n'), ((24734, 24755), 'wx.FindWindowById', 'wx.FindWindowById', (['id'], {}), '(id)\n', (24751, 24755), False, 'import wx\n'), ((25615, 25665), 'numpy.concatenate', 'np.concatenate', (['(wave_stitch, waves[i + 1][mi:mf])'], {}), '((wave_stitch, waves[i + 1][mi:mf]))\n', (25629, 25665), True, 'import numpy as np\n'), ((25686, 25740), 'numpy.concatenate', 'np.concatenate', (['(intensity_stitch, vals[i + 1][mi:mf])'], {}), '((intensity_stitch, vals[i + 1][mi:mf]))\n', (25700, 25740), True, 'import numpy as np\n'), ((31927, 31972), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Available Plots:"""'}), "(self, label='Available Plots:')\n", (31940, 31972), False, 'import wx\n'), ((32022, 32051), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '""""""'}), "(self, label='')\n", (32035, 32051), False, 'import wx\n'), ((32064, 32109), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Plots to Stitch:"""'}), "(self, label='Plots to Stitch:')\n", (32077, 32109), False, 'import wx\n'), ((35438, 35480), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Minimum (nm):"""'}), "(self, label='Minimum (nm):')\n", (35451, 35480), False, 'import wx\n'), ((35534, 35576), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Maximum (nm):"""'}), "(self, label='Maximum (nm):')\n", (35547, 35576), False, 'import wx\n'), ((36559, 36606), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Minimum Intensity:"""'}), "(self, label='Minimum 
Intensity:')\n", (36572, 36606), False, 'import wx\n'), ((36702, 36749), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Maximum Intensity:"""'}), "(self, label='Maximum Intensity:')\n", (36715, 36749), False, 'import wx\n'), ((5084, 5101), 'numpy.std', 'np.std', (['self.data'], {}), '(self.data)\n', (5090, 5101), True, 'import numpy as np\n'), ((11397, 11443), 'wx.Size', 'wx.Size', (['DEFAULT_APP_WIDTH', 'DEFAULT_APP_HEIGHT'], {}), '(DEFAULT_APP_WIDTH, DEFAULT_APP_HEIGHT)\n', (11404, 11443), False, 'import wx\n'), ((21105, 21127), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (21121, 21127), False, 'import os\n'), ((24794, 24815), 'wx.FindWindowById', 'wx.FindWindowById', (['id'], {}), '(id)\n', (24811, 24815), False, 'import wx\n'), ((24846, 24867), 'wx.FindWindowById', 'wx.FindWindowById', (['id'], {}), '(id)\n', (24863, 24867), False, 'import wx\n'), ((25447, 25487), 'numpy.median', 'np.median', (['vals[i][nf - STITCH_WIDTH:nf]'], {}), '(vals[i][nf - STITCH_WIDTH:nf])\n', (25456, 25487), True, 'import numpy as np\n'), ((25486, 25530), 'numpy.median', 'np.median', (['vals[i + 1][mi:mi + STITCH_WIDTH]'], {}), '(vals[i + 1][mi:mi + STITCH_WIDTH])\n', (25495, 25530), True, 'import numpy as np\n'), ((29882, 29894), 'numpy.mod', 'np.mod', (['a', '(2)'], {}), '(a, 2)\n', (29888, 29894), True, 'import numpy as np\n'), ((31868, 31884), 'wx.Size', 'wx.Size', (['(350)', '(22)'], {}), '(350, 22)\n', (31875, 31884), False, 'import wx\n'), ((35178, 35194), 'wx.Size', 'wx.Size', (['(150)', '(22)'], {}), '(150, 22)\n', (35185, 35194), False, 'import wx\n'), ((35264, 35280), 'wx.Size', 'wx.Size', (['(150)', '(22)'], {}), '(150, 22)\n', (35271, 35280), False, 'import wx\n'), ((38193, 38205), 'math.atan', 'atan', (['(x2 / L)'], {}), '(x2 / L)\n', (38197, 38205), False, 'from math import sin, cos, atan, pi, log\n'), ((8377, 8406), 'math.log', 'log', (['(self.zoomLevel * mult)', '(2)'], {}), '(self.zoomLevel * mult, 2)\n', (8380, 8406), False, 'from 
math import sin, cos, atan, pi, log\n'), ((21153, 21189), 'pyfits.getdata', 'pyfits.getdata', (['path', '(0)'], {'header': '(True)'}), '(path, 0, header=True)\n', (21167, 21189), False, 'import pyfits\n'), ((22397, 22424), 'os.path.splitext', 'os.path.splitext', (['cur.fname'], {}), '(cur.fname)\n', (22413, 22424), False, 'import os\n'), ((23032, 23059), 'os.path.splitext', 'os.path.splitext', (['cur.fname'], {}), '(cur.fname)\n', (23048, 23059), False, 'import os\n'), ((21254, 21324), 'wx.MessageDialog', 'wx.MessageDialog', (['self', 'msg', '"""File Error"""'], {'style': '(wx.OK | wx.ICON_ERROR)'}), "(self, msg, 'File Error', style=wx.OK | wx.ICON_ERROR)\n", (21270, 21324), False, 'import wx\n'), ((5232, 5252), 'numpy.dstack', 'np.dstack', (['(b, b, b)'], {}), '((b, b, b))\n', (5241, 5252), True, 'import numpy as np\n'), ((7413, 7433), 'numpy.dstack', 'np.dstack', (['(b, b, b)'], {}), '((b, b, b))\n', (7422, 7433), True, 'import numpy as np\n')] |
import jug.compound
import jug.mapreduce
import numpy as np
from jug.backends.dict_store import dict_store
from jug.tests.utils import simple_execute
from jug.compound import CompoundTask
from jug.tests.test_mapreduce import mapper, reducer, dfs_run
from jug.tests.task_reset import task_reset
@task_reset
def test_compound():
jug.task.Task.store = dict_store()
A = np.random.rand(10000)
x = CompoundTask(jug.mapreduce.mapreduce,reducer, mapper, A)
dfs_run(x)
y = CompoundTask(jug.mapreduce.mapreduce,reducer, mapper, A)
assert y.can_load()
assert y.result == x.result
@task_reset
def test_w_barrier():
store, space = jug.jug.init('jug/tests/jugfiles/compound_wbarrier.py', 'dict_store')
simple_execute()
store, space = jug.jug.init('jug/tests/jugfiles/compound_wbarrier.py', store)
simple_execute()
assert 'sixteen' in space
assert space['sixteen'].result == 16
@task_reset
def test_non_simple():
store, space = jug.jug.init('jug/tests/jugfiles/compound_nonsimple.py', 'dict_store')
simple_execute()
store, space = jug.jug.init('jug/tests/jugfiles/compound_nonsimple.py', store)
simple_execute()
store, space = jug.jug.init('jug/tests/jugfiles/compound_nonsimple.py', store)
simple_execute()
assert 'sixteen' in space
assert space['sixteen'].result == 16
@task_reset
def test_non_simple():
store, space = jug.jug.init('jug/tests/jugfiles/compound.py', 'dict_store')
simple_execute()
assert 'sixteen' in space
assert space['sixteen'].result == 16
store, space = jug.jug.init('jug/tests/jugfiles/compound.py', store)
assert 'sixteen' in space
assert space['sixteen'].result == 16
@task_reset
def test_debug():
from jug.jug import execution_loop
from jug.task import alltasks
from jug.options import default_options
from collections import defaultdict
options = default_options.copy()
options.debug = True
store, space = jug.jug.init('jug/tests/jugfiles/compound.py', 'dict_store')
execution_loop(alltasks, options, defaultdict(int), defaultdict(int))
assert 'sixteen' in space
assert space['sixteen'].result == 16
store, space = jug.jug.init('jug/tests/jugfiles/compound.py', store)
assert 'sixteen' in space
assert space['sixteen'].result == 16
| [
"jug.compound.CompoundTask",
"collections.defaultdict",
"numpy.random.rand",
"jug.tests.test_mapreduce.dfs_run",
"jug.options.default_options.copy",
"jug.tests.utils.simple_execute",
"jug.backends.dict_store.dict_store"
] | [((354, 366), 'jug.backends.dict_store.dict_store', 'dict_store', ([], {}), '()\n', (364, 366), False, 'from jug.backends.dict_store import dict_store\n'), ((375, 396), 'numpy.random.rand', 'np.random.rand', (['(10000)'], {}), '(10000)\n', (389, 396), True, 'import numpy as np\n'), ((405, 462), 'jug.compound.CompoundTask', 'CompoundTask', (['jug.mapreduce.mapreduce', 'reducer', 'mapper', 'A'], {}), '(jug.mapreduce.mapreduce, reducer, mapper, A)\n', (417, 462), False, 'from jug.compound import CompoundTask\n'), ((466, 476), 'jug.tests.test_mapreduce.dfs_run', 'dfs_run', (['x'], {}), '(x)\n', (473, 476), False, 'from jug.tests.test_mapreduce import mapper, reducer, dfs_run\n'), ((485, 542), 'jug.compound.CompoundTask', 'CompoundTask', (['jug.mapreduce.mapreduce', 'reducer', 'mapper', 'A'], {}), '(jug.mapreduce.mapreduce, reducer, mapper, A)\n', (497, 542), False, 'from jug.compound import CompoundTask\n'), ((728, 744), 'jug.tests.utils.simple_execute', 'simple_execute', ([], {}), '()\n', (742, 744), False, 'from jug.tests.utils import simple_execute\n'), ((831, 847), 'jug.tests.utils.simple_execute', 'simple_execute', ([], {}), '()\n', (845, 847), False, 'from jug.tests.utils import simple_execute\n'), ((1050, 1066), 'jug.tests.utils.simple_execute', 'simple_execute', ([], {}), '()\n', (1064, 1066), False, 'from jug.tests.utils import simple_execute\n'), ((1154, 1170), 'jug.tests.utils.simple_execute', 'simple_execute', ([], {}), '()\n', (1168, 1170), False, 'from jug.tests.utils import simple_execute\n'), ((1258, 1274), 'jug.tests.utils.simple_execute', 'simple_execute', ([], {}), '()\n', (1272, 1274), False, 'from jug.tests.utils import simple_execute\n'), ((1466, 1482), 'jug.tests.utils.simple_execute', 'simple_execute', ([], {}), '()\n', (1480, 1482), False, 'from jug.tests.utils import simple_execute\n'), ((1900, 1922), 'jug.options.default_options.copy', 'default_options.copy', ([], {}), '()\n', (1920, 1922), False, 'from jug.options import 
default_options\n'), ((2067, 2083), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2078, 2083), False, 'from collections import defaultdict\n'), ((2085, 2101), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2096, 2101), False, 'from collections import defaultdict\n')] |
# -*- coding: utf-8 -*-
import scipy.linalg, numpy, pandas, functools
# import pdb
def dummy(DF, cols=None):
"""Dummy code select columns of a DataFrame."""
return pandas.concat((pandas.get_dummies(DF[col]) for col in (DF.columns if cols is None else cols)),
axis=1, keys = DF.columns)
def _mul(*args):
"""An internal method to multiply matrices."""
return functools.reduce(numpy.dot, args)
class mca:
"""Run MCA on selected columns of a pandas DataFrame.
If the column are specified, assume that they hold
categorical variables that need to be replaced with
dummy indicators, otherwise process the DataFrame as is.
'cols': The columns of the DataFrame to process.
'K': The number of columns before dummy coding. To be passed if cols isn't.
'benzecri': Perform Benzécri correction (default: True)
'TOL': value below which to round eigenvalues to zero
"""
def __init__(self, DF, cols=None, ncols=None, benzecri=True, TOL=1e-4):
if cols: # if you want us to do the dummy coding
K = len(cols) # the number of categories
X = dummy(DF, cols)
else: # if you want to dummy code it yourself or do all the cols
K = ncols
if ncols is None: # be sure to pass K if you did not multi-index
K = len(DF.columns) # ... it with mca.dummy()
if not K: raise ValueError("Your DataFrame has no columns.")
elif not isinstance(ncols, int) or ncols<=0 or ncols>len(DF.columns): # if you dummy coded it yourself
raise ValueError("You must pass a valid number of columns.")
X = DF
S = X.sum().sum()
Z = X/S # correspondence matrix
self.r = Z.sum(axis=1)
self.c = Z.sum()
self._numitems = len(DF)
self.cor = benzecri
self.D_r = numpy.diag(1/numpy.sqrt(self.r))
Z_c = Z - numpy.outer(self.r,self.c) # standardized residuals matrix
self.D_c = numpy.diag(1/numpy.sqrt(self.c))
# another option, not pursued here, is sklearn.decomposition.TruncatedSVD
self.P, self.s, self.Q = scipy.linalg.svd(_mul(self.D_r, Z_c, self.D_c))
if benzecri: self.E = numpy.array([(K/(K-1)*(_ - 1/K))**2
if _ > 1/K else 0 for _ in self.s**2])
self.inertia = self.E.sum() if benzecri else sum(self.s**2)
self.rank = numpy.argmax((self.E if benzecri else self.s**2) < TOL)
self.L = (self.E if benzecri else self.s**2)[:self.rank]
def fs_r(self, percent=0.9, N=None):
"""Get the row factor scores (dimensionality-reduced representation),
choosing how many factors to retain, directly or based on the explained variance.
'percent': The minimum variance that the retained factors are required
to explain (default: 90% = 0.9)
'N': The number of factors to retain. Overrides 'percent'.
If the rank is less than N, N is ignored.
"""
if not 0 <= percent <= 1:
raise ValueError("Percent should be a real number between 0 and 1.")
if N:
if not isinstance(N, (int, numpy.int64)) or N<=0:
raise ValueError("N should be a positive integer.")
N = min(N, self.rank)
# S = numpy.zeros((self._numitems, N))
# else:
self.k = 1 + numpy.flatnonzero(numpy.cumsum(self.L) >= sum(self.L)*percent)[0]
# S = numpy.zeros((self._numitems, self.k))
# the sign of the square root can be either way; singular value vs. eigenvalue
# numpy.fill_diagonal(S, -numpy.sqrt(self.E) if self.cor else self.s)
num2ret = N if N else self.k
s = -numpy.sqrt(self.L) if self.cor else self.s
S = scipy.linalg.diagsvd(s[:num2ret], self._numitems, num2ret)
self.F = _mul(self.D_r, self.P, S)
return self.F
def fs_c(self, percent=0.9, N=None):
"""Get the column factor scores (dimensionality-reduced representation),
choosing how many factors to retain, directly or based on the explained variance.
'percent': The minimum variance that the retained factors are required
to explain (default: 90% = 0.9)
'N': The number of factors to retain. Overrides 'percent'.
If the rank is less than N, N is ignored.
"""
if not 0 <= percent <= 1:
raise ValueError("Percent should be a real number between 0 and 1.")
if N:
if not isinstance(N, (int, numpy.int64)) or N<=0:
raise ValueError("N should be a positive integer.")
N = min(N, self.rank) # maybe we should notify the user?
# S = numpy.zeros((self._numitems, N))
# else:
self.k = 1 + numpy.flatnonzero(numpy.cumsum(self.L) >= sum(self.L)*percent)[0]
# S = numpy.zeros((self._numitems, self.k))
# the sign of the square root can be either way; singular value vs. eigenvalue
# numpy.fill_diagonal(S, -numpy.sqrt(self.E) if self.cor else self.s)
num2ret = N if N else self.k
s = -numpy.sqrt(self.L) if self.cor else self.s
S = scipy.linalg.diagsvd(s[:num2ret], len(self.Q), num2ret)
self.G = _mul(self.D_c, self.Q.T, S) # important! note the transpose on Q
return self.G
def cos_r(self, N=None): #percent=0.9,
"""Return the squared cosines for each row."""
if not hasattr(self, 'F') or self.F.shape[1] < self.rank:
self.fs_r(N=self.rank) # generate F
self.dr = numpy.linalg.norm(self.F, axis=1)**2
# cheaper than numpy.diag(self.F.dot(self.F.T))?
return numpy.apply_along_axis(lambda _: _/self.dr, 0, self.F[:,:N]**2)
def cos_c(self, N=None): #percent=0.9,
"""Return the squared cosines for each column."""
if not hasattr(self, 'G') or self.G.shape[1] < self.rank:
self.fs_c(N=self.rank) # generate G
self.dc = numpy.linalg.norm(self.G, axis=1)**2
# cheaper than numpy.diag(self.G.dot(self.G.T))?
return numpy.apply_along_axis(lambda _: _/self.dc, 0, self.G[:,:N]**2)
def cont_r(self, percent=0.9, N=None):
"""Return the contribution of each row."""
if not hasattr(self, 'F'): self.fs_r(N=self.rank) # generate F
return numpy.apply_along_axis(lambda _: _/self.L[:N], 1,
numpy.apply_along_axis(lambda _: _*self.r, 0, self.F[:,:N]**2))
def cont_c(self, percent=0.9, N=None): # bug? check axis number 0 vs 1 here
"""Return the contribution of each row."""
if not hasattr(self, 'G'): self.fs_c(N=self.rank) # generate G
return numpy.apply_along_axis(lambda _: _/self.L[:N], 1,
numpy.apply_along_axis(lambda _: _*self.c, 0, self.G[:,:N]**2))
def fs_r_sup(self, DF, ncols=None):
"""Find the supplementary row factor scores.
ncols: The number of singular vectors to retain.
If both are passed, cols is given preference.
"""
if not hasattr(self, 'G'): self.fs_c(N=self.rank) # generate G
if ncols and (not isinstance(ncols, int) or ncols<=0):
raise ValueError("ncols should be a positive integer.")
s = -numpy.sqrt(self.E) if self.cor else self.s
N = min(ncols, self.rank) if ncols else self.rank
S_inv = scipy.linalg.diagsvd(-1/s[:N], len(self.G.T), N)
# S = scipy.linalg.diagsvd(s[:N], len(self.tau), N)
return _mul(DF.div(DF.sum(axis=1), axis=0), self.G, S_inv)[:,:N]
def fs_c_sup(self, DF, ncols=None):
"""Find the supplementary column factor scores.
ncols: The number of singular vectors to retain.
If both are passed, cols is given preference.
"""
if not hasattr(self, 'F'): self.fs_r(N=self.rank) # generate F
if ncols and (not isinstance(ncols, int) or ncols<=0):
raise ValueError("ncols should be a positive integer.")
s = -numpy.sqrt(self.E) if self.cor else self.s
N = min(ncols, self.rank) if ncols else self.rank
S_inv = scipy.linalg.diagsvd(-1/s[:N], len(self.F.T), N)
# S = scipy.linalg.diagsvd(s[:N], len(self.tau), N)
return _mul((DF/DF.sum()).T, self.F, S_inv)[:,:N] | [
"numpy.outer",
"numpy.argmax",
"pandas.get_dummies",
"numpy.apply_along_axis",
"numpy.cumsum",
"numpy.array",
"numpy.linalg.norm",
"functools.reduce",
"numpy.sqrt"
] | [((372, 405), 'functools.reduce', 'functools.reduce', (['numpy.dot', 'args'], {}), '(numpy.dot, args)\n', (388, 405), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((2165, 2222), 'numpy.argmax', 'numpy.argmax', (['((self.E if benzecri else self.s ** 2) < TOL)'], {}), '((self.E if benzecri else self.s ** 2) < TOL)\n', (2177, 2222), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((5052, 5120), 'numpy.apply_along_axis', 'numpy.apply_along_axis', (['(lambda _: _ / self.dr)', '(0)', '(self.F[:, :N] ** 2)'], {}), '(lambda _: _ / self.dr, 0, self.F[:, :N] ** 2)\n', (5074, 5120), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((5421, 5489), 'numpy.apply_along_axis', 'numpy.apply_along_axis', (['(lambda _: _ / self.dc)', '(0)', '(self.G[:, :N] ** 2)'], {}), '(lambda _: _ / self.dc, 0, self.G[:, :N] ** 2)\n', (5443, 5489), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((184, 211), 'pandas.get_dummies', 'pandas.get_dummies', (['DF[col]'], {}), '(DF[col])\n', (202, 211), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((1725, 1752), 'numpy.outer', 'numpy.outer', (['self.r', 'self.c'], {}), '(self.r, self.c)\n', (1736, 1752), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((2009, 2103), 'numpy.array', 'numpy.array', (['[((K / (K - 1) * (_ - 1 / K)) ** 2 if _ > 1 / K else 0) for _ in self.s ** 2]'], {}), '([((K / (K - 1) * (_ - 1 / K)) ** 2 if _ > 1 / K else 0) for _ in\n self.s ** 2])\n', (2020, 2103), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((4954, 4987), 'numpy.linalg.norm', 'numpy.linalg.norm', (['self.F'], {'axis': '(1)'}), '(self.F, axis=1)\n', (4971, 4987), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((5323, 5356), 'numpy.linalg.norm', 'numpy.linalg.norm', (['self.G'], {'axis': '(1)'}), '(self.G, axis=1)\n', (5340, 5356), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((5702, 5769), 'numpy.apply_along_axis', 'numpy.apply_along_axis', (['(lambda _: _ * 
self.r)', '(0)', '(self.F[:, :N] ** 2)'], {}), '(lambda _: _ * self.r, 0, self.F[:, :N] ** 2)\n', (5724, 5769), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((6018, 6085), 'numpy.apply_along_axis', 'numpy.apply_along_axis', (['(lambda _: _ * self.c)', '(0)', '(self.G[:, :N] ** 2)'], {}), '(lambda _: _ * self.c, 0, self.G[:, :N] ** 2)\n', (6040, 6085), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((1693, 1711), 'numpy.sqrt', 'numpy.sqrt', (['self.r'], {}), '(self.r)\n', (1703, 1711), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((1810, 1828), 'numpy.sqrt', 'numpy.sqrt', (['self.c'], {}), '(self.c)\n', (1820, 1828), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((3314, 3332), 'numpy.sqrt', 'numpy.sqrt', (['self.L'], {}), '(self.L)\n', (3324, 3332), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((4552, 4570), 'numpy.sqrt', 'numpy.sqrt', (['self.L'], {}), '(self.L)\n', (4562, 4570), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((6462, 6480), 'numpy.sqrt', 'numpy.sqrt', (['self.E'], {}), '(self.E)\n', (6472, 6480), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((7124, 7142), 'numpy.sqrt', 'numpy.sqrt', (['self.E'], {}), '(self.E)\n', (7134, 7142), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((3026, 3046), 'numpy.cumsum', 'numpy.cumsum', (['self.L'], {}), '(self.L)\n', (3038, 3046), False, 'import scipy.linalg, numpy, pandas, functools\n'), ((4264, 4284), 'numpy.cumsum', 'numpy.cumsum', (['self.L'], {}), '(self.L)\n', (4276, 4284), False, 'import scipy.linalg, numpy, pandas, functools\n')] |
import os
import time
import argparse
import hashlib
import numpy as np
import pickle
import torch
from utils.loss import get_loss
from model.factory import build_model
from utils.config import load_config, load_and_update_eval_config
from utils.checkpoint import load_state_dict, set_seed_and_random_states
from utils.logging import print_and_log_scores
from utils.scores import compute_scores, find_best_threshold
from data.data_loader import get_dataloader
from train import batch_loop
def parse_args():
"""
Parser for the arguments.
Returns
-------
args : Namespace
The arguments.
"""
parser = argparse.ArgumentParser(description="Test one or an ensemble of CNN's")
parser.add_argument('--eval-config',
type=str,
help="Path to eval config file.")
parser.add_argument('--data-folder',
type=str,
help="Absolute path to the data folder.")
parser.add_argument('--experiment-folders',
nargs='+',
default="results/",
help="A space-separated list of absolute path to the experiment folder.")
parser.add_argument('--ignore-cache',
dest='ignore_cache',
action='store_true',
default=False,
help="Whether to compute the labels/outputs ignoring evaluation cache.")
parser.add_argument('-t', '--thresholding',
dest='thresholding',
action='store_true',
default=False,
help="Whether to compute the scores using best threshold on validation set.")
parser.add_argument('-f', '--use-full-dataset',
dest='use_full_dataset',
action='store_true',
default=False,
help="Compute scores on the entire given dataset instead of only the test set.")
args = parser.parse_args()
return args
def compute_and_print_scores(labels, prediction, splits, scores_for_thresholding, suffix=''):
"""
Compute and print scores on valid and test.
Parameters
----------
labels: dict
test_y_true : array
Test true labels.
test_y_true_5_classes : array
Test true 5 classes labels.
valid_y_true : array
Valid true labels.
valid_y_true_5_classes : array
Valid true 5 classes labels.
raw_labels : list
List of labels.
prediction: dict
test_y_proba : array
Test predicted probabilities.
valid_y_proba : array
Valid predicted probabilities.
splits : list
List of splits.
scores_for_thresholding : list
List of score to use for thresholding.
suffix : str
Print suffix (e.g. ' ENSEMBLE'). Default ''.
"""
threshold = None
for score_for_thresholding in scores_for_thresholding:
if score_for_thresholding is not None:
print(f"\n\nFinding best threshold on VALID{suffix} optimizing for {score_for_thresholding} ...", end=" ")
start_time = time.time()
threshold = find_best_threshold(labels['valid_y_true'],
prediction['valid_y_proba'],
raw_labels=labels['raw_labels'],
score_type=score_for_thresholding)
time_elapsed = time.time() - start_time
print(f"Completed in {time_elapsed//60:.0f}m {time_elapsed%60:.0f}s")
print(f"Best threshold for {score_for_thresholding} on VALID {suffix}: {threshold}")
# Compute and print score
for split in splits:
map_to_binary = False
print_suffix = suffix
for i in range(2): # Needed to map to binary
map_to_5_classes = len(labels['raw_labels']) == 2
scores = compute_scores(labels[f'{split}_y_true'],
prediction[f'{split}_y_proba'],
mode=split,
raw_labels=labels['raw_labels'],
threshold=threshold,
map_to_binary=map_to_binary,
map_to_5_classes=map_to_5_classes,
y_true_5_classes=labels[f'{split}_y_true_5_classes'])
print(f"\nModel {split.upper()}{print_suffix} (thresholding: {str(score_for_thresholding)}) Scores:")
print_and_log_scores(scores, log=False)
map_to_binary = len(labels['raw_labels']) > 2
print_suffix = ' BINARY' + suffix
if not map_to_binary:
break
def _load_and_update_config(args, experiment_folder, verbose):
# add config to args to be able to load config from given location
args.config = os.path.join(experiment_folder, 'cfg.yml')
cfg = load_config(args, test_mode=True)
cfg['experiment_folder'] = experiment_folder
# Fix for config pre commit c63337cbc53cc<PASSWORD>18fb6a24820794ca1e2b9
if 'feature_scaling' not in cfg['dataset']:
cfg['dataset']['feature_scaling'] = "MaskedStandardization"
# Fix for config pre commit <PASSWORD>
if 'filter_target' not in cfg['dataset']['train']:
cfg['dataset']['train']['filter_target'] = False
# Fix for config pre commit d<PASSWORD>9<PASSWORD>8<PASSWORD>3<PASSWORD>
if 'patience_metrics' not in cfg['training']:
cfg['training']['patience_metrics'] = ['quadratic_weighted_kappa']
# Fix for config pre commit e744fc6e696c4bc1ec7b70421d6445666ff431a7
if 'n_views' not in cfg['dataset']['train']:
cfg['dataset']['train']['n_views'] = cfg['dataset']['n_views']
cfg['dataset']['eval']['n_views'] = cfg['dataset']['n_views']
cfg['dataset']['eval']['apply_train_augmentations'] = False
if args.eval_config:
cfg = load_and_update_eval_config(args.eval_config, cfg, verbose=verbose)
return cfg
def main_test():
# Load config file
args = parse_args()
# Current device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device: {device}")
# Check that all models in the ensemble have been trained using the same target
target_names = []
for experiment_folder in args.experiment_folders:
cfg = _load_and_update_config(args, experiment_folder, verbose=False)
target_names.append(cfg['dataset']['target_name'])
if len(set(target_names)) != 1:
raise ValueError("target_name should be the same for all model in the ensemble")
# Train exact is train data set using valid data augmentations
splits = ['train_exact', 'valid', 'test']
scores_for_thresholding = [None] # To compute the score without thresholding
if args.thresholding:
scores_for_thresholding += ['kappa', 'f1_macro']
if cfg['dataset']['target_name'] == 'screening_level':
scores_for_thresholding += ['J_statistic', 'f1_c1']
for patience_metric in cfg['training']['patience_metrics']:
print(f"\n\n#### Model trained using patience metric: {patience_metric} ####")
prediction = {}
if len(args.experiment_folders) > 1:
for split in splits:
prediction[f'{split}_y_proba_ensemble'] = []
labels = {}
for model_num, experiment_folder in enumerate(args.experiment_folders):
print(f"\n\n#### Model {model_num + 1}/{len(args.experiment_folders)} ####")
cfg = _load_and_update_config(args, experiment_folder, verbose=True)
cfg_hash = hashlib.md5(str(cfg).encode()).hexdigest()
cache_folder = os.path.join(experiment_folder, 'evaluation_cache')
labels_path = os.path.join(cache_folder, f"{cfg_hash}_labels.pkl")
prediction_path = os.path.join(cache_folder, f"{cfg_hash}_prediction.pkl")
labels_prediction_exists = all([os.path.exists(path) for path in [labels_path, prediction_path]])
if labels_prediction_exists and not args.ignore_cache:
# Do not recompute labels/prediction
print(f"\nLoading labels/prediction from the evaluation_cache folder from:\n{cfg['experiment_folder']}")
# Records labels only once (independent of preprocessing)
if model_num == 0:
with open(labels_path, 'rb') as f:
labels = pickle.load(f)
with open(prediction_path, 'rb') as f:
prediction.update(pickle.load(f))
else:
# Set seed : needed when we apply transformation at valid/test time.
set_seed_and_random_states(cfg['seed'], random_states=None, cuda=cfg['cuda'])
# Set up datasets for each model used in the ensemble as the pre-processing can be different
loaders = {}
if args.use_full_dataset:
# To save the full dataset prediction (only used when args.use_full_dataset is True)
labels['full_dataset_y_true'] = np.array([])
labels['full_dataset_y_true_5_classes'] = []
for split in splits:
print(f"\nSetup {cfg['dataset']['name']} resolution {cfg['dataset']['resolution']} {split} data set")
loaders[split] = get_dataloader(args.data_folder, cfg['dataset'], split in ['train_exact', 'valid'])[split]
print(f"Using feature scaling: {cfg['dataset']['feature_scaling']}")
print(f"# of examples: {len(loaders[split].dataset)}")
# Records labels only once (independent of preprocessing)
if model_num == 0:
labels[f'{split}_y_true'] = np.array(loaders[split].dataset.patient_target)
labels[f'{split}_y_true_5_classes'] = loaders[split].dataset.patient_target_5_classes
if args.use_full_dataset:
labels['full_dataset_y_true'] = np.concatenate((labels['full_dataset_y_true'], labels[f'{split}_y_true']))
labels['full_dataset_y_true_5_classes'] += labels[f'{split}_y_true_5_classes']
# Raw labels are the same regardless of the split
if split == 'test':
labels['raw_labels'] = loaders['test'].dataset.raw_labels
# Load model
state_fname = os.path.join(cfg['experiment_folder'], f"checkpoint_state_{patience_metric}.pth.tar")
if not os.path.exists(state_fname):
# Fix for config pre commit d23d5080bcf04d0989f856955349d3754e3e748e
state_fname = os.path.join(cfg['experiment_folder'], 'checkpoint_state.pth.tar')
if os.path.exists(state_fname):
print(f"\nLoading model from:\n{state_fname}")
state = load_state_dict(state_fname, True, device)
model = build_model(cfg['model'],
loaders['test'].dataset.num_class,
cfg['dataset']['use_both_eyes'],
device,
state['model'])
model = model.eval()
else:
raise Exception(f"There is no model weights available in this folder: {cfg['experiment_folder']}")
# Define loss function
loss_function = get_loss(cfg['loss']).to(device)
if args.use_full_dataset:
prediction['full_dataset_y_proba'] = np.empty((0, 1))
for split in splits:
print(f"\nEvaluating on {split} set...", end=" ")
start_time = time.time()
_, prediction[f'{split}_y_proba'], _ = batch_loop(loaders[split],
model,
None, # optimizer
loss_function,
device,
mode='TEST')
time_elapsed = time.time() - start_time
print(f"Completed in {time_elapsed//60:.0f}m {time_elapsed%60:.0f}s")
if args.use_full_dataset:
prediction['full_dataset_y_proba'] = np.concatenate((prediction['full_dataset_y_proba'], prediction[f'{split}_y_proba']))
# Save/overwrite labels/outputs to evaluation_cache
os.makedirs(cache_folder, exist_ok=True)
with open(labels_path, 'wb') as f:
pickle.dump(labels, f, protocol=pickle.HIGHEST_PROTOCOL)
with open(prediction_path, 'wb') as f:
pickle.dump(prediction, f, protocol=pickle.HIGHEST_PROTOCOL)
if args.use_full_dataset:
# To compute scores on full data set
splits.append('full_dataset')
if len(args.experiment_folders) > 1:
for split in splits:
prediction[f'{split}_y_proba_ensemble'].append(prediction[f'{split}_y_proba'])
compute_and_print_scores(labels, prediction, splits, scores_for_thresholding)
if len(args.experiment_folders) > 1:
print(f"\n\n### Evaluating ensemble of {len(args.experiment_folders)} models ###")
for split in splits:
# Average the prediction of the different models and update y_proba
prediction[f'{split}_y_proba'] = np.array(prediction[f'{split}_y_proba_ensemble']).mean(axis=0)
compute_and_print_scores(labels, prediction, splits, scores_for_thresholding, suffix=' ENSEMBLE')
splits.remove('full_dataset')
if __name__ == "__main__":
main_test()
| [
"pickle.dump",
"argparse.ArgumentParser",
"numpy.empty",
"pickle.load",
"utils.checkpoint.load_state_dict",
"train.batch_loop",
"utils.config.load_config",
"os.path.join",
"model.factory.build_model",
"os.path.exists",
"utils.loss.get_loss",
"utils.scores.find_best_threshold",
"utils.checkpo... | [((641, 712), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test one or an ensemble of CNN\'s"""'}), '(description="Test one or an ensemble of CNN\'s")\n', (664, 712), False, 'import argparse\n'), ((5134, 5176), 'os.path.join', 'os.path.join', (['experiment_folder', '"""cfg.yml"""'], {}), "(experiment_folder, 'cfg.yml')\n", (5146, 5176), False, 'import os\n'), ((5187, 5220), 'utils.config.load_config', 'load_config', (['args'], {'test_mode': '(True)'}), '(args, test_mode=True)\n', (5198, 5220), False, 'from utils.config import load_config, load_and_update_eval_config\n'), ((6195, 6262), 'utils.config.load_and_update_eval_config', 'load_and_update_eval_config', (['args.eval_config', 'cfg'], {'verbose': 'verbose'}), '(args.eval_config, cfg, verbose=verbose)\n', (6222, 6262), False, 'from utils.config import load_config, load_and_update_eval_config\n'), ((3271, 3282), 'time.time', 'time.time', ([], {}), '()\n', (3280, 3282), False, 'import time\n'), ((3307, 3451), 'utils.scores.find_best_threshold', 'find_best_threshold', (["labels['valid_y_true']", "prediction['valid_y_proba']"], {'raw_labels': "labels['raw_labels']", 'score_type': 'score_for_thresholding'}), "(labels['valid_y_true'], prediction['valid_y_proba'],\n raw_labels=labels['raw_labels'], score_type=score_for_thresholding)\n", (3326, 3451), False, 'from utils.scores import compute_scores, find_best_threshold\n'), ((6403, 6428), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6426, 6428), False, 'import torch\n'), ((7986, 8037), 'os.path.join', 'os.path.join', (['experiment_folder', '"""evaluation_cache"""'], {}), "(experiment_folder, 'evaluation_cache')\n", (7998, 8037), False, 'import os\n'), ((8064, 8116), 'os.path.join', 'os.path.join', (['cache_folder', 'f"""{cfg_hash}_labels.pkl"""'], {}), "(cache_folder, f'{cfg_hash}_labels.pkl')\n", (8076, 8116), False, 'import os\n'), ((8147, 8203), 'os.path.join', 'os.path.join', 
(['cache_folder', 'f"""{cfg_hash}_prediction.pkl"""'], {}), "(cache_folder, f'{cfg_hash}_prediction.pkl')\n", (8159, 8203), False, 'import os\n'), ((3607, 3618), 'time.time', 'time.time', ([], {}), '()\n', (3616, 3618), False, 'import time\n'), ((4092, 4361), 'utils.scores.compute_scores', 'compute_scores', (["labels[f'{split}_y_true']", "prediction[f'{split}_y_proba']"], {'mode': 'split', 'raw_labels': "labels['raw_labels']", 'threshold': 'threshold', 'map_to_binary': 'map_to_binary', 'map_to_5_classes': 'map_to_5_classes', 'y_true_5_classes': "labels[f'{split}_y_true_5_classes']"}), "(labels[f'{split}_y_true'], prediction[f'{split}_y_proba'],\n mode=split, raw_labels=labels['raw_labels'], threshold=threshold,\n map_to_binary=map_to_binary, map_to_5_classes=map_to_5_classes,\n y_true_5_classes=labels[f'{split}_y_true_5_classes'])\n", (4106, 4361), False, 'from utils.scores import compute_scores, find_best_threshold\n'), ((4764, 4803), 'utils.logging.print_and_log_scores', 'print_and_log_scores', (['scores'], {'log': '(False)'}), '(scores, log=False)\n', (4784, 4803), False, 'from utils.logging import print_and_log_scores\n'), ((8996, 9073), 'utils.checkpoint.set_seed_and_random_states', 'set_seed_and_random_states', (["cfg['seed']"], {'random_states': 'None', 'cuda': "cfg['cuda']"}), "(cfg['seed'], random_states=None, cuda=cfg['cuda'])\n", (9022, 9073), False, 'from utils.checkpoint import load_state_dict, set_seed_and_random_states\n'), ((10824, 10913), 'os.path.join', 'os.path.join', (["cfg['experiment_folder']", 'f"""checkpoint_state_{patience_metric}.pth.tar"""'], {}), "(cfg['experiment_folder'],\n f'checkpoint_state_{patience_metric}.pth.tar')\n", (10836, 10913), False, 'import os\n'), ((11172, 11199), 'os.path.exists', 'os.path.exists', (['state_fname'], {}), '(state_fname)\n', (11186, 11199), False, 'import os\n'), ((13126, 13166), 'os.makedirs', 'os.makedirs', (['cache_folder'], {'exist_ok': '(True)'}), '(cache_folder, exist_ok=True)\n', (13137, 13166), 
False, 'import os\n'), ((8248, 8268), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8262, 8268), False, 'import os\n'), ((9412, 9424), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9420, 9424), True, 'import numpy as np\n'), ((10933, 10960), 'os.path.exists', 'os.path.exists', (['state_fname'], {}), '(state_fname)\n', (10947, 10960), False, 'import os\n'), ((11085, 11151), 'os.path.join', 'os.path.join', (["cfg['experiment_folder']", '"""checkpoint_state.pth.tar"""'], {}), "(cfg['experiment_folder'], 'checkpoint_state.pth.tar')\n", (11097, 11151), False, 'import os\n'), ((11296, 11338), 'utils.checkpoint.load_state_dict', 'load_state_dict', (['state_fname', '(True)', 'device'], {}), '(state_fname, True, device)\n', (11311, 11338), False, 'from utils.checkpoint import load_state_dict, set_seed_and_random_states\n'), ((11367, 11489), 'model.factory.build_model', 'build_model', (["cfg['model']", "loaders['test'].dataset.num_class", "cfg['dataset']['use_both_eyes']", 'device', "state['model']"], {}), "(cfg['model'], loaders['test'].dataset.num_class, cfg['dataset']\n ['use_both_eyes'], device, state['model'])\n", (11378, 11489), False, 'from model.factory import build_model\n'), ((12032, 12048), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (12040, 12048), True, 'import numpy as np\n'), ((12189, 12200), 'time.time', 'time.time', ([], {}), '()\n', (12198, 12200), False, 'import time\n'), ((12260, 12335), 'train.batch_loop', 'batch_loop', (['loaders[split]', 'model', 'None', 'loss_function', 'device'], {'mode': '"""TEST"""'}), "(loaders[split], model, None, loss_function, device, mode='TEST')\n", (12270, 12335), False, 'from train import batch_loop\n'), ((13238, 13294), 'pickle.dump', 'pickle.dump', (['labels', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(labels, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (13249, 13294), False, 'import pickle\n'), ((13370, 13430), 'pickle.dump', 'pickle.dump', (['prediction', 'f'], {'protocol': 
'pickle.HIGHEST_PROTOCOL'}), '(prediction, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (13381, 13430), False, 'import pickle\n'), ((8752, 8766), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8763, 8766), False, 'import pickle\n'), ((8860, 8874), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8871, 8874), False, 'import pickle\n'), ((9686, 9773), 'data.data_loader.get_dataloader', 'get_dataloader', (['args.data_folder', "cfg['dataset']", "(split in ['train_exact', 'valid'])"], {}), "(args.data_folder, cfg['dataset'], split in ['train_exact',\n 'valid'])\n", (9700, 9773), False, 'from data.data_loader import get_dataloader\n'), ((10110, 10157), 'numpy.array', 'np.array', (['loaders[split].dataset.patient_target'], {}), '(loaders[split].dataset.patient_target)\n', (10118, 10157), True, 'import numpy as np\n'), ((11899, 11920), 'utils.loss.get_loss', 'get_loss', (["cfg['loss']"], {}), "(cfg['loss'])\n", (11907, 11920), False, 'from utils.loss import get_loss\n'), ((12734, 12745), 'time.time', 'time.time', ([], {}), '()\n', (12743, 12745), False, 'import time\n'), ((12956, 13045), 'numpy.concatenate', 'np.concatenate', (["(prediction['full_dataset_y_proba'], prediction[f'{split}_y_proba'])"], {}), "((prediction['full_dataset_y_proba'], prediction[\n f'{split}_y_proba']))\n", (12970, 13045), True, 'import numpy as np\n'), ((14154, 14203), 'numpy.array', 'np.array', (["prediction[f'{split}_y_proba_ensemble']"], {}), "(prediction[f'{split}_y_proba_ensemble'])\n", (14162, 14203), True, 'import numpy as np\n'), ((10378, 10452), 'numpy.concatenate', 'np.concatenate', (["(labels['full_dataset_y_true'], labels[f'{split}_y_true'])"], {}), "((labels['full_dataset_y_true'], labels[f'{split}_y_true']))\n", (10392, 10452), True, 'import numpy as np\n')] |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from nose.plugins.attrib import attr
from nose.tools import assert_raises, raises
import numpy as np
import pandas as pd
from ..match import Match, whichMatched
from ..data import gerber_green_imai
def create_example_matches(method='one-to-one'):
    """Build a small deterministic 4-unit Match fixture for the tests below."""
    # Seeding makes any stochastic tie-breaking inside Match reproducible.
    np.random.seed(23456)
    fixture = Match(groups=[1, 2, 1, 2], propensity=[0.1, 0.2, 0.15, 0.11])
    fixture.create(method)
    return fixture
@raises(AssertionError)
def test_match_errors_propensity_length():
    """A propensity vector longer than the groups vector must be rejected.

    Renamed from ``test_match_errors``: five tests shared that name, so all
    but the last redefinition were silently never collected by the runner.
    """
    match=Match(groups=[1,2,1,2], propensity=[0.1, 0.2, 0.15, 0.22, 0.11])
@raises(AssertionError)
def test_match_errors_three_groups():
    """More than two distinct group labels must be rejected.

    Renamed from ``test_match_errors``: five tests shared that name, so all
    but the last redefinition were silently never collected by the runner.
    """
    match=Match(groups=[1,2,1,3], propensity=[0.1, 0.2, 0.15, 0.22])
@raises(AssertionError)
def test_match_errors_negative_propensity():
    """Propensity scores outside [0, 1] must be rejected at construction.

    Renamed from ``test_match_errors``: five tests shared that name, so all
    but the last redefinition were silently never collected by the runner.
    """
    match=Match(groups=[1,2,1,2], propensity=[-0.1, 0.2, 0.15, 0.11])
@raises(ValueError)
def test_match_errors_unknown_method():
    """An unrecognized matching method must raise ValueError.

    Renamed from ``test_match_errors``: five tests shared that name, so all
    but the last redefinition were silently never collected by the runner.
    """
    match=Match(groups=[1,2,1,2], propensity=[0.1, 0.2, 0.15, 0.11])
    match.create(method='something made up')
@raises(AssertionError)
def test_match_errors_many_to_one_caliper():
    """many-to-one with many_method='caliper' requires a caliper_method.

    Renamed from ``test_match_errors``: five tests shared that name, so all
    but the last redefinition were silently never collected by the runner.
    """
    match=Match(groups=[1,2,1,2], propensity=[-0.1, 0.2, 0.15, 0.11])
    match.create(method='many-to-one', caliper_method=None, many_method='caliper')
def test_match_onetoone():
    """One-to-one matching on the seeded fixture: plain greedy pairing,
    a tight propensity caliper, and matching with replacement."""
    match = create_example_matches()
    # Plain greedy pairing: both treated units (0, 2) find distinct controls.
    expected_matches = {'match_pairs' : {0:3, 2:1},
                        'treated' : np.array([0, 2]),
                        'control' : np.array([1, 3]),
                        'dropped' : np.array([])}
    np.testing.assert_equal(match.matches, expected_matches)
    np.testing.assert_equal(match.weights, np.ones(4))
    # A 0.01 propensity caliper keeps only the (0, 3) pair; 1 and 2 are dropped.
    match.create(caliper_scale='propensity', caliper=0.01)
    expected_matches = {'match_pairs' : {0:3},
                        'treated' : np.array([0]),
                        'control' : np.array([3]),
                        'dropped' : np.array([1, 2])}
    np.testing.assert_equal(match.matches, expected_matches)
    np.testing.assert_equal(match.weights, np.array([1,0,0,1]))
    # With replacement control 3 serves both treated units, so its weight is 2.
    match.create(replace=True)
    expected_matches = {'match_pairs' : {0:3, 2:3},
                        'treated' : np.array([0, 2]),
                        'control' : np.array([3]),
                        'dropped' : np.array([1])}
    np.testing.assert_equal(match.matches, expected_matches)
    np.testing.assert_equal(match.weights, np.array([1,0,1,2]))
def test_match_manytoone():
    """Many-to-one matching: default settings, a logit caliper, and
    k-nearest-neighbour matching with replacement."""
    match = create_example_matches(method='many-to-one')
    # Default many-to-one: both treated units map to control 3 (weight 2).
    expected_matches = {'match_pairs' : {0:np.array([3]), 2: np.array([3])},
                        'treated' : np.array([0,2]),
                        'control' : np.array([3]),
                        'dropped' : np.array([1])}
    np.testing.assert_equal(match.matches, expected_matches)
    np.testing.assert_equal(match.weights, np.array([1,0,1,2]))
    # A logit-scale caliper without replacement spreads the two treated
    # units over the two distinct controls.
    match.create(method='many-to-one', caliper_scale='logit', replace=False)
    expected_matches = {'match_pairs' : {0:np.array([1]), 2: np.array([3])},
                        'treated' : np.array([0,2]),
                        'control' : np.array([1,3]),
                        'dropped' : np.array([])}
    np.testing.assert_equal(match.matches, expected_matches)
    np.testing.assert_equal(match.weights, np.ones(4))
    # knn with k=2 and replacement: each treated unit gets both controls.
    match.create(method='many-to-one', many_method='knn', k=2, replace=True)
    expected_matches = {'match_pairs' : {0:np.array([3,1]), 2: np.array([3,1])},
                        'treated' : np.array([0,2]),
                        'control' : np.array([1,3]),
                        'dropped' : np.array([])}
    np.testing.assert_equal(match.matches, expected_matches)
    np.testing.assert_equal(match.weights, np.ones(4))
@raises(ValueError)
def test_match_plot_inputs():
    """plot_balance must reject an unknown balance-test name."""
    fixture = create_example_matches(method='one-to-one')
    covariates = pd.DataFrame([0.1, 0.2, 0.15, 0.11])
    fixture.plot_balance(covariates, test='fake')
def test_whichMatched():
    """whichMatched returns the matched rows of a covariate frame, with and
    without duplicated controls, and after re-matching without replacement.

    Fix: ``DataFrame.ix`` was deprecated and removed in pandas 1.0; ``.loc``
    is the label-based equivalent (labels equal positions here because the
    frame has a default RangeIndex).
    """
    df = pd.DataFrame([0.1, 0.2, 0.15, 0.11])
    match = create_example_matches(method='many-to-one')
    res = whichMatched(match, df)
    # Control 3 is matched twice, so it appears twice when duplicates are kept.
    np.testing.assert_equal(list(res[0]), list(df.loc[[0, 2, 3, 3]][0]))
    res = whichMatched(match, df, show_duplicates=False)
    np.testing.assert_equal(list(res[0]), list(df.loc[[0, 2, 3]][0]))
    np.testing.assert_equal(list(res.frequency), [1, 1, 2])
    # Without replacement every unit is matched exactly once.
    match.create(method='many-to-one', caliper_scale='logit', replace=False)
    res = whichMatched(match, df)
    np.testing.assert_equal(list(res[0]), list(df[0]))
"pandas.DataFrame",
"numpy.random.seed",
"numpy.ones",
"numpy.array",
"numpy.testing.assert_equal",
"nose.tools.raises"
] | [((499, 521), 'nose.tools.raises', 'raises', (['AssertionError'], {}), '(AssertionError)\n', (505, 521), False, 'from nose.tools import assert_raises, raises\n'), ((628, 650), 'nose.tools.raises', 'raises', (['AssertionError'], {}), '(AssertionError)\n', (634, 650), False, 'from nose.tools import assert_raises, raises\n'), ((751, 773), 'nose.tools.raises', 'raises', (['AssertionError'], {}), '(AssertionError)\n', (757, 773), False, 'from nose.tools import assert_raises, raises\n'), ((875, 893), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (881, 893), False, 'from nose.tools import assert_raises, raises\n'), ((1036, 1058), 'nose.tools.raises', 'raises', (['AssertionError'], {}), '(AssertionError)\n', (1042, 1058), False, 'from nose.tools import assert_raises, raises\n'), ((3720, 3738), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (3726, 3738), False, 'from nose.tools import assert_raises, raises\n'), ((364, 385), 'numpy.random.seed', 'np.random.seed', (['(23456)'], {}), '(23456)\n', (378, 385), True, 'import numpy as np\n'), ((1516, 1572), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['match.matches', 'expected_matches'], {}), '(match.matches, expected_matches)\n', (1539, 1572), True, 'import numpy as np\n'), ((1899, 1955), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['match.matches', 'expected_matches'], {}), '(match.matches, expected_matches)\n', (1922, 1955), True, 'import numpy as np\n'), ((2272, 2328), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['match.matches', 'expected_matches'], {}), '(match.matches, expected_matches)\n', (2295, 2328), True, 'import numpy as np\n'), ((2719, 2775), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['match.matches', 'expected_matches'], {}), '(match.matches, expected_matches)\n', (2742, 2775), True, 'import numpy as np\n'), ((3159, 3215), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['match.matches', 
'expected_matches'], {}), '(match.matches, expected_matches)\n', (3182, 3215), True, 'import numpy as np\n'), ((3598, 3654), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['match.matches', 'expected_matches'], {}), '(match.matches, expected_matches)\n', (3621, 3654), True, 'import numpy as np\n'), ((3939, 3975), 'pandas.DataFrame', 'pd.DataFrame', (['[0.1, 0.2, 0.15, 0.11]'], {}), '([0.1, 0.2, 0.15, 0.11])\n', (3951, 3975), True, 'import pandas as pd\n'), ((1390, 1406), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (1398, 1406), True, 'import numpy as np\n'), ((1444, 1460), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (1452, 1460), True, 'import numpy as np\n'), ((1498, 1510), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1506, 1510), True, 'import numpy as np\n'), ((1616, 1626), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1623, 1626), True, 'import numpy as np\n'), ((1775, 1788), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1783, 1788), True, 'import numpy as np\n'), ((1826, 1839), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (1834, 1839), True, 'import numpy as np\n'), ((1877, 1893), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (1885, 1893), True, 'import numpy as np\n'), ((1999, 2021), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (2007, 2021), True, 'import numpy as np\n'), ((2148, 2164), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (2156, 2164), True, 'import numpy as np\n'), ((2202, 2215), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (2210, 2215), True, 'import numpy as np\n'), ((2253, 2266), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2261, 2266), True, 'import numpy as np\n'), ((2372, 2394), 'numpy.array', 'np.array', (['[1, 0, 1, 2]'], {}), '([1, 0, 1, 2])\n', (2380, 2394), True, 'import numpy as np\n'), ((2596, 2612), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (2604, 2612), True, 'import numpy as 
np\n'), ((2649, 2662), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (2657, 2662), True, 'import numpy as np\n'), ((2700, 2713), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2708, 2713), True, 'import numpy as np\n'), ((2819, 2841), 'numpy.array', 'np.array', (['[1, 0, 1, 2]'], {}), '([1, 0, 1, 2])\n', (2827, 2841), True, 'import numpy as np\n'), ((3035, 3051), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (3043, 3051), True, 'import numpy as np\n'), ((3088, 3104), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (3096, 3104), True, 'import numpy as np\n'), ((3141, 3153), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3149, 3153), True, 'import numpy as np\n'), ((3263, 3273), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3270, 3273), True, 'import numpy as np\n'), ((3474, 3490), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (3482, 3490), True, 'import numpy as np\n'), ((3527, 3543), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (3535, 3543), True, 'import numpy as np\n'), ((3580, 3592), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3588, 3592), True, 'import numpy as np\n'), ((3702, 3712), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3709, 3712), True, 'import numpy as np\n'), ((3848, 3884), 'pandas.DataFrame', 'pd.DataFrame', (['[0.1, 0.2, 0.15, 0.11]'], {}), '([0.1, 0.2, 0.15, 0.11])\n', (3860, 3884), True, 'import pandas as pd\n'), ((2526, 2539), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (2534, 2539), True, 'import numpy as np\n'), ((2544, 2557), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (2552, 2557), True, 'import numpy as np\n'), ((2965, 2978), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2973, 2978), True, 'import numpy as np\n'), ((2983, 2996), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (2991, 2996), True, 'import numpy as np\n'), ((3400, 3416), 'numpy.array', 'np.array', (['[3, 1]'], {}), '([3, 1])\n', (3408, 3416), True, 'import 
numpy as np\n'), ((3420, 3436), 'numpy.array', 'np.array', (['[3, 1]'], {}), '([3, 1])\n', (3428, 3436), True, 'import numpy as np\n')] |
#####################
##### UNIT TEST #####
#####################
import unittest
import numpy as np
import pandas as pd
#from ranky import *
import ranky as rk
def test_distance_matrix():
    """Smoke-test rk.distance_matrix on the template matrix with two metrics."""
    print('Distance matrix...')
    template = pd.read_csv('data/matrix.csv')
    # Promote the 'index' column to the index and drop its name.
    template.index = template['index']
    template = template.drop('index', axis=1).rename_axis(None, axis=0)
    print('Default')
    print(rk.distance_matrix(template))
    print('Levenshtein')
    print(rk.distance_matrix(template, method='levenshtein'))
def test_generator():
    """Smoke-test rk.Generator: fit a reference ranking, sample judgements,
    then recover the ranking with two aggregation methods."""
    def _report(reference, ranking):
        # Print a recovered ranking and its agreement with the reference.
        print(ranking)
        dist = rk.dist(rk.rank(reference), rk.rank(ranking))
        corr = rk.corr(reference, ranking)
        print('hamming distance: {}'.format(dist))
        print('kendall tau correlation: {}'.format(corr))

    print('Testing generator...')
    generator = rk.Generator()
    reference = [5, 4, 3, 2, 1]
    generator.fit(reference)
    print('Judgement matrix')
    judgements = generator.sample(n=9)
    print(judgements)
    print('Score ranking')
    _report(reference, rk.score(judgements))
    print('Uninominal ranking')
    _report(reference, rk.uninominal(judgements))
def test_metric():
    """Smoke-test that each metric can be computed without error.

    Failures are printed rather than raised on purpose: this is a coverage
    sweep over metric names, not a correctness check.
    """
    print('Testing metrics...')
    y_true = pd.read_csv('data/test_metric/task.solution', sep=' ', header=None)
    y_pred = pd.read_csv('data/test_metric/task.predict', sep=' ', header=None)
    y_pred_proba = pd.read_csv('data/test_metric/task_proba.predict', sep=' ', header=None)
    metric_names = ['accuracy', 'balanced_accuracy', 'precision', 'average_precision',
                    'f1_score', 'mxe', 'recall', 'jaccard', 'roc_auc', 'mse', 'rmse']
    for name in metric_names:
        try:
            print('{}: {}'.format(name, rk.metric(y_true, y_pred, method=name)))
            print('{}: {}'.format(name, rk.metric(y_true, y_pred_proba, method=name)))
        except Exception as error:
            # Best-effort sweep: report the failing metric and carry on.
            print('Failed for {}'.format(name))
            print(error)
    print('Combined loss (SAR by default)')
    print('{}'.format(rk.combined_metric(y_true, y_pred)))
    print('{}'.format(rk.combined_metric(y_true, y_pred_proba)))
def test_utilities():
    """Smoke-test the Codalab leaderboard CSV reader."""
    print('Testing utilities...')
    board = rk.read_codalab_csv('data/chems.csv')
    print(board.head())
class Test(unittest.TestCase):
    """Unit tests for ranky's aggregation, consensus and distance functions.

    Fixes: the class defined ``test_winner_distance`` twice, so the first
    definition (testing ``rk.dist(..., method='winner')``) was silently
    shadowed and never ran — it is renamed ``test_dist_winner``. Exact
    float comparisons in ``test_kendall_w`` now use ``assertAlmostEqual``.
    """

    # Shared judgement matrix: rows are candidates, columns are judges.
    M = np.array([[0.3, 0.4, 0.6], [0.8, 0.8, 0.8], [0.1, 0.5, 0.7], [0.2, 0.2, 0.2], [0, 0, 0]])

    def test_rank(self):
        rank_M = np.array([[2., 3., 3.], [1., 1., 1.], [4., 2., 2.], [3., 4., 4.], [5., 5., 5.]])
        np.testing.assert_array_equal(rk.rank(self.__class__.M), rank_M)

    def test_borda(self):
        np.testing.assert_array_almost_equal(rk.borda(self.__class__.M), np.array([2.66666667, 1., 2.66666667, 3.66666667, 5.]))

    def test_majority(self):
        np.testing.assert_array_equal(rk.majority(self.__class__.M), np.array([0.4, 0.8, 0.5, 0.2, 0.]))

    def test_condorcet(self):
        np.testing.assert_array_equal(rk.condorcet(self.__class__.M), np.array([2., 4., 3., 1., 0.]))

    def test_condorcet2(self):
        # With the statistical wins test at pval=0.2 no victory is counted.
        np.testing.assert_array_equal(rk.condorcet(self.__class__.M, wins=rk.p_wins, pval=0.2), np.array([0., 0., 0., 0., 0.]))

    def test_consensus(self):
        rank_M = np.array([[1., 2., 3.], [1., 3., 2.], [1., 2., 3.]])
        np.testing.assert_array_equal(rk.consensus(rank_M, axis=1), np.array([True, False, False]))

    def test_dist_winner(self):
        # Renamed: the old name collided with test_winner_distance below.
        self.assertEqual(rk.dist([1, 2, 3], [2, 1, 3], method='winner'), 1)

    def test_optimal_spearman_is_borda(self):
        """Check that Borda count and Spearman-optimal rank aggregation return
        the same output on the template matrix.
        """
        m_template = pd.read_csv('data/matrix.csv')
        m_template.index = m_template['index']
        m_template = m_template.drop('index', axis=1).rename_axis(None, axis=0)
        borda_rank = rk.rank(rk.borda(m_template), reverse=True)
        optimal_spearman_rank = rk.rank(rk.center(m_template, method='spearman'))
        print(borda_rank)
        print(optimal_spearman_rank)
        np.testing.assert_array_equal(borda_rank, optimal_spearman_rank)

    def test_kendall_w(self):
        M2 = np.array([[1, 2.5, 2.5, 4], [1, 2.5, 2.5, 4], [1, 2.5, 2.5, 4]])
        M3 = np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])
        # Floating-point results: compare approximately, not exactly.
        self.assertAlmostEqual(rk.kendall_w(M2), 0.9)
        self.assertAlmostEqual(rk.kendall_w(M2, ties=True), 1.0)
        self.assertAlmostEqual(rk.kendall_w(M3), 0.0)

    def test_bayes_wins(self):
        a = [0, 0, 0.2, 0, 0.3]
        b = [1, 0.8, 1, 0.2, 0.2]
        bw = rk.bayes_wins(a, b)
        self.assertEqual(bw, False)

    def test_relative_difference(self):
        rd = rk.relative_difference([0, 0, 1], [0, 0, 1])
        self.assertEqual(rd, 0)
        rd = rk.relative_difference([0, 0], [0, 0])
        self.assertEqual(rd, 0)
        rd = rk.relative_difference([0.8, 0.1, 0.8], [0.2, 0.1, 0.2])
        self.assertAlmostEqual(rd, 0.4)

    def test_winner_distance(self):
        d = rk.winner_distance([1, 0.7, 0.2, 0.1, 0.1], [0.7, 1, 0.5, 0.4, 0.1])
        self.assertAlmostEqual(d, 0.25)
if __name__ == '__main__':
    # Demo/smoke-test entry point: print a few aggregation measures on a
    # small judgement matrix, run the module-level smoke tests, then hand
    # control to unittest for the Test class above.
    print('Compute various measures...')
    m = np.array([[1, 2, 3, 4], [1, 2, 4, 3], [1, 2, 4, 3], [1, 3, 2, 4], [2, 1, 3, 4], [1, 4, 3, 2]])
    #print(evolution_strategy(m, axis=1, l=5))
    print('Matrix:\n{}'.format(m))
    # Agreement measures along both axes of the matrix.
    print('Concordance: {}'.format(rk.concordance(m, axis=0)))
    print('Kendall W: {}'.format(rk.kendall_w(m, axis=0)))
    print('Concordance (axis=1): {}'.format(rk.concordance(m, axis=1)))
    print('Kendall W (axis=1): {}'.format(rk.kendall_w(m, axis=1)))
    # Central-ranking aggregation with two different distance methods.
    print('Euclidean center method: {}'.format(rk.center(m, method='euclidean', axis=0)))
    print('Pearson center method: {}'.format(rk.center(m, method='pearson', axis=0)))
    test_generator()
    test_metric()
    test_utilities()
    test_distance_matrix()
    print('Unit testing...')
    unittest.main()
'''
TODO to_binary
>>> m = np.array([[0.2, 0.3, 0.1], [0.5, 0.6, 0.7]])
>>> m
array([[0.2, 0.3, 0.1],
[0.5, 0.6, 0.7]])
>>> rk.to_binary(m)
array([[0, 0, 0],
[0, 1, 1]])
>>> rk.to_binary(m, unilabel=True)
array([[0, 1, 0],
[0, 0, 1]])
>>> rk.to_binary(m, unilabel=True, at_least_one_class=True)
array([[0, 1, 0],
[0, 0, 1]])
>>> rk.to_binary(m, unilabel=False, at_least_one_class=True)
array([[0, 1, 0],
[0, 1, 1]])
>>> m
array([[0.2, 0.3, 0.1],
[0.5, 0.6, 0.7]])
>>> rk.to_binary(m, unilabel=False, at_least_one_class=False)
array([[0, 0, 0],
[0, 1, 1]])
'''
| [
"pandas.read_csv",
"ranky.concordance",
"unittest.main",
"ranky.borda",
"ranky.corr",
"ranky.combined_metric",
"ranky.center",
"ranky.distance_matrix",
"ranky.kendall_w",
"ranky.read_codalab_csv",
"numpy.testing.assert_array_equal",
"ranky.score",
"ranky.Generator",
"ranky.rank",
"ranky.... | [((240, 270), 'pandas.read_csv', 'pd.read_csv', (['"""data/matrix.csv"""'], {}), "('data/matrix.csv')\n", (251, 270), True, 'import pandas as pd\n'), ((431, 461), 'ranky.distance_matrix', 'rk.distance_matrix', (['m_template'], {}), '(m_template)\n', (449, 461), True, 'import ranky as rk\n'), ((528, 580), 'ranky.distance_matrix', 'rk.distance_matrix', (['m_template'], {'method': '"""levenshtein"""'}), "(m_template, method='levenshtein')\n", (546, 580), True, 'import ranky as rk\n'), ((669, 683), 'ranky.Generator', 'rk.Generator', ([], {}), '()\n', (681, 683), True, 'import ranky as rk\n'), ((821, 832), 'ranky.score', 'rk.score', (['m'], {}), '(m)\n', (829, 832), True, 'import ranky as rk\n'), ((911, 924), 'ranky.corr', 'rk.corr', (['R', 'r'], {}), '(R, r)\n', (918, 924), True, 'import ranky as rk\n'), ((1077, 1093), 'ranky.uninominal', 'rk.uninominal', (['m'], {}), '(m)\n', (1090, 1093), True, 'import ranky as rk\n'), ((1172, 1185), 'ranky.corr', 'rk.corr', (['R', 'r'], {}), '(R, r)\n', (1179, 1185), True, 'import ranky as rk\n'), ((1475, 1542), 'pandas.read_csv', 'pd.read_csv', (['"""data/test_metric/task.solution"""'], {'sep': '""" """', 'header': 'None'}), "('data/test_metric/task.solution', sep=' ', header=None)\n", (1486, 1542), True, 'import pandas as pd\n'), ((1556, 1622), 'pandas.read_csv', 'pd.read_csv', (['"""data/test_metric/task.predict"""'], {'sep': '""" """', 'header': 'None'}), "('data/test_metric/task.predict', sep=' ', header=None)\n", (1567, 1622), True, 'import pandas as pd\n'), ((1642, 1714), 'pandas.read_csv', 'pd.read_csv', (['"""data/test_metric/task_proba.predict"""'], {'sep': '""" """', 'header': 'None'}), "('data/test_metric/task_proba.predict', sep=' ', header=None)\n", (1653, 1714), True, 'import pandas as pd\n'), ((2372, 2409), 'ranky.read_codalab_csv', 'rk.read_codalab_csv', (['"""data/chems.csv"""'], {}), "('data/chems.csv')\n", (2391, 2409), True, 'import ranky as rk\n'), ((2480, 2574), 'numpy.array', 'np.array', 
(['[[0.3, 0.4, 0.6], [0.8, 0.8, 0.8], [0.1, 0.5, 0.7], [0.2, 0.2, 0.2], [0, 0, 0]]'], {}), '([[0.3, 0.4, 0.6], [0.8, 0.8, 0.8], [0.1, 0.5, 0.7], [0.2, 0.2, 0.2\n ], [0, 0, 0]])\n', (2488, 2574), True, 'import numpy as np\n'), ((5348, 5446), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [1, 2, 4, 3], [1, 2, 4, 3], [1, 3, 2, 4], [2, 1, 3, 4], [1, \n 4, 3, 2]]'], {}), '([[1, 2, 3, 4], [1, 2, 4, 3], [1, 2, 4, 3], [1, 3, 2, 4], [2, 1, 3,\n 4], [1, 4, 3, 2]])\n', (5356, 5446), True, 'import numpy as np\n'), ((6085, 6100), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6098, 6100), False, 'import unittest\n'), ((869, 879), 'ranky.rank', 'rk.rank', (['R'], {}), '(R)\n', (876, 879), True, 'import ranky as rk\n'), ((881, 891), 'ranky.rank', 'rk.rank', (['r'], {}), '(r)\n', (888, 891), True, 'import ranky as rk\n'), ((1130, 1140), 'ranky.rank', 'rk.rank', (['R'], {}), '(R)\n', (1137, 1140), True, 'import ranky as rk\n'), ((1142, 1152), 'ranky.rank', 'rk.rank', (['r'], {}), '(r)\n', (1149, 1152), True, 'import ranky as rk\n'), ((2612, 2712), 'numpy.array', 'np.array', (['[[2.0, 3.0, 3.0], [1.0, 1.0, 1.0], [4.0, 2.0, 2.0], [3.0, 4.0, 4.0], [5.0, \n 5.0, 5.0]]'], {}), '([[2.0, 3.0, 3.0], [1.0, 1.0, 1.0], [4.0, 2.0, 2.0], [3.0, 4.0, 4.0\n ], [5.0, 5.0, 5.0]])\n', (2620, 2712), True, 'import numpy as np\n'), ((3393, 3454), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [1.0, 3.0, 2.0], [1.0, 2.0, 3.0]]'], {}), '([[1.0, 2.0, 3.0], [1.0, 3.0, 2.0], [1.0, 2.0, 3.0]])\n', (3401, 3454), True, 'import numpy as np\n'), ((3858, 3888), 'pandas.read_csv', 'pd.read_csv', (['"""data/matrix.csv"""'], {}), "('data/matrix.csv')\n", (3869, 3888), True, 'import pandas as pd\n'), ((4236, 4300), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['borda_rank', 'optimal_spearman_rank'], {}), '(borda_rank, optimal_spearman_rank)\n', (4265, 4300), True, 'import numpy as np\n'), ((4344, 4408), 'numpy.array', 'np.array', (['[[1, 2.5, 2.5, 4], [1, 2.5, 2.5, 4], [1, 2.5, 2.5, 
4]]'], {}), '([[1, 2.5, 2.5, 4], [1, 2.5, 2.5, 4], [1, 2.5, 2.5, 4]])\n', (4352, 4408), True, 'import numpy as np\n'), ((4422, 4474), 'numpy.array', 'np.array', (['[[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]'], {}), '([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])\n', (4430, 4474), True, 'import numpy as np\n'), ((4740, 4759), 'ranky.bayes_wins', 'rk.bayes_wins', (['a', 'b'], {}), '(a, b)\n', (4753, 4759), True, 'import ranky as rk\n'), ((4849, 4893), 'ranky.relative_difference', 'rk.relative_difference', (['[0, 0, 1]', '[0, 0, 1]'], {}), '([0, 0, 1], [0, 0, 1])\n', (4871, 4893), True, 'import ranky as rk\n'), ((4939, 4977), 'ranky.relative_difference', 'rk.relative_difference', (['[0, 0]', '[0, 0]'], {}), '([0, 0], [0, 0])\n', (4961, 4977), True, 'import ranky as rk\n'), ((5023, 5079), 'ranky.relative_difference', 'rk.relative_difference', (['[0.8, 0.1, 0.8]', '[0.2, 0.1, 0.2]'], {}), '([0.8, 0.1, 0.8], [0.2, 0.1, 0.2])\n', (5045, 5079), True, 'import ranky as rk\n'), ((5168, 5236), 'ranky.winner_distance', 'rk.winner_distance', (['[1, 0.7, 0.2, 0.1, 0.1]', '[0.7, 1, 0.5, 0.4, 0.1]'], {}), '([1, 0.7, 0.2, 0.1, 0.1], [0.7, 1, 0.5, 0.4, 0.1])\n', (5186, 5236), True, 'import ranky as rk\n'), ((2195, 2229), 'ranky.combined_metric', 'rk.combined_metric', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2213, 2229), True, 'import ranky as rk\n'), ((2254, 2294), 'ranky.combined_metric', 'rk.combined_metric', (['y_true', 'y_pred_proba'], {}), '(y_true, y_pred_proba)\n', (2272, 2294), True, 'import ranky as rk\n'), ((2731, 2756), 'ranky.rank', 'rk.rank', (['self.__class__.M'], {}), '(self.__class__.M)\n', (2738, 2756), True, 'import ranky as rk\n'), ((2837, 2863), 'ranky.borda', 'rk.borda', (['self.__class__.M'], {}), '(self.__class__.M)\n', (2845, 2863), True, 'import ranky as rk\n'), ((2865, 2921), 'numpy.array', 'np.array', (['[2.66666667, 1.0, 2.66666667, 3.66666667, 5.0]'], {}), '([2.66666667, 1.0, 2.66666667, 3.66666667, 5.0])\n', (2873, 2921), True, 'import numpy as 
np\n'), ((2988, 3017), 'ranky.majority', 'rk.majority', (['self.__class__.M'], {}), '(self.__class__.M)\n', (2999, 3017), True, 'import ranky as rk\n'), ((3019, 3054), 'numpy.array', 'np.array', (['[0.4, 0.8, 0.5, 0.2, 0.0]'], {}), '([0.4, 0.8, 0.5, 0.2, 0.0])\n', (3027, 3054), True, 'import numpy as np\n'), ((3123, 3153), 'ranky.condorcet', 'rk.condorcet', (['self.__class__.M'], {}), '(self.__class__.M)\n', (3135, 3153), True, 'import ranky as rk\n'), ((3155, 3190), 'numpy.array', 'np.array', (['[2.0, 4.0, 3.0, 1.0, 0.0]'], {}), '([2.0, 4.0, 3.0, 1.0, 0.0])\n', (3163, 3190), True, 'import numpy as np\n'), ((3256, 3312), 'ranky.condorcet', 'rk.condorcet', (['self.__class__.M'], {'wins': 'rk.p_wins', 'pval': '(0.2)'}), '(self.__class__.M, wins=rk.p_wins, pval=0.2)\n', (3268, 3312), True, 'import ranky as rk\n'), ((3314, 3349), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0])\n', (3322, 3349), True, 'import numpy as np\n'), ((3484, 3512), 'ranky.consensus', 'rk.consensus', (['rank_M'], {'axis': '(1)'}), '(rank_M, axis=1)\n', (3496, 3512), True, 'import ranky as rk\n'), ((3514, 3544), 'numpy.array', 'np.array', (['[True, False, False]'], {}), '([True, False, False])\n', (3522, 3544), True, 'import numpy as np\n'), ((3607, 3653), 'ranky.dist', 'rk.dist', (['[1, 2, 3]', '[2, 1, 3]'], {'method': '"""winner"""'}), "([1, 2, 3], [2, 1, 3], method='winner')\n", (3614, 3653), True, 'import ranky as rk\n'), ((4047, 4067), 'ranky.borda', 'rk.borda', (['m_template'], {}), '(m_template)\n', (4055, 4067), True, 'import ranky as rk\n'), ((4123, 4163), 'ranky.center', 'rk.center', (['m_template'], {'method': '"""spearman"""'}), "(m_template, method='spearman')\n", (4132, 4163), True, 'import ranky as rk\n'), ((4500, 4516), 'ranky.kendall_w', 'rk.kendall_w', (['M2'], {}), '(M2)\n', (4512, 4516), True, 'import ranky as rk\n'), ((4548, 4575), 'ranky.kendall_w', 'rk.kendall_w', (['M2'], {'ties': '(True)'}), '(M2, ties=True)\n', (4560, 4575), 
True, 'import ranky as rk\n'), ((4607, 4623), 'ranky.kendall_w', 'rk.kendall_w', (['M3'], {}), '(M3)\n', (4619, 4623), True, 'import ranky as rk\n'), ((5560, 5585), 'ranky.concordance', 'rk.concordance', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (5574, 5585), True, 'import ranky as rk\n'), ((5621, 5644), 'ranky.kendall_w', 'rk.kendall_w', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (5633, 5644), True, 'import ranky as rk\n'), ((5691, 5716), 'ranky.concordance', 'rk.concordance', (['m'], {'axis': '(1)'}), '(m, axis=1)\n', (5705, 5716), True, 'import ranky as rk\n'), ((5761, 5784), 'ranky.kendall_w', 'rk.kendall_w', (['m'], {'axis': '(1)'}), '(m, axis=1)\n', (5773, 5784), True, 'import ranky as rk\n'), ((5834, 5874), 'ranky.center', 'rk.center', (['m'], {'method': '"""euclidean"""', 'axis': '(0)'}), "(m, method='euclidean', axis=0)\n", (5843, 5874), True, 'import ranky as rk\n'), ((5922, 5960), 'ranky.center', 'rk.center', (['m'], {'method': '"""pearson"""', 'axis': '(0)'}), "(m, method='pearson', axis=0)\n", (5931, 5960), True, 'import ranky as rk\n'), ((1913, 1948), 'ranky.metric', 'rk.metric', (['y_true', 'y_pred'], {'method': 'm'}), '(y_true, y_pred, method=m)\n', (1922, 1948), True, 'import ranky as rk\n'), ((1988, 2029), 'ranky.metric', 'rk.metric', (['y_true', 'y_pred_proba'], {'method': 'm'}), '(y_true, y_pred_proba, method=m)\n', (1997, 2029), True, 'import ranky as rk\n')] |
"""Contains ECG processing tools."""
import os
import struct
import datetime
from xml.etree import ElementTree
import numpy as np
from numba import njit
from scipy.io import wavfile
import pyedflib
import wfdb
try:
import pydicom as dicom
except ImportError:
import dicom
# Constants
# This is the predefined keys of the meta component.
# Each key is initialized with None.
META_KEYS = [
    "age",
    "sex",
    "timestamp",
    "comments",
    "fs",
    "signame",
    "units",
]
# This is the mapping from inner HMM states to human-understandable
# cardiological terms.
# P and T waves and the QRS complex each map to a group of hidden states;
# Q_STATE, R_STATE and S_STATE are the individual states inside QRS.
# NOTE(review): the state numbering is fixed by the HMM defined elsewhere
# in the project — confirm against the annotation model before changing.
P_STATES = np.array([14, 15, 16], np.int64)
T_STATES = np.array([5, 6, 7, 8, 9, 10], np.int64)
QRS_STATES = np.array([0, 1, 2], np.int64)
Q_STATE = np.array([0], np.int64)
R_STATE = np.array([1], np.int64)
S_STATE = np.array([2], np.int64)
def check_signames(signame, nsig):
    """Validate signal names and coerce them to an array of strings.

    If `signame` is a tuple or list with exactly `nsig` entries, each entry
    is converted to ``str``; otherwise fallback names ``"0"`` ..
    ``str(nsig - 1)`` are generated.

    Parameters
    ----------
    signame : misc
        Signal names from file.
    nsig : int
        Number of signals / channels.

    Returns
    -------
    signame : numpy.ndarray
        Array of string names of signals / channels.
    """
    if isinstance(signame, (tuple, list)) and len(signame) == nsig:
        names = [str(name) for name in signame]
    else:
        # Wrong type or wrong length: fall back to numeric channel names.
        names = [str(number) for number in range(nsig)]
    return np.array(names)
def check_units(units, nsig):
    """Validate per-channel units and coerce them to an array.

    If `units` is not a tuple or list of length `nsig`, a placeholder of
    ``None`` values (one per channel) is returned instead.

    Parameters
    ----------
    units : misc
        Units from file.
    nsig : int
        Number of signals / channels.

    Returns
    -------
    units : numpy.ndarray
        Array with one unit entry (possibly ``None``) per channel.
    """
    if not (isinstance(units, (tuple, list)) and len(units) == nsig):
        # Wrong type or wrong length: no trustworthy units available.
        units = [None] * nsig
    return np.array(units)
def unify_sex(sex):
    """Normalize the sex of a patient to "M", "F" or ``None``.

    Parameters
    ----------
    sex : str
        Sex of the patient as stored in the source file.

    Returns
    -------
    sex : str or None
        "M" or "F" for recognized values, ``None`` otherwise.
    """
    # Unrecognized values fall through dict lookup to None.
    mapping = {
        "M": "M",
        "MALE": "M",
        "F": "F",
        "FEMALE": "F",
    }
    return mapping.get(sex)
def load_wfdb(path, components, *args, **kwargs):
    """Load given components from a WFDB record.

    Parameters
    ----------
    path : str
        Path to the .hea file; the extension is stripped before reading.
    components : iterable
        Components to load ("signal", "annotation", "meta").
    ann_ext : str, optional (keyword-only, via **kwargs)
        Extension of the annotation file; annotations are loaded only when
        this is given and "annotation" is requested.

    Returns
    -------
    ecg_data : list
        Requested ecg data components, in the order of `components`.
    """
    _ = args
    ann_ext = kwargs.get("ann_ext")
    path = os.path.splitext(path)[0]
    # NOTE(review): this relies on the legacy wfdb API where rdsamp returns
    # a Record object; newer wfdb versions return a (signals, fields) tuple
    # — confirm the pinned wfdb version before upgrading.
    record = wfdb.rdsamp(path)
    # Transpose to (channels, samples); popping also removes the bulky
    # signal array from the dict reused as metadata below.
    signal = record.__dict__.pop("p_signals").T
    record_meta = record.__dict__
    nsig = record_meta["nsig"]
    if "annotation" in components and ann_ext is not None:
        annotation = wfdb.rdann(path, ann_ext)
        annot = {"annsamp": annotation.sample,
                 "anntype": annotation.symbol}
    else:
        annot = {}
    # Initialize meta with defined keys, load values from record
    # meta and preprocess to our format.
    meta = dict(zip(META_KEYS, [None] * len(META_KEYS)))
    meta.update(record_meta)
    meta["signame"] = check_signames(meta["signame"], nsig)
    meta["units"] = check_units(meta["units"], nsig)
    data = {"signal": signal,
            "annotation": annot,
            "meta": meta}
    return [data[comp] for comp in components]
def load_dicom(path, components, *args, **kwargs):
    """Load given components from a DICOM waveform file.

    Fix: ``np.int`` (a deprecated alias of the builtin ``int``) was removed
    in NumPy 1.24; the builtin ``int`` is used instead with identical
    behavior.

    Parameters
    ----------
    path : str
        Path to the DICOM file.
    components : iterable
        Components to load ("signal", "annotation", "meta").

    Returns
    -------
    ecg_data : list
        Requested ecg data components, in the order of `components`.
    """
    def signal_decoder(record, nsig):
        """Decode raw 16-bit waveform binaries into a (nsig, nsamples) array."""
        definition = record.WaveformSequence[0].ChannelDefinitionSequence
        data = record.WaveformSequence[0].WaveformData
        # Two bytes per little-endian signed 16-bit sample.
        unpack_fmt = "<{}h".format(int(len(data) / 2))
        factor = np.ones(nsig)
        baseline = np.zeros(nsig)
        for i in range(nsig):
            assert definition[i].WaveformBitsStored == 16
            # Per-channel calibration: value = (raw + baseline) * factor.
            channel_sens = definition[i].get("ChannelSensitivity")
            channel_sens_cf = definition[i].get("ChannelSensitivityCorrectionFactor")
            if channel_sens is not None and channel_sens_cf is not None:
                factor[i] = float(channel_sens) * float(channel_sens_cf)
            channel_bl = definition[i].get("ChannelBaseline")
            if channel_bl is not None:
                baseline[i] = float(channel_bl)
        unpacked_data = struct.unpack(unpack_fmt, data)
        # Samples are interleaved per channel; reshape then transpose to
        # (channels, samples).
        signals = np.asarray(unpacked_data, dtype=np.float32).reshape(-1, nsig)
        signals = ((signals + baseline) * factor).T
        return signals

    _ = args, kwargs
    record = dicom.read_file(path)
    sequence = record.WaveformSequence[0]
    assert sequence.WaveformSampleInterpretation == 'SS'
    assert sequence.WaveformBitsAllocated == 16
    nsig = sequence.NumberOfWaveformChannels
    annot = {}
    meta = dict(zip(META_KEYS, [None] * len(META_KEYS)))
    # PatientAge is e.g. "045Y"; a non-"Y" suffix is treated as months.
    if record.PatientAge[-1] == "Y":
        age = int(record.PatientAge[:-1])
    else:
        age = int(record.PatientAge[:-1]) / 12.0
    meta["age"] = age
    meta["sex"] = record.PatientSex
    meta["timestamp"] = record.AcquisitionDateTime
    # Group 0 of the annotation sequence holds free-text comments.
    meta["comments"] = [section.UnformattedTextValue for section in
                        record.WaveformAnnotationSequence if section.AnnotationGroupNumber == 0]
    meta["fs"] = sequence.SamplingFrequency
    meta["signame"] = [section.ChannelSourceSequence[0].CodeMeaning for section in
                       sequence.ChannelDefinitionSequence]
    meta["units"] = [section.ChannelSensitivityUnitsSequence[0].CodeValue for section in
                     sequence.ChannelDefinitionSequence]
    meta["signame"] = check_signames(meta["signame"], nsig)
    meta["units"] = check_units(meta["units"], nsig)
    signal = signal_decoder(record, nsig)
    data = {"signal": signal,
            "annotation": annot,
            "meta": meta}
    return [data[comp] for comp in components]
def load_edf(path, components, *args, **kwargs):
    """Load given components from an EDF file.

    Parameters
    ----------
    path : str
        Path to the EDF file.
    components : iterable
        Components to load ("signal", "annotation", "meta").

    Returns
    -------
    ecg_data : list
        Requested ecg data components, in the order of `components`.
    """
    _ = args, kwargs
    reader = pyedflib.EdfReader(path)
    meta = {key: None for key in META_KEYS}
    gender = reader.getGender()
    meta["sex"] = gender if gender != '' else None
    meta["timestamp"] = reader.getStartdatetime().strftime("%Y%m%d%H%M%S")
    nsig = reader.signals_in_file
    # All channels must share a single length and a single sampling rate.
    if len(np.unique(reader.getNSamples())) != 1:
        raise ValueError("Different signal lenghts are not supported!")
    if len(np.unique(reader.getSampleFrequencies())) == 1:
        meta["fs"] = reader.getSampleFrequencies()[0]
    else:
        raise ValueError("Different sampling rates are not supported!")
    meta["signame"] = reader.getSignalLabels()
    meta["units"] = [reader.getSignalHeader(sig)["dimension"] for sig in range(nsig)]
    # The EDF header may override some of the keys set above.
    meta.update(reader.getHeader())
    meta["signame"] = check_signames(meta["signame"], nsig)
    meta["units"] = check_units(meta["units"], nsig)
    signal = np.array([reader.readSignal(i) for i in range(nsig)])
    data = {"signal": signal, "annotation": {}, "meta": meta}
    return [data[comp] for comp in components]
def load_wav(path, components, *args, **kwargs):
    """
    Load given components from a wav file.
    Parameters
    ----------
    path : str
        Path to a .wav file.
    components : iterable
        Components to load ("signal", "annotation" or "meta").
    Returns
    -------
    ecg_data : list
        List of ecg data components, ordered as in ``components``.
    Raises
    ------
    ValueError
        If the decoded signal is neither 1-D nor 2-D.
    """
    _ = args, kwargs
    fs, signal = wavfile.read(path)
    ndim = signal.ndim
    if ndim == 1:
        # Promote a mono recording to a single-column 2-D array.
        signal = signal.reshape([-1, 1])
        nsig = 1
    elif ndim == 2:
        nsig = signal.shape[1]
    else:
        raise ValueError("Unexpected number of dimensions in signal array: {}".format(ndim))
    # Transpose to (channels, samples), matching the other loaders.
    signal = signal.T
    meta = dict(zip(META_KEYS, [None] * len(META_KEYS)))
    meta["fs"] = fs
    # No names/units in wav files; the checkers fill in defaults.
    meta["signame"] = check_signames(meta["signame"], nsig)
    meta["units"] = check_units(meta["units"], nsig)
    data = {
        "signal": signal,
        "annotation": {},
        "meta": meta,
    }
    return [data[comp] for comp in components]
def load_xml(path, components, xml_type, *args, **kwargs):
    """Load given components from an XML file.
    Parameters
    ----------
    path : str
        A path to an .xml file.
    components : iterable
        Components to load.
    xml_type : str
        Defines the structure of the file. The following values of the
        argument are supported:
        * "schiller" - Schiller XML
    Returns
    -------
    loaded_data : list
        A list of loaded ECG data components.
    Raises
    ------
    ValueError
        If ``xml_type`` is not a supported XML flavour.
    """
    loaders = {
        "schiller": load_xml_schiller,
    }
    try:
        loader = loaders[xml_type]
    except KeyError:
        supported = ", ".join(sorted(loaders.keys()))
        raise ValueError(
            "Unsupported XML type {}. Currently supported XML types: {}".format(xml_type, supported))
    return loader(path, components, *args, **kwargs)
def load_xml_schiller(path, components, *args, **kwargs): # pylint: disable=too-many-locals
    """Load given components from a Schiller XML file.
    Parameters
    ----------
    path : str
        A path to an .xml file.
    components : iterable
        Components to load.
    Returns
    -------
    loaded_data : list
        A list of loaded ECG data components.
    """
    _ = args, kwargs
    root = ElementTree.parse(path).getroot()
    birthdate = root.find("./patdata/birthdate").text
    if birthdate is None:
        age = None
    else:
        # Age in full years: subtract one when the birthday has not yet
        # occurred this year (the tuple comparison evaluates to 0 or 1).
        today = datetime.date.today()
        birthdate = datetime.datetime.strptime(birthdate, "%Y%m%d")
        age = today.year - birthdate.year - ((today.month, today.day) < (birthdate.month, birthdate.day))
    sex = unify_sex(root.find("./patdata/gender").text)
    date = root.find("./examdescript/startdatetime/date").text
    time = root.find("./examdescript/startdatetime/time").text
    timestamp = datetime.datetime.strptime(date + time, "%Y%m%d%H%M%S%f")
    # Only the rhythm-strip waveform block of the exam is read.
    ecg_data = root.find("./eventdata/event/wavedata[type='ECG_RHYTHMS']")
    sig_info = []
    for channel in ecg_data.findall("./channel"):
        # Samples are stored as a comma-separated string; empty fragments
        # (e.g. from a trailing comma) are skipped by the `if val` guard.
        sig = [float(val) for val in channel.find("data").text.split(",") if val]
        name = channel.find("name").text
        sig_info.append((sig, name))
    signal, signame = zip(*sig_info)
    signal = np.array(signal)
    signame = np.array(signame)
    fs = float(ecg_data.find("./resolution/samplerate/value").text)
    units = ecg_data.find("./resolution/yres/units").text
    # Normalize the unit spelling; all channels share one unit string.
    if units == "UV":
        units = "uV"
    units = np.array([units] * len(signame))
    meta = {
        "age": age,
        "sex": sex,
        "timestamp": timestamp,
        "comments": None,
        "fs": fs,
        "signame": signame,
        "units": units,
    }
    data = {
        "signal": signal,
        "annotation": {},
        "meta": meta
    }
    return [data[comp] for comp in components]
@njit(nogil=True)
def split_signals(signals, length, step):
    """Split signals along axis 1 with given ``length`` and ``step``.
    Parameters
    ----------
    signals : 2-D ndarray
        Signals to split.
    length : positive int
        Length of each segment along axis 1.
    step : positive int
        Segmentation step.
    Returns
    -------
    signals : 3-D ndarray
        Split signals stacked along new axis with index 0.
    """
    # Number of full windows that fit: (n_samples - length) // step + 1.
    # Any trailing samples that do not fill a window are discarded.
    res = np.empty(((signals.shape[1] - length) // step + 1, signals.shape[0], length), dtype=signals.dtype)
    for i in range(res.shape[0]):
        # Window i starts at i * step; windows overlap when step < length.
        res[i, :, :] = signals[:, i * step : i * step + length]
    return res
@njit(nogil=True)
def random_split_signals(signals, length, n_segments):
    """Split signals along axis 1 ``n_segments`` times with random start
    position and given ``length``.
    Parameters
    ----------
    signals : 2-D ndarray
        Signals to split.
    length : positive int
        Length of each segment along axis 1.
    n_segments : positive int
        Number of segments.
    Returns
    -------
    signals : 3-D ndarray
        Split signals stacked along new axis with index 0.
    """
    res = np.empty((n_segments, signals.shape[0], length), dtype=signals.dtype)
    for i in range(res.shape[0]):
        # Uniform random start such that the window fits entirely inside
        # the signal; draws from numpy's global RNG (not reproducible
        # unless the caller seeds it).
        ix = np.random.randint(0, signals.shape[1] - length + 1)
        res[i, :, :] = signals[:, ix : ix + length]
    return res
@njit(nogil=True)
def resample_signals(signals, new_length):
    """Resample signals to new length along axis 1 using linear interpolation.
    Parameters
    ----------
    signals : 2-D ndarray
        Signals to resample.
    new_length : positive int
        New signals shape along axis 1.
    Returns
    -------
    signals : 2-D ndarray
        Resampled signals.
    """
    # Fractional sample positions in the original signal.
    arg = np.linspace(0, signals.shape[1] - 1, new_length)
    x_left = arg.astype(np.int32) # pylint: disable=no-member
    x_right = x_left + 1
    # Clamp the final right neighbour so it does not index past the end;
    # alpha is 0 there, so the duplicated sample does not bias the result.
    x_right[-1] = x_left[-1]
    alpha = arg - x_left
    y_left = signals[:, x_left]
    y_right = signals[:, x_right]
    # Linear interpolation between the two neighbouring samples.
    return y_left + (y_right - y_left) * alpha
def convolve_signals(signals, kernel, padding_mode="edge", axis=-1, **kwargs):
    """Convolve signals with given ``kernel``.
    Parameters
    ----------
    signals : ndarray
        Signals to convolve.
    kernel : array_like
        Convolution kernel.
    padding_mode : str or function
        ``np.pad`` padding mode.
    axis : int
        Axis along which signals are sliced.
    kwargs : misc
        Any additional named arguments to ``np.pad``.
    Returns
    -------
    signals : ndarray
        Convolved signals.
    Raises
    ------
    ValueError
        If ``kernel`` is not one-dimensional or has non-numeric ``dtype``.
    """
    kernel = np.asarray(kernel)
    # A scalar kernel is promoted to a one-element 1-D kernel.
    if kernel.ndim == 0:
        kernel = kernel.ravel()
    if kernel.ndim != 1:
        raise ValueError("Kernel must be 1-D array")
    if not np.issubdtype(kernel.dtype, np.number):
        raise ValueError("Kernel must have numeric dtype")
    half = len(kernel) // 2

    def apply_1d(vec):
        """Pad a single 1-D slice, convolve it and trim the padding."""
        padded = np.pad(vec, half, padding_mode, **kwargs)
        out = np.convolve(padded, kernel, "same")
        return out[half:-half] if half > 0 else out

    return np.apply_along_axis(apply_1d, arr=signals, axis=axis)
def band_pass_signals(signals, freq, low=None, high=None, axis=-1):
    """Reject frequencies outside given range.
    Parameters
    ----------
    signals : ndarray
        Signals to filter.
    freq : positive float
        Sampling rate.
    low : positive float
        High-pass filter cutoff frequency (Hz).
    high : positive float
        Low-pass filter cutoff frequency (Hz).
    axis : int
        Axis along which signals are sliced.
    Returns
    -------
    signals : ndarray
        Filtered signals.
    Raises
    ------
    ValueError
        If ``freq`` is negative or non-numeric.
    """
    if freq <= 0:
        raise ValueError("Sampling rate must be a positive float")
    sig_rfft = np.fft.rfft(signals, axis=axis)
    sig_freq = np.fft.rfftfreq(signals.shape[axis], 1 / freq)
    # Frequency bins marked True in the mask are zeroed out.
    mask = np.zeros(len(sig_freq), dtype=bool)
    if low is not None:
        mask |= (sig_freq <= low)
    if high is not None:
        mask |= (sig_freq >= high)
    slc = [slice(None)] * signals.ndim
    slc[axis] = mask
    # BUGFIX: index with a tuple — indexing an ndarray with a *list* of
    # slices is deprecated and raises an error in modern NumPy.
    sig_rfft[tuple(slc)] = 0
    return np.fft.irfft(sig_rfft, n=signals.shape[axis], axis=axis)
@njit(nogil=True)
def find_intervals_borders(hmm_annotation, inter_val):
    """Find starts and ends of the intervals.
    This function finds starts and ends of continuous intervals of values
    from inter_val in hmm_annotation.
    Parameters
    ----------
    hmm_annotation : numpy.array
        Annotation for the signal from hmm_annotation model.
    inter_val : array_like
        Values that form interval of interest.
    Returns
    -------
    starts : 1-D ndarray
        Indices of the starts of the intervals.
    ends : 1-D ndarray
        Indices of the ends of the intervals.
    """
    # int8 indicator of samples whose state belongs to inter_val.
    intervals = np.zeros(hmm_annotation.shape, dtype=np.int8)
    for val in inter_val:
        intervals = np.logical_or(intervals, (hmm_annotation == val).astype(np.int8)).astype(np.int8)
    # diff is +1 at each interval start and -1 one step past each end.
    masque = np.diff(intervals)
    starts = np.where(masque == 1)[0] + 1
    ends = np.where(masque == -1)[0] + 1
    # Drop intervals truncated at the signal boundaries so that starts
    # and ends stay paired one-to-one.
    if np.any(inter_val == hmm_annotation[:1]):
        ends = ends[1:]
    if np.any(inter_val == hmm_annotation[-1:]):
        starts = starts[:-1]
    return starts, ends
@njit(nogil=True)
def find_maxes(signal, starts, ends):
    """ Find index of the maximum of the segment.
    Parameters
    ----------
    signal : 2-D ndarray
        ECG signal.
    starts : 1-D ndarray
        Indices of the starts of the intervals.
    ends : 1-D ndarray
        Indices of the ens of the intervals.
    Returns
    -------
    maxes : 1-D ndarray
        Indices of max values of each interval.
    Notes
    -----
    Currently works with first lead only.
    """
    # Positions stored as float64 (presumably for numba type stability
    # and the float arithmetic in calc_hr — confirm before changing).
    maxes = np.empty(starts.shape, dtype=np.float64)
    for i in range(maxes.shape[0]):
        # argmax is relative to the slice; shift by the interval start.
        maxes[i] = starts[i] + np.argmax(signal[0][starts[i]:ends[i]])
    return maxes
@njit(nogil=True)
def calc_hr(signal, hmm_annotation, fs, r_state=R_STATE):
    """ Calculate heart rate based on HMM prediction.
    Parameters
    ----------
    signal : 2-D ndarray
        ECG signal.
    hmm_annotation : 1-D ndarray
        Annotation for the signal from hmm_annotation model.
    fs : float
        Sampling rate of the signal.
    r_state : 1-D ndarray
        Array with values that represent R peak.
        Default value is R_STATE, which is a constant of this module.
    Returns
    -------
    hr_val : float
        Heart rate in beats per minute.
    """
    starts, ends = find_intervals_borders(hmm_annotation, r_state)
    # NOTE: Currently works on first lead signal only
    maxes = find_maxes(signal, starts, ends)
    # R-R distances in samples between consecutive R-peak positions.
    diff = maxes[1:] - maxes[:-1]
    # Median R-R interval in seconds, inverted and scaled to beats/min.
    hr_val = (np.median(diff / fs) ** -1) * 60
    return hr_val
@njit(nogil=True)
def calc_pq(hmm_annotation, fs, p_states=P_STATES, q_state=Q_STATE, r_state=R_STATE):
    """ Calculate PQ based on HMM prediction.
    Parameters
    ----------
    hmm_annotation : numpy.array
        Annotation for the signal from hmm_annotation model.
    fs : float
        Sampling rate of the signal.
    p_states : 1-D ndarray
        Array with values that represent P peak.
        Default value is P_STATES, which is a constant of this module.
    q_state : 1-D ndarray
        Array with values that represent Q peak.
        Default value is Q_STATE, which is a constant of this module.
    r_state : 1-D ndarray
        Array with values that represent R peak.
        Default value is R_STATE, which is a constant of this module.
    Returns
    -------
    pq_val : float
        Duration of PQ interval in seconds.
    """
    p_starts, _ = find_intervals_borders(hmm_annotation, p_states)
    q_starts, _ = find_intervals_borders(hmm_annotation, q_state)
    r_starts, _ = find_intervals_borders(hmm_annotation, r_state)
    # -1 marks R-R windows without a usable (P, Q) pair.
    p_final = - np.ones(r_starts.shape[0] - 1)
    q_final = - np.ones(r_starts.shape[0] - 1)
    maxlen = hmm_annotation.shape[0]
    # Bail out if any of the three wave types was not detected at all.
    if not p_starts.shape[0] * q_starts.shape[0] * r_starts.shape[0]:
        return 0.00
    # Indicator vectors: 1 at every detected P / Q onset position.
    temp_p = np.zeros(maxlen)
    temp_p[p_starts] = 1
    temp_q = np.zeros(maxlen)
    temp_q[q_starts] = 1
    for i in range(len(r_starts) - 1):
        low = r_starts[i]
        high = r_starts[i + 1]
        inds_p = np.where(temp_p[low:high])[0] + low
        inds_q = np.where(temp_q[low:high])[0] + low
        # Keep a window only when it contains exactly one P and one Q,
        # rejecting ambiguous or noisy detections.
        if inds_p.shape[0] == 1 and inds_q.shape[0] == 1:
            p_final[i] = inds_p[0]
            q_final[i] = inds_q[0]
    p_final = p_final[p_final > -1]
    q_final = q_final[q_final > -1]
    intervals = q_final - p_final
    # Median PQ duration converted from samples to seconds.
    return np.median(intervals) / fs
@njit(nogil=True)
def calc_qt(hmm_annotation, fs, t_states=T_STATES, q_state=Q_STATE, r_state=R_STATE):
    """ Calculate QT interval based on HMM prediction.
    Parameters
    ----------
    hmm_annotation : numpy.array
        Annotation for the signal from hmm_annotation model.
    fs : float
        Sampling rate of the signal.
    t_states : 1-D ndarray
        Array with values that represent T peak.
        Default value is T_STATES, which is a constant of this module.
    q_state : 1-D ndarray
        Array with values that represent Q peak.
        Default value is Q_STATE, which is a constant of this module.
    r_state : 1-D ndarray
        Array with values that represent R peak.
        Default value is R_STATE, which is a constant of this module.
    Returns
    -------
    qt_val : float
        Duration of QT interval in seconds.
    """
    _, t_ends = find_intervals_borders(hmm_annotation, t_states)
    q_starts, _ = find_intervals_borders(hmm_annotation, q_state)
    r_starts, _ = find_intervals_borders(hmm_annotation, r_state)
    # -1 marks R-R windows without a usable (T, Q) pair.
    t_final = - np.ones(r_starts.shape[0] - 1)
    q_final = - np.ones(r_starts.shape[0] - 1)
    maxlen = hmm_annotation.shape[0]
    # Bail out if any of the three wave types was not detected at all.
    if not t_ends.shape[0] * q_starts.shape[0] * r_starts.shape[0]:
        return 0.00
    # Indicator vectors: 1 at every detected T end / Q onset position.
    temp_t = np.zeros(maxlen)
    temp_t[t_ends] = 1
    temp_q = np.zeros(maxlen)
    temp_q[q_starts] = 1
    for i in range(len(r_starts) - 1):
        low = r_starts[i]
        high = r_starts[i + 1]
        inds_t = np.where(temp_t[low:high])[0] + low
        inds_q = np.where(temp_q[low:high])[0] + low
        # Keep a window only when it contains exactly one T end and one Q.
        if inds_t.shape[0] == 1 and inds_q.shape[0] == 1:
            t_final[i] = inds_t[0]
            q_final[i] = inds_q[0]
    # Within window [R_i, R_{i+1}) the T end belongs to beat i while the
    # Q onset belongs to beat i+1, so the one-element shift below pairs
    # each Q with the T end of its own beat.
    # NOTE(review): filtering (> -1) before the shift assumes the same
    # windows survive in both arrays; mispairing is possible otherwise.
    t_final = t_final[t_final > -1][1:]
    q_final = q_final[q_final > -1][:-1]
    intervals = t_final - q_final
    # Median QT duration converted from samples to seconds.
    return np.median(intervals) / fs
@njit(nogil=True)
def calc_qrs(hmm_annotation, fs, s_state=S_STATE, q_state=Q_STATE, r_state=R_STATE):
    """ Calculate QRS interval based on HMM prediction.
    Parameters
    ----------
    hmm_annotation : numpy.array
        Annotation for the signal from hmm_annotation model.
    fs : float
        Sampling rate of the signal.
    s_state : 1-D ndarray
        Array with values that represent S peak.
        Default value is S_STATE, which is a constant of this module.
    q_state : 1-D ndarray
        Array with values that represent Q peak.
        Default value is Q_STATE, which is a constant of this module.
    r_state : 1-D ndarray
        Array with values that represent R peak.
        Default value is R_STATE, which is a constant of this module.
    Returns
    -------
    qrs_val : float
        Duration of QRS complex in seconds.
    """
    _, s_ends = find_intervals_borders(hmm_annotation, s_state)
    q_starts, _ = find_intervals_borders(hmm_annotation, q_state)
    r_starts, _ = find_intervals_borders(hmm_annotation, r_state)
    # -1 marks R-R windows without a usable (S, Q) pair.
    s_final = - np.ones(r_starts.shape[0] - 1)
    q_final = - np.ones(r_starts.shape[0] - 1)
    maxlen = hmm_annotation.shape[0]
    # Bail out if any of the three wave types was not detected at all.
    if not s_ends.shape[0] * q_starts.shape[0] * r_starts.shape[0]:
        return 0.00
    # Indicator vectors: 1 at every detected S end / Q onset position.
    temp_s = np.zeros(maxlen)
    temp_s[s_ends] = 1
    temp_q = np.zeros(maxlen)
    temp_q[q_starts] = 1
    for i in range(len(r_starts) - 1):
        low = r_starts[i]
        high = r_starts[i + 1]
        inds_s = np.where(temp_s[low:high])[0] + low
        inds_q = np.where(temp_q[low:high])[0] + low
        # Keep a window only when it contains exactly one S end and one Q.
        if inds_s.shape[0] == 1 and inds_q.shape[0] == 1:
            s_final[i] = inds_s[0]
            q_final[i] = inds_q[0]
    # Pair each Q onset with the S end of its own beat (see calc_qt);
    # the shift accounts for Q preceding R and S following R.
    s_final = s_final[s_final > -1][1:]
    q_final = q_final[q_final > -1][:-1]
    intervals = s_final - q_final
    # Median QRS duration converted from samples to seconds.
    return np.median(intervals) / fs
| [
"numpy.fft.rfft",
"numpy.argmax",
"wfdb.rdsamp",
"numpy.empty",
"numba.njit",
"numpy.ones",
"scipy.io.wavfile.read",
"numpy.random.randint",
"numpy.convolve",
"numpy.pad",
"numpy.fft.irfft",
"wfdb.rdann",
"numpy.apply_along_axis",
"numpy.int",
"numpy.linspace",
"xml.etree.ElementTree.p... | [((599, 631), 'numpy.array', 'np.array', (['[14, 15, 16]', 'np.int64'], {}), '([14, 15, 16], np.int64)\n', (607, 631), True, 'import numpy as np\n'), ((643, 682), 'numpy.array', 'np.array', (['[5, 6, 7, 8, 9, 10]', 'np.int64'], {}), '([5, 6, 7, 8, 9, 10], np.int64)\n', (651, 682), True, 'import numpy as np\n'), ((696, 725), 'numpy.array', 'np.array', (['[0, 1, 2]', 'np.int64'], {}), '([0, 1, 2], np.int64)\n', (704, 725), True, 'import numpy as np\n'), ((736, 759), 'numpy.array', 'np.array', (['[0]', 'np.int64'], {}), '([0], np.int64)\n', (744, 759), True, 'import numpy as np\n'), ((770, 793), 'numpy.array', 'np.array', (['[1]', 'np.int64'], {}), '([1], np.int64)\n', (778, 793), True, 'import numpy as np\n'), ((804, 827), 'numpy.array', 'np.array', (['[2]', 'np.int64'], {}), '([2], np.int64)\n', (812, 827), True, 'import numpy as np\n'), ((11872, 11888), 'numba.njit', 'njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (11876, 11888), False, 'from numba import njit\n'), ((12549, 12565), 'numba.njit', 'njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (12553, 12565), False, 'from numba import njit\n'), ((13308, 13324), 'numba.njit', 'njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (13312, 13324), False, 'from numba import njit\n'), ((16435, 16451), 'numba.njit', 'njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (16439, 16451), False, 'from numba import njit\n'), ((17522, 17538), 'numba.njit', 'njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (17526, 17538), False, 'from numba import njit\n'), ((18193, 18209), 'numba.njit', 'njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (18197, 18209), False, 'from numba import njit\n'), ((19050, 19066), 'numba.njit', 'njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (19054, 19066), False, 'from numba import njit\n'), ((20925, 20941), 'numba.njit', 'njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (20929, 20941), False, 'from numba import njit\n'), ((22812, 22828), 'numba.njit', 
'njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (22816, 22828), False, 'from numba import njit\n'), ((1503, 1520), 'numpy.array', 'np.array', (['signame'], {}), '(signame)\n', (1511, 1520), True, 'import numpy as np\n'), ((2033, 2048), 'numpy.array', 'np.array', (['units'], {}), '(units)\n', (2041, 2048), True, 'import numpy as np\n'), ((2942, 2959), 'wfdb.rdsamp', 'wfdb.rdsamp', (['path'], {}), '(path)\n', (2953, 2959), False, 'import wfdb\n'), ((5251, 5272), 'dicom.read_file', 'dicom.read_file', (['path'], {}), '(path)\n', (5266, 5272), False, 'import dicom\n'), ((6940, 6964), 'pyedflib.EdfReader', 'pyedflib.EdfReader', (['path'], {}), '(path)\n', (6958, 6964), False, 'import pyedflib\n'), ((8388, 8406), 'scipy.io.wavfile.read', 'wavfile.read', (['path'], {}), '(path)\n', (8400, 8406), False, 'from scipy.io import wavfile\n'), ((10865, 10922), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['(date + time)', '"""%Y%m%d%H%M%S%f"""'], {}), "(date + time, '%Y%m%d%H%M%S%f')\n", (10891, 10922), False, 'import datetime\n'), ((11277, 11293), 'numpy.array', 'np.array', (['signal'], {}), '(signal)\n', (11285, 11293), True, 'import numpy as np\n'), ((11308, 11325), 'numpy.array', 'np.array', (['signame'], {}), '(signame)\n', (11316, 11325), True, 'import numpy as np\n'), ((12334, 12437), 'numpy.empty', 'np.empty', (['((signals.shape[1] - length) // step + 1, signals.shape[0], length)'], {'dtype': 'signals.dtype'}), '(((signals.shape[1] - length) // step + 1, signals.shape[0], length\n ), dtype=signals.dtype)\n', (12342, 12437), True, 'import numpy as np\n'), ((13069, 13138), 'numpy.empty', 'np.empty', (['(n_segments, signals.shape[0], length)'], {'dtype': 'signals.dtype'}), '((n_segments, signals.shape[0], length), dtype=signals.dtype)\n', (13077, 13138), True, 'import numpy as np\n'), ((13699, 13747), 'numpy.linspace', 'np.linspace', (['(0)', '(signals.shape[1] - 1)', 'new_length'], {}), '(0, signals.shape[1] - 1, new_length)\n', (13710, 13747), True, 
'import numpy as np\n'), ((14672, 14690), 'numpy.asarray', 'np.asarray', (['kernel'], {}), '(kernel)\n', (14682, 14690), True, 'import numpy as np\n'), ((15230, 15284), 'numpy.apply_along_axis', 'np.apply_along_axis', (['conv_func'], {'arr': 'signals', 'axis': 'axis'}), '(conv_func, arr=signals, axis=axis)\n', (15249, 15284), True, 'import numpy as np\n'), ((16023, 16054), 'numpy.fft.rfft', 'np.fft.rfft', (['signals'], {'axis': 'axis'}), '(signals, axis=axis)\n', (16034, 16054), True, 'import numpy as np\n'), ((16070, 16116), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['signals.shape[axis]', '(1 / freq)'], {}), '(signals.shape[axis], 1 / freq)\n', (16085, 16116), True, 'import numpy as np\n'), ((16375, 16431), 'numpy.fft.irfft', 'np.fft.irfft', (['sig_rfft'], {'n': 'signals.shape[axis]', 'axis': 'axis'}), '(sig_rfft, n=signals.shape[axis], axis=axis)\n', (16387, 16431), True, 'import numpy as np\n'), ((17056, 17101), 'numpy.zeros', 'np.zeros', (['hmm_annotation.shape'], {'dtype': 'np.int8'}), '(hmm_annotation.shape, dtype=np.int8)\n', (17064, 17101), True, 'import numpy as np\n'), ((17243, 17261), 'numpy.diff', 'np.diff', (['intervals'], {}), '(intervals)\n', (17250, 17261), True, 'import numpy as np\n'), ((17352, 17391), 'numpy.any', 'np.any', (['(inter_val == hmm_annotation[:1])'], {}), '(inter_val == hmm_annotation[:1])\n', (17358, 17391), True, 'import numpy as np\n'), ((17424, 17464), 'numpy.any', 'np.any', (['(inter_val == hmm_annotation[-1:])'], {}), '(inter_val == hmm_annotation[-1:])\n', (17430, 17464), True, 'import numpy as np\n'), ((18025, 18065), 'numpy.empty', 'np.empty', (['starts.shape'], {'dtype': 'np.float64'}), '(starts.shape, dtype=np.float64)\n', (18033, 18065), True, 'import numpy as np\n'), ((20347, 20363), 'numpy.zeros', 'np.zeros', (['maxlen'], {}), '(maxlen)\n', (20355, 20363), True, 'import numpy as np\n'), ((20402, 20418), 'numpy.zeros', 'np.zeros', (['maxlen'], {}), '(maxlen)\n', (20410, 20418), True, 'import numpy as np\n'), ((22227, 
22243), 'numpy.zeros', 'np.zeros', (['maxlen'], {}), '(maxlen)\n', (22235, 22243), True, 'import numpy as np\n'), ((22280, 22296), 'numpy.zeros', 'np.zeros', (['maxlen'], {}), '(maxlen)\n', (22288, 22296), True, 'import numpy as np\n'), ((24111, 24127), 'numpy.zeros', 'np.zeros', (['maxlen'], {}), '(maxlen)\n', (24119, 24127), True, 'import numpy as np\n'), ((24164, 24180), 'numpy.zeros', 'np.zeros', (['maxlen'], {}), '(maxlen)\n', (24172, 24180), True, 'import numpy as np\n'), ((2903, 2925), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (2919, 2925), False, 'import os\n'), ((3154, 3179), 'wfdb.rdann', 'wfdb.rdann', (['path', 'ann_ext'], {}), '(path, ann_ext)\n', (3164, 3179), False, 'import wfdb\n'), ((4413, 4426), 'numpy.ones', 'np.ones', (['nsig'], {}), '(nsig)\n', (4420, 4426), True, 'import numpy as np\n'), ((4446, 4460), 'numpy.zeros', 'np.zeros', (['nsig'], {}), '(nsig)\n', (4454, 4460), True, 'import numpy as np\n'), ((5026, 5057), 'struct.unpack', 'struct.unpack', (['unpack_fmt', 'data'], {}), '(unpack_fmt, data)\n', (5039, 5057), False, 'import struct\n'), ((5594, 5624), 'numpy.int', 'np.int', (['record.PatientAge[:-1]'], {}), '(record.PatientAge[:-1])\n', (5600, 5624), True, 'import numpy as np\n'), ((10469, 10490), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (10488, 10490), False, 'import datetime\n'), ((10511, 10558), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['birthdate', '"""%Y%m%d"""'], {}), "(birthdate, '%Y%m%d')\n", (10537, 10558), False, 'import datetime\n'), ((13186, 13237), 'numpy.random.randint', 'np.random.randint', (['(0)', '(signals.shape[1] - length + 1)'], {}), '(0, signals.shape[1] - length + 1)\n', (13203, 13237), True, 'import numpy as np\n'), ((14849, 14887), 'numpy.issubdtype', 'np.issubdtype', (['kernel.dtype', 'np.number'], {}), '(kernel.dtype, np.number)\n', (14862, 14887), True, 'import numpy as np\n'), ((15052, 15090), 'numpy.pad', 'np.pad', (['x', 'pad', 
'padding_mode'], {}), '(x, pad, padding_mode, **kwargs)\n', (15058, 15090), True, 'import numpy as np\n'), ((15106, 15140), 'numpy.convolve', 'np.convolve', (['x_pad', 'kernel', '"""same"""'], {}), "(x_pad, kernel, 'same')\n", (15117, 15140), True, 'import numpy as np\n'), ((20126, 20156), 'numpy.ones', 'np.ones', (['(r_starts.shape[0] - 1)'], {}), '(r_starts.shape[0] - 1)\n', (20133, 20156), True, 'import numpy as np\n'), ((20173, 20203), 'numpy.ones', 'np.ones', (['(r_starts.shape[0] - 1)'], {}), '(r_starts.shape[0] - 1)\n', (20180, 20203), True, 'import numpy as np\n'), ((20896, 20916), 'numpy.median', 'np.median', (['intervals'], {}), '(intervals)\n', (20905, 20916), True, 'import numpy as np\n'), ((22008, 22038), 'numpy.ones', 'np.ones', (['(r_starts.shape[0] - 1)'], {}), '(r_starts.shape[0] - 1)\n', (22015, 22038), True, 'import numpy as np\n'), ((22055, 22085), 'numpy.ones', 'np.ones', (['(r_starts.shape[0] - 1)'], {}), '(r_starts.shape[0] - 1)\n', (22062, 22085), True, 'import numpy as np\n'), ((22783, 22803), 'numpy.median', 'np.median', (['intervals'], {}), '(intervals)\n', (22792, 22803), True, 'import numpy as np\n'), ((23892, 23922), 'numpy.ones', 'np.ones', (['(r_starts.shape[0] - 1)'], {}), '(r_starts.shape[0] - 1)\n', (23899, 23922), True, 'import numpy as np\n'), ((23939, 23969), 'numpy.ones', 'np.ones', (['(r_starts.shape[0] - 1)'], {}), '(r_starts.shape[0] - 1)\n', (23946, 23969), True, 'import numpy as np\n'), ((24667, 24687), 'numpy.median', 'np.median', (['intervals'], {}), '(intervals)\n', (24676, 24687), True, 'import numpy as np\n'), ((5649, 5679), 'numpy.int', 'np.int', (['record.PatientAge[:-1]'], {}), '(record.PatientAge[:-1])\n', (5655, 5679), True, 'import numpy as np\n'), ((10309, 10332), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['path'], {}), '(path)\n', (10326, 10332), False, 'from xml.etree import ElementTree\n'), ((17275, 17296), 'numpy.where', 'np.where', (['(masque == 1)'], {}), '(masque == 1)\n', (17283, 17296), 
True, 'import numpy as np\n'), ((17315, 17337), 'numpy.where', 'np.where', (['(masque == -1)'], {}), '(masque == -1)\n', (17323, 17337), True, 'import numpy as np\n'), ((18133, 18172), 'numpy.argmax', 'np.argmax', (['signal[0][starts[i]:ends[i]]'], {}), '(signal[0][starts[i]:ends[i]])\n', (18142, 18172), True, 'import numpy as np\n'), ((18996, 19016), 'numpy.median', 'np.median', (['(diff / fs)'], {}), '(diff / fs)\n', (19005, 19016), True, 'import numpy as np\n'), ((5077, 5120), 'numpy.asarray', 'np.asarray', (['unpacked_data'], {'dtype': 'np.float32'}), '(unpacked_data, dtype=np.float32)\n', (5087, 5120), True, 'import numpy as np\n'), ((20559, 20585), 'numpy.where', 'np.where', (['temp_p[low:high]'], {}), '(temp_p[low:high])\n', (20567, 20585), True, 'import numpy as np\n'), ((20612, 20638), 'numpy.where', 'np.where', (['temp_q[low:high]'], {}), '(temp_q[low:high])\n', (20620, 20638), True, 'import numpy as np\n'), ((22437, 22463), 'numpy.where', 'np.where', (['temp_t[low:high]'], {}), '(temp_t[low:high])\n', (22445, 22463), True, 'import numpy as np\n'), ((22490, 22516), 'numpy.where', 'np.where', (['temp_q[low:high]'], {}), '(temp_q[low:high])\n', (22498, 22516), True, 'import numpy as np\n'), ((24321, 24347), 'numpy.where', 'np.where', (['temp_s[low:high]'], {}), '(temp_s[low:high])\n', (24329, 24347), True, 'import numpy as np\n'), ((24374, 24400), 'numpy.where', 'np.where', (['temp_q[low:high]'], {}), '(temp_q[low:high])\n', (24382, 24400), True, 'import numpy as np\n')] |
# -----------------------------------------------------------
# Stacked Cross Attention Network implementation based on
# https://arxiv.org/abs/1803.08024.
# "Stacked Cross Attention for Image-Text Matching"
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Writen by <NAME>, 2018
# ---------------------------------------------------------------
"""Data provider"""
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import os
import nltk
from PIL import Image
import numpy as np
import json as jsonmod
class PrecompDataset(data.Dataset):
    """
    Load precomputed captions and image features
    Possible options: f30k_precomp, coco_precomp

    NOTE(review): this class relies on Python-2-only behaviour (integer
    division via ``/`` in ``__getitem__``, ``str.decode`` on captions);
    it will not run unmodified under Python 3.
    """
    def __init__(
        self, data_path, data_split,
        vocab, adapt_set=False, sigma=0.0):
        self.data_path = data_path
        self.split = data_split
        self.vocab = vocab
        loc = data_path + '/'
        self.adapt_set = adapt_set
        # Std-dev of the additive gaussian noise used in adapt mode.
        self.sigma = sigma
        # The precomputed files use 'dev' for the validation split.
        data_split = data_split.replace('val', 'dev')
        # Captions
        self.captions = []
        with open(loc+'%s_caps.txt' % data_split, 'rb') as f:
            for line in f:
                self.captions.append(line.strip())
        # Image features
        self.images = np.load(loc+'%s_ims.npy' % data_split)
        self.length = len(self.captions)
        # rkiros data has redundancy in images, we divide by 5, 10crop doesn't
        if self.images.shape[0] != self.length:
            self.im_div = 5
        else:
            self.im_div = 1
        # the development set for coco is large and so validation would be slow
        if data_split == 'dev':
            self.length = 5000
    def __getitem__(self, index):
        # handle the image redundancy
        # NOTE(review): relies on Python 2 floor division; under Python 3
        # this yields a float index — use ``index // self.im_div`` there.
        img_id = index/self.im_div
        image = torch.Tensor(self.images[img_id])
        caption = self.captions[index]
        vocab = self.vocab
        # Convert caption (string) to word ids.
        # NOTE(review): ``.decode('utf-8')`` only exists on Python 2 str.
        tokens = nltk.tokenize.word_tokenize(
            str(caption).lower().decode('utf-8'))
        caption = []
        caption.append(vocab('<start>'))
        caption.extend([vocab(token) for token in tokens])
        caption.append(vocab('<end>'))
        target = torch.Tensor(caption)
        if self.adapt_set:
            # Two independently-noised views of the same image features,
            # packed as a (student, ema/teacher) tuple.
            image_adapt = add_noise(image, self.sigma)
            image = add_noise(image, self.sigma)
            image = (image, image_adapt)
        return image, target, index, img_id
    def __len__(self):
        return self.length
def add_noise(x, sigma=0.):
    """Return ``x`` plus zero-mean gaussian noise with std ``sigma``."""
    noise = x.clone().normal_(0., sigma)
    return x + noise
def collate_fn(data):
    """Build mini-batch tensors from a list of (image, caption) tuples.
    Args:
        data: list of (image, caption, index, img_id) tuples.
            - image: torch tensor of shape (3, 256, 256), or a pair of
              such tensors when the dataset runs in adapt mode.
            - caption: torch tensor of shape (?); variable length.
    Returns:
        images: torch tensor of shape (batch_size, 3, 256, 256).
        images_ema: same shape; returned only for adapt-mode pairs.
        targets: torch tensor of shape (batch_size, padded_length).
        lengths: list; valid length for each padded caption.
        ids: sample indices, sorted along with the batch.
    """
    # Sort a data list by caption length (needed for packed sequences).
    data.sort(key=lambda x: len(x[1]), reverse=True)
    images, captions, ids, img_ids = zip(*data)
    images_ema = None
    # Adapt mode packs each image as an (image, image_ema) tuple.
    # BUGFIX: check the type instead of ``len(images[0]) == 2``, which
    # misfires on plain tensors whose first dimension happens to be 2.
    if isinstance(images[0], tuple):
        images, images_ema = zip(*images)
        images_ema = torch.stack(images_ema, 0)
    # Merge images (convert tuple of 3D tensor to 4D tensor)
    images = torch.stack(images, 0)
    # Merge captions (convert tuple of 1D tensor to zero-padded 2D tensor)
    lengths = [len(cap) for cap in captions]
    targets = torch.zeros(len(captions), max(lengths)).long()
    for i, cap in enumerate(captions):
        end = lengths[i]
        targets[i, :end] = cap[:end]
    if images_ema is not None:
        return images, images_ema, targets, lengths, ids
    return images, targets, lengths, ids
def get_precomp_loader(
        data_path, data_split, vocab,
        opt, batch_size=100, shuffle=True,
        num_workers=2, adapt_set=False, noise=0.0
):
    """Return a ``torch.utils.data.DataLoader`` over precomputed features.

    NOTE: ``num_workers`` and ``opt`` are accepted for interface
    compatibility but are not forwarded to the DataLoader.
    """
    dataset = PrecompDataset(
        data_path, data_split, vocab, adapt_set, sigma=noise)
    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        pin_memory=True,
        collate_fn=collate_fn,
    )
def _get_loaders(data_name, vocab, batch_size, workers, opt):
    """Build the training ('train') and validation ('dev') loaders."""
    dpath = os.path.join(opt.data_path, data_name)
    train_loader = get_precomp_loader(
        dpath, 'train', vocab, opt, batch_size, True, workers)
    val_loader = get_precomp_loader(
        dpath, 'dev', vocab, opt, batch_size, False, workers)
    return train_loader, val_loader
def get_test_loader(split_name, data_name, vocab, batch_size,
                    workers, opt):
    """Build a non-shuffling loader for the given evaluation split."""
    dpath = os.path.join(opt.data_path, data_name)
    return get_precomp_loader(
        dpath, split_name, vocab, opt, batch_size, False, workers)
### EDIT BALLESTER ###
def get_loader(
        data_name, batch_size, workers, opt,
        split='train', adapt_set=False, vocab=None
):
    """Build a DataLoader for the requested dataset split.

    Args:
        data_name: dataset folder name (e.g. 'f30k_precomp').
        batch_size: mini-batch size.
        workers: number of loader workers.
        opt: options namespace; ``data_path``, ``data_name`` and
            ``noise`` are read here.
        split: one of 'train', 'val', 'test' or 'adapt'.
        adapt_set: whether the dataset should emit noised image pairs.
        vocab: vocabulary object mapping tokens to ids.

    Returns:
        torch.utils.data.DataLoader for the requested split.

    Raises:
        ValueError: if the dataset/split combination is unsupported
            (the original silently returned an unbound variable here,
            producing a confusing NameError).
    """
    dpath = os.path.join(opt.data_path, data_name)
    if not opt.data_name.endswith('_precomp'):
        raise ValueError(
            "Unsupported dataset {}: only *_precomp datasets are handled".format(opt.data_name))
    if split in ['train', 'val', 'test']:
        return get_precomp_loader(
            data_path=dpath,
            data_split=split,
            vocab=vocab,
            opt=opt,
            batch_size=batch_size,
            shuffle=(split == 'train'),
            num_workers=workers,
            adapt_set=adapt_set,
            noise=opt.noise,
        )
    if split == 'adapt':
        adapt_dataset = UnlabeledPrecompDataset(
            data_path=dpath,
            sigma=opt.noise,
        )
        return torch.utils.data.DataLoader(
            dataset=adapt_dataset,
            batch_size=batch_size,
            shuffle=True,
            pin_memory=True,
        )
    raise ValueError("Unknown split: {}".format(split))
| [
"numpy.load",
"torch.stack",
"torch.utils.data.DataLoader",
"torch.Tensor",
"os.path.join"
] | [((3458, 3480), 'torch.stack', 'torch.stack', (['images', '(0)'], {}), '(images, 0)\n', (3469, 3480), False, 'import torch\n'), ((4234, 4360), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dset', 'batch_size': 'batch_size', 'shuffle': 'shuffle', 'pin_memory': '(True)', 'collate_fn': 'collate_fn'}), '(dataset=dset, batch_size=batch_size, shuffle=\n shuffle, pin_memory=True, collate_fn=collate_fn)\n', (4261, 4360), False, 'import torch\n'), ((4502, 4540), 'os.path.join', 'os.path.join', (['opt.data_path', 'data_name'], {}), '(opt.data_path, data_name)\n', (4514, 4540), False, 'import os\n'), ((4915, 4953), 'os.path.join', 'os.path.join', (['opt.data_path', 'data_name'], {}), '(opt.data_path, data_name)\n', (4927, 4953), False, 'import os\n'), ((5251, 5289), 'os.path.join', 'os.path.join', (['opt.data_path', 'data_name'], {}), '(opt.data_path, data_name)\n', (5263, 5289), False, 'import os\n'), ((1266, 1306), 'numpy.load', 'np.load', (["(loc + '%s_ims.npy' % data_split)"], {}), "(loc + '%s_ims.npy' % data_split)\n", (1273, 1306), True, 'import numpy as np\n'), ((1810, 1843), 'torch.Tensor', 'torch.Tensor', (['self.images[img_id]'], {}), '(self.images[img_id])\n', (1822, 1843), False, 'import torch\n'), ((2232, 2253), 'torch.Tensor', 'torch.Tensor', (['caption'], {}), '(caption)\n', (2244, 2253), False, 'import torch\n'), ((3356, 3382), 'torch.stack', 'torch.stack', (['images_ema', '(0)'], {}), '(images_ema, 0)\n', (3367, 3382), False, 'import torch\n'), ((5964, 6072), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'adapt_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(dataset=adapt_dataset, batch_size=batch_size,\n shuffle=True, pin_memory=True)\n', (5991, 6072), False, 'import torch\n')] |
import time
from datetime import timedelta
import cv2
import numpy as np
class PoseAnalyser:
    """Run a pose-inference callback over every frame of a video, and replay
    previously saved keypoints over a sample video.

    Timing statistics for the most recent ``analyse`` run are kept on the
    instance: ``last_inference_time``, ``average_inference`` (running mean of
    per-frame inference seconds) and ``remaining_time`` (estimated seconds
    left).
    """

    last_inference_time = 0
    average_inference = 0.0
    remaining_time = 0.00
    # Default logger.  The original only assigned self._print inside
    # analyse(), so calling viewKeypointsOnSample() first raised
    # AttributeError; a class-level default fixes that while analyse()
    # may still override it per call.
    _print = staticmethod(print)

    def viewKeypointsOnSample(self, sample_dir, sample_type="mixed", drawKeypoints=None, options=None):
        """Replay ``test.avi`` from *sample_dir* with keypoints drawn on each frame.

        Parameters
        ----------
        sample_dir : str
            Directory containing ``test.avi`` and ``<sample_type>_points.npy``.
        sample_type : str
            Prefix of the ``.npy`` keypoint file to load.
        drawKeypoints : callable
            ``drawKeypoints(img, keypoints, options=...) -> img`` used to
            render one frame's keypoints.  Required.
        options : dict, optional
            Extra options forwarded to *drawKeypoints*.  Defaults to ``{}``
            (a ``None`` default avoids the mutable-default-argument pitfall
            of the original ``options={}``).
        """
        if drawKeypoints is None:
            self._print("Please provide a draw key_point method")
            return
        if options is None:
            options = {}
        video_file = sample_dir + "test.avi"
        keypoint_file = sample_dir + sample_type + "_points.npy"
        keypoints = np.load(keypoint_file)
        cap = cv2.VideoCapture(video_file)
        try:
            length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = cap.get(cv2.CAP_PROP_FPS)
            if length != len(keypoints):
                self._print("Number of keypoints less than the number of frames")
                return
            frame_idx = 0
            while cap.isOpened():
                ret, raw_img = cap.read()
                if not ret:
                    break
                img = drawKeypoints(raw_img, keypoints[frame_idx], options=options)
                frame_idx += 1
                cv2.imshow("keypoints", img)
                # Pace the replay to the video's nominal frame rate.
                time.sleep(1. / fps)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        finally:
            # The original leaked the capture handle and left the preview
            # window open on every exit path.
            cap.release()
            cv2.destroyAllWindows()

    def getTime(self):
        # NOTE(review): ``self.time`` is never assigned anywhere in this
        # class, so this raises AttributeError when called — presumably it
        # was meant to return one of the timing statistics; confirm the
        # intended attribute before relying on it.
        return self.time

    def analyse(self, video_file, out_file, showVideo=False, infer_method=None, print=print):
        """Run *infer_method* on every frame of *video_file* and save the
        collected outputs to *out_file* via ``numpy.save``.

        Parameters
        ----------
        video_file : str
            Path of the input video.
        out_file : str
            Destination passed to ``numpy.save``.
        showVideo : bool
            When True, display each raw frame while processing ('q' aborts).
        infer_method : callable
            ``infer_method(frame) -> output`` pose-inference callback.  Required.
        print : callable
            Progress logger (the parameter name shadows the builtin but is
            kept for backward compatibility with existing callers).
        """
        self._print = print
        if infer_method is None:
            self._print("Please provide an infer method")
            return
        data = []
        cap = cv2.VideoCapture(video_file)
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.last_inference_time = 0
        self.average_inference = 0.0
        self.remaining_time = 0.00
        frame_count = 0
        while cap.isOpened():
            ret, raw_img = cap.read()
            frame_count += 1.0
            if not ret:
                self._print("Cannot open ", video_file)
                break
            tdelta = str(timedelta(seconds=self.remaining_time))
            self._print("Progress = %d/%d Remaining:%s %.2fs/f \r"
                        % (frame_count, length, tdelta, self.average_inference), end="")
            start_time = time.time()
            output = infer_method(raw_img)
            self.last_inference_time = time.time() - start_time
            # Incrementally update the running mean of per-frame inference time.
            if frame_count > 1:
                self.average_inference = ((self.average_inference * (frame_count - 1))
                                          + self.last_inference_time) / frame_count
            else:
                self.average_inference = self.last_inference_time
            self.remaining_time = (length - frame_count) * self.average_inference
            data.append(output)
            if showVideo:
                cv2.imshow("Pose Capture", raw_img)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        npdata = np.array(data)
        np.save(out_file, npdata)
        cap.release()
        cv2.destroyAllWindows()
| [
"numpy.load",
"numpy.save",
"cv2.waitKey",
"cv2.imshow",
"time.sleep",
"cv2.VideoCapture",
"time.time",
"numpy.array",
"datetime.timedelta",
"cv2.destroyAllWindows"
] | [((603, 625), 'numpy.load', 'np.load', (['keypoint_file'], {}), '(keypoint_file)\n', (610, 625), True, 'import numpy as np\n'), ((640, 668), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_file'], {}), '(video_file)\n', (656, 668), False, 'import cv2\n'), ((1651, 1679), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_file'], {}), '(video_file)\n', (1667, 1679), False, 'import cv2\n'), ((2931, 2945), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2939, 2945), True, 'import numpy as np\n'), ((2954, 2979), 'numpy.save', 'np.save', (['out_file', 'npdata'], {}), '(out_file, npdata)\n', (2961, 2979), True, 'import numpy as np\n'), ((3011, 3034), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3032, 3034), False, 'import cv2\n'), ((1201, 1229), 'cv2.imshow', 'cv2.imshow', (['"""keypoints"""', 'img'], {}), "('keypoints', img)\n", (1211, 1229), False, 'import cv2\n'), ((1242, 1263), 'time.sleep', 'time.sleep', (['(1.0 / fps)'], {}), '(1.0 / fps)\n', (1252, 1263), False, 'import time\n'), ((2318, 2329), 'time.time', 'time.time', ([], {}), '()\n', (2327, 2329), False, 'import time\n'), ((2111, 2149), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.remaining_time'}), '(seconds=self.remaining_time)\n', (2120, 2149), False, 'from datetime import timedelta\n'), ((2412, 2423), 'time.time', 'time.time', ([], {}), '()\n', (2421, 2423), False, 'import time\n'), ((2797, 2832), 'cv2.imshow', 'cv2.imshow', (['"""Pose Capture"""', 'raw_img'], {}), "('Pose Capture', raw_img)\n", (2807, 2832), False, 'import cv2\n'), ((1276, 1290), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1287, 1290), False, 'import cv2\n'), ((2852, 2866), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2863, 2866), False, 'import cv2\n')] |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow C++ INFO and WARNING log output
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.preprocessing import OneHotEncoder
'''
客户流失预测模型
owNumber:行号 ×
CustomerID:用户编号 ×
Surname:用户姓名 ×
CreditScore:信用分数
Geography:用户所在国家/地区 ×
Gender:用户性别
Age:年龄
Tenure:当了本银行多少年用户
Balance:存贷款情况
NumOfProducts:使用产品数量
HasCrCard:是否有本行信用卡
IsActiveMember:是否活跃用户
EstimatedSalary:估计收入
Exited:是否已流失,这将作为我们的标签数据
'''
df = pd.read_csv("./data/select-data.csv")
df_test = pd.read_csv("./data/scalar-test.csv")
print("构建向量.........")
# Column order matters: it fixes each feature's position in the 10-wide
# input vector consumed by the network below.
_FEATURE_COLUMNS = ["Geography", "Gender", "EB", "Age", "EstimatedSalary",
                    "NumOfProducts", "CreditScore", "Tenure", "HasCrCard",
                    "IsActiveMember"]


def _build_vectors(frame):
    """Return (features, labels) numpy arrays for one dataframe.

    Replaces the two duplicated row-by-row append loops of the original;
    labels come from the "Exited" column (1 = customer churned).
    """
    features = frame[_FEATURE_COLUMNS].to_numpy()
    labels = frame["Exited"].to_numpy()
    return features, labels


train, target = _build_vectors(df)
test, test_target = _build_vectors(df_test)
# train = np.trunc(train * 100)
# Shuffle the training features together with their labels.
train, target = shuffle(train, target)
target = target.reshape(-1, 1)
test_target = test_target.reshape(-1, 1)
# One-hot encode the binary labels.  The encoder is refitted per split as
# in the original; with the fixed label set {0, 1} the encodings agree.
enc = OneHotEncoder()
enc.fit(test_target)
test_target = enc.transform(test_target).toarray()
enc.fit(target)
target = enc.transform(target).toarray()
# NOTE: the original ended with a redundant enc.fit(test_target) on the
# already-transformed one-hot array; it had no effect and was removed.
# Input placeholders.
x = tf.placeholder(tf.float32, shape=(None, 10))
# One-hot two-class targets [not-exited, exited].
y = tf.placeholder(tf.float32, shape=(None, 2))
# Dropout keep-probability (0.5 while training, 1.0 while evaluating).
keep = tf.placeholder(tf.float32)
# Network: three fully connected layers, 10 -> 256 -> 256 -> 2.
# Layer 1
var1 = tf.Variable(tf.truncated_normal([10, 256], stddev=0.1))
bias1 = tf.Variable(tf.zeros([256]))
hc1 = tf.add(tf.matmul(x, var1), bias1)
h1 = tf.sigmoid(hc1)
h1 = tf.nn.dropout(h1, keep_prob=keep)
# Layer 2
var2 = tf.Variable(tf.truncated_normal([256, 256], stddev=0.1))
bias2 = tf.Variable(tf.zeros([256]))
hc2 = tf.add(tf.matmul(h1, var2), bias2)
h2 = tf.sigmoid(hc2)
h2 = tf.nn.dropout(h2, keep_prob=keep)
# Output layer: hc3 are the raw logits, h3 the softmax probabilities.
var3 = tf.Variable(tf.truncated_normal([256, 2], stddev=0.1))
bias3 = tf.Variable(tf.zeros([2]))
hc3 = tf.add(tf.matmul(h2, var3), bias3)
h3 = tf.nn.softmax(hc3)
# Loss.  softmax_cross_entropy_with_logits_v2 expects UNSCALED logits;
# the original fed it h3 (already softmaxed), applying softmax twice and
# degrading training.  Fixed to use the logits tensor hc3.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=hc3, labels=y))
tf.summary.scalar('loss', loss)
# Accuracy (argmax of h3 equals argmax of hc3, so h3 is fine here).
ac = tf.cast(tf.equal(tf.argmax(h3, 1), tf.argmax(y, 1)), tf.float32)
acc = tf.reduce_mean(ac)
tf.summary.scalar('accuracy', acc)
# Optimizer ("optimzer" spelling kept — the training code below uses it).
optimzer = tf.train.AdamOptimizer(1e-3).minimize(loss)
merge_summary = tf.summary.merge_all()
isTrain = 1
# Training entry point; the triple-quoted string at the bottom holds a
# disabled inference branch that restores the latest checkpoint instead.
print("正在训练.....")
saver = tf.train.Saver(max_to_keep=1)
with tf.Session() as sess:
    if isTrain:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        summary_writer = tf.summary.FileWriter('./logs/', sess.graph)
        for i in range(0, 10001):
            # One full-batch optimisation step with dropout keep-prob 0.5.
            sess.run(optimzer, feed_dict={x: train, y: target, keep: 0.5})
            train_summary = sess.run(merge_summary, feed_dict={x: train, y: target, keep: 1})
            summary_writer.add_summary(train_summary, i)
            if i % 50 == 0:
                # Evaluate (keep=1 disables dropout) and checkpoint every 50 steps.
                accu = sess.run(acc, feed_dict={x: train, y: target, keep: 1})
                accuT = sess.run(acc, feed_dict={x: test, y: test_target, keep: 1})
                losss = sess.run(loss, feed_dict={x: train, y: target, keep: 1})
                print("epoch:" + str(i) + " train_acc:" + str(accu) + " test_acc:" + str(accuT) + " loss:" + str(
                    losss))
                saver.save(sess, './model/bank.ckpt', global_step=i)
    '''
    else:
        f = open("./result/NN-target.txt", "w")
        model_file = tf.train.latest_checkpoint('./NN-model/')
        saver.restore(sess, model_file)
        tar = sess.run(h3, feed_dict={x: test, y: test_target, keep: 1})
        tar = sess.run(tf.argmax(tar, 1))
        for i in range(0, len(tar)):
            f.write(str(tar[i]) + " ")
        f.close()
        print(tar)'''
| [
"pandas.read_csv",
"tensorflow.matmul",
"tensorflow.sigmoid",
"tensorflow.truncated_normal",
"tensorflow.nn.softmax",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.summary.merge_all",
"tensorflow.summary.scalar",
"ten... | [((466, 503), 'pandas.read_csv', 'pd.read_csv', (['"""./data/select-data.csv"""'], {}), "('./data/select-data.csv')\n", (477, 503), True, 'import pandas as pd\n'), ((514, 551), 'pandas.read_csv', 'pd.read_csv', (['"""./data/scalar-test.csv"""'], {}), "('./data/scalar-test.csv')\n", (525, 551), True, 'import pandas as pd\n'), ((1078, 1093), 'numpy.array', 'np.array', (['train'], {}), '(train)\n', (1086, 1093), True, 'import numpy as np\n'), ((1103, 1119), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (1111, 1119), True, 'import numpy as np\n'), ((1685, 1699), 'numpy.array', 'np.array', (['test'], {}), '(test)\n', (1693, 1699), True, 'import numpy as np\n'), ((1714, 1735), 'numpy.array', 'np.array', (['test_target'], {}), '(test_target)\n', (1722, 1735), True, 'import numpy as np\n'), ((1798, 1820), 'sklearn.utils.shuffle', 'shuffle', (['train', 'target'], {}), '(train, target)\n', (1805, 1820), False, 'from sklearn.utils import shuffle\n'), ((1912, 1927), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1925, 1927), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((2094, 2138), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 10)'}), '(tf.float32, shape=(None, 10))\n', (2108, 2138), True, 'import tensorflow as tf\n'), ((2159, 2202), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 2)'}), '(tf.float32, shape=(None, 2))\n', (2173, 2202), True, 'import tensorflow as tf\n'), ((2210, 2236), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2224, 2236), True, 'import tensorflow as tf\n'), ((2401, 2416), 'tensorflow.sigmoid', 'tf.sigmoid', (['hc1'], {}), '(hc1)\n', (2411, 2416), True, 'import tensorflow as tf\n'), ((2422, 2455), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h1'], {'keep_prob': 'keep'}), '(h1, keep_prob=keep)\n', (2435, 2455), True, 'import tensorflow as tf\n'), ((2613, 2628), 'tensorflow.sigmoid', 
'tf.sigmoid', (['hc2'], {}), '(hc2)\n', (2623, 2628), True, 'import tensorflow as tf\n'), ((2634, 2667), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h2'], {'keep_prob': 'keep'}), '(h2, keep_prob=keep)\n', (2647, 2667), True, 'import tensorflow as tf\n'), ((2821, 2839), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['hc3'], {}), '(hc3)\n', (2834, 2839), True, 'import tensorflow as tf\n'), ((2936, 2967), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (2953, 2967), True, 'import tensorflow as tf\n'), ((3053, 3071), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['ac'], {}), '(ac)\n', (3067, 3071), True, 'import tensorflow as tf\n'), ((3072, 3106), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'acc'], {}), "('accuracy', acc)\n", (3089, 3106), True, 'import tensorflow as tf\n'), ((3188, 3210), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3208, 3210), True, 'import tensorflow as tf\n'), ((3259, 3288), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1)'}), '(max_to_keep=1)\n', (3273, 3288), True, 'import tensorflow as tf\n'), ((2275, 2317), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[10, 256]'], {'stddev': '(0.1)'}), '([10, 256], stddev=0.1)\n', (2294, 2317), True, 'import tensorflow as tf\n'), ((2339, 2354), 'tensorflow.zeros', 'tf.zeros', (['[256]'], {}), '([256])\n', (2347, 2354), True, 'import tensorflow as tf\n'), ((2369, 2387), 'tensorflow.matmul', 'tf.matmul', (['x', 'var1'], {}), '(x, var1)\n', (2378, 2387), True, 'import tensorflow as tf\n'), ((2485, 2528), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[256, 256]'], {'stddev': '(0.1)'}), '([256, 256], stddev=0.1)\n', (2504, 2528), True, 'import tensorflow as tf\n'), ((2550, 2565), 'tensorflow.zeros', 'tf.zeros', (['[256]'], {}), '([256])\n', (2558, 2565), True, 'import tensorflow as tf\n'), ((2580, 2599), 'tensorflow.matmul', 'tf.matmul', (['h1', 'var2'], {}), '(h1, 
var2)\n', (2589, 2599), True, 'import tensorflow as tf\n'), ((2697, 2738), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[256, 2]'], {'stddev': '(0.1)'}), '([256, 2], stddev=0.1)\n', (2716, 2738), True, 'import tensorflow as tf\n'), ((2760, 2773), 'tensorflow.zeros', 'tf.zeros', (['[2]'], {}), '([2])\n', (2768, 2773), True, 'import tensorflow as tf\n'), ((2788, 2807), 'tensorflow.matmul', 'tf.matmul', (['h2', 'var3'], {}), '(h2, var3)\n', (2797, 2807), True, 'import tensorflow as tf\n'), ((2871, 2934), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'h3', 'labels': 'y'}), '(logits=h3, labels=y)\n', (2913, 2934), True, 'import tensorflow as tf\n'), ((3295, 3307), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3305, 3307), True, 'import tensorflow as tf\n'), ((2999, 3015), 'tensorflow.argmax', 'tf.argmax', (['h3', '(1)'], {}), '(h3, 1)\n', (3008, 3015), True, 'import tensorflow as tf\n'), ((3017, 3032), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (3026, 3032), True, 'import tensorflow as tf\n'), ((3127, 3156), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (3149, 3156), True, 'import tensorflow as tf\n'), ((3351, 3384), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3382, 3384), True, 'import tensorflow as tf\n'), ((3436, 3480), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./logs/"""', 'sess.graph'], {}), "('./logs/', sess.graph)\n", (3457, 3480), True, 'import tensorflow as tf\n')] |
"""
Performance check of DGL model + trainer + dataset
"""
import numpy as np
from tqdm import tqdm
import pickle
import torch
import torch.nn.functional as F
from dgl.data import CoraGraphDataset, PubmedGraphDataset, CiteseerGraphDataset
from dgl.nn.pytorch import GraphConv, GATConv, SAGEConv
import logging
# Show only ERROR-level messages from libraries logging via the root logger.
logging.basicConfig(level=logging.ERROR)
class GCN(torch.nn.Module):
    """Two-layer GCN: GraphConv(in -> 16) + ReLU + dropout, then GraphConv(16 -> classes)."""

    def __init__(self, num_features, num_classes):
        super(GCN, self).__init__()
        self.conv1 = GraphConv(num_features, 16)
        self.conv2 = GraphConv(16, num_classes)

    def forward(self, graph):
        hidden = F.relu(self.conv1(graph, graph.ndata['feat']))
        hidden = self.conv2(graph, F.dropout(hidden, training=self.training))
        return F.log_softmax(hidden, dim=-1)
class GAT(torch.nn.Module):
    """Two-layer GAT: 8 attention heads of width 8 (concatenated), then one averaged head."""

    def __init__(self, num_features, num_classes):
        super(GAT, self).__init__()
        self.conv1 = GATConv(num_features, 8, 8, feat_drop=0.6, attn_drop=0.6, activation=F.relu)
        self.conv2 = GATConv(8 * 8, num_classes, 1, feat_drop=0.6, attn_drop=0.6)

    def forward(self, graph):
        heads = self.conv1(graph, graph.ndata['feat']).flatten(1)
        logits = self.conv2(graph, heads).mean(1)
        return F.log_softmax(logits, dim=-1)
class SAGE(torch.nn.Module):
    """GraphSAGE with ``num_layers`` "gcn"-aggregator layers; ReLU + dropout between layers."""

    def __init__(self, num_features, hidden_channels, num_layers, num_classes):
        super(SAGE, self).__init__()
        self.num_layers = num_layers
        # Layer widths: input -> hidden x (num_layers - 1) -> output.
        dims = [num_features] + [hidden_channels] * (num_layers - 1) + [num_classes]
        self.convs = torch.nn.ModuleList(
            [SAGEConv(d_in, d_out, "gcn") for d_in, d_out in zip(dims[:-1], dims[1:])]
        )
        self.dropout = torch.nn.Dropout()

    def forward(self, graph):
        h = self.dropout(graph.ndata['feat'])
        last = self.num_layers - 1
        for idx, conv in enumerate(self.convs):
            h = conv(graph, h)
            if idx != last:
                h = self.dropout(h.relu())
        return F.log_softmax(h, dim=-1)
def test(model, graph, mask, label):
model.eval()
pred = model(graph)[mask].max(1)[1]
acc = pred.eq(label[mask]).sum().item() / mask.sum().item()
return acc
def train(model, graph, args, label, train_mask, val_mask):
    """Train *model* with Adam and return it loaded with the weights that
    achieved the best validation accuracy.

    Parameters
    ----------
    model : torch.nn.Module
        Model mapping ``graph`` to per-node log-probabilities.
    graph
        Input passed straight through to ``model`` (a DGLGraph in this file).
    args
        Namespace providing ``lr``, ``weight_decay`` and ``epoch``.
    label : torch.Tensor
        Per-node class labels.
    train_mask, val_mask : torch.Tensor
        Boolean node masks for training / validation.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # Snapshot the initial weights as pickled bytes.  The original stored the
    # LIVE state_dict here: when no epoch improved validation accuracy,
    # pickle.loads() below received an OrderedDict instead of bytes and
    # raised TypeError — and the "snapshot" aliased the mutating weights.
    parameters = pickle.dumps(model.state_dict())
    best_acc = 0.
    for epoch in range(args.epoch):
        model.train()
        optimizer.zero_grad()
        output = model(graph)
        loss = F.nll_loss(output[train_mask], label[train_mask])
        loss.backward()
        optimizer.step()
        val_acc = test(model, graph, val_mask, label)
        if val_acc > best_acc:
            best_acc = val_acc
            parameters = pickle.dumps(model.state_dict())
    model.load_state_dict(pickle.loads(parameters))
    return model
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser('dgl')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--dataset', type=str, choices=['Cora', 'CiteSeer', 'PubMed'], default='Cora')
    parser.add_argument('--repeat', type=int, default=50)
    parser.add_argument('--model', type=str, choices=['gat', 'gcn', 'sage'], default='gat')
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--weight_decay', type=float, default=0.0)
    parser.add_argument('--epoch', type=int, default=200)
    args = parser.parse_args()
    # seed = 100
    # Load the requested citation-graph benchmark dataset.
    if args.dataset == 'Cora':
        dataset = CoraGraphDataset()
    elif args.dataset == 'CiteSeer':
        dataset = CiteseerGraphDataset()
    elif args.dataset == 'PubMed':
        dataset = PubmedGraphDataset()
    graph = dataset[0].to(args.device)
    label = graph.ndata['label']
    train_mask = graph.ndata['train_mask']
    val_mask = graph.ndata['val_mask']
    test_mask = graph.ndata['test_mask']
    accs = []
    # Train from scratch once per seed and collect the test accuracies.
    for seed in tqdm(range(args.repeat)):
        np.random.seed(seed)
        torch.manual_seed(seed)
        if args.model == 'gat':
            model = GAT(graph.ndata['feat'].size(1), dataset.num_classes)
        elif args.model == 'gcn':
            model = GCN(graph.ndata['feat'].size(1), dataset.num_classes)
        elif args.model == 'sage':
            model = SAGE(graph.ndata['feat'].size(1), 64, 2, dataset.num_classes)
        model.to(args.device)
        train(model, graph, args, label, train_mask, val_mask)
        acc = test(model, graph, test_mask, label)
        accs.append(acc)
    # Report mean ~ standard deviation over all repeats.
    print('{:.4f} ~ {:.4f}'.format(np.mean(accs), np.std(accs)))
| [
"torch.nn.Dropout",
"pickle.loads",
"dgl.data.CoraGraphDataset",
"numpy.random.seed",
"argparse.ArgumentParser",
"logging.basicConfig",
"dgl.nn.pytorch.GATConv",
"torch.nn.ModuleList",
"torch.manual_seed",
"dgl.nn.pytorch.GraphConv",
"numpy.std",
"torch.nn.functional.dropout",
"dgl.data.Cite... | [((313, 353), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR'}), '(level=logging.ERROR)\n', (332, 353), False, 'import logging\n'), ((3157, 3187), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""dgl"""'], {}), "('dgl')\n", (3180, 3187), False, 'import argparse\n'), ((491, 518), 'dgl.nn.pytorch.GraphConv', 'GraphConv', (['num_features', '(16)'], {}), '(num_features, 16)\n', (500, 518), False, 'from dgl.nn.pytorch import GraphConv, GATConv, SAGEConv\n'), ((540, 566), 'dgl.nn.pytorch.GraphConv', 'GraphConv', (['(16)', 'num_classes'], {}), '(16, num_classes)\n', (549, 566), False, 'from dgl.nn.pytorch import GraphConv, GATConv, SAGEConv\n'), ((711, 754), 'torch.nn.functional.dropout', 'F.dropout', (['features'], {'training': 'self.training'}), '(features, training=self.training)\n', (720, 754), True, 'import torch.nn.functional as F\n'), ((817, 848), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['features'], {'dim': '(-1)'}), '(features, dim=-1)\n', (830, 848), True, 'import torch.nn.functional as F\n'), ((986, 1062), 'dgl.nn.pytorch.GATConv', 'GATConv', (['num_features', '(8)', '(8)'], {'feat_drop': '(0.6)', 'attn_drop': '(0.6)', 'activation': 'F.relu'}), '(num_features, 8, 8, feat_drop=0.6, attn_drop=0.6, activation=F.relu)\n', (993, 1062), False, 'from dgl.nn.pytorch import GraphConv, GATConv, SAGEConv\n'), ((1082, 1142), 'dgl.nn.pytorch.GATConv', 'GATConv', (['(8 * 8)', 'num_classes', '(1)'], {'feat_drop': '(0.6)', 'attn_drop': '(0.6)'}), '(8 * 8, num_classes, 1, feat_drop=0.6, attn_drop=0.6)\n', (1089, 1142), False, 'from dgl.nn.pytorch import GraphConv, GATConv, SAGEConv\n'), ((1339, 1370), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['features'], {'dim': '(-1)'}), '(features, dim=-1)\n', (1352, 1370), True, 'import torch.nn.functional as F\n'), ((1576, 1597), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (1595, 1597), False, 'import torch\n'), ((1885, 1903), 
'torch.nn.Dropout', 'torch.nn.Dropout', ([], {}), '()\n', (1901, 1903), False, 'import torch\n'), ((2193, 2217), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['h'], {'dim': '(-1)'}), '(h, dim=-1)\n', (2206, 2217), True, 'import torch.nn.functional as F\n'), ((2739, 2788), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output[train_mask]', 'label[train_mask]'], {}), '(output[train_mask], label[train_mask])\n', (2749, 2788), True, 'import torch.nn.functional as F\n'), ((3052, 3076), 'pickle.loads', 'pickle.loads', (['parameters'], {}), '(parameters)\n', (3064, 3076), False, 'import pickle\n'), ((3785, 3803), 'dgl.data.CoraGraphDataset', 'CoraGraphDataset', ([], {}), '()\n', (3801, 3803), False, 'from dgl.data import CoraGraphDataset, PubmedGraphDataset, CiteseerGraphDataset\n'), ((4216, 4236), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4230, 4236), True, 'import numpy as np\n'), ((4245, 4268), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (4262, 4268), False, 'import torch\n'), ((3859, 3881), 'dgl.data.CiteseerGraphDataset', 'CiteseerGraphDataset', ([], {}), '()\n', (3879, 3881), False, 'from dgl.data import CoraGraphDataset, PubmedGraphDataset, CiteseerGraphDataset\n'), ((4815, 4828), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (4822, 4828), True, 'import numpy as np\n'), ((4830, 4842), 'numpy.std', 'np.std', (['accs'], {}), '(accs)\n', (4836, 4842), True, 'import numpy as np\n'), ((1834, 1860), 'dgl.nn.pytorch.SAGEConv', 'SAGEConv', (['inc', 'outc', '"""gcn"""'], {}), "(inc, outc, 'gcn')\n", (1842, 1860), False, 'from dgl.nn.pytorch import GraphConv, GATConv, SAGEConv\n'), ((3935, 3955), 'dgl.data.PubmedGraphDataset', 'PubmedGraphDataset', ([], {}), '()\n', (3953, 3955), False, 'from dgl.data import CoraGraphDataset, PubmedGraphDataset, CiteseerGraphDataset\n')] |
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# pylint: skip-file
"""
Test of backend non-maximum supression compare with numpy results
The nms functions are tested on both GPU and CPU backends
"""
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import itertools as itt
import numpy as np
def py_cpu_nms(dets, thresh, normalized):
    """Pure Python NMS baseline.

    Greedily keeps the highest-scoring boxes, dropping any remaining box
    whose IoU with a kept box exceeds *thresh*.  Boxes with score 0 are
    ignored up front.  When *normalized* is False, box edges are treated
    as inclusive pixel coordinates (+1 in width/height).
    """
    offset = 0 if normalized else 1

    valid = np.where(dets[:, 4] != 0)[0]
    x1, y1, x2, y2 = (dets[valid, col] for col in range(4))
    scores = dets[valid, 4]

    areas = (x2 - x1 + offset) * (y2 - y1 + offset)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # Intersection of the best box with every remaining candidate.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        iw = np.maximum(0.0, ix2 - ix1 + offset)
        ih = np.maximum(0.0, iy2 - iy1 + offset)
        inter = iw * ih
        iou = inter / (areas[best] + areas[rest] - inter)
        # Keep only the candidates that overlap the best box at most thresh.
        order = rest[np.where(iou <= thresh)[0]]
    return keep
def pytest_generate_tests(metafunc):
    """Parametrize any test taking ``fargs`` with the full cartesian product
    of thresholds, box counts, normalization flags and zero-box counts."""
    thresholds = [0.5, 0.7]
    box_counts = [300, 600, 1000]
    zero_box_counts = [0, 50, 100]
    normalized_flags = [True, False]
    if 'fargs' not in metafunc.fixturenames:
        return
    metafunc.parametrize(
        'fargs',
        itt.product(thresholds, box_counts, normalized_flags, zero_box_counts),
    )
def test_nms(backend_pair, fargs):
    """Compare the CPU and GPU backend NMS against the pure-python reference.

    Parameters
    ----------
    backend_pair: tuple
        (gpu backend, cpu backend) instances supplied by the fixture.
    fargs: tuple
        (overlap threshold, box count, normalized flag, zero-box count),
        parametrized by pytest_generate_tests above.
    """
    thre, box_count, normalized, num_zero_boxes = fargs
    # Column layout of one detection row.
    x1, y1, x2, y2, score = 0, 1, 2, 3, 4
    # Random boxes roughly 400-410 wide and 600-610 tall; scores are sorted
    # descending so the GPU kernel's sorted-input requirement can be met.
    dets = np.zeros((box_count, 5), dtype=np.float32)
    dets[:, x1] = np.random.random((box_count,)) * 10
    dets[:, x2] = dets[:, x1] + (np.random.random((box_count,)) * 10 + 400)
    dets[:, y1] = np.random.random((box_count,)) * 10
    dets[:, y2] = dets[:, y1] + (np.random.random((box_count,)) * 10 + 600)
    dets[:, score] = np.sort(np.random.random((box_count,)))[::-1]
    # Pad with zero-score boxes, which every implementation must ignore.
    dets = np.vstack([dets, np.zeros((num_zero_boxes, 5))])
    ng, nc = backend_pair
    # call reference nms
    keep_ref = py_cpu_nms(dets, thre, normalized)
    # call cpu nms
    # NOTE(review): the CPU call does not pass ``normalized`` — presumably
    # its default matches this test's convention; confirm in the backend.
    dets_nc = nc.array(dets)
    tic_cpu = nc.init_mark()
    toc_cpu = nc.init_mark()
    nc.record_mark(tic_cpu)
    keep_nc = nc.nms(dets_nc, thre)
    nc.record_mark(toc_cpu)
    nc.synchronize_mark(toc_cpu)
    print("cpu NMS time (ms): {}".format(nc.get_time(tic_cpu, toc_cpu)))
    assert keep_nc == keep_ref
    # call gpu nms kernel, the kernels takes sorted detection boxes
    dets_ng = ng.array(dets)
    scores = dets_ng[:, 4].get()
    order = scores.argsort()[::-1]
    sorted_dets_dev = dets_ng[order, :]
    tic_gpu = ng.init_mark()
    toc_gpu = ng.init_mark()
    # call through backend
    ng.record_mark(tic_gpu)
    keep_ng = ng.nms(sorted_dets_dev, thre, normalized)
    ng.record_mark(toc_gpu)
    ng.synchronize_mark(toc_gpu)
    print("gpu NMS time (ms): {}".format(ng.get_time(tic_gpu, toc_gpu)))
    assert keep_ng == keep_ref
if __name__ == '__main__':
    from neon.backends.nervanagpu import NervanaGPU
    from neon.backends.nervanacpu import NervanaCPU
    # Standalone smoke run: one parameter combination on a (GPU, CPU) pair.
    ng = NervanaGPU()
    nc = NervanaCPU()
    test_nms((ng, nc), (0.7, 300, True, 100))
| [
"numpy.minimum",
"numpy.maximum",
"neon.backends.nervanacpu.NervanaCPU",
"past.utils.old_div",
"numpy.zeros",
"numpy.where",
"numpy.random.random",
"neon.backends.nervanagpu.NervanaGPU",
"itertools.product"
] | [((2466, 2508), 'numpy.zeros', 'np.zeros', (['(box_count, 5)'], {'dtype': 'np.float32'}), '((box_count, 5), dtype=np.float32)\n', (2474, 2508), True, 'import numpy as np\n'), ((4030, 4042), 'neon.backends.nervanagpu.NervanaGPU', 'NervanaGPU', ([], {}), '()\n', (4040, 4042), False, 'from neon.backends.nervanagpu import NervanaGPU\n'), ((4052, 4064), 'neon.backends.nervanacpu.NervanaCPU', 'NervanaCPU', ([], {}), '()\n', (4062, 4064), False, 'from neon.backends.nervanacpu import NervanaCPU\n'), ((1202, 1227), 'numpy.where', 'np.where', (['(dets[:, 4] != 0)'], {}), '(dets[:, 4] != 0)\n', (1210, 1227), True, 'import numpy as np\n'), ((1537, 1569), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (1547, 1569), True, 'import numpy as np\n'), ((1584, 1616), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (1594, 1616), True, 'import numpy as np\n'), ((1631, 1663), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (1641, 1663), True, 'import numpy as np\n'), ((1678, 1710), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (1688, 1710), True, 'import numpy as np\n'), ((1724, 1759), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + offset)'], {}), '(0.0, xx2 - xx1 + offset)\n', (1734, 1759), True, 'import numpy as np\n'), ((1772, 1807), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + offset)'], {}), '(0.0, yy2 - yy1 + offset)\n', (1782, 1807), True, 'import numpy as np\n'), ((1844, 1895), 'past.utils.old_div', 'old_div', (['inter', '(areas[i] + areas[order[1:]] - inter)'], {}), '(inter, areas[i] + areas[order[1:]] - inter)\n', (1851, 1895), False, 'from past.utils import old_div\n'), ((2212, 2272), 'itertools.product', 'itt.product', (['thre_rng', 'count_rng', 'normalized', 'num_zero_boxes'], {}), '(thre_rng, count_rng, normalized, num_zero_boxes)\n', (2223, 2272), True, 'import 
itertools as itt\n'), ((2527, 2557), 'numpy.random.random', 'np.random.random', (['(box_count,)'], {}), '((box_count,))\n', (2543, 2557), True, 'import numpy as np\n'), ((2657, 2687), 'numpy.random.random', 'np.random.random', (['(box_count,)'], {}), '((box_count,))\n', (2673, 2687), True, 'import numpy as np\n'), ((1914, 1937), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (1922, 1937), True, 'import numpy as np\n'), ((2799, 2829), 'numpy.random.random', 'np.random.random', (['(box_count,)'], {}), '((box_count,))\n', (2815, 2829), True, 'import numpy as np\n'), ((2866, 2895), 'numpy.zeros', 'np.zeros', (['(num_zero_boxes, 5)'], {}), '((num_zero_boxes, 5))\n', (2874, 2895), True, 'import numpy as np\n'), ((2596, 2626), 'numpy.random.random', 'np.random.random', (['(box_count,)'], {}), '((box_count,))\n', (2612, 2626), True, 'import numpy as np\n'), ((2726, 2756), 'numpy.random.random', 'np.random.random', (['(box_count,)'], {}), '((box_count,))\n', (2742, 2756), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# This file is part of MIEZE simulation.
# Copyright (C) 2019, 2020 TUM FRM2 E21 Research Group.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Utilities and helper functions."""
import csv
import numpy as np
def add_inverse(a, b):
    """Combine two values as a parallel connection.

    Parameters
    ----------
    a: float, ndarray
    b: float, ndarray

    Returns
    -------
    out: float, ndarray
        The inverse of the sum of the inverses, ``1 / (1/a + 1/b)``.
        Wherever either input is 0 the result is 0 (the physical limit
        of a parallel connection containing a zero-valued element).
    """
    if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
        # Elementwise path.  The docstring always promised ndarray support,
        # but the original's ``if a and b`` raises ValueError for arrays
        # with more than one element.
        a_arr, b_arr = np.broadcast_arrays(np.asarray(a, dtype=float),
                                               np.asarray(b, dtype=float))
        out = np.zeros(a_arr.shape)
        mask = (a_arr != 0) & (b_arr != 0)
        out[mask] = (a_arr[mask]**(-1) + b_arr[mask]**(-1))**(-1)
        return out
    if a and b:
        return (a**(-1) + b**(-1))**(-1)
    else:
        return 0
def transform_frequency(eigenfrequency_value, prefactor=3.8/143000):
    """Scale an eigenfrequency to match the Larmor precession.
    Parameters
    ----------
    eigenfrequency_value: float
        Eigenfrequency to be converted.
    prefactor: float, optional
        Conversion factor that matches the data.
        Defaults to 3.8/143000.
    Returns
    -------
    out: float
        The rescaled frequency.
    """
    scaled = eigenfrequency_value * prefactor
    return scaled
def convert_decimal_to_binary(number):
    """Return the binary-digit string of an integer (no '0b' prefix).

    Bug fix: the original ``bin(number)[2:]`` returned garbage such as
    ``'b1010'`` for negative inputs, because the sign shifts the '0b'
    prefix; ``format`` places the '-' sign correctly instead.
    Parameters
    ----------
    number: int
    Returns
    -------
    out: str
    >>> convert_decimal_to_binary(10)
    '1010'
    """
    return format(number, 'b')
def convert_binary_to_decimal(binary_number):
    """Return the decimal value of a string of binary digits.
    Parameters
    ----------
    binary_number: str
    Returns
    -------
    out: int
    >>> convert_binary_to_decimal('1010')
    10
    """
    total = 0
    # Walk the digits right-to-left so the position is the power of two.
    for position, digit in enumerate(reversed(binary_number)):
        total += int(digit) * 2 ** position
    return total
def read_data_from_file(file_name):
    """Read frequency/capacity measurements from a comma-separated file.
    Rows are considered only when columns 1-3 are all non-empty; the first
    such row is treated as the header and skipped.
    Parameters
    ----------
    file_name: str
        Name of the file to be read from.
    Returns
    -------
    tuple of numpy.ndarray
        (frequency, capacity_1, capacity_2, connection_type) arrays,
        the first three as floats and the last as ints.
    """
    _frequency = list()
    _capacity_1 = list()
    _capacity_2 = list()
    connection_type = list()
    with open(file_name) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        for row in csv_reader:
            # Skip rows with empty values in columns 1-3 (incomplete records).
            if row[1] and row[2] and row[3]:
                if line_count == 0:
                    # First complete row is assumed to be the header line.
                    # print(f'Column names are {", ".join(row)}')
                    line_count += 1
                else:
                    try:
                        _frequency.append(float(row[0]))
                        _capacity_1.append(float(row[1]))
                        _capacity_2.append(float(row[2]))
                        connection_type.append(int(row[3]))
                    except IndexError:
                        # NOTE(review): malformed rows are only flagged with
                        # this placeholder message; consider a clearer log.
                        print("wtf")
    return np.asarray(_frequency), np.asarray(_capacity_1), np.asarray(_capacity_2), np.asarray(connection_type)
def read_capacities_data_from_file(file_name):
    """Read capacity/box-index data from a CSV file.

    The file is expected to contain a header row followed by rows of
    ``capacity, c1_box_index, c2_box_index, c3_box_index, connection_type``.
    Rows whose second, third or fourth column is empty are skipped; the
    first complete row is treated as the header.

    Parameters
    ----------
    file_name: str
        Name of the file to be read from.

    Returns
    -------
    tuple of ndarray
        Arrays of capacities, the three box indices and the connection
        types, in file order.
    """
    capacities = list()
    index_1 = list()
    index_2 = list()
    index_3 = list()
    connection_types = list()
    with open(file_name) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        header_skipped = False
        for row in csv_reader:
            if not (row[1] and row[2] and row[3]):
                continue
            if not header_skipped:
                # The first complete row holds the column names.
                header_skipped = True
                continue
            # Parse the whole row before appending anything so a malformed
            # row cannot leave the five output lists misaligned.  The old
            # code appended field-by-field, caught only IndexError, and did
            # not guard row[4] being empty (int('') raises ValueError).
            try:
                parsed = (float(row[0]), float(row[1]), float(row[2]),
                          float(row[3]), int(row[4]))
            except (IndexError, ValueError):
                print("Skipping malformed row: {}".format(row))
                continue
            capacities.append(parsed[0])
            index_1.append(parsed[1])
            index_2.append(parsed[2])
            index_3.append(parsed[3])
            connection_types.append(parsed[4])
    return np.asarray(capacities), np.asarray(index_1), np.asarray(index_2), np.asarray(index_3), \
           np.asarray(connection_types)
def save_capacities_data_to_file(data, file_name, extension='.csv'):
    """Save capacity/connection data to a CSV file.

    Parameters
    ----------
    data: dict
        Maps a capacity value to a sequence whose first four entries are
        the three box indices and the connection type.
    file_name: str
        Target file name; *extension* is appended when not already present.
    extension: str, optional
        File extension, defaults to '.csv'.
    """
    # endswith() instead of a substring test: a name such as
    # 'results.csv.bak' contains '.csv' but does not end with it.
    if file_name.endswith(extension):
        full_filename = file_name
    else:
        full_filename = f'{file_name}{extension}'
    # newline='' is required by the csv module to avoid blank lines on
    # platforms that translate line endings.
    with open(full_filename, 'w', newline='') as file:
        csv_writer = csv.writer(file, delimiter=',')
        csv_writer.writerow(["capacity", "c1_box_index", "c2_box_index", "c3_box_index", "connection_type"])
        for capacity, connection_data in data.items():
            row = list((capacity, connection_data[0], connection_data[1], connection_data[2], connection_data[3]))
            csv_writer.writerow(row)
| [
"numpy.asarray",
"csv.reader",
"csv.writer"
] | [((2064, 2099), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (2074, 2099), False, 'import csv\n'), ((3165, 3200), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (3175, 3200), False, 'import csv\n'), ((4281, 4312), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (4291, 4312), False, 'import csv\n'), ((2710, 2732), 'numpy.asarray', 'np.asarray', (['_frequency'], {}), '(_frequency)\n', (2720, 2732), True, 'import numpy as np\n'), ((2734, 2757), 'numpy.asarray', 'np.asarray', (['_capacity_1'], {}), '(_capacity_1)\n', (2744, 2757), True, 'import numpy as np\n'), ((2759, 2782), 'numpy.asarray', 'np.asarray', (['_capacity_2'], {}), '(_capacity_2)\n', (2769, 2782), True, 'import numpy as np\n'), ((2784, 2811), 'numpy.asarray', 'np.asarray', (['connection_type'], {}), '(connection_type)\n', (2794, 2811), True, 'import numpy as np\n'), ((3858, 3880), 'numpy.asarray', 'np.asarray', (['capacities'], {}), '(capacities)\n', (3868, 3880), True, 'import numpy as np\n'), ((3882, 3901), 'numpy.asarray', 'np.asarray', (['index_1'], {}), '(index_1)\n', (3892, 3901), True, 'import numpy as np\n'), ((3903, 3922), 'numpy.asarray', 'np.asarray', (['index_2'], {}), '(index_2)\n', (3913, 3922), True, 'import numpy as np\n'), ((3924, 3943), 'numpy.asarray', 'np.asarray', (['index_3'], {}), '(index_3)\n', (3934, 3943), True, 'import numpy as np\n'), ((3962, 3989), 'numpy.asarray', 'np.asarray', (['connection_type'], {}), '(connection_type)\n', (3972, 3989), True, 'import numpy as np\n')] |
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
import numpy as np
def col_names_check():
    """Verify H2O frame column names and column types for several
    construction paths (file import, from_python, dict literal)."""
    iris_wheader = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
    assert iris_wheader.col_names == ["sepal_len","sepal_wid","petal_len","petal_wid","class"], \
        "Expected {0} for column names but got {1}".format(["sepal_len","sepal_wid","petal_len","petal_wid","class"],
                                                           iris_wheader.col_names)
    iris = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris.csv"))
    assert iris.col_names == ["C1","C2","C3","C4","C5"], "Expected {0} for column names but got " \
                                                         "{1}".format(["C1","C2","C3","C4","C5"], iris.col_names)
    df = h2o.H2OFrame.from_python(zip(*np.random.randn(100,4).tolist()), column_names=list("ABCD"), column_types=["enum"]*4)
    df.head()
    assert df.col_names == list("ABCD"), "Expected {} for column names but got {}".format(list("ABCD"), df.col_names)
    # Bug fix: dict.values() returns a view object in Python 3 and never
    # compares equal to a list, so the original assertion could never pass.
    assert list(df.types.values()) == ["enum"]*4, "Expected {} for column types but got {}".format(["enum"]*4, df.types)
    df = h2o.H2OFrame(zip(*np.random.randn(100,4).tolist()))
    df.head()
    assert df.col_names == ["C1","C2","C3","C4"], "Expected {} for column names but got {}".format(["C1","C2","C3","C4"]
                                                                                                   , df.col_names)
    assert list(df.types.values()) == ["real"]*4, "Expected {} for column types but got {}".format(["real"]*4, df.types)
if __name__ == "__main__":
    # Executed directly: run through the pyunit harness.
    pyunit_utils.standalone_test(col_names_check)
else:
    # Imported by the test runner: execute the check immediately.
    col_names_check()
| [
"h2o.H2OFrame.from_python",
"numpy.random.randn",
"tests.pyunit_utils.standalone_test",
"sys.path.insert",
"h2o.H2OFrame",
"tests.pyunit_utils.locate"
] | [((11, 39), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../"""'], {}), "(1, '../../')\n", (26, 39), False, 'import sys\n'), ((1583, 1631), 'h2o.H2OFrame', 'h2o.H2OFrame', (["{'B': ['a', 'a', 'b', 'NA', 'NA']}"], {}), "({'B': ['a', 'a', 'b', 'NA', 'NA']})\n", (1595, 1631), False, 'import h2o\n'), ((1754, 1839), 'h2o.H2OFrame.from_python', 'h2o.H2OFrame.from_python', (["{'B': ['a', 'a', 'b', 'NA', 'NA']}"], {'column_names': "['X']"}), "({'B': ['a', 'a', 'b', 'NA', 'NA']}, column_names=['X']\n )\n", (1778, 1839), False, 'import h2o\n'), ((1982, 2027), 'tests.pyunit_utils.standalone_test', 'pyunit_utils.standalone_test', (['col_names_check'], {}), '(col_names_check)\n', (2010, 2027), False, 'from tests import pyunit_utils\n'), ((158, 212), 'tests.pyunit_utils.locate', 'pyunit_utils.locate', (['"""smalldata/iris/iris_wheader.csv"""'], {}), "('smalldata/iris/iris_wheader.csv')\n", (177, 212), False, 'from tests import pyunit_utils\n'), ((533, 579), 'tests.pyunit_utils.locate', 'pyunit_utils.locate', (['"""smalldata/iris/iris.csv"""'], {}), "('smalldata/iris/iris.csv')\n", (552, 579), False, 'from tests import pyunit_utils\n'), ((831, 854), 'numpy.random.randn', 'np.random.randn', (['(100)', '(4)'], {}), '(100, 4)\n', (846, 854), True, 'import numpy as np\n'), ((1184, 1207), 'numpy.random.randn', 'np.random.randn', (['(100)', '(4)'], {}), '(100, 4)\n', (1199, 1207), True, 'import numpy as np\n')] |
import os
from curses import wrapper
import csv
import numpy as np
from settings import settings
from utils import data
from action_utils import parse_action_args
from args import get_args
from comm import CommNetMLP
from evaluator import Evaluator
from nns.models import *
from utils.util_fns import *
from utils.game_tracker import GameTracker
from nns.probe import Probe
torch.utils.backcompat.broadcast_warning.enabled = True
torch.utils.backcompat.keepdim_warning.enabled = True
torch.set_default_tensor_type('torch.DoubleTensor')
def load_parent_child_probes(probe_seed, probe_dropout_rate):
    # Load the communication ("c") and hidden-state ("h") probes that were
    # trained for the run identified by the module-level `args` (load dir,
    # env name, experiment name, seed).
    tracker_path = os.path.join(args.load, args.env_name, args.exp_name, "seed" + str(args.seed), 'tracker.pkl')
    old_tracker = GameTracker.from_file(tracker_path)
    # Probe input width is read off the recorded communication vectors.
    c_dim = old_tracker.data[0][2][0].detach().numpy().shape[1]
    # NOTE(review): 49 looks like a hard-coded prediction grid (7x7) — confirm.
    probe_pred_dim = 49
    c_probes = [Probe(c_dim, probe_pred_dim, num_layers=3) for _ in range(2)]
    [c_probe.load_state_dict(torch.load(os.path.join(args.load, args.env_name, args.exp_name, "seed" +
                                                      str(args.seed), 'probes', 'seed' + str(probe_seed),
                                                      'c_probe_' + str(i) + '_dropout_' + str(
                                                          probe_dropout_rate) + '.pth'))) for i, c_probe in
     enumerate(c_probes)]
    [c_probe.eval() for c_probe in c_probes]
    # NOTE(review): `self.num_agents` below raises NameError — this is a
    # module-level function with no `self`; the agent count should be passed
    # in or derived from `args` (e.g. args.nagents). Confirm and fix callers.
    h_probes = [Probe(c_dim, probe_pred_dim, num_layers=3) for _ in range(self.num_agents)]
    [h_probe.load_state_dict(torch.load(
        os.path.join(args.load, args.env_name, args.exp_name, "seed" + str(args.seed), 'probes',
                     'seed' + str(probe_seed), 'h_probe_' +
                     str(i) + '_dropout_' + str(probe_dropout_rate) + '.pth'))) for i, h_probe in
     enumerate(h_probes)]
    [h_probe.eval() for h_probe in h_probes]
    return c_probes, h_probes
def run_eval(_):
    """Load a trained policy and evaluate it.

    Either records a GameTracker over 1000 episodes (args.use_tracker) or
    sweeps probe-dropout rates / intervention-step counts and writes the
    resulting success/collision/time tables to CSV.

    The unused parameter exists because curses.wrapper passes the screen
    object when display mode is on.  Relies on the module-level `args`
    (set in the __main__ block); `torch` and `time` presumably come from
    the star imports at the top of the file — verify.
    """
    def load(path):
        # Restore policy weights from <load>/<env>/<exp>/seed<seed>/models/model.pt.
        load_path = os.path.join(args.load, args.env_name, args.exp_name, "seed" + str(args.seed), "models")
        print(f"load directory is {load_path}")
        log_path = os.path.join(args.load, args.env_name, args.exp_name, "seed" + str(args.seed), "logs")
        print(f"log dir directory is {log_path}")
        assert 'model.pt' in os.listdir(load_path), "No model to load!?"
        model_path = os.path.join(load_path, "model.pt")
        d = torch.load(model_path)
        policy_net.load_state_dict(d['policy_net'])
    if args.ic3net:
        args.commnet = 1
        args.hard_attn = 1
        args.mean_ratio = 0
        # For TJ set comm action to 1 as specified in paper to showcase
        # importance of individual rewards even in cooperative games
        if args.env_name == "traffic_junction":
            args.comm_action_one = True
    # Enemy comm
    args.nfriendly = args.nagents
    if hasattr(args, 'enemy_comm') and args.enemy_comm:
        if hasattr(args, 'nenemies'):
            args.nagents += args.nenemies
        else:
            raise RuntimeError("Env. needs to pass argument 'nenemy'.")
    env = data.init(args.env_name, args, False)
    num_inputs = env.observation_dim
    args.num_actions = env.num_actions
    # Multi-action
    if not isinstance(args.num_actions, (list, tuple)): # single action case
        args.num_actions = [args.num_actions]
    args.dim_actions = env.dim_actions
    args.num_inputs = num_inputs
    # Hard attention
    if args.hard_attn and args.commnet:
        # add comm_action as last dim in actions
        args.num_actions = [*args.num_actions, 2]
        args.dim_actions = env.dim_actions + 1
    # Recurrence
    if args.commnet and (args.recurrent or args.rnn_type == 'LSTM'):
        args.recurrent = True
        args.rnn_type = 'LSTM'
    parse_action_args(args)
    if args.seed == -1:
        args.seed = np.random.randint(0, 10000)
    torch.manual_seed(args.seed)
    print(args)
    print(args.seed)
    if args.commnet:
        print("Creating commnet mlp")
        policy_net = CommNetMLP(args, num_inputs, train_mode=False)
    elif args.random:
        policy_net = Random(args, num_inputs)
    # this is what we are working with for IC3 Net predator prey.
    elif args.recurrent:
        print("Creating an RNN!")
        policy_net = RNN(args, num_inputs)
    else:
        policy_net = MLP(args, num_inputs)
    load(args.load)
    if not args.display:
        display_models([policy_net])
    # share parameters among threads, but not gradients
    for p in policy_net.parameters():
        p.data.share_memory_()
    if args.use_tracker:
        # Pure recording run: dropout 0, probe seed 0, intervention steps -1
        # (matches the Evaluator argument order used in the sweep below).
        evaluator = Evaluator(args, policy_net, data.init(args.env_name, args), 0, 0,
                              -1)
        all_stats = []
        for i in range(1000):
            ep, stat, all_comms = evaluator.run_episode()
            all_stats.append(stat)
            if i % 50 == 0:
                print("Episode number", i)
        tracker_path = os.path.join(args.load, args.env_name, args.exp_name, "seed" + str(args.seed), "tracker.pkl")
        evaluator.tracker.to_file(tracker_path)
        return
    # num_steps = [i for i in range(1, 2)]
    # For traffic, always intervene
    num_steps = [10]
    dropout_rates = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    # dropout_rates = [0.1]
    # dropout_rates = [0.0, 0.1, 0.2, 0.3, 0.4]
    # dropout_rates = [0.0]
    probe_seeds = [i for i in range(0, 5)]
    # probe_seeds = [0]
    # xfact_steps = [50, 250, 500, 750, 1000, 2000, 3000, 4000, 5000]
    xfact_steps = [200]
    # Result tables: [dropout rate, intervention steps, (mean, std)].
    success_table = np.zeros((len(dropout_rates), len(num_steps), 2))
    collision_table = np.zeros((len(dropout_rates), len(num_steps), 2))
    time_table = np.zeros((len(dropout_rates), len(num_steps), 2))
    for xfact_step in xfact_steps:
        settings.NUM_XFACT_STEPS = xfact_step
        for inter_idx, num_intervention_steps in enumerate(num_steps):
            for dropout_idx, probe_dropout_rate in enumerate(dropout_rates):
                succ_for_seed = []
                collision_for_seed = []
                time_for_seed = []
                for probe_seed in probe_seeds:
                    print("Eval for dropout", probe_dropout_rate, "for", num_intervention_steps, "steps for probe seed", probe_seed)
                    evaluator = Evaluator(args, policy_net, data.init(args.env_name, args), probe_dropout_rate, probe_seed, num_intervention_steps)
                    st_time = time.time()
                    all_stats = []
                    for i in range(100):
                        ep, stat, all_comms = evaluator.run_episode()
                        all_stats.append(stat)
                        if i % 20 == 0:
                            print("Episode", i)
                    if args.use_tracker:
                        tracker_path = os.path.join(args.load, args.env_name, args.exp_name, "seed" + str(args.seed), "tracker.pkl")
                        evaluator.tracker.to_file(tracker_path)
                    total_episode_time = time.time() - st_time
                    # Average every stat key over the 100 evaluation episodes.
                    average_stat = {}
                    for key in all_stats[0].keys():
                        average_stat[key] = np.mean([stat.get(key) for stat in all_stats])
                    print("average stats is: ", average_stat)
                    succ_for_seed.append(average_stat.get('success'))
                    if average_stat.get('collisions') is not None:
                        collision_for_seed.append(average_stat.get('collisions'))
                    time_for_seed.append(total_episode_time/average_stat['num_steps'])
                success_table[dropout_idx, inter_idx, 0] = np.mean(succ_for_seed)
                success_table[dropout_idx, inter_idx, 1] = np.std(succ_for_seed)
                collision_table[dropout_idx, inter_idx, 0] = np.mean(collision_for_seed)
                collision_table[dropout_idx, inter_idx, 1] = np.std(collision_for_seed)
                time_table[dropout_idx, inter_idx, 0] = np.mean(time_for_seed)
                time_table[dropout_idx, inter_idx, 1] = np.std(time_for_seed)
    # print("Success table\n", success_table[:, :, 0])
    # print("Collision table\n", collision_table[:, :, 0])
    # print("Success std table\n", success_table[:, :, 1])
    # print("Collision std table\n", collision_table[:, :, 1])
    with open(os.path.join(args.load, args.env_name, args.exp_name, "seed" + str(args.seed),
                           "success_table_" + str(xfact_step) + ".csv"), 'w') as f:
        f.write("Success mean\n")
        writer = csv.writer(f)
        writer.writerows(success_table[:, :, 0])
        f.write("Success std\n")
        writer.writerows(success_table[:, :, 1])
        f.write("Collision mean\n")
        writer.writerows(collision_table[:, :, 0])
        f.write("Collision std\n")
        writer.writerows(collision_table[:, :, 1])
        f.write("Time mean\n")
        writer.writerows(time_table[:, :, 0])
        f.write("Time std\n")
        writer.writerows(time_table[:, :, 1])
if __name__ == '__main__':
    # Wrap entire execution in curses wrapper to protect terminal against ugly end states.
    parser = get_args()
    init_args_for_env(parser)
    # `args` is intentionally module-level: run_eval and its helpers read it.
    args = parser.parse_args()
    if args.display:
        wrapper(run_eval)
    else:
        run_eval(None)
| [
"utils.game_tracker.GameTracker.from_file",
"csv.writer",
"utils.data.init",
"numpy.std",
"curses.wrapper",
"args.get_args",
"numpy.random.randint",
"numpy.mean",
"nns.probe.Probe",
"action_utils.parse_action_args",
"comm.CommNetMLP",
"os.path.join",
"os.listdir"
] | [((734, 769), 'utils.game_tracker.GameTracker.from_file', 'GameTracker.from_file', (['tracker_path'], {}), '(tracker_path)\n', (755, 769), False, 'from utils.game_tracker import GameTracker\n'), ((3091, 3128), 'utils.data.init', 'data.init', (['args.env_name', 'args', '(False)'], {}), '(args.env_name, args, False)\n', (3100, 3128), False, 'from utils import data\n'), ((3782, 3805), 'action_utils.parse_action_args', 'parse_action_args', (['args'], {}), '(args)\n', (3799, 3805), False, 'from action_utils import parse_action_args\n'), ((9401, 9411), 'args.get_args', 'get_args', ([], {}), '()\n', (9409, 9411), False, 'from args import get_args\n'), ((874, 916), 'nns.probe.Probe', 'Probe', (['c_dim', 'probe_pred_dim'], {'num_layers': '(3)'}), '(c_dim, probe_pred_dim, num_layers=3)\n', (879, 916), False, 'from nns.probe import Probe\n'), ((1433, 1475), 'nns.probe.Probe', 'Probe', (['c_dim', 'probe_pred_dim'], {'num_layers': '(3)'}), '(c_dim, probe_pred_dim, num_layers=3)\n', (1438, 1475), False, 'from nns.probe import Probe\n'), ((2353, 2388), 'os.path.join', 'os.path.join', (['load_path', '"""model.pt"""'], {}), "(load_path, 'model.pt')\n", (2365, 2388), False, 'import os\n'), ((3851, 3878), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (3868, 3878), True, 'import numpy as np\n'), ((4031, 4077), 'comm.CommNetMLP', 'CommNetMLP', (['args', 'num_inputs'], {'train_mode': '(False)'}), '(args, num_inputs, train_mode=False)\n', (4041, 4077), False, 'from comm import CommNetMLP\n'), ((9502, 9519), 'curses.wrapper', 'wrapper', (['run_eval'], {}), '(run_eval)\n', (9509, 9519), False, 'from curses import wrapper\n'), ((2288, 2309), 'os.listdir', 'os.listdir', (['load_path'], {}), '(load_path)\n', (2298, 2309), False, 'import os\n'), ((4652, 4682), 'utils.data.init', 'data.init', (['args.env_name', 'args'], {}), '(args.env_name, args)\n', (4661, 4682), False, 'from utils import data\n'), ((7667, 7689), 'numpy.mean', 'np.mean', 
(['succ_for_seed'], {}), '(succ_for_seed)\n', (7674, 7689), True, 'import numpy as np\n'), ((7749, 7770), 'numpy.std', 'np.std', (['succ_for_seed'], {}), '(succ_for_seed)\n', (7755, 7770), True, 'import numpy as np\n'), ((7832, 7859), 'numpy.mean', 'np.mean', (['collision_for_seed'], {}), '(collision_for_seed)\n', (7839, 7859), True, 'import numpy as np\n'), ((7921, 7947), 'numpy.std', 'np.std', (['collision_for_seed'], {}), '(collision_for_seed)\n', (7927, 7947), True, 'import numpy as np\n'), ((8004, 8026), 'numpy.mean', 'np.mean', (['time_for_seed'], {}), '(time_for_seed)\n', (8011, 8026), True, 'import numpy as np\n'), ((8083, 8104), 'numpy.std', 'np.std', (['time_for_seed'], {}), '(time_for_seed)\n', (8089, 8104), True, 'import numpy as np\n'), ((8665, 8678), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (8675, 8678), False, 'import csv\n'), ((6346, 6376), 'utils.data.init', 'data.init', (['args.env_name', 'args'], {}), '(args.env_name, args)\n', (6355, 6376), False, 'from utils import data\n')] |
"""
Dqn agent that records RGB screenshots and RAM during training.
"""
from rl.agents.dqn import DQNAgent
import warnings
from copy import deepcopy
import numpy as np
from keras.callbacks import History
from rl.callbacks import (
CallbackList,
TestLogger,
TrainEpisodeLogger,
TrainIntervalLogger,
Visualizer
)
class NewDQNAgent(DQNAgent):
    """DQN agent whose `new_fit` mirrors the stock keras-rl training loop
    but additionally snapshots the emulator RAM each step and forwards
    save-interval options to the callbacks via the episode logs."""
    def __init__(self, *args, **kwargs):
        super(NewDQNAgent, self).__init__(*args, **kwargs)
        print("Using new agent!")
    def new_fit(self, env, nb_steps, action_repetition=1, callbacks=None, verbose=1,
            visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000,
            nb_max_episode_steps=None, save_every_episode=1, save_every_step=1):
        """Trains the agent on the given environment.
        Save both RGB images and RAM to /train_history/environments/
        # Arguments
            env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
            nb_steps (integer): Number of training steps to be performed.
            action_repetition (integer): Number of times the agent repeats the same action without
                observing the environment again. Setting this to a value > 1 can be useful
                if a single action only has a very small effect on the environment.
            callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
                List of callbacks to apply during training. See [callbacks](/callbacks) for details.
            verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
            visualize (boolean): If `True`, the environment is visualized during training. However,
                this is likely going to slow down training significantly and is thus intended to be
                a debugging instrument.
            nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
                of each episode using `start_step_policy`. Notice that this is an upper limit since
                the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
                at the beginning of each episode.
            start_step_policy (`lambda observation: action`): The policy
                to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
            log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
            nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
                automatically resetting the environment. Set to `None` if each episode should run
                (potentially indefinitely) until the environment signals a terminal state.
            save_every_episode (integer): Forwarded to callbacks through the episode logs;
                presumably the episode interval at which observations are saved — confirm
                against the callback that consumes it.
            save_every_step (integer): Forwarded to callbacks through the episode logs;
                presumably the step interval for saving — confirm against the consumer.
        # Returns
            A `keras.callbacks.History` instance that recorded the entire training process.
        """
        if not self.compiled:
            raise RuntimeError('Your tried to fit your agent but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.')
        if action_repetition < 1:
            raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
        self.training = True
        callbacks = [] if not callbacks else callbacks[:]
        if verbose == 1:
            callbacks += [TrainIntervalLogger(interval=log_interval)]
        elif verbose > 1:
            callbacks += [TrainEpisodeLogger()]
        if visualize:
            callbacks += [Visualizer()]
        history = History()
        callbacks += [history]
        callbacks = CallbackList(callbacks)
        if hasattr(callbacks, 'set_model'):
            callbacks.set_model(self)
        else:
            callbacks._set_model(self)
        callbacks._set_env(env)
        params = {
            'nb_steps': nb_steps,
        }
        if hasattr(callbacks, 'set_params'):
            callbacks.set_params(params)
        else:
            callbacks._set_params(params)
        self._on_train_begin()
        callbacks.on_train_begin()
        episode = np.int16(0)
        self.step = np.int16(0)
        observation = None
        episode_reward = None
        episode_step = None
        did_abort = False
        try:
            while self.step < nb_steps:
                if observation is None:  # start of a new episode
                    callbacks.on_episode_begin(episode)
                    episode_step = np.int16(0)
                    episode_reward = np.float32(0)
                    # Obtain the initial observation by resetting the environment.
                    self.reset_states()
                    observation = deepcopy(env.reset())
                    if self.processor is not None:
                        observation = self.processor.process_observation(observation)
                    assert observation is not None
                    # Perform random starts at beginning of episode and do not record them into the experience.
                    # This slightly changes the start position between games.
                    nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
                    for _ in range(nb_random_start_steps):
                        if start_step_policy is None:
                            action = env.action_space.sample()
                        else:
                            action = start_step_policy(observation)
                        if self.processor is not None:
                            action = self.processor.process_action(action)
                        callbacks.on_action_begin(action)
                        observation, reward, done, info = env.step(action)
                        observation = deepcopy(observation)
                        if self.processor is not None:
                            observation, reward, done, info = self.processor.process_step(observation, reward, done, info)
                        callbacks.on_action_end(action)
                        if done:
                            warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
                            observation = deepcopy(env.reset())
                            if self.processor is not None:
                                observation = self.processor.process_observation(observation)
                            break
                # At this point, we expect to be fully initialized.
                assert episode_reward is not None
                assert episode_step is not None
                assert observation is not None
                # Run a single step.
                callbacks.on_step_begin(episode_step)
                # This is were all of the work happens. We first perceive and compute the action
                # (forward step) and then use the reward to improve (backward step).
                action = self.forward(observation)
                if self.processor is not None:
                    action = self.processor.process_action(action)
                reward = np.float32(0)
                accumulated_info = {}
                done = False
                for _ in range(action_repetition):
                    callbacks.on_action_begin(action)
                    observation, r, done, info = env.step(action)
                    # Snapshot the emulator RAM alongside the RGB observation
                    # (requires an env whose unwrapped form exposes _get_ram,
                    # e.g. Atari/ALE environments).
                    observation_ram = deepcopy(env.unwrapped._get_ram())
                    observation = deepcopy(observation)
                    if self.processor is not None:
                        observation, r, done, info = self.processor.process_step(observation, r, done, info)
                    for key, value in info.items():
                        if not np.isreal(value):
                            continue
                        if key not in accumulated_info:
                            accumulated_info[key] = np.zeros_like(value)
                        accumulated_info[key] += value
                    callbacks.on_action_end(action)
                    reward += r
                    if done:
                        break
                if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
                    # Force a terminal state.
                    done = True
                metrics = self.backward(reward, terminal=done)
                episode_reward += reward
                # 'observation_ram' is an extra field compared with stock
                # keras-rl step logs.
                step_logs = {
                    'action': action,
                    'observation': observation,
                    'reward': reward,
                    'metrics': metrics,
                    'episode': episode,
                    'info': accumulated_info,
                    'observation_ram': observation_ram
                }
                callbacks.on_step_end(episode_step, step_logs)
                episode_step += 1
                self.step += 1
                if done:
                    # We are in a terminal state but the agent hasn't yet seen it. We therefore
                    # perform one more forward-backward call and simply ignore the action before
                    # resetting the environment. We need to pass in `terminal=False` here since
                    # the *next* state, that is the state of the newly reset environment, is
                    # always non-terminal by convention.
                    self.forward(observation)
                    self.backward(0., terminal=False)
                    # This episode is finished, report and reset.  The extra
                    # save_* / env_name keys are presumably consumed by a
                    # recording callback — confirm against its implementation.
                    episode_logs = {
                        'episode_reward': episode_reward,
                        'nb_episode_steps': episode_step,
                        'nb_steps': self.step,
                        'save_observations': True,
                        'save_every_episode': save_every_episode,
                        'save_every_step': save_every_step,
                        'env_name': env.unwrapped.spec.id
                    }
                    callbacks.on_episode_end(episode, episode_logs)
                    episode += 1
                    observation = None
                    episode_step = None
                    episode_reward = None
        except KeyboardInterrupt:
            # We catch keyboard interrupts here so that training can be be safely aborted.
            # This is so common that we've built this right into this function, which ensures that
            # the `on_train_end` method is properly called.
            did_abort = True
        callbacks.on_train_end(logs={'did_abort': did_abort})
        self._on_train_end()
        return history
| [
"rl.callbacks.TrainEpisodeLogger",
"rl.callbacks.TrainIntervalLogger",
"copy.deepcopy",
"keras.callbacks.History",
"numpy.isreal",
"numpy.zeros_like",
"numpy.float32",
"numpy.random.randint",
"rl.callbacks.Visualizer",
"numpy.int16",
"rl.callbacks.CallbackList"
] | [((3633, 3642), 'keras.callbacks.History', 'History', ([], {}), '()\n', (3640, 3642), False, 'from keras.callbacks import History\n'), ((3694, 3717), 'rl.callbacks.CallbackList', 'CallbackList', (['callbacks'], {}), '(callbacks)\n', (3706, 3717), False, 'from rl.callbacks import CallbackList, TestLogger, TrainEpisodeLogger, TrainIntervalLogger, Visualizer\n'), ((4175, 4186), 'numpy.int16', 'np.int16', (['(0)'], {}), '(0)\n', (4183, 4186), True, 'import numpy as np\n'), ((4207, 4218), 'numpy.int16', 'np.int16', (['(0)'], {}), '(0)\n', (4215, 4218), True, 'import numpy as np\n'), ((3435, 3477), 'rl.callbacks.TrainIntervalLogger', 'TrainIntervalLogger', ([], {'interval': 'log_interval'}), '(interval=log_interval)\n', (3454, 3477), False, 'from rl.callbacks import CallbackList, TestLogger, TrainEpisodeLogger, TrainIntervalLogger, Visualizer\n'), ((3601, 3613), 'rl.callbacks.Visualizer', 'Visualizer', ([], {}), '()\n', (3611, 3613), False, 'from rl.callbacks import CallbackList, TestLogger, TrainEpisodeLogger, TrainIntervalLogger, Visualizer\n'), ((7274, 7287), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (7284, 7287), True, 'import numpy as np\n'), ((3531, 3551), 'rl.callbacks.TrainEpisodeLogger', 'TrainEpisodeLogger', ([], {}), '()\n', (3549, 3551), False, 'from rl.callbacks import CallbackList, TestLogger, TrainEpisodeLogger, TrainIntervalLogger, Visualizer\n'), ((4540, 4551), 'numpy.int16', 'np.int16', (['(0)'], {}), '(0)\n', (4548, 4551), True, 'import numpy as np\n'), ((4589, 4602), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (4599, 4602), True, 'import numpy as np\n'), ((7633, 7654), 'copy.deepcopy', 'deepcopy', (['observation'], {}), '(observation)\n', (7641, 7654), False, 'from copy import deepcopy\n'), ((5240, 5277), 'numpy.random.randint', 'np.random.randint', (['nb_max_start_steps'], {}), '(nb_max_start_steps)\n', (5257, 5277), True, 'import numpy as np\n'), ((5853, 5874), 'copy.deepcopy', 'deepcopy', (['observation'], {}), 
'(observation)\n', (5861, 5874), False, 'from copy import deepcopy\n'), ((7898, 7914), 'numpy.isreal', 'np.isreal', (['value'], {}), '(value)\n', (7907, 7914), True, 'import numpy as np\n'), ((8061, 8081), 'numpy.zeros_like', 'np.zeros_like', (['value'], {}), '(value)\n', (8074, 8081), True, 'import numpy as np\n')] |
## MY FIRST NEURAL NETWORK !!! WITH ACCURACY 93%
## This NN can tell whether an image is a triangle or not
# increase the shapes' sizes => increase accuracy (easier to recognize)
# introducing noise (50,20,20) => decrease accuracy by ~3%
# using tanh as activation function instead of sigmoid => the result converge faster
# increase nepoch => increase accuracy
# still 2 hidden layers, 1 layer has more neurons => increase accuracy, slower training speed
# decrease isize => faster training speed
# => use average pooling to increase the training speed!!! (IMPLEMENT THIS NOW)
# ReLU seems to have much careful steps, but the convergence is not as fast as tanh
# using (tanh, tanh, sigmoid) can achieve the error rate of 0.0005 in 300 epochs :) and error rate of 0.0 after 400 epochs on training set :)
##import os
##os.environ["CUDA_PATH"] = "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.2"
##import cupy as cp
import numpy as np
import random
import time
from winsound import Beep
# generate a random 64x64 image of a shape: square, triangle
def genSquare(size=64):
    """Return a size x size binary image containing one random filled square."""
    side = random.randint(size // 3, size)  # edge length of the square
    canvas = np.zeros((size, size))
    top, left = [random.randint(0, size - side) for _ in range(2)]
    canvas[top:top + side, left:left + side] = 1
    return canvas
def genTriangle(size=64): # only odd length base
    """Return a size x size binary image containing one random filled triangle."""
    height = random.randint(size >> 2, (size + 1) >> 1)
    canvas = np.zeros((size, size))
    row0 = random.randint(0, size - height)
    col0 = random.randint(0, size - (height << 1) + 1)
    # Row r holds 2*r + 1 pixels centred under the apex.
    for r in range(height):
        canvas[row0 + r, col0 + height - r - 1:col0 + height + r] = 1
    return canvas
def genNoise(size=64):
    """Return a size x size image of uniform random binary noise."""
    return np.random.choice([0.0, 1.0], size=(size, size))
def genLabeledDataset(n, size=64): # 0.5 triangle, 0.25 square, 0.25 noise
    """Generate n labelled images: label 1 for triangles, 0 otherwise."""
    data, labels = [], []
    for _ in range(n):
        bits = random.getrandbits(2)
        if bits >> 1:
            data.append(genTriangle(size))
            labels.append(1)
        else:
            data.append(genSquare(size) if bits & 1 else genNoise(size))
            labels.append(0)
    # Labels get two trailing singleton axes because the binary classifier's
    # last layer has a single neuron.
    return np.array(data), np.array(labels)[:, np.newaxis, np.newaxis]
def draw(shape):
    """Print *shape* with dark blocks for truthy pixels, light blocks otherwise."""
    rendered_rows = []
    for row in shape:
        rendered_rows.append(''.join('▓' if pixel else '░' for pixel in row))
    print('\n'.join(rendered_rows))
def sigmoid(x, deriv=False):
    """Logistic sigmoid activation; with deriv=True return its derivative."""
    s = 1.0 / (1.0 + np.exp(-x))
    if deriv:
        return s * (1 - s)
    return s
def tanh(x, deriv=False):
    """Hyperbolic tangent activation; with deriv=True return its derivative."""
    if deriv:
        # d/dx tanh(x) = 1 / cosh(x)^2
        cosh_x = np.cosh(x)
        return np.reciprocal(cosh_x ** 2)
    return np.tanh(x)
def ReLU(x, deriv=False):
    """Rectified linear unit; with deriv=True return the 0/1 gradient mask."""
    positive = x > 0
    if deriv:
        return positive
    return x * positive
def main():
    """Train a small fully-connected network to recognise triangles, then
    interactively test it on freshly generated shapes."""
    startTime = time.time()
    print('--- Initializing ---')
    dsize, tsize, isize = 10000, 100, 10 # train & test may overlap
    nepoch = 5
    step = 1 # length of the step to take in gradient descent
    print('Training set size: ' + str(dsize) + ' images')
    print('Testing set size: ' + str(tsize) + ' images')
    print('Image size: ' + str(isize) + 'x' + str(isize))
    print('#epochs:', nepoch)
    print('Step:', step)
    # we use a NN that have 4 layers with config (isize**2, 32, 32, 1), each number represent the number of neurons in each layer
    n1, n2 = 64, 64
    print('--- Initialize the neural network (' + str(n1) + ',' + str(n2) + ',1) ---')
    # Weights and biases for the three dense layers, standard-normal init.
    w1 = np.random.randn(n1, isize**2)
    b1 = np.random.randn(n1, 1)
    w2 = np.random.randn(n2, n1)
    b2 = np.random.randn(n2, 1)
    w3 = np.random.randn(1, n2)
    b3 = np.random.randn(1, 1)
    print('--- Generating dataset ---')
    images, labels = genLabeledDataset(n=dsize, size=isize)
    # show an example from the generated dataset
    print('--- Example ---')
    exImage, exLabel = random.choice(list(zip(images,labels)))
    print('Label:', exLabel)
    print('Image:')
    draw(exImage)
    # preprocessing
    print('--- Preprocessing images ---')
    data = images.reshape((dsize, -1, 1)) # data in the first layer l0
    # the activation function
    activate = tanh
    activateLast = sigmoid
    # actual learning process - each epoch we forward `dsize` images, then backprop 1 time to update the NN
    print('\n--- Training ---')
    error = 1
    while True:
        for epoch in range(nepoch):
            print('Epoch #' + str(epoch) + '/' + str(nepoch))
            # forward propagation - the prediction
            a0s = data # feeding `dsize` images at the same time!
            a1s = np.array([activate(w1 @ a0 + b1) for a0 in a0s])
            a2s = np.array([activate(w2 @ a1 + b2) for a1 in a1s])
            a3s = np.array([activateLast(w3 @ a2 + b3) for a2 in a2s])
            # the errors
            oldError, error = error, (np.array([round(a3) for a3 in a3s.reshape(-1)]) ^ labels.reshape(-1)).sum() / dsize
            print('Error rate:', error)
            if error > oldError:
                # Error went up: halve the step size (simple LR decay).
                step *= 0.5
                print('Step changed:', step)
            # back propagation function - to update weigths and biases to do the gradient descent
            # take the sum first, then normalize afterwards
            db3s = np.array([2 * (a3 - y) * activateLast(w3 @ a2 + b3, deriv=True) for a2,a3,y in zip(a2s,a3s,labels)])
            dw3s = np.array([db3 * a2.T for a2,db3 in zip(a2s,db3s)])
            da2s = np.array([w3.T @ db3 for db3 in db3s])
            db2s = np.array([da2 * activate(w2 @ a1 + b2, deriv=True) for a1,da2 in zip(a1s,da2s)])
            dw2s = np.array([db2 * a1.T for a1,db2 in zip(a1s,db2s)])
            da1s = np.array([w2.T @ db2 for db2 in db2s])
            db1s = np.array([da1 * activate(w1 @ a0 + b1, deriv=True) for a0,da1 in zip(a0s,da1s)])
            dw1s = np.array([db1 * a0.T for a0,db1 in zip(a0s,db1s)])
            # sum all the opinions of the dataset, then normalize the coordinates to take the given `step`
            dw1, db1 = dw1s.sum(axis=0), db1s.sum(axis=0)
            dw2, db2 = dw2s.sum(axis=0), db2s.sum(axis=0)
            dw3, db3 = dw3s.sum(axis=0), db3s.sum(axis=0)
            # the minus sign: minimize the cost function, since gradient maximizes it
            denom = sum([(dx**2).sum() for dx in [dw1,db1,dw2,db2,dw3,db3]]) ** 0.5 + 1e-300 # guard against divide by 0
            dw1 *= -step / denom
            db1 *= -step / denom
            dw2 *= -step / denom
            db2 *= -step / denom
            dw3 *= -step / denom
            db3 *= -step / denom
            # gradient descent
            w1 += dw1
            b1 += db1
            w2 += dw2
            b2 += db2
            w3 += dw3
            b3 += db3
        print('\nTraining completed!')
        print('Time elapsed: ' + str(time.time() - startTime) + ' seconds.')
        #Beep(440, 10000)
        cont = input('Continue to learn? (y?) ')
        if cont != 'y':
            break
    # 1: triangle, 0: not triangle
    def isTriangle(image):
        # layer 0
        x = image.reshape(-1, 1)
        # layer 1
        x = activate(w1 @ x + b1)
        # layer 2
        x = activate(w2 @ x + b2)
        # layer 3
        x = activateLast(w3 @ x + b3)
        return round(x.item())
    print('\n--- Testing ---')
    while True:
        terror = 0
        print('Testing set size: ' + str(tsize))
        for test in range(tsize):
            # Same class mix as the training set: 2 bits -> triangle 50%,
            # square 25%, noise 25%.
            shapeID = random.getrandbits(2)
            if shapeID >> 1:
                shape = genTriangle(size=isize)
            else:
                if shapeID & 1:
                    shape = genSquare(size=isize)
                else:
                    shape = genNoise(size=isize)
            error_ = (shapeID >> 1) ^ isTriangle(shape)
            terror += error_
            if error_:
                print('===== Test #' + str(test) + ' =====')
                print('Label:', shapeID >> 1)
                print('Predicted:', (shapeID >> 1) ^ 1)
                draw(shape)
        print('Error rate: ' + str(terror / tsize))
        cont = input('Continue to test? (y?) ')
        if cont != 'y':
            break
    print('--- See you later! ---')
# Script entry point: run the interactive train/test loop only when executed
# directly, not when imported.
if __name__ == '__main__':
    main()
| [
"numpy.tanh",
"random.randint",
"numpy.random.randn",
"random.getrandbits",
"numpy.zeros",
"time.time",
"numpy.array",
"numpy.exp",
"numpy.random.choice",
"numpy.cosh"
] | [((1125, 1156), 'random.randint', 'random.randint', (['(size // 3)', 'size'], {}), '(size // 3, size)\n', (1139, 1156), False, 'import random\n'), ((1181, 1203), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (1189, 1203), True, 'import numpy as np\n'), ((1391, 1431), 'random.randint', 'random.randint', (['(size >> 2)', '(size + 1 >> 1)'], {}), '(size >> 2, size + 1 >> 1)\n', (1405, 1431), False, 'import random\n'), ((1453, 1475), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (1461, 1475), True, 'import numpy as np\n'), ((1485, 1516), 'random.randint', 'random.randint', (['(0)', '(size - tsize)'], {}), '(0, size - tsize)\n', (1499, 1516), False, 'import random\n'), ((1525, 1567), 'random.randint', 'random.randint', (['(0)', '(size - (tsize << 1) + 1)'], {}), '(0, size - (tsize << 1) + 1)\n', (1539, 1567), False, 'import random\n'), ((1702, 1744), 'numpy.random.choice', 'np.random.choice', (['[0.0, 1.0]', '(size, size)'], {}), '([0.0, 1.0], (size, size))\n', (1718, 1744), True, 'import numpy as np\n'), ((2741, 2751), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (2748, 2751), True, 'import numpy as np\n'), ((2874, 2885), 'time.time', 'time.time', ([], {}), '()\n', (2883, 2885), False, 'import time\n'), ((3560, 3591), 'numpy.random.randn', 'np.random.randn', (['n1', '(isize ** 2)'], {}), '(n1, isize ** 2)\n', (3575, 3591), True, 'import numpy as np\n'), ((3600, 3622), 'numpy.random.randn', 'np.random.randn', (['n1', '(1)'], {}), '(n1, 1)\n', (3615, 3622), True, 'import numpy as np\n'), ((3633, 3656), 'numpy.random.randn', 'np.random.randn', (['n2', 'n1'], {}), '(n2, n1)\n', (3648, 3656), True, 'import numpy as np\n'), ((3667, 3689), 'numpy.random.randn', 'np.random.randn', (['n2', '(1)'], {}), '(n2, 1)\n', (3682, 3689), True, 'import numpy as np\n'), ((3700, 3722), 'numpy.random.randn', 'np.random.randn', (['(1)', 'n2'], {}), '(1, n2)\n', (3715, 3722), True, 'import numpy as np\n'), ((3733, 3754), 
'numpy.random.randn', 'np.random.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (3748, 3754), True, 'import numpy as np\n'), ((1218, 1249), 'random.randint', 'random.randint', (['(0)', '(size - ssize)'], {}), '(0, size - ssize)\n', (1232, 1249), False, 'import random\n'), ((1884, 1905), 'random.getrandbits', 'random.getrandbits', (['(2)'], {}), '(2)\n', (1902, 1905), False, 'import random\n'), ((2190, 2204), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2198, 2204), True, 'import numpy as np\n'), ((2206, 2222), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2214, 2222), True, 'import numpy as np\n'), ((2550, 2560), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (2556, 2560), True, 'import numpy as np\n'), ((5576, 5616), 'numpy.array', 'np.array', (['[(w3.T @ db3) for db3 in db3s]'], {}), '([(w3.T @ db3) for db3 in db3s])\n', (5584, 5616), True, 'import numpy as np\n'), ((5809, 5849), 'numpy.array', 'np.array', (['[(w2.T @ db2) for db2 in db2s]'], {}), '([(w2.T @ db2) for db2 in db2s])\n', (5817, 5849), True, 'import numpy as np\n'), ((7631, 7652), 'random.getrandbits', 'random.getrandbits', (['(2)'], {}), '(2)\n', (7649, 7652), False, 'import random\n'), ((2714, 2724), 'numpy.cosh', 'np.cosh', (['x'], {}), '(x)\n', (2721, 2724), True, 'import numpy as np\n'), ((6962, 6973), 'time.time', 'time.time', ([], {}), '()\n', (6971, 6973), False, 'import time\n')] |
import copy
import logging
import os
from collections import defaultdict, deque, namedtuple
import numpy as np
from ray.tune.schedulers import FIFOScheduler, TrialScheduler
from ray.tune.suggest.variant_generator import format_vars
from ray.tune.trial import Checkpoint, Trial
from tune_tf2.defaults import EXPLOIT_CSV, HPS_CSV, PBT_CSV
from tune_tf2.pbt import exploiters, explorers
from tune_tf2.pbt.hps import HyperParam
# use the ray logger so scheduler messages flow through Ray/Tune's logging setup
logger = logging.getLogger("ray.tune.schedulers.pbt")
# Immutable per-generation snapshot of a trial:
#   orig_tag               -- the experiment tag the trial was created with
#   score                  -- metric value signed so that larger is always better
#   ckpt                   -- checkpoint used for restoring/cloning the trial
#   generation             -- number of results the trial has reported so far
#   last_perturbation_time -- value of the scheduler's time attribute at this record
TrialState = namedtuple(
    "TrialState",
    [
        "orig_tag",
        "score",
        "ckpt",
        "generation",
        "last_perturbation_time",
    ],
)
def make_experiment_tag(orig_tag, config, mutations):
    """Appends perturbed params to the trial name to show in the console."""
    # Collect only the mutated keys, namespaced under "config" for format_vars.
    resolved = {("config", key): config[key] for key in mutations}
    return "{}@perturbed[{}]".format(orig_tag, format_vars(resolved))
class MultiStrategyPBT(FIFOScheduler):
    """A scheduler for Population Based Training.
    The main job of the scheduler is to decide which models
    and hyperparameter combinations will be run at each
    generation.
    """
    def __init__(
        self,
        hyperparam_space,
        exploit_method="binary_tournament",
        explore_method="perturb",
        time_attr="epoch",
        metric="smth_val_nll_heldin",
        mode="min",
        patience=4,
        max_generations=50,
        min_percent_improvement=0.0005,
    ):
        """Creates a MultiStrategyPBT scheduler.
        Parameters
        ----------
        hyperparam_space : dict of tune_tf2.pbt.HyperParam
            A dictionary mapping hyperparameter config names to
            HyperParam objects. It specifies allowable mutations
            for the hyperparameters.
        exploit_method : str, optional
            The method to use for exploitation, must be defined
            in tune_tf2.pbt.exploiters, by default "binary_tournament"
        explore_method : str, optional
            The method to use for exploration, must be defined
            in tune_tf2.pbt.explorers, by default "perturb"
        time_attr : str, optional
            The result attribute to use for tracking time,
            by default 'epoch'
        metric : str, optional
            The metric to optimize during PBT, by default
            "smth_val_nll_heldin"
        mode : {"min", "max"}, optional
            Whether to minimize or maximize the metric, by
            default "min"
        patience : int, optional
            The number of generations to use for determining if
            performance is still decreasing, by default 4
        max_generations : int, optional
            The maximum number of generations to train for, by default 50
        min_percent_improvement : float, optional
            The minimum percent improvement in metric per generation to
            allow training to continue, by default 0.0005
        """
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Recursively validate that every leaf of the (possibly nested)
        # hyperparameter space is a HyperParam instance.
        def check_hp_space(space):
            for value in space.values():
                if isinstance(value, dict):
                    check_hp_space(value)
                elif not isinstance(value, HyperParam):
                    raise TypeError(
                        "`hyperparam_space` must be a hierarchical "
                        "dict of `HyperParam` objects."
                    )
        check_hp_space(hyperparam_space)
        FIFOScheduler.__init__(self)
        self._hyperparam_space = hyperparam_space
        self._time_attr = time_attr
        self._generation = 1
        self._max_generations = max_generations
        self._exploit_method = exploit_method
        self._explore_method = explore_method
        # Resolve the strategy names to callables; raises AttributeError
        # immediately if an unknown strategy name is given.
        self._exploit = getattr(exploiters, exploit_method)
        self._explore = getattr(explorers, explore_method)
        self._metric = metric
        # Per-trial histories: one TrialState / raw result dict appended
        # per generation for each trial.
        self._trial_state = defaultdict(list)
        self._trial_result = defaultdict(list)
        # best_scores is a circular buffer
        self._best_scores = deque(maxlen=patience)
        self._min_percent_improvement = min_percent_improvement
        self._percent_improvement = 0.0
        # Scores are always maximized internally; for mode="min" the
        # metric is negated via this multiplier.
        if mode == "max":
            self._metric_op = 1.0
        elif mode == "min":
            self._metric_op = -1.0
        self._num_perturbations = 0
    def on_trial_add(self, trial_runner, trial):
        """Called when a new trial is added to the trial runner."""
        # Seed the trial's history with an empty generation-0 state.
        trial_state = TrialState(
            orig_tag=trial.experiment_tag,
            score=None,
            ckpt=None,
            generation=0,
            last_perturbation_time=0,
        )
        self._trial_state[trial].append(trial_state)
    def on_trial_result(self, trial_runner, trial, result):
        """Called on each intermediate result returned by a trial.
        At this point, the trial scheduler can make a decision by returning
        one of CONTINUE, PAUSE, and STOP. This will only be called when the
        trial is in the RUNNING state."""
        prev_state = self._trial_state[trial][-1]
        time = result[self._time_attr]
        # save the state of this trial
        current_ckpt = trial_runner.trial_executor.save(
            trial, Checkpoint.MEMORY, result=result
        )
        # Score is signed by `self._metric_op` so larger is always better.
        current_state = TrialState(
            orig_tag=trial.experiment_tag,
            score=self._metric_op * result[self._metric],
            ckpt=current_ckpt,
            generation=prev_state.generation + 1,
            last_perturbation_time=time,
        )
        self._trial_state[trial].append(current_state)
        self._trial_result[trial].append(result)
        # wait for all of the other trials to finish
        all_trials = trial_runner.get_trials()
        other_trials = [t for t in all_trials if t != trial]
        for t in all_trials:
            state = self._trial_state[t][-1]
            # stop all of the trials if any of them is finished
            # TODO: fix this for early stopping.
            if t.status == Trial.TERMINATED:
                self._stop_trials(trial_runner, other_trials)
                return TrialScheduler.STOP
            # pause this trial until every trial has reached the current
            # generation
            if state.generation < self._generation:
                return TrialScheduler.PAUSE
        # record hyperparameters of this generation
        self._log_generation_config(all_trials)
        # stop everything if we have reached the final generation
        if self._generation >= self._max_generations:
            self._stop_trials(trial_runner, other_trials)
            return TrialScheduler.STOP
        # get the state of all trials for this generation
        def get_gen_state(t):
            return self._trial_state[t][self._generation]
        generation_state = {t: get_gen_state(t) for t in all_trials}
        # find the best metric for this generation and record in circular buffer
        best_score = max([s.score for s in generation_state.values()])
        self._best_scores.append(best_score)
        # check the percent improvement in the last `patience` generations.
        self._percent_improvement = (
            np.max(self._best_scores) - self._best_scores[0]
        ) / np.mean(np.abs(self._best_scores))
        # log the state of the PBT run
        pbt_state = {
            "generation": self._generation,
            "best_score": best_score,
            "percent_improvement": self._percent_improvement,
            "duration_sec": result["time_this_iter_s"],
            "epoch": result["epoch"],
        }
        self._log_pbt_state(trial.local_dir, pbt_state)
        # stop everything if the metric is not improving and buffer is full
        if (
            self._percent_improvement <= self._min_percent_improvement
            and len(self._best_scores) == self._best_scores.maxlen
        ):
            self._stop_trials(trial_runner, other_trials)
            return TrialScheduler.STOP
        # evolve if this is the last trial in the generation
        self._evolve_generation(trial_runner, trial, generation_state)
        self._generation += 1
        return TrialScheduler.CONTINUE
    def _stop_trials(self, trial_runner, trials):
        """ stops all trials in a list if they are not already terminated """
        for t in trials:
            if t.status != Trial.TERMINATED:
                trial_runner.trial_executor.stop_trial(t)
    def _log_generation_config(self, all_trials):
        """Saves the HP configuration of the generation to a CSV file."""
        gen_cfg_path = os.path.join(all_trials[0].local_dir, HPS_CSV)
        hp_names = sorted(self._hyperparam_space.keys())
        with open(gen_cfg_path, "a+") as gen_cfg_file:
            # write the header only when the file is new/empty
            if os.stat(gen_cfg_path).st_size == 0:
                header = ["generation", "trial_id"] + hp_names
                gen_cfg_file.write(",".join(header) + "\n")
            for trial in all_trials:
                hp_values = [str(trial.config[name]) for name in hp_names]
                data = [str(self._generation), trial.trial_id] + hp_values
                gen_cfg_file.write(",".join(data) + "\n")
    def _log_pbt_state(self, local_dir, pbt_state):
        """Saves the state of PBT training to a CSV file"""
        pbt_state_path = os.path.join(local_dir, PBT_CSV)
        state_header = sorted(pbt_state.keys())
        with open(pbt_state_path, "a+") as pbt_state_file:
            # write the header only when the file is new/empty
            if os.stat(pbt_state_path).st_size == 0:
                pbt_state_file.write(",".join(state_header) + "\n")
            data = [str(pbt_state[name]) for name in state_header]
            pbt_state_file.write(",".join(data) + "\n")
    def _log_exploit(self, old_state, new_state, old_trial, new_trial):
        """Keeps track of which models exploit which other models."""
        log_path = os.path.join(old_trial.local_dir, EXPLOIT_CSV)
        exploit_data = {
            "generation": self._generation,
            "old_trial": old_trial.trial_id,
            "new_trial": new_trial.trial_id,
            "old_score": old_state.score,
            "new_score": new_state.score,
        }
        header = sorted(exploit_data.keys())
        with open(log_path, "a+") as log_file:
            # write the header only when the file is new/empty
            if os.stat(log_path).st_size == 0:
                log_file.write(",".join(header) + "\n")
            data = [str(exploit_data[name]) for name in header]
            log_file.write(",".join(data) + "\n")
    def _evolve_generation(self, trial_runner, last_trial, generation_state):
        """Generates the next set of trials."""
        trial_executor = trial_runner.trial_executor
        for trial in trial_runner.get_trials():
            trial_state = copy.deepcopy(generation_state[trial])
            # returns a trial to clone or None if the trial should persist
            trial_to_clone = self._exploit(trial_runner, trial, generation_state)
            if trial_to_clone is not None:
                # returns a modified config for the next generation
                new_state = copy.deepcopy(generation_state[trial_to_clone])
                new_config = self._explore(
                    trial_to_clone.config, self._hyperparam_space
                )
                logger.info(
                    "[exploit] transferring weights from trial "
                    "{} (score {:.5E}) -> {} (score {:.5E})".format(
                        trial_to_clone, new_state.score, trial, trial_state.score
                    )
                )
                self._log_exploit(trial_state, new_state, trial, trial_to_clone)
                new_tag = make_experiment_tag(
                    trial_state.orig_tag, new_config, self._hyperparam_space
                )
                # install the perturbed config on this trial in-place
                reset_successful = trial_executor.reset_trial(
                    trial, new_config, new_tag
                )
                assert reset_successful, "Config transfer unsuccessful."
                # use the new state
                trial_state = new_state
                self._num_perturbations += 1
            # restart the trials using the appropriate checkpoints
            if trial == last_trial:
                trial_executor.restore(trial, trial_state.ckpt)
            else:
                trial_executor.start_trial(trial, trial_state.ckpt)
    def choose_trial_to_run(self, trial_runner):
        """Attempts to train all models to the same training iteration.
        This enables the PBT scheduler to support a greater number of
        concurrent trials than can fit in the cluster at any given time.
        """
        # only run trials that are still behind the current generation
        candidates = []
        for trial in trial_runner.get_trials():
            state = self._trial_state[trial][-1]
            if state.generation < self._generation and trial.status == Trial.PENDING:
                candidates.append(trial)
        return candidates[0] if candidates else None
    def debug_string(self):
        """Returns a human readable message for printing to the console."""
        pbt_mode = "Using PBT with `{}` explore and `{}` exploit. ".format(
            self._explore_method, self._exploit_method
        )
        pbt_state = "Generation {}/{}, {} Perturbs, {:.2E}% Improvement".format(
            self._generation,
            self._max_generations,
            self._num_perturbations,
            self._percent_improvement,
        )
        return pbt_mode + pbt_state
| [
"copy.deepcopy",
"ray.tune.schedulers.FIFOScheduler.__init__",
"numpy.abs",
"os.stat",
"collections.deque",
"ray.tune.suggest.variant_generator.format_vars",
"collections.defaultdict",
"numpy.max",
"collections.namedtuple",
"os.path.join",
"logging.getLogger"
] | [((456, 500), 'logging.getLogger', 'logging.getLogger', (['"""ray.tune.schedulers.pbt"""'], {}), "('ray.tune.schedulers.pbt')\n", (473, 500), False, 'import logging\n'), ((516, 615), 'collections.namedtuple', 'namedtuple', (['"""TrialState"""', "['orig_tag', 'score', 'ckpt', 'generation', 'last_perturbation_time']"], {}), "('TrialState', ['orig_tag', 'score', 'ckpt', 'generation',\n 'last_perturbation_time'])\n", (526, 615), False, 'from collections import defaultdict, deque, namedtuple\n'), ((954, 980), 'ray.tune.suggest.variant_generator.format_vars', 'format_vars', (['resolved_vars'], {}), '(resolved_vars)\n', (965, 980), False, 'from ray.tune.suggest.variant_generator import format_vars\n'), ((3540, 3568), 'ray.tune.schedulers.FIFOScheduler.__init__', 'FIFOScheduler.__init__', (['self'], {}), '(self)\n', (3562, 3568), False, 'from ray.tune.schedulers import FIFOScheduler, TrialScheduler\n'), ((4001, 4018), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4012, 4018), False, 'from collections import defaultdict, deque, namedtuple\n'), ((4048, 4065), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4059, 4065), False, 'from collections import defaultdict, deque, namedtuple\n'), ((4137, 4159), 'collections.deque', 'deque', ([], {'maxlen': 'patience'}), '(maxlen=patience)\n', (4142, 4159), False, 'from collections import defaultdict, deque, namedtuple\n'), ((8595, 8641), 'os.path.join', 'os.path.join', (['all_trials[0].local_dir', 'HPS_CSV'], {}), '(all_trials[0].local_dir, HPS_CSV)\n', (8607, 8641), False, 'import os\n'), ((9311, 9343), 'os.path.join', 'os.path.join', (['local_dir', 'PBT_CSV'], {}), '(local_dir, PBT_CSV)\n', (9323, 9343), False, 'import os\n'), ((9857, 9903), 'os.path.join', 'os.path.join', (['old_trial.local_dir', 'EXPLOIT_CSV'], {}), '(old_trial.local_dir, EXPLOIT_CSV)\n', (9869, 9903), False, 'import os\n'), ((10721, 10759), 'copy.deepcopy', 'copy.deepcopy', (['generation_state[trial]'], {}), 
'(generation_state[trial])\n', (10734, 10759), False, 'import copy\n'), ((7191, 7216), 'numpy.max', 'np.max', (['self._best_scores'], {}), '(self._best_scores)\n', (7197, 7216), True, 'import numpy as np\n'), ((7260, 7285), 'numpy.abs', 'np.abs', (['self._best_scores'], {}), '(self._best_scores)\n', (7266, 7285), True, 'import numpy as np\n'), ((11056, 11103), 'copy.deepcopy', 'copy.deepcopy', (['generation_state[trial_to_clone]'], {}), '(generation_state[trial_to_clone])\n', (11069, 11103), False, 'import copy\n'), ((8769, 8790), 'os.stat', 'os.stat', (['gen_cfg_path'], {}), '(gen_cfg_path)\n', (8776, 8790), False, 'import os\n'), ((9466, 9489), 'os.stat', 'os.stat', (['pbt_state_path'], {}), '(pbt_state_path)\n', (9473, 9489), False, 'import os\n'), ((10264, 10281), 'os.stat', 'os.stat', (['log_path'], {}), '(log_path)\n', (10271, 10281), False, 'import os\n')] |
from neuromp.preprocessing.tree import AST
from enum import IntEnum
from itertools import product
import subprocess
import time
import numpy as np
from copy import deepcopy
class VarStates(IntEnum):
    """OpenMP data-sharing state assignable to a variable in the pragma."""
    SHARED = 1
    PRIVATE = 2
    REDUCTION = 3
class Code(object):
    """RL environment wrapped around a C source file annotated with
    `#pragma neuromp`.

    Parses the file, builds `#pragma omp parallel for` lines from
    per-variable sharing states, compiles/runs both a sequential and a
    parallel version, and exposes step/reward hooks so an agent can search
    for the pragma with the best speed-up.

    NOTE(review): assumes `gcc` is on PATH and the working directory is
    writable (tmp_seq.c / tmp_par.c are created there) -- confirm.
    """
    def __init__(self, code):
        # `code` is a path to the C source file.
        self.ast = AST()
        self.statements = self.ast.parse(code)
        self.lines = self._getLines(code)
        # Line position of the first `for` loop; the pragma is inserted there.
        self.for_pos = self.ast.fors[0]
        self.pragmas = self._initPragmas()
        # TODO: change here
        # self.best_pragma = self._builtPragma()
        self.best_pragma = "Execution cannot find correct result"
        self.seq_time = None
        self.seq_output = None
        self.par_time = None
        self.par_output = None
        self.speed_up = 1.0
        # TODO: changed here
        self.max_speed_up = -1000
        self.best_time = 0
        # Action space: every (variable, sharing-state) pair.
        self.actions = list(product(self.ast.variables, list(VarStates)))
        # Run once up front to establish the reference output and baseline time.
        self.runSequential()
        self.total_time = self.seq_time
    def _getLines(self, code):
        # Read the source file, collapsing each line's internal whitespace.
        resp = []
        with open(code) as f:
            for l in f:
                l = l.rstrip()
                resp.append(' '.join(l.split()))
        return resp
    def _initPragmas(self):
        # Every variable starts in the SHARED state.
        resp = {}
        all_vars = self.ast.variables
        for v in all_vars:
            resp[v] = VarStates.SHARED
        return resp
    # TODO: should change here - the part where the pragmas are built for testing
    def _builtPragma(self):
        # Group variables by sharing state and emit a single
        # "#pragma omp parallel for shared(...) private(...) ..." line.
        groups = {}
        resp = "#pragma omp parallel for "
        for k, v in self.pragmas.items():
            if v.name not in groups:
                groups[v.name] = []
            groups[v.name].append(k)
        for k in groups:
            if k == "REDUCTION":
                # reductions currently always use the `+` operator
                resp += "{}(+:{}) ".format(k.lower(), ', '.join(groups[k]))
            else:
                resp += "{}({}) ".format(k.lower(), ', '.join(groups[k]))
        return resp.rstrip()
    def getEncodedPragma(self):
        # One integer per variable: 12 + state value, i.e. 13..15.
        resp = []
        all_vars = self.ast.variables
        for v in all_vars:
            resp.append(12 + self.pragmas[v].value)
        return resp
    def getInput(self):
        # Observation vector for the agent: the encoded pragma state.
        resp = self.getEncodedPragma()
        #for s in self.statements:
        #    resp += self.ast.preproStatement(s)
        return np.array(resp)
    def runParallel(self):
        """Write, compile and run the pragma-instrumented version.

        Returns (output, wall_time); on compile error or timeout the output
        is None and the time is the sentinel 1000.
        """
        tmp_lines = deepcopy(self.lines)
        tmp_lines.insert(self.for_pos - 1, self._builtPragma())
        #print(self._builtPragma())
        with open("tmp_par.c", "w") as f:
            # TODO: changed here, adding these next two lines
            f.write("#include <stdio.h>" + "\n")
            f.write("#include <omp.h>" + "\n")
            for l in tmp_lines:
                if l != "#pragma neuromp":
                    f.write(l + "\n")
        try:
            #gcc fast.c main.c -Wall -Wextra -O3 -I ../../include/ ../../lib/libcapb.a -lm -fopenmp
            # subprocess.check_output(['gcc', 'tmp_par.c', '-O3', '-lm', '-fopenmp', '-o', 'tmp_par'],
            subprocess.check_output(['gcc', 'tmp_par.c', '-fopenmp', '-o', 'tmp_par'],
                stderr=subprocess.STDOUT, universal_newlines=True)
        except subprocess.CalledProcessError as e:
            # TODO: changed the following line
            # print("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
            self.par_output = None
            self.par_time = 1000
            return self.par_output, self.par_time
        b = time.time()
        p = subprocess.Popen(['./tmp_par'],
                          universal_newlines=True,
                          stderr=subprocess.PIPE,
                          stdout=subprocess.PIPE)
        try:
            # TODO: changed the following line
            # self.par_output, error = p.communicate(timeout=100000 + self.seq_time)
            # Kill the parallel run if it is not at least as fast as sequential.
            self.par_output, error = p.communicate(timeout=self.seq_time)
            self.par_time = time.time() - b
            self.total_time += self.par_time
            self.par_output = self.par_output.rstrip()
        except subprocess.TimeoutExpired as exc:
            self.par_output = None
            self.par_time = 1000
            # TODO: removed the print
            # print("Status : TIMEOUT")
        return self.par_output, self.par_time
    def runSequential(self):
        """Write, compile and run the original (pragma-free) version.

        Returns (output, wall_time); compile failures raise RuntimeError.
        """
        with open("tmp_seq.c", "w") as f:
            f.write("#include <stdio.h>" + "\n")
            for l in self.lines:
                if l != "#pragma neuromp":
                    f.write(l + "\n")
        try:
            # subprocess.check_output(['gcc', 'tmp_seq.c', '-O3', '-lm', '-fopenmp', '-o', 'tmp_seq'],
            subprocess.check_output(['gcc', 'tmp_seq.c', '-o', 'tmp_seq'],
                stderr=subprocess.STDOUT, universal_newlines=True)
        except subprocess.CalledProcessError as e:
            raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
        b = time.time()
        p = subprocess.Popen(['./tmp_seq'],
                          universal_newlines=True,
                          stderr=subprocess.PIPE,
                          stdout=subprocess.PIPE)
        self.seq_output, error = p.communicate()
        self.seq_time = time.time() - b
        self.seq_output = self.seq_output.rstrip()
        return self.seq_output, self.seq_time
    def step(self, action):
        """Apply one action (set a variable's sharing state), evaluate the
        resulting pragma, and return (next_state, reward, done)."""
        a = self.actions[action]
        self.pragmas[a[0]] = a[1]
        # TODO: changed here
        # reward = self.getReward()
        reward, par_time_now = self.getReward()
        next_state = self.getInput()
        # TODO: change here
        # done = (reward >= self.max_speed_up)
        # done = (reward >= self.max_speed_up and reward != -1)
        print("Testando: " + self._builtPragma())
        if (reward >= self.max_speed_up and reward != -1):
            print("ENTROU")
            # new best: remember this pragma and its runtime
            self.max_speed_up = reward
            self.best_pragma = self._builtPragma()
            self.best_time = par_time_now
        # NOTE(review): `reward >= self.max_speed_up` is evaluated after
        # max_speed_up may have been updated to `reward` above -- confirm
        # this is the intended done condition.
        return next_state, reward, (reward >= self.max_speed_up)
    def render(self):
        # Print the current pragma line.
        print(self._builtPragma())
    def speedUp(self):
        # Sequential time divided by parallel time; > 1 means a speed-up.
        return self.seq_time / self.par_time
    def getReward(self):
        """Run the parallel version; reward is the speed-up when the output
        matches the sequential reference and is > 1, otherwise (-1, -1)."""
        self.runParallel()
        if self.seq_output == self.par_output:
            s = self.speedUp()
            if s > 1.0:
                return s, self.par_time
            else:
                return -1, -1
        else:
            return -1, -1
    def reset(self):
        # Restore all variables to SHARED and return the fresh observation.
        self.pragmas = self._initPragmas()
        return self.getInput()
if __name__ == "__main__":
    # Smoke test: build the environment for a sample program and exercise
    # the observation, sequential/parallel runs, and the reward computation.
    c = Code('../data/pi.c')
    print(c.getInput())
    #c.setProperty(8)
    #c.setProperty(10)
    print(c.getInput())
    #print(c.actions)
    print(c.runSequential())
    print(c.runParallel())
    print(c.getReward())
| [
"copy.deepcopy",
"subprocess.Popen",
"subprocess.check_output",
"neuromp.preprocessing.tree.AST",
"time.time",
"numpy.array"
] | [((319, 324), 'neuromp.preprocessing.tree.AST', 'AST', ([], {}), '()\n', (322, 324), False, 'from neuromp.preprocessing.tree import AST\n'), ((2330, 2344), 'numpy.array', 'np.array', (['resp'], {}), '(resp)\n', (2338, 2344), True, 'import numpy as np\n'), ((2393, 2413), 'copy.deepcopy', 'deepcopy', (['self.lines'], {}), '(self.lines)\n', (2401, 2413), False, 'from copy import deepcopy\n'), ((3556, 3567), 'time.time', 'time.time', ([], {}), '()\n', (3565, 3567), False, 'import time\n'), ((3581, 3690), 'subprocess.Popen', 'subprocess.Popen', (["['./tmp_par']"], {'universal_newlines': '(True)', 'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['./tmp_par'], universal_newlines=True, stderr=subprocess.\n PIPE, stdout=subprocess.PIPE)\n", (3597, 3690), False, 'import subprocess\n'), ((5003, 5014), 'time.time', 'time.time', ([], {}), '()\n', (5012, 5014), False, 'import time\n'), ((5027, 5136), 'subprocess.Popen', 'subprocess.Popen', (["['./tmp_seq']"], {'universal_newlines': '(True)', 'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['./tmp_seq'], universal_newlines=True, stderr=subprocess.\n PIPE, stdout=subprocess.PIPE)\n", (5043, 5136), False, 'import subprocess\n'), ((3065, 3194), 'subprocess.check_output', 'subprocess.check_output', (["['gcc', 'tmp_par.c', '-fopenmp', '-o', 'tmp_par']"], {'stderr': 'subprocess.STDOUT', 'universal_newlines': '(True)'}), "(['gcc', 'tmp_par.c', '-fopenmp', '-o', 'tmp_par'],\n stderr=subprocess.STDOUT, universal_newlines=True)\n", (3088, 3194), False, 'import subprocess\n'), ((4693, 4811), 'subprocess.check_output', 'subprocess.check_output', (["['gcc', 'tmp_seq.c', '-o', 'tmp_seq']"], {'stderr': 'subprocess.STDOUT', 'universal_newlines': '(True)'}), "(['gcc', 'tmp_seq.c', '-o', 'tmp_seq'], stderr=\n subprocess.STDOUT, universal_newlines=True)\n", (4716, 4811), False, 'import subprocess\n'), ((5254, 5265), 'time.time', 'time.time', ([], {}), '()\n', (5263, 5265), False, 'import time\n'), ((3976, 3987), 
'time.time', 'time.time', ([], {}), '()\n', (3985, 3987), False, 'import time\n')] |
from abess.linear import abessLogistic
from abess.datasets import make_glm_data
import numpy as np
from time import time
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt

# Benchmark: effect of abess's `important_search` size on AUC and wall time
# for logistic regression, averaged over M Monte-Carlo repetitions.
np.random.seed(0)
n = 500        # samples per dataset
p = 2000       # features
k = 20         # true support size
rho = 0.1
M = 50         # Monte-Carlo repetitions
search_path = [32, 64, 128, 256, 512, 1024, 2048]   # important_search values
met_save = True
res_save = True
figure_save = True
# met[i, rep] = (AUC, seconds) for search_path[i] on repetition `rep`
met = np.zeros((len(search_path), M, 2))
# res[i] = (search size, mean AUC, mean time, SE of AUC, SE of time)
res = np.zeros((len(search_path), 5))
for rep in range(M):
    # fresh train/test pair sharing the same true coefficients
    train = make_glm_data(n = n, p = p, k = k, family = 'binomial')
    test = make_glm_data(n = n, p = p, k = k, family = 'binomial', coef_ = train.coef_)
    print("==> Iter : ", rep)
    for i in range(len(search_path)):
        ts = time()
        model = abessLogistic(support_size = range(100), important_search = search_path[i])
        model.fit(train.x, train.y)
        te = time()
        met[i, rep, 0] = roc_auc_score(test.y, model.predict(test.x))
        met[i, rep, 1] = te - ts
for i in range(len(search_path)):
    res[i, 0] = search_path[i]
    mean_vals = met[i].mean(axis = 0)
    # standard error of the mean over the M repetitions
    se_vals = met[i].std(axis = 0) / np.sqrt(M - 1)
    res[i, 1:5] = np.hstack((mean_vals, se_vals))
if (met_save):
    np.save('met.npy', met)
if (res_save):
    np.save('res.npy', res)
if (figure_save):
    res = np.load("res.npy")
    # BUG FIX: the tick labels previously iterated over `ind`, which was
    # never defined (NameError); label the ticks with the search sizes.
    tick_labels = [str(s) for s in search_path]
    plt.figure(figsize = (20, 6))
    plt.subplot(121)
    plt.errorbar(res[:, 0], res[:, 1], yerr = res[:, 3] * 2, capsize = 3)
    plt.xticks(res[:, 0], tick_labels)
    plt.ylim(0.9, 1)
    plt.ylabel('AUC')
    plt.xlabel('log2(important_search)')
    plt.subplot(122)
    plt.errorbar(res[:, 0], res[:, 2], yerr = res[:, 4] * 2, capsize = 3)
    plt.xticks(res[:, 0], tick_labels)
    plt.title('Time(/s)')
    plt.xlabel('log2(important_search)')
    plt.savefig('./impsearch.png')
print('Figure saved.') | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.load",
"numpy.save",
"abess.datasets.make_glm_data",
"numpy.random.seed",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylim",
"numpy.hstack",
"time.time",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyp... | [((196, 213), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (210, 213), True, 'import numpy as np\n'), ((469, 516), 'abess.datasets.make_glm_data', 'make_glm_data', ([], {'n': 'n', 'p': 'p', 'k': 'k', 'family': '"""binomial"""'}), "(n=n, p=p, k=k, family='binomial')\n", (482, 516), False, 'from abess.datasets import make_glm_data\n'), ((536, 602), 'abess.datasets.make_glm_data', 'make_glm_data', ([], {'n': 'n', 'p': 'p', 'k': 'k', 'family': '"""binomial"""', 'coef_': 'train.coef_'}), "(n=n, p=p, k=k, family='binomial', coef_=train.coef_)\n", (549, 602), False, 'from abess.datasets import make_glm_data\n'), ((1109, 1127), 'numpy.hstack', 'np.hstack', (['(m, se)'], {}), '((m, se))\n', (1118, 1127), True, 'import numpy as np\n'), ((1148, 1171), 'numpy.save', 'np.save', (['"""met.npy"""', 'met'], {}), "('met.npy', met)\n", (1155, 1171), True, 'import numpy as np\n'), ((1192, 1215), 'numpy.save', 'np.save', (['"""res.npy"""', 'res'], {}), "('res.npy', res)\n", (1199, 1215), True, 'import numpy as np\n'), ((1245, 1263), 'numpy.load', 'np.load', (['"""res.npy"""'], {}), "('res.npy')\n", (1252, 1263), True, 'import numpy as np\n'), ((1286, 1313), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 6)'}), '(figsize=(20, 6))\n', (1296, 1313), True, 'import matplotlib.pyplot as plt\n'), ((1321, 1337), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1332, 1337), True, 'import matplotlib.pyplot as plt\n'), ((1342, 1407), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['res[:, 0]', 'res[:, 1]'], {'yerr': '(res[:, 3] * 2)', 'capsize': '(3)'}), '(res[:, 0], res[:, 1], yerr=res[:, 3] * 2, capsize=3)\n', (1354, 1407), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1481), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.9)', '(1)'], {}), '(0.9, 1)\n', (1473, 1481), True, 'import matplotlib.pyplot as plt\n'), ((1486, 1503), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""AUC"""'], {}), "('AUC')\n", (1496, 
1503), True, 'import matplotlib.pyplot as plt\n'), ((1508, 1544), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log2(important_search)"""'], {}), "('log2(important_search)')\n", (1518, 1544), True, 'import matplotlib.pyplot as plt\n'), ((1581, 1597), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1592, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1602, 1667), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['res[:, 0]', 'res[:, 2]'], {'yerr': '(res[:, 4] * 2)', 'capsize': '(3)'}), '(res[:, 0], res[:, 2], yerr=res[:, 4] * 2, capsize=3)\n', (1614, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1746), 'matplotlib.pyplot.title', 'plt.title', (['"""Time(/s)"""'], {}), "('Time(/s)')\n", (1734, 1746), True, 'import matplotlib.pyplot as plt\n'), ((1751, 1787), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log2(important_search)"""'], {}), "('log2(important_search)')\n", (1761, 1787), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1855), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./impsearch.png"""'], {}), "('./impsearch.png')\n", (1836, 1855), True, 'import matplotlib.pyplot as plt\n'), ((694, 700), 'time.time', 'time', ([], {}), '()\n', (698, 700), False, 'from time import time\n'), ((842, 848), 'time.time', 'time', ([], {}), '()\n', (846, 848), False, 'from time import time\n'), ((1076, 1090), 'numpy.sqrt', 'np.sqrt', (['(M - 1)'], {}), '(M - 1)\n', (1083, 1090), True, 'import numpy as np\n')] |
import os.path as osp
from collections import defaultdict
import itertools as it
import logging
from wepy.resampling.decisions.clone_merge import MultiCloneMergeDecision
from wepy.reporter.dashboard import ResamplerDashboardSection
import numpy as np
import pandas as pd
class REVODashboardSection(ResamplerDashboardSection):
    """Dashboard section reporting the state of a REVO resampler.

    Renders the resampler's fixed parameters plus, after each cycle, the
    percentages of cloned/merged walkers and summary statistics of the
    all-to-all walker distance matrix.
    """

    RESAMPLER_TEMPLATE = \
"""
Resampling Algorithm: {{ name }}

Distance Exponent: {{ dist_exponent }}
Characteristic Distance: {{ char_dist }}
Merge Distance: {{ merge_dist }}

** Resampling

Cycle index: {{ cycle_idx }}
The percentage of cloned walkers: {{ percentage_cloned_walkers }} %
The percentage of merged walkers: {{ percentage_merged_walkers }} %

** Statistics

Average All to All Distance: {{ avg_distance }}
Minimum All to All Distance: {{ min_distance }}
Maximum All to All Distance: {{ max_distance }}
Variation value = {{ variation }}

"""

    def __init__(self, resampler=None,
                 dist_exponent=None,
                 merge_dist=None,
                 lpmin=None,
                 char_dist=None,
                 seed=None,
                 decision=None,
                 **kwargs):
        """Configure the section either from a live resampler instance or
        from explicitly given parameters.

        If ``resampler`` is None, every individual parameter
        (dist_exponent, merge_dist, lpmin, char_dist, seed, decision)
        must be supplied.
        """

        if 'name' not in kwargs:
            kwargs['name'] = 'REVOResampler'

        # Fix: forward **kwargs so the 'name' default set above actually
        # reaches the parent section (it was previously set but dropped).
        super().__init__(resampler=resampler,
                         dist_exponent=dist_exponent,
                         merge_dist=merge_dist,
                         lpmin=lpmin,
                         char_dist=char_dist,
                         seed=seed,
                         decision=decision,
                         **kwargs)

        if resampler is not None:
            # prefer the live resampler's attributes when available
            self.dist_exponent = resampler.dist_exponent
            self.merge_dist = resampler.merge_dist
            self.lpmin = resampler.lpmin
            self.char_dist = resampler.char_dist
            self.seed = resampler.seed
            self.decision = resampler.DECISION
        else:
            assert dist_exponent is not None, \
                "if no resampler given must give parameters: dist_exponent"
            assert merge_dist is not None, \
                "if no resampler given must give parameters: merge_dist"
            assert lpmin is not None, \
                "if no resampler given must give parameters: lpmin"
            assert char_dist is not None, \
                "if no resampler given must give parameters: char_dist"
            assert seed is not None, \
                "if no resampler given must give parameters: seed"
            assert decision is not None, \
                "if no resampler given must give parameters: decision"

            self.dist_exponent = dist_exponent
            self.merge_dist = merge_dist
            self.lpmin = lpmin
            self.char_dist = char_dist
            self.seed = seed
            self.decision = decision

        # updatables: refreshed on each call to update_values
        self.percentage_cloned_walkers = 0
        self.percentage_merged_walkers = 0

        # REVO distance statistics; None until the first cycle is reported
        self.avg_distance = None
        self.min_distance = None
        self.max_distance = None
        # Fix: gen_fields reads `variation_value` and `cycle_idx`, but the
        # original initialized `variation_values` (typo) and never
        # initialized `cycle_idx`, so gen_fields raised AttributeError when
        # called before the first update_values.
        self.variation_value = None
        self.cycle_idx = None

    def update_values(self, **kwargs):
        """Record the statistics of one resampling cycle.

        Expects kwargs: 'resampling_data' (per-walker records carrying a
        'decision_id'), 'resampler_data' (records carrying 'variation' and
        'distance_matrix'), and 'cycle_idx'.
        """
        num_clones = 0
        num_merges = 0
        num_walkers = len(kwargs['resampling_data'])
        for walker_record in kwargs['resampling_data']:
            if walker_record['decision_id'][0]==self.decision.ENUM.CLONE.value:
                num_clones += 1
            elif walker_record['decision_id'][0]==self.decision.ENUM.KEEP_MERGE.value:
                num_merges += 1

        self.percentage_cloned_walkers = (num_clones/num_walkers) * 100
        self.percentage_merged_walkers = (num_merges/num_walkers) * 100

        # Get the statistics (if several records are present, the last wins)
        for resampler_record in kwargs['resampler_data']:
            self.variation_value = resampler_record['variation'][0]
            distance_matrix = resampler_record['distance_matrix']
            # get the upper triangle values of the distance_matrix
            distance_matrix = np.triu(distance_matrix)
            distance_values = distance_matrix[np.where(distance_matrix > 0)]
            self.avg_distance = np.average(distance_values)
            self.min_distance = np.min(distance_values)
            self.max_distance = np.max(distance_values)

        self.cycle_idx = kwargs['cycle_idx']

    def gen_fields(self, **kwargs):
        """Return the template substitution fields, merging the parent's
        fields with this section's parameters and per-cycle statistics."""
        fields = super().gen_fields(**kwargs)

        new_fields = {
            'dist_exponent' : self.dist_exponent,
            'char_dist' : self.char_dist,
            'merge_dist' : self.merge_dist,
            'cycle_idx' : self.cycle_idx,
            'percentage_cloned_walkers' : self.percentage_cloned_walkers,
            'percentage_merged_walkers' : self.percentage_merged_walkers,
            'avg_distance' : self.avg_distance,
            'min_distance' : self.min_distance,
            'max_distance' : self.max_distance,
            'variation' : self.variation_value
        }
        fields.update(new_fields)

        return fields
| [
"numpy.average",
"numpy.triu",
"numpy.max",
"numpy.min",
"numpy.where"
] | [((3886, 3910), 'numpy.triu', 'np.triu', (['distance_matrix'], {}), '(distance_matrix)\n', (3893, 3910), True, 'import numpy as np\n'), ((4009, 4036), 'numpy.average', 'np.average', (['distance_values'], {}), '(distance_values)\n', (4019, 4036), True, 'import numpy as np\n'), ((4065, 4088), 'numpy.min', 'np.min', (['distance_values'], {}), '(distance_values)\n', (4071, 4088), True, 'import numpy as np\n'), ((4118, 4141), 'numpy.max', 'np.max', (['distance_values'], {}), '(distance_values)\n', (4124, 4141), True, 'import numpy as np\n'), ((3952, 3981), 'numpy.where', 'np.where', (['(distance_matrix > 0)'], {}), '(distance_matrix > 0)\n', (3960, 3981), True, 'import numpy as np\n')] |
import powerlaw
import pandas as pd
import numpy as np
from collections import defaultdict
import datetime
import math
from tqdm import tqdm
from ..utils import constants, utils
from scipy.sparse import lil_matrix
import random
import logging
import inspect
from ..core.trajectorydataframe import TrajDataFrame
from ..models.gravity import Gravity
from geopy.distance import distance
def earth_distance_km(p0, p1):
    """Return the geodesic distance in kilometers between two (lat, lng) points.

    Fix: replaces a lambda assigned to a name (PEP 8 E731) with a proper def.
    """
    return distance(p0, p1).km


# Short aliases for the column-name constants used throughout this module.
latitude = constants.LATITUDE
longitude = constants.LONGITUDE
date_time = constants.DATETIME
user_id = constants.UID
def compute_od_matrix(gravity_singly, spatial_tessellation, tile_id_column=constants.TILE_ID,
                      relevance_column=constants.RELEVANCE):
    """
    Compute the full origin-destination probability matrix for a tessellation.

    Element {ij} of the returned matrix is the probability p_{ij} of moving
    from the location in row i to the location in row j of the input
    GeoDataFrame, as predicted by a singly constrained gravity model.

    Parameters
    ----------
    :param gravity_singly: object
        instance of class collective.Gravity with argument gravity_type='singly constrained'
    :param spatial_tessellation: GeoDataFrame
    :param tile_id_column: str or int
        column of the GeoDataFrame containing the tile_ID of the locations/tiles
    :param relevance_column: str or int
        column of the GeoDataFrame containing the relevance of the locations/tiles

    :return:
        od_matrix: numpy array
        2-dim numpy array with the trip probabilities for each origin-destination pair
    """
    # Delegate entirely to the gravity model; no total outflows are needed
    # because we only want probabilities, not absolute flows.
    return gravity_singly.generate(
        spatial_tessellation,
        tile_id_column=tile_id_column,
        tot_outflows_column=None,
        relevance_column=relevance_column,
        out_format='probabilities')
def populate_od_matrix(location, lats_lngs, relevances, gravity_singly):
    """
    Compute one row of the origin-destination matrix: the probability of
    moving from `location` to every tile of the spatial tessellation.

    :param location: int
        the identifier of the origin location
    :param lats_lngs: list or numpy array
        coordinates of the centroids of the tiles in the spatial tessellation
    :param relevances: list or numpy array
        relevances of the tiles in the spatial tessellation
    :param gravity_singly: object
        instance of class collective.Gravity with argument gravity_type='singly constrained'

    :return:
        a numpy array of trip probabilities from the origin to each destination
    """
    origin_coords = lats_lngs[location]
    # distance (km) from the origin to every tile centroid
    pairwise_km = np.array([earth_distance_km(origin_coords, dest) for dest in lats_lngs])
    gravity_scores = gravity_singly.compute_gravity_score(pairwise_km, relevances[location], relevances)
    # normalize scores into probabilities
    return gravity_scores / sum(gravity_scores)
class EPR:
    """Base Exploration and Preferential Return (EPR) model of individual
    human mobility.

    An agent alternates between exploring a new location -- chosen with
    probability rho * S^(-gamma), where S is the number of distinct locations
    already visited -- and returning to a previously visited location, chosen
    proportionally to its visitation frequency.  Waiting times between moves
    follow a truncated power law with parameters beta and tau.

    :param name: str
        the name of the model instantiation
    :param rho: float
        exploration propensity (0 < rho <= 1), default 0.6
    :param gamma: float
        exploration exponent (gamma >= 0), default 0.21
    :param beta: float
        waiting-time distribution exponent, default 0.8
    :param tau: int
        waiting-time distribution cutoff, in hours, default 17
    :param min_wait_time_minutes: int
        minimum waiting time, in minutes, default 20
    """

    def __init__(self, name='EPR model', rho=0.6, gamma=0.21, beta=0.8, tau=17, min_wait_time_minutes=20):

        self._name = name
        self._rho = rho
        self._gamma = gamma
        self._tau = tau
        self._beta = beta
        self._location2visits = defaultdict(int)
        self._od_matrix = None
        self._is_sparse = True
        self._spatial_tessellation = None
        self.lats_lngs = None
        self.relevances = None
        self._starting_loc = None
        self.gravity_singly = None

        # Minimum waiting time (in hours)
        self._min_wait_time = min_wait_time_minutes / 60.0  # minimum waiting time

        self._trajectories_ = []
        self._log_file = None

    @property
    def name(self):
        return self._name

    @property
    def rho(self):
        return self._rho

    @property
    def gamma(self):
        return self._gamma

    @property
    def tau(self):
        return self._tau

    @property
    def beta(self):
        return self._beta

    @property
    def min_wait_time(self):
        return self._min_wait_time

    @property
    def spatial_tessellation_(self):
        return self._spatial_tessellation

    @property
    def trajectories_(self):
        return self._trajectories_

    def _weighted_random_selection(self, current_location):
        """
        Select a random location given the agent's visitation frequency.
        Used by the return mechanism.  The current location is excluded
        from the candidates.

        :return: int
            a random location
        """
        locations = np.fromiter(self._location2visits.keys(), dtype=int)
        weights = np.fromiter(self._location2visits.values(), dtype=float)

        # remove the current location
        currloc_idx = np.where(locations == current_location)[0][0]
        locations = np.delete(locations, currloc_idx)
        weights = np.delete(weights, currloc_idx)

        weights = weights / np.sum(weights)
        location = np.random.choice(locations, size=1, p=weights)
        return int(location[0])

    def _preferential_return(self, current_location):
        """
        Choose the location the agent returns to, according to the visitation frequency
        of the previously visited locations.

        :return: int
            the identifier of the next location
        """
        next_location = self._weighted_random_selection(current_location)
        if self._log_file is not None:
            # Fix: the format string has three '%s' placeholders, so the
            # (lat, lng) pair must be unpacked into two arguments; the
            # original passed the pair as a single argument, raising
            # TypeError whenever logging was enabled.
            lat, lng = self.lats_lngs[next_location]
            logging.info('RETURN to %s (%s, %s)' % (next_location, lat, lng))
            logging.info('\t frequency = %s' % self._location2visits[next_location])
        return next_location

    def _preferential_exploration(self, current_location):
        """
        Choose the new location the agent explores, according to the probabilities
        in the od matrix.

        :param current_location : int
            the identifier of the current location of the individual
        :return: int
            the identifier of the new location to explore
        """

        if self._is_sparse:
            prob_array = self._od_matrix.getrowview(current_location)
            if prob_array.nnz == 0:
                # if the row has not been populated yet, fill it lazily
                weights = populate_od_matrix(current_location, self.lats_lngs, self.relevances, self.gravity_singly)
                self._od_matrix[current_location, :] = weights
            else:
                weights = prob_array.toarray()[0]
            locations = np.arange(len(self.lats_lngs))
            location = np.random.choice(locations, size=1, p=weights)[0]

        else:  # if the matrix is precomputed
            locations = np.arange(len(self._od_matrix[current_location]))
            weights = self._od_matrix[current_location]
            location = np.random.choice(locations, size=1, p=weights)[0]

        if self._log_file is not None:
            # Fix: unpack the (lat, lng) pair to match the three '%s'
            # placeholders (see _preferential_return).
            lat, lng = self.lats_lngs[location]
            logging.info('EXPLORATION to %s (%s, %s)' % (location, lat, lng))

        return location

    def _get_trajdataframe(self, parameters):
        """
        Transform the trajectories list into a TrajDataFrame.

        :return: a TrajDataFrame describing the trajectories
        :rtype TrajDataFrame
        """
        df = pd.DataFrame(self._trajectories_, columns=[user_id, date_time, 'location'])
        df[[latitude, longitude]] = df.location.apply(lambda s: pd.Series({latitude: self.lats_lngs[s][0],
                                                                   longitude: self.lats_lngs[s][1]}))
        df = df.sort_values(by=[user_id, date_time]).drop('location', axis=1)
        return TrajDataFrame(df, parameters=parameters)

    def _choose_location(self):
        """
        Choose the next location to visit given the agent's current location.

        :return: int
            the identifier of the next location to visit
        """
        n_visited_locations = len(self._location2visits)  # number of already visited locations

        if n_visited_locations == 0:
            self._starting_loc = self._preferential_exploration(self._starting_loc)
            return self._starting_loc

        agent_id, current_time, current_location = self._trajectories_[-1]  # the last visited location

        # choose a probability to return or explore
        p_new = random.uniform(0, 1)

        if (p_new <= self._rho * math.pow(n_visited_locations, -self._gamma) and n_visited_locations != \
                self._od_matrix.shape[0]) or n_visited_locations == 1:  # choose to return or explore
            # PREFERENTIAL EXPLORATION
            next_location = self._preferential_exploration(current_location)
            # TODO: remove the part below and exclude visited locations
            # from the list of potential destinations in _preferential_exploration
            # while next_location in self._location2visits:
            #     next_location = self._preferential_exploration(current_location)
            return next_location

        else:
            # PREFERENTIAL RETURN
            next_location = self._preferential_return(current_location)
            return next_location

    def _time_generator(self):
        # Truncated power-law waiting time (Song et al., Nature Physics 2010).
        return powerlaw.Truncated_Power_Law(xmin=self.min_wait_time,
                                            parameters=[1. + self._beta, 1.0 / self._tau]).generate_random()[0]

    def _choose_waiting_time(self):
        """
        Choose the time (in hours) the agent has to wait before the next movement.

        :return: float
            the time to wait before the next movement.
        """
        time_to_wait = self._time_generator()
        return time_to_wait

    def generate(self, start_date, end_date, spatial_tessellation, gravity_singly={}, n_agents=1,
                 starting_locations=None, od_matrix=None,
                 relevance_column=constants.RELEVANCE,
                 random_state=None, log_file=None, verbose=False):
        """
        Start the simulation of the agents from "start_date" until "end_date".

        :param start_date : datetime
            the starting date of the simulation
        :param end_date : datetime
            the ending date of the simulation
        :param spatial_tessellation : GeoDataFrame
            the spatial tessellation describing locations and relevances
        :param gravity_singly : object or {}
            a singly constrained Gravity model to use; if {} (default) a new
            one is created
        :param n_agents: int
            the number of agents to generate
        :param starting_locations : list or None
            the identifiers of the starting locations, one per agent
            (consumed from the end of the list)
        :param od_matrix: the od_matrix to use for deciding the movements. If None, it is computed "on the fly" during the simulation
        :type od_matrix: numpy array or None
        :param random_state: if int, random_state is the seed used by the random number generator; if None, no explicit seeding is performed (default: None)
        :type random_state: int or None
        """
        if starting_locations is not None and len(starting_locations) < n_agents:
            raise IndexError("The number of starting locations is smaller than the number of agents.")

        if gravity_singly == {}:
            self.gravity_singly = Gravity(gravity_type='singly constrained')
        else:
            # Fix: a caller-supplied gravity model was previously ignored
            # (self.gravity_singly stayed None), which broke the lazy
            # od-matrix population in _preferential_exploration.
            self.gravity_singly = gravity_singly

        # Save function arguments and values in a dictionary
        frame = inspect.currentframe()
        args, _, _, arg_values = inspect.getargvalues(frame)
        parameters = dict([])
        parameters['model'] = {'class': self.__class__.__init__,
                               'generate': {i: arg_values[i] for i in args[1:] if i not in ['spatial_tessellation',
                                                                                            'od_matrix', 'log_file',
                                                                                            'starting_locations']}}

        # if specified, fix the random seeds to guarantee reproducibility of simulation
        if random_state is not None:
            random.seed(random_state)
            np.random.seed(random_state)

        if log_file is not None:
            self._log_file = log_file
            logging.basicConfig(format='%(message)s', filename=log_file, filemode='w', level=logging.INFO)

        # initialization of trajectories
        self._trajectories_ = []

        # setting of spatial tessellation
        num_locs = len(spatial_tessellation)
        self.lats_lngs = spatial_tessellation.geometry.apply(utils.get_geom_centroid, args=[True]).values
        if relevance_column is None:
            self.relevances = np.ones(num_locs)
        else:
            self.relevances = spatial_tessellation[relevance_column].fillna(0).values

        # initialization of od matrix
        if od_matrix is None:
            self._od_matrix = lil_matrix((num_locs, num_locs))
            self._is_sparse = True
        else:
            self._od_matrix = od_matrix
            self._is_sparse = False

        # for each agent
        loop = range(1, n_agents + 1)
        if verbose:
            loop = tqdm(range(1, n_agents + 1))

        for agent_id in loop:
            self._location2visits = defaultdict(int)
            if starting_locations is None:
                self._starting_loc = np.random.choice(np.fromiter(range(num_locs), dtype=int), size=1)[0]
            else:
                self._starting_loc = starting_locations.pop()

            self._epr_generate_one_agent(agent_id, start_date, end_date)

        tdf = self._get_trajdataframe(parameters)
        return tdf

    def _epr_generate_one_agent(self, agent_id, start_date, end_date):
        # Record the starting location, then alternate wait/move until end_date.
        current_date = start_date
        self._trajectories_.append((agent_id, current_date, self._starting_loc))
        self._location2visits[self._starting_loc] += 1

        waiting_time = self._choose_waiting_time()
        current_date += datetime.timedelta(hours=waiting_time)

        while current_date < end_date:
            next_location = self._choose_location()
            self._trajectories_.append((agent_id, current_date, next_location))
            self._location2visits[next_location] += 1

            waiting_time = self._choose_waiting_time()
            current_date += datetime.timedelta(hours=waiting_time)
class DensityEPR(EPR):
    """
    The dEPR model of individual human mobility.

    The agent explores new locations with probability :math:`\\rho S^{-\\gamma}`,
    where :math:`S` is the number of distinct locations previously visited,
    and otherwise returns to a previously visited location chosen by
    visitation frequency.  Exploration destinations are weighted by the
    relevance (density) of the tiles, via a singly constrained gravity model.

    :param name: str
        the name of the instantiation of the dEPR model (default: "Density EPR model")
    :param rho: float
        controls the tendency to explore versus return
        (:math:`0 < \\rho \\leq 1`, default 0.6, estimated from empirical data)
    :param gamma: float
        controls the tendency to explore versus return
        (:math:`\\gamma \\geq 0`, default 0.21, estimated from empirical data)
    :param beta: float
        the parameter :math:`\\beta` of the waiting time distribution
        (default 0.8, estimated from empirical data)
    :param tau: int
        the parameter :math:`\\tau` of the waiting time distribution,
        expressed in hours (default 17, estimated from empirical data)
    :param min_wait_time_minutes: int
        minimum waiting time in minutes

    .. seealso:: :class:`EPR`

    References:
        .. [song2010modelling] Song et al. "Modelling the scaling properties of human mobility." Nature Physics 6, no. 10 (2010): 818--823.
        .. [pappalardo2015returners] "Returners and Explorers dichotomy in human mobility.", Nature Communications, 6:8166, doi: 10.1038/ncomms9166 (2015).
        .. [pappalardo2016modelling] "Human Mobility Modelling: exploration and preferential return meet the gravity model", Procedia Computer Science 83, doi: 10.1016/j.procs.2016.04.188 (2016).
    """

    def __init__(self, name='Density EPR model', rho=0.6, gamma=0.21, beta=0.8, tau=17, min_wait_time_minutes=20):
        # Fix: the constructor arguments were previously discarded
        # (super().__init__() was called without them), so custom
        # rho/gamma/beta/tau/min_wait_time_minutes had no effect.
        super().__init__(name=name, rho=rho, gamma=gamma, beta=beta, tau=tau,
                         min_wait_time_minutes=min_wait_time_minutes)
class SpatialEPR(EPR):
    """
    The sEPR model of individual human mobility.

    Identical to the dEPR dynamics except that, by default, no relevance
    column is used (``relevance_column=None`` in :meth:`generate`), so
    exploration depends on distance only, not on tile density.

    :param name: str
        the name of the instantiation of the sEPR model (default: "Spatial EPR model")
    :param rho: float
        controls the tendency to explore versus return
        (:math:`0 < \\rho \\leq 1`, default 0.6, estimated from empirical data)
    :param gamma: float
        controls the tendency to explore versus return
        (:math:`\\gamma \\geq 0`, default 0.21, estimated from empirical data)
    :param beta: float
        the parameter :math:`\\beta` of the waiting time distribution
        (default 0.8, estimated from empirical data)
    :param tau: int
        the parameter :math:`\\tau` of the waiting time distribution,
        expressed in hours (default 17, estimated from empirical data)
    :param min_wait_time_minutes: int
        minimum waiting time in minutes

    .. seealso:: :class:`EPR`

    References:
        .. [song2010modelling] Song et al. "Modelling the scaling properties of human mobility." Nature Physics 6, no. 10 (2010): 818--823.
        .. [pappalardo2015returners] "Returners and Explorers dichotomy in human mobility.", Nature Communications, 6:8166, doi: 10.1038/ncomms9166 (2015).
        .. [pappalardo2016modelling] "Human Mobility Modelling: exploration and preferential return meet the gravity model", Procedia Computer Science 83, doi: 10.1016/j.procs.2016.04.188 (2016).
    """

    def __init__(self, name='Spatial EPR model', rho=0.6, gamma=0.21, beta=0.8, tau=17, min_wait_time_minutes=20):
        # Fix: the constructor arguments were previously discarded
        # (super().__init__() was called without them), so custom
        # rho/gamma/beta/tau/min_wait_time_minutes had no effect.
        super().__init__(name=name, rho=rho, gamma=gamma, beta=beta, tau=tau,
                         min_wait_time_minutes=min_wait_time_minutes)

    def generate(self, start_date, end_date, spatial_tessellation, gravity_singly={}, n_agents=1,
                 starting_locations=None, od_matrix=None,
                 relevance_column=None,
                 random_state=None, log_file=None, verbose=False):
        # Same as EPR.generate but with relevance_column defaulting to None,
        # which makes the gravity weights distance-only.
        return super().generate(start_date, end_date, spatial_tessellation, gravity_singly=gravity_singly,
                                n_agents=n_agents,
                                starting_locations=starting_locations, od_matrix=od_matrix,
                                relevance_column=relevance_column,
                                random_state=random_state, log_file=log_file, verbose=verbose)
class Ditras(EPR):
    """
    The DITRAS (DIary-based TRAjectory Simulator) model of individual human
    mobility.

    DITRAS couples the EPR spatial mechanism with a mobility diary: a
    generator produces, hour by hour, whether the agent is at home
    (abstract location 0) or away, and the EPR machinery picks the concrete
    away locations.

    :param diary_generator: object
        a fitted mobility diary generator (e.g. a MarkovDiaryGenerator)
        exposing ``generate(n_hours, start_date)``
    :param name: str
        the name of the instantiation of the model (default: "Ditras model")
    :param rho: float
        in :math:`\\rho S^{-\\gamma}`, controls the tendency to explore a new
        location versus returning to a visited one (default: 0.3)
    :param gamma: float
        in :math:`\\rho S^{-\\gamma}`, controls the tendency to explore a new
        location versus returning to a visited one (default: 0.21)

    Example:

        from skmob.models.epr import Ditras
        from skmob.models.markov_diary_generator import MarkovDiaryGenerator
        from skmob.preprocessing import filtering, compression, detection, clustering

        # Preprocess the GPS data used to fit the diary generator
        tdf = skmob.TrajDataFrame.from_file('./data/geolife_sample.txt.gz',
                            latitude='lat', longitude='lon', user_id='user',
                            datetime='datetime', sep=',')
        ctdf = compression.compress(tdf)
        stdf = detection.stops(ctdf)
        cstdf = clustering.cluster(stdf)

        # Create the diary generator using 2 users
        mdg = MarkovDiaryGenerator()
        mdg.fit(cstdf, 2, lid='cluster')

        # Instantiate the model
        start_time = pd.to_datetime('2019/01/01 08:00:00')
        end_time = pd.to_datetime('2019/01/14 08:00:00')
        ditras = Ditras(mdg)
        tessellation = gpd.GeoDataFrame.from_file("data/NY_counties_2011.geojson")

        # Generate 3 users
        tdf = ditras.generate(start_time, end_time, tessellation, relevance_column='population',
                    n_agents=3, od_matrix=None, verbose=True)

    References:
        .. [pappalardo2018data] Data-driven generation of spatio-temporal routines in human mobility, Data Mining and Knowledge Discovery, 32:3 (2018).
    """

    def __init__(self, diary_generator, name='Ditras model', rho=0.3, gamma=0.21):
        super().__init__()
        self._name = name
        self._diary_generator = diary_generator
        self._rho = rho
        self._gamma = gamma

    def _epr_generate_one_agent(self, agent_id, start_date, end_date):
        # infer the number of simulated hours from the date range
        total_seconds = (end_date - start_date).total_seconds()
        n_hours = int((total_seconds / 60.0) / 60.0)

        # generate a mobility diary for the agent
        diary_df = self._diary_generator.generate(n_hours, start_date)

        for _, entry in diary_df.iterrows():
            # abstract location 0 means the agent stays at its home location;
            # any other value means it moves, following the EPR mechanism
            at_home = entry.abstract_location == 0
            visited = self._starting_loc if at_home else self._choose_location()
            self._trajectories_.append((agent_id, entry.datetime, visited))
            self._location2visits[visited] += 1
| [
"numpy.sum",
"numpy.random.seed",
"powerlaw.Truncated_Power_Law",
"numpy.ones",
"collections.defaultdict",
"inspect.getargvalues",
"scipy.sparse.lil_matrix",
"geopy.distance.distance",
"pandas.DataFrame",
"math.pow",
"datetime.timedelta",
"random.seed",
"numpy.random.choice",
"pandas.Serie... | [((421, 437), 'geopy.distance.distance', 'distance', (['p0', 'p1'], {}), '(p0, p1)\n', (429, 437), False, 'from geopy.distance import distance\n'), ((3136, 3152), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3147, 3152), False, 'from collections import defaultdict\n'), ((4647, 4680), 'numpy.delete', 'np.delete', (['locations', 'currloc_idx'], {}), '(locations, currloc_idx)\n', (4656, 4680), True, 'import numpy as np\n'), ((4699, 4730), 'numpy.delete', 'np.delete', (['weights', 'currloc_idx'], {}), '(weights, currloc_idx)\n', (4708, 4730), True, 'import numpy as np\n'), ((4795, 4841), 'numpy.random.choice', 'np.random.choice', (['locations'], {'size': '(1)', 'p': 'weights'}), '(locations, size=1, p=weights)\n', (4811, 4841), True, 'import numpy as np\n'), ((7081, 7156), 'pandas.DataFrame', 'pd.DataFrame', (['self._trajectories_'], {'columns': "[user_id, date_time, 'location']"}), "(self._trajectories_, columns=[user_id, date_time, 'location'])\n", (7093, 7156), True, 'import pandas as pd\n'), ((8152, 8172), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (8166, 8172), False, 'import random\n'), ((11266, 11288), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (11286, 11288), False, 'import inspect\n'), ((11322, 11349), 'inspect.getargvalues', 'inspect.getargvalues', (['frame'], {}), '(frame)\n', (11342, 11349), False, 'import inspect\n'), ((13797, 13835), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'waiting_time'}), '(hours=waiting_time)\n', (13815, 13835), False, 'import datetime\n'), ((4760, 4775), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (4766, 4775), True, 'import numpy as np\n'), ((5281, 5372), 'logging.info', 'logging.info', (["('RETURN to %s (%s, %s)' % (next_location, self.lats_lngs[next_location]))"], {}), "('RETURN to %s (%s, %s)' % (next_location, self.lats_lngs[\n next_location]))\n", (5293, 5372), False, 'import logging\n'), ((5380, 5452), 
'logging.info', 'logging.info', (["('\\t frequency = %s' % self._location2visits[next_location])"], {}), "('\\t frequency = %s' % self._location2visits[next_location])\n", (5392, 5452), False, 'import logging\n'), ((6728, 6814), 'logging.info', 'logging.info', (["('EXPLORATION to %s (%s, %s)' % (location, self.lats_lngs[location]))"], {}), "('EXPLORATION to %s (%s, %s)' % (location, self.lats_lngs[\n location]))\n", (6740, 6814), False, 'import logging\n'), ((11932, 11957), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (11943, 11957), False, 'import random\n'), ((11970, 11998), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (11984, 11998), True, 'import numpy as np\n'), ((12083, 12181), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""', 'filename': 'log_file', 'filemode': '"""w"""', 'level': 'logging.INFO'}), "(format='%(message)s', filename=log_file, filemode='w',\n level=logging.INFO)\n", (12102, 12181), False, 'import logging\n'), ((12514, 12531), 'numpy.ones', 'np.ones', (['num_locs'], {}), '(num_locs)\n', (12521, 12531), True, 'import numpy as np\n'), ((12731, 12763), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(num_locs, num_locs)'], {}), '((num_locs, num_locs))\n', (12741, 12763), False, 'from scipy.sparse import lil_matrix\n'), ((13088, 13104), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (13099, 13104), False, 'from collections import defaultdict\n'), ((14146, 14184), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'waiting_time'}), '(hours=waiting_time)\n', (14164, 14184), False, 'import datetime\n'), ((4581, 4620), 'numpy.where', 'np.where', (['(locations == current_location)'], {}), '(locations == current_location)\n', (4589, 4620), True, 'import numpy as np\n'), ((6376, 6422), 'numpy.random.choice', 'np.random.choice', (['locations'], {'size': '(1)', 'p': 'weights'}), '(locations, size=1, p=weights)\n', (6392, 6422), True, 
'import numpy as np\n'), ((6626, 6672), 'numpy.random.choice', 'np.random.choice', (['locations'], {'size': '(1)', 'p': 'weights'}), '(locations, size=1, p=weights)\n', (6642, 6672), True, 'import numpy as np\n'), ((7221, 7297), 'pandas.Series', 'pd.Series', (['{latitude: self.lats_lngs[s][0], longitude: self.lats_lngs[s][1]}'], {}), '({latitude: self.lats_lngs[s][0], longitude: self.lats_lngs[s][1]})\n', (7230, 7297), True, 'import pandas as pd\n'), ((9031, 9136), 'powerlaw.Truncated_Power_Law', 'powerlaw.Truncated_Power_Law', ([], {'xmin': 'self.min_wait_time', 'parameters': '[1.0 + self._beta, 1.0 / self._tau]'}), '(xmin=self.min_wait_time, parameters=[1.0 +\n self._beta, 1.0 / self._tau])\n', (9059, 9136), False, 'import powerlaw\n'), ((8207, 8250), 'math.pow', 'math.pow', (['n_visited_locations', '(-self._gamma)'], {}), '(n_visited_locations, -self._gamma)\n', (8215, 8250), False, 'import math\n')] |
"""
Naive well mixed model without delays. Saves result in .json format,
[({parameter:value}, {species:[[time series]]})]. Output file name should
be provided as the first argument when executing the script.
"""
import sys
import os.path
import numpy as np
import gillespy2
import json
def generate_partitions(n_traj, n_cores):
    """Split ``n_traj`` trajectories into ``n_cores`` near-equal counts.

    Returns a list of length ``n_cores`` whose entries sum to ``n_traj``;
    the first ``n_traj % n_cores`` workers receive one extra trajectory.
    """
    base, remainder = divmod(n_traj, n_cores)
    return [base + 1 if worker < remainder else base
            for worker in range(n_cores)]
class Cell(gillespy2.Model):
    """Well mixed gene-expression model.

    Species: free gene (Gf), bound gene (Gb), messenger (RNA), protein (P).
    Reactions: transcription, translation, RNA/protein degradation and
    (un)binding of the protein product to its own gene.
    """

    def __init__(self, parameters):
        """Build the reaction network.

        Parameters
        ----------
        parameters : dict
            Maps rate-constant names ('gamma', 'mu', 'kappa', 'k_a',
            'k_d') to float values; non-float entries are ignored.
        """
        gillespy2.Model.__init__(self, "Well Mixed Model")
        self.list_species = ['Gf', 'Gb', 'RNA', 'P']
        self.dict_species = {
                species: gillespy2.Species(
                    name=species,
                    initial_value=0)
                for species in self.list_species}
        # Force species order to follow list_species
        # Necessary to extract results correctly
        self.add_species([self.dict_species[s] for s in self.list_species])
        # TODO unit test order preservation, e.g. all rates to 0...
        # Parameters (only float-valued entries become model parameters)
        parameters = {name: gillespy2.Parameter(name=name, expression=value)
            for name, value in parameters.items() if isinstance(value, float)}
        self.add_parameter(list(parameters.values()))
        # Reactions
        # Degradation of RNA and P
        dict_reactions = {}
        for name, species in [(name, self.dict_species[name])
                              for name in ["RNA", "P"]]:
            dict_reactions["dgd"+name] = gillespy2.Reaction(
                    name = "dgd"+name,
                    reactants = {species:1}, products={},
                    rate = parameters['gamma'])
        # Transcription
        dict_reactions["tcp"] = gillespy2.Reaction(
                name = "tcp",
                reactants = {self.dict_species['Gf']:1},
                products={self.dict_species['Gf']:1, self.dict_species['RNA']:1},
                rate = parameters['mu'])
        # Translation
        dict_reactions["tlt"] = gillespy2.Reaction(
                name="tlt",
                reactants={self.dict_species['RNA']:1},
                products={self.dict_species['RNA']:1, self.dict_species['P']:1},
                rate=parameters['kappa'])
        # Binding of P to the free gene
        dict_reactions["bdg"] = gillespy2.Reaction(
                name="bdg",
                reactants={self.dict_species['Gf']:1, self.dict_species['P']:1},
                products={self.dict_species['Gb']:1},
                rate=parameters['k_a'])
        # Unbinding of P from the gene
        # BUG FIX: this entry was previously keyed and named "ubdg"+name,
        # silently picking up the leftover loop variable `name` ("P")
        # from the degradation loop above.
        dict_reactions["ubdg"] = gillespy2.Reaction(
                name="ubdg",
                reactants={self.dict_species['Gb']:1},
                products={self.dict_species['Gf']:1, self.dict_species['P']:1},
                rate=parameters['k_d'])
        self.add_reaction(list(dict_reactions.values()))

    def run(
            self,
            initial_state,
            time_stop,
            n_trajectories,
            seed=None,
            n_points=500):
        """Simulate the model and return species time series.

        Parameters
        ----------
        initial_state : dict
            Initial copy numbers per species name; missing species get 0.
        time_stop : float
            Final simulation time; sampled on n_points + 1 equispaced times.
        n_trajectories : int
            Number of stochastic trajectories.
        seed : int, optional
            RNG seed passed to the solver.
        n_points : int, optional
            Number of sampling intervals (default 500).

        Returns
        -------
        For n_trajectories > 1: dict mapping each species name (plus
        'time') to a list of per-trajectory time series. For a single
        trajectory: (results, final_state) where final_state maps each
        species to its last sampled value.
        """
        # Set the species initial copy numbers for this run
        for name, species in self.dict_species.items():
            species.initial_value = initial_state.get(name, 0)
        self.timespan(np.linspace(0, time_stop, num=n_points + 1).tolist())
        raw_results = gillespy2.Model.run(
            self,
            number_of_trajectories=n_trajectories, seed=seed)
        results = {
            species:
            ([trajectory[species].tolist() for trajectory in raw_results]
             if n_trajectories > 1 else
             raw_results[species].tolist())
            for species in self.list_species}
        results['time'] = ([list(self.tspan) for _ in range(n_trajectories)]
                           if n_trajectories > 1 else
                           list(self.tspan))
        return (results
                if n_trajectories > 1 else
                (results, {species: results[species][-1]
                           for species in results if species != 'time'}))
if __name__ == "__main__":
    # Smoke test: unit rates, one free gene copy, a single trajectory
    # sampled at just three time points.
    parameters = dict.fromkeys(('gamma', 'mu', 'kappa', 'k_a', 'k_d'), 1.)
    model = Cell(parameters)
    results = model.run({'Gf': 1}, 100, 1, n_points=2)
    print(results)
| [
"gillespy2.Species",
"gillespy2.Parameter",
"gillespy2.Model.__init__",
"numpy.linspace",
"gillespy2.Model.run",
"gillespy2.Reaction"
] | [((524, 574), 'gillespy2.Model.__init__', 'gillespy2.Model.__init__', (['self', '"""Well Mixed Model"""'], {}), "(self, 'Well Mixed Model')\n", (548, 574), False, 'import gillespy2\n'), ((1739, 1908), 'gillespy2.Reaction', 'gillespy2.Reaction', ([], {'name': '"""tcp"""', 'reactants': "{self.dict_species['Gf']: 1}", 'products': "{self.dict_species['Gf']: 1, self.dict_species['RNA']: 1}", 'rate': "parameters['mu']"}), "(name='tcp', reactants={self.dict_species['Gf']: 1},\n products={self.dict_species['Gf']: 1, self.dict_species['RNA']: 1},\n rate=parameters['mu'])\n", (1757, 1908), False, 'import gillespy2\n'), ((2023, 2196), 'gillespy2.Reaction', 'gillespy2.Reaction', ([], {'name': '"""tlt"""', 'reactants': "{self.dict_species['RNA']: 1}", 'products': "{self.dict_species['RNA']: 1, self.dict_species['P']: 1}", 'rate': "parameters['kappa']"}), "(name='tlt', reactants={self.dict_species['RNA']: 1},\n products={self.dict_species['RNA']: 1, self.dict_species['P']: 1}, rate\n =parameters['kappa'])\n", (2041, 2196), False, 'import gillespy2\n'), ((2307, 2477), 'gillespy2.Reaction', 'gillespy2.Reaction', ([], {'name': '"""bdg"""', 'reactants': "{self.dict_species['Gf']: 1, self.dict_species['P']: 1}", 'products': "{self.dict_species['Gb']: 1}", 'rate': "parameters['k_a']"}), "(name='bdg', reactants={self.dict_species['Gf']: 1, self.\n dict_species['P']: 1}, products={self.dict_species['Gb']: 1}, rate=\n parameters['k_a'])\n", (2325, 2477), False, 'import gillespy2\n'), ((2597, 2774), 'gillespy2.Reaction', 'gillespy2.Reaction', ([], {'name': "('ubdg' + name)", 'reactants': "{self.dict_species['Gb']: 1}", 'products': "{self.dict_species['Gf']: 1, self.dict_species['P']: 1}", 'rate': "parameters['k_d']"}), "(name='ubdg' + name, reactants={self.dict_species['Gb']: \n 1}, products={self.dict_species['Gf']: 1, self.dict_species['P']: 1},\n rate=parameters['k_d'])\n", (2615, 2774), False, 'import gillespy2\n'), ((3280, 3355), 'gillespy2.Model.run', 'gillespy2.Model.run', 
(['self'], {'number_of_trajectories': 'n_trajectories', 'seed': 'seed'}), '(self, number_of_trajectories=n_trajectories, seed=seed)\n', (3299, 3355), False, 'import gillespy2\n'), ((681, 729), 'gillespy2.Species', 'gillespy2.Species', ([], {'name': 'species', 'initial_value': '(0)'}), '(name=species, initial_value=0)\n', (698, 729), False, 'import gillespy2\n'), ((1106, 1154), 'gillespy2.Parameter', 'gillespy2.Parameter', ([], {'name': 'name', 'expression': 'value'}), '(name=name, expression=value)\n', (1125, 1154), False, 'import gillespy2\n'), ((1518, 1622), 'gillespy2.Reaction', 'gillespy2.Reaction', ([], {'name': "('dgd' + name)", 'reactants': '{species: 1}', 'products': '{}', 'rate': "parameters['gamma']"}), "(name='dgd' + name, reactants={species: 1}, products={},\n rate=parameters['gamma'])\n", (1536, 1622), False, 'import gillespy2\n'), ((3203, 3246), 'numpy.linspace', 'np.linspace', (['(0)', 'time_stop'], {'num': '(n_points + 1)'}), '(0, time_stop, num=n_points + 1)\n', (3214, 3246), True, 'import numpy as np\n')] |
"""Classes of fundamental atomistic concepts
Representations of fundamental atomistic concepts,
such as energy levels, Atoms,...
Uses ASE's Atoms objects.
"""
import numpy as np
import copy as cp
from ase.atoms import Atom, Atoms
class EnergyLevel(object):
    """A single energy level with occupation and optional metadata."""

    def __init__(self, energy, occupation=1.0, wfn=None, weight=None):
        """Store energy, occupation and optional wave function / weight."""
        self.energy = energy
        self.occupation = occupation
        self.wfn = wfn
        self.weight = weight
class EnergyLevels(object):
    """A list of energy levels with an optional Fermi energy.

    Compared to the C++ class,
    - the copy constructor has been renamed to the 'copy' method
    - the get/set methods have been stripped off
    - instead of overloading *=, the levels are exposed to the user
    - removed setFermiZero
    """

    def __init__(self, energies=None, occupations=None, weights=None, wfns=None, fermi=None):
        """Fill object with levels and Fermi energy.

        Parameters
        ----------
        energies : iterable of float, optional
            Level energies; if omitted, an empty container is created.
        occupations : iterable of float, optional
            Explicit occupations (take precedence over `fermi`).
        weights : iterable of float, optional
            One weight per level (used in the density of states).
        wfns : iterable, optional
            One wave function per level.
        fermi : float, optional
            Fermi energy; if given (and `occupations` is not), levels
            below it get occupation 1.0, the rest 0.0.
        """
        self.levels = []
        self.fermi = fermi
        # BUG FIX: this used to raise ValueError for energies=None, which
        # made the no-argument constructor EnergyLevels() unusable
        # (it is called that way in Dispersion.energylevels).
        if energies is None:
            energies = []
        n = len(energies)
        # If occupations are specified, take them
        if occupations is not None:
            for i in range(n):
                self.levels.append(EnergyLevel(energies[i], occupations[i]))
        # If we just have a Fermi energy, derive 0/1 occupations from it
        elif fermi is not None:
            for i in range(n):
                occupation = 1.0 if energies[i] < fermi else 0.0
                self.levels.append(EnergyLevel(energies[i], occupation))
        # Neither Fermi energy nor occupations: occupations are unknown
        else:
            for i in range(n):
                self.levels.append(EnergyLevel(energies[i], None))
        if wfns is not None:
            if len(wfns) == n:
                for i in range(n):
                    self.levels[i].wfn = wfns[i]
            else:
                print("Error: Number of wave functions != number or levels")
        if weights is not None:
            if len(weights) == n:
                for i in range(n):
                    self.levels[i].weight = weights[i]
            else:
                print("Error: Number of weights != number or levels")

    @property
    def energies(self):
        """Energies of all levels as a numpy array."""
        return np.array([l.energy for l in self.levels])

    @property
    def occupations(self):
        """Occupations of all levels as a numpy array."""
        return np.array([l.occupation for l in self.levels])

    @energies.setter
    def energies(self, es):
        """Sets level energies, not touching occupations."""
        if len(self.levels) != len(es):
            # BUG FIX: the message previously printed {le} twice and never
            # used the `lo` argument.
            print('Error: Trying to set {lo} energies for {le} levels.' \
                  .format(lo=len(es), le=len(self.levels)))
            return
        for i in range(len(self.levels)):
            self.levels[i].energy = es[i]

    @occupations.setter
    def occupations(self, os):
        """Sets occupations for existing levels."""
        if len(os) != len(self.levels):
            print('Error: Trying to set {lo} occupations for {le} levels.' \
                  .format(lo=len(os), le=len(self.levels)))
        else:
            for i in range(len(self.levels)):
                self.levels[i].occupation = os[i]

    def copy(self, energylevels):
        """Make self a copy of energylevels.

        NOTE(review): cp.copy is shallow -- the EnergyLevel objects are
        shared with the source; confirm this aliasing is intended.
        """
        self.levels = cp.copy(energylevels.levels)
        self.fermi = energylevels.fermi

    def join(self, energylevels):
        """Append the levels of another EnergyLevels instance."""
        self.levels = self.levels + energylevels.levels
        if self.fermi != energylevels.fermi:
            print('Warning: Merging energy levels'
                  'with different Fermi energies.')
        self.fermi = energylevels.fermi

    def sort(self):
        """Sort levels by increasing energy."""
        self.levels.sort(key=lambda x: x.energy)

    def shift(self, de):
        """Shift all level energies and the Fermi energy by de."""
        self.energies += de
        self.fermi += de

    def __iadd__(self, b):
        """Shift by a number, or merge with another EnergyLevels."""
        if isinstance(b, (int, float)):
            # BUG FIX: this branch referenced the undefined name `de`
            # (NameError); delegate to shift() instead.
            self.shift(b)
        elif isinstance(b, self.__class__):
            self.levels += b.levels
            self.sort()
            if not self.fermi == b.fermi:
                self.fermi = None
        else:
            raise TypeError("Unsupported operand type(s) for +: '{}' and '{}'"\
                    .format(type(self), type(b)))
        return self

    def __isub__(self, de):
        """Shift all level energies and the Fermi energy by -de."""
        self.energies -= de
        self.fermi -= de
        return self

    def n_occupied(self, epsilon=1e-12):
        """Return number of occupied levels

        Levels with energy at most epsilon above Fermi still count as occupied.
        This prevents the disregard of occupied levels, when the Fermi energy
        is identical to the highest occupied level.
        """
        if self.fermi:
            return sum([1 if e < self.fermi + epsilon else 0 for e in self.energies])
        elif all(o is not None for o in self.occupations):
            print("Note: Counting occupied levels based on occupation number.")
            return sum([1 if o > 0 else 0 for o in self.occupations])
        else:
            print("Error: Cannot determine occupations.")

    def n_empty(self):
        """Return number of empty levels."""
        # BUG FIX: previously read the undefined global name `levels`.
        return len(self.levels) - self.n_occupied()

    def __str__(self):
        text = "{} energy levels".format(len(self.levels))
        if self.fermi:
            text += ", Fermi energy {:.3f} eV".format(self.fermi)
        if all(o is not None for o in self.occupations):
            text += ", occupations specified"
        return text

    def __getitem__(self, index):
        return self.levels[index]

    def dos(self, bmethod = 'Gaussian', bepsilon = 1e-3, FWHM = 0.1, delta_e = 0.005):
        """
        Returns [energy, density of states].

        Parameters
        ----------
        bmethod: Method used for broadening ('Gaussian' or 'Lorentzian')
        bepsilon: Convolution is performed with broadening function in range
            [E-Eb, E+Eb] where Eb is determined such that the integrated weight
            of the broadening function outside of [-Eb, Eb] is < bepsilon.
        FWHM: Full-width of broadening function at half-maximum [eV]
        delta_e: spacing of energy grid [eV]
        """
        import scipy.special as scsp
        # Prepare broadening functions for later convolution
        # quantile function quantile(y) = x is defined such that
        # the integral of the probability density from -\infty to x equals y.
        if bmethod == 'Gaussian':
            # For a Gaussian, FWHM = 2*sqrt(2*ln 2) * sigma.
            # BUG FIX: this previously divided by np.log(8*np.sqrt(2))
            # (~2.426) instead of sqrt(8*ln 2) (~2.355), a ~3% width error.
            sigma = FWHM / np.sqrt(8 * np.log(2))
            quantile = lambda y: np.sqrt(2) * scsp.erfinv(2.0*y - 1.0) * sigma
            broadening = lambda x: 1/(sigma * np.sqrt(2*np.pi)) \
                         * np.exp( - x**2 / (2 * sigma**2) )
        elif bmethod == 'Lorentzian':
            gamma = FWHM * 0.5
            quantile = lambda y: np.tan(np.pi * (y - 0.5)) * gamma
            broadening = lambda x: 1/np.pi * gamma / (x**2 + gamma**2)
        else:
            # BUG FIX: previously only printed an error and then crashed
            # with a NameError on the undefined `quantile`.
            raise ValueError('Broadening method "{}" not recognized.'
                             .format(bmethod))
        eb = -quantile(bepsilon * 0.5)
        if FWHM / delta_e < 5:
            print("Warning: FWHM / delta_e < 5. Broadening function might not be sampled well.")
        # Tabulate the broadening function on [-eb, eb]
        benergies = np.r_[-eb : eb : delta_e]
        bprofile = broadening(benergies)
        self.sort()
        energies = self.energies
        loE = energies[0] - eb
        hiE = energies[-1] + eb
        E = np.r_[loE : hiE : delta_e]
        # Encoding the discretized energy in the array index i makes the code much faster.
        # Create dos of delta-peaks to be folded with the broadening profile
        DOSdelta = np.zeros(len(E))
        for level in self.levels:
            e = level.energy
            w = level.weight if level.weight else 1.0
            # In order to be able to fold with the tabulated profile, we have to
            # place levels *on* the grid, i.e. level spacing cannot be smaller
            # than delta_e.
            n = int((e-loE)/delta_e)
            # Note: DOS should be calculated for unoccupied levels as well!
            DOSdelta[n] += w
        # Convolve with the broadening profile, keeping same dimension
        # Can be made even faster by using fftconvolve
        DOS = np.convolve(DOSdelta, bprofile, mode='same')
        return np.array([E, DOS])
class KPoint(object):
    """Holds the energy levels and weight of a single k-point."""

    def __init__(self, kvector=None, energylevels=None, weight=None):
        """Store k-vector (as numpy array), energy levels and weight."""
        self.kvector = np.array(kvector)
        self.energylevels = energylevels
        self.weight = weight

    def __iadd__(self, k):
        """Merging two k-points

        Used e.g. when merging k-points of different spins
        """
        # BUG FIX: `self.kvector != k.kvector` on numpy vectors yields an
        # element-wise boolean array whose truth value is ambiguous
        # (ValueError); compare with np.array_equal instead.
        if not np.array_equal(self.kvector, k.kvector):
            print("Warning: Adding k-points with differing k-vectors {} and {}"\
                  .format(self.kvector, k.kvector))
        self.weight += k.weight
        self.energylevels += k.energylevels
        # BUG FIX: __iadd__ must return self, otherwise `kpt += other`
        # rebinds kpt to None.
        return self

    @property
    def nbnd(self):
        """Number of bands (levels) at this k-point."""
        return len(self.energylevels.energies)

    @property
    def energies(self):
        return self.energylevels.energies

    @property
    def fermi(self):
        return self.energylevels.fermi

    def copy(self, kpt):
        """Make self a deep copy of kpt.

        BUG FIX: previously referenced the undefined name `spectrum` and
        called EnergyLevels.copy() without its required argument.
        """
        self.energylevels = cp.deepcopy(kpt.energylevels)
        self.kvector = cp.copy(kpt.kvector)
        self.weight = cp.copy(kpt.weight)

    def __str__(self):
        e = self.energylevels
        k = self.kvector
        text = 'k = ({:6.3f}, {:6.3f}, {:6.3f})'.format(k[0], k[1], k[2])
        if self.weight:
            w = self.weight
            text += ', w = {}'.format(w)
        text += ' : {}\n'.format(e.__str__())
        return text
class Dispersion(object):
    """Holds a collection of k-points"""
    def __init__(self, kpoints=None):
        # Store the list of KPoint objects (empty list by default).
        if kpoints is None:
            self.kpoints = []
        else:
            self.kpoints = kpoints
    @property
    def energylevels(self):
        """Collect the levels of all k-points into one EnergyLevels object."""
        # NOTE(review): EnergyLevels() without arguments raises ValueError
        # as defined above, so this property cannot work as written -- verify.
        s = EnergyLevels()
        for kpt in self.kpoints:
            s += kpt.energylevels
        return s
    @property
    def energies(self):
        """All level energies across k-points."""
        return self.energylevels.energies
    @property
    def kvectors(self):
        """List of k-vectors, one per k-point."""
        s = []
        for kpt in self.kpoints:
            s.append(kpt.kvector)
        return s
    @property
    def weights(self):
        """List of k-point weights."""
        # NOTE(review): KPoint defines `weight`, not `weights`, so this
        # likely raises AttributeError -- verify.
        s = []
        for kpt in self.kpoints:
            s.append(kpt.weights)
        return s
    @property
    def fermi(self):
        """Returns Fermi energy."""
        # If all k-points agree, return the common value; otherwise warn
        # and fall back to the mean.
        fermis = [k.fermi for k in self.kpoints]
        fermi = np.unique(fermis)
        if len(fermi) == 1:
            return fermi[0]
        elif len(fermi) != 1:
            print("There are Fermi energies {}".format(fermis))
            print("Using the mean {}".format(np.mean(fermis)))
            return np.mean(fermis)
    def merge_kpoints(self):
        """Collapse all k-points into a single one at k = (0,0,0)."""
        kv = [0,0,0]
        # NOTE(review): this passes EnergyLevels objects where the
        # EnergyLevels constructor expects plain energies, so the merged
        # levels are almost certainly not built correctly -- verify.
        levels = EnergyLevels([k.energylevels for k in self.kpoints])
        weight = 1.0
        self.kpoints = [KPoint(kv,levels,weight)]
    def __iadd__(self, s):
        """Merging two dispersions
        Used e.g. when merging dispersions belonging to different spins.
        """
        if len(self.kpoints) != len(s.kpoints):
            print("Unable to merge due to different number of kpoints")
        for i in range(len(self.kpoints)):
            self.kpoints[i] += s.kpoints[i]
        return self
    def shift(self, e):
        """Shift energylevels by energy e"""
        for k in self.kpoints:
            k.energylevels.shift(e)
    @property
    def nbnd(self):
        """Common number of bands; warns if k-points disagree."""
        nbnds = [k.nbnd for k in self.kpoints]
        nbnd = np.unique(nbnds)
        # NOTE(review): nbnd is already unique; the inner np.unique is
        # redundant.
        if len( np.unique(nbnd) ) != 1:
            print("Warning: k-points have different numer of bands {}"\
                  .format(nbnd))
        return nbnd[0]
    @property
    def nkpt(self):
        """Number of k-points."""
        return len(self.kpoints)
    def copy(self, dispersion):
        """Performs deep copy."""
        # NOTE(review): KPoint.copy requires a source argument, so
        # k.copy() raises TypeError -- verify the intended semantics.
        self.kpoints = [k.copy() for k in dispersion.kpoints]
    def __str__(self):
        text = "Dispersion containing {} k-points\n".format(self.nkpt)
        for kpt in self.kpoints:
            text += kpt.__str__()
        return text
    def __getitem__(self, index):
        return self.kpoints[index]
#class Atom(object):
#
# """Represents a single atom.
#
# An atom with coordinates, chemical species, charge, etc.
# """
#
# def __init__(self, coordinates=None, number=None, charge=None):
# self.coordinates = np.array(coordinates)
# self.number = number
# self.charge = charge
#
# @property
# def symbol(self):
# return number2symbol(self.number)
#
# @symbol.setter
# def symbol(self, symbol_new):
# self.symbol = symbol_new
#
# def distance(self, atom):
# d = 0
# #print c1.coordinates
# #print c2.coordinates
# for c1, c2 in zip(self.coordinates, atom.coordinates):
# d += (c1 - c2)**2
# return np.sqrt(d)
#
# def __str__(self):
# text = "(x,y,z) = ({},{},{})\tN = {}\tCharge {}".format(
# self.coordinates[0],
# self.coordinates[1],
# self.coordinates[2],
# self.number,
# self.charge)
# return text
| [
"copy.copy",
"scipy.special.erfinv",
"numpy.mean",
"numpy.array",
"numpy.exp",
"numpy.tan",
"numpy.convolve",
"numpy.unique",
"numpy.sqrt"
] | [((2482, 2523), 'numpy.array', 'np.array', (['[l.energy for l in self.levels]'], {}), '([l.energy for l in self.levels])\n', (2490, 2523), True, 'import numpy as np\n'), ((2581, 2626), 'numpy.array', 'np.array', (['[l.occupation for l in self.levels]'], {}), '([l.occupation for l in self.levels])\n', (2589, 2626), True, 'import numpy as np\n'), ((3506, 3534), 'copy.copy', 'cp.copy', (['energylevels.levels'], {}), '(energylevels.levels)\n', (3513, 3534), True, 'import copy as cp\n'), ((8471, 8515), 'numpy.convolve', 'np.convolve', (['DOSdelta', 'bprofile'], {'mode': '"""same"""'}), "(DOSdelta, bprofile, mode='same')\n", (8482, 8515), True, 'import numpy as np\n'), ((8531, 8549), 'numpy.array', 'np.array', (['[E, DOS]'], {}), '([E, DOS])\n', (8539, 8549), True, 'import numpy as np\n'), ((8694, 8711), 'numpy.array', 'np.array', (['kvector'], {}), '(kvector)\n', (8702, 8711), True, 'import numpy as np\n'), ((9538, 9563), 'copy.copy', 'cp.copy', (['spectrum.kvector'], {}), '(spectrum.kvector)\n', (9545, 9563), True, 'import copy as cp\n'), ((9586, 9610), 'copy.copy', 'cp.copy', (['spectrum.weight'], {}), '(spectrum.weight)\n', (9593, 9610), True, 'import copy as cp\n'), ((10795, 10812), 'numpy.unique', 'np.unique', (['fermis'], {}), '(fermis)\n', (10804, 10812), True, 'import numpy as np\n'), ((11865, 11881), 'numpy.unique', 'np.unique', (['nbnds'], {}), '(nbnds)\n', (11874, 11881), True, 'import numpy as np\n'), ((11046, 11061), 'numpy.mean', 'np.mean', (['fermis'], {}), '(fermis)\n', (11053, 11061), True, 'import numpy as np\n'), ((11899, 11914), 'numpy.unique', 'np.unique', (['nbnd'], {}), '(nbnd)\n', (11908, 11914), True, 'import numpy as np\n'), ((6893, 6927), 'numpy.exp', 'np.exp', (['(-x ** 2 / (2 * sigma ** 2))'], {}), '(-x ** 2 / (2 * sigma ** 2))\n', (6899, 6927), True, 'import numpy as np\n'), ((6702, 6712), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6709, 6712), True, 'import numpy as np\n'), ((6747, 6757), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), 
'(2)\n', (6754, 6757), True, 'import numpy as np\n'), ((6760, 6786), 'scipy.special.erfinv', 'scsp.erfinv', (['(2.0 * y - 1.0)'], {}), '(2.0 * y - 1.0)\n', (6771, 6786), True, 'import scipy.special as scsp\n'), ((7029, 7054), 'numpy.tan', 'np.tan', (['(np.pi * (y - 0.5))'], {}), '(np.pi * (y - 0.5))\n', (7035, 7054), True, 'import numpy as np\n'), ((11009, 11024), 'numpy.mean', 'np.mean', (['fermis'], {}), '(fermis)\n', (11016, 11024), True, 'import numpy as np\n'), ((6838, 6856), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (6845, 6856), True, 'import numpy as np\n')] |
import argparse
from datetime import datetime
import tensorflow as tf
import cv2
import pandas as pd
import numpy as np
from PIL import Image
import os
import sys
from tensorflow.keras import layers, models, optimizers
# Number of target classes.
# NOTE(review): NCLASSES is never referenced below (the head is a single
# logit) -- confirm whether it is used elsewhere or dead.
NCLASSES = 2
# RGB input images.
NUM_CHANNELS = 3
# Module-level defaults; reassigned from the --lr / --ne CLI arguments
# in the __main__ block (note the argparse default for lr is 0.0003).
lr = 0.001
ne = 250
# Input resolution fed to the ImageNet-style backbones.
HEIGHT = 224
WIDTH = 224
# Schema of one serialized example in the TFRecord files: a PNG-encoded
# image plus integer label and patient/tooth identifiers.
image_feature_description = {
    'img_raw': tf.io.FixedLenFeature([], tf.string),
    'label': tf.io.FixedLenFeature([], tf.int64),
    'personid': tf.io.FixedLenFeature([], tf.int64),
    'toothid': tf.io.FixedLenFeature([], tf.int64)
}
def _parse_function(proto):
    """Decode one serialized example into (image, label, toothid, personid).

    The PNG image is decoded, resized to (HEIGHT, WIDTH) and scaled to
    the [0, 1] range.
    """
    example = tf.io.parse_single_example(proto, image_feature_description)
    decoded = tf.image.decode_png(example["img_raw"], channels=NUM_CHANNELS)
    resized = tf.image.resize(decoded, [HEIGHT, WIDTH])
    image = resized / 255.0  # normalize to [0,1] range
    return image, example["label"], example["toothid"], example["personid"]
def _train_parser(image, label, toothid, personid):
return image,label
if __name__ == '__main__':
    # ---- Command-line arguments -------------------------------------
    parser = argparse.ArgumentParser(description='Tooth NETWORK TRAINER AND TESTER')
    parser.add_argument("--lr", default=0.0003, type=float, help="Learning Rate")
    parser.add_argument("--ne", default=250,type=int, help="Number of Epochs")
    parser.add_argument("--ft", default="Fine", type=str, help="False Tuning")
    args = parser.parse_args()
    lr = args.lr
    ne = args.ne
    # "Fine" -> train the whole network; anything else -> frozen
    # ImageNet backbones used purely as feature extractors.
    ft = args.ft
    # ---- Datasets ---------------------------------------------------
    dataset = tf.data.TFRecordDataset("train.tfrecord")
    val_dataset = tf.data.TFRecordDataset("val.tfrecord")
    val_dataset = val_dataset.map(_parse_function)
    val_dataset = val_dataset.batch(16)
    dataset = dataset.shuffle(buffer_size=40)
    # NOTE(review): assumes the training TFRecord holds 360 examples
    # (70% train / 30% test) -- confirm against the data pipeline.
    train_size = int(0.7 * 360)
    train_dataset = dataset.take(train_size)
    test_dataset = dataset.skip(train_size)
    train_dataset = train_dataset.map(_parse_function)
    test_dataset = test_dataset.map(_parse_function)
    train_dataset = train_dataset.batch(32)
    test_dataset = test_dataset.batch(32)
    # Keep only (image, label) pairs for fitting.
    train_batch = train_dataset.map(_train_parser)
    test_batch = test_dataset.map(_train_parser)
    IMG_SHAPE = (224, 224, 3)
    # Grab one batch so the models can be built/called below.
    for image_batch, label_batch in train_batch.take(1):
        pass
    if ft == "Fine":
        # Randomly initialized backbones, trained end-to-end.
        modells = [
            tf.keras.applications.vgg19.VGG19(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS), include_top=False),
            tf.keras.applications.Xception(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS), include_top=False),
            tf.keras.applications.InceptionV3(input_shape=(
                HEIGHT, WIDTH, NUM_CHANNELS), include_top=False),
            tf.keras.applications.MobileNetV2(input_shape=(
                HEIGHT, WIDTH, NUM_CHANNELS), include_top=False)
        ]
    else:
        # Pretrained ImageNet backbones, kept frozen (no fitting below).
        modells = [
            tf.keras.applications.vgg19.VGG19(input_shape=(
                HEIGHT, WIDTH, NUM_CHANNELS), include_top=False, weights='imagenet'),
            tf.keras.applications.Xception(input_shape=(
                HEIGHT, WIDTH, NUM_CHANNELS), include_top=False, weights='imagenet'),
            tf.keras.applications.InceptionV3(input_shape=(
                HEIGHT, WIDTH, NUM_CHANNELS), include_top=False, weights='imagenet'),
            tf.keras.applications.MobileNetV2(input_shape=(
                HEIGHT, WIDTH, NUM_CHANNELS), include_top=False, weights='imagenet')
        ]
    model_names= ["vgg19","xception","inceptionv3","MobileNetv2"]
    # Intermediate layers whose activations are exported as features.
    # NOTE(review): "conv2d_3"/"conv2d_97" depend on Keras' global layer
    # name counters and may differ between TF versions -- verify.
    last_layers = ["block5_conv4", "conv2d_3", "conv2d_97", "Conv_1"]
    stp = 0
    for base_model in modells:
        feature_batch = base_model(image_batch)
        modelName = model_names[stp]
        # Classification head: global average pooling + single logit.
        global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
        feature_batch_average = global_average_layer(feature_batch)
        prediction_layer = tf.keras.layers.Dense(1)
        prediction_batch = prediction_layer(feature_batch_average)
        model = tf.keras.Sequential([
            base_model,
            global_average_layer,
            prediction_layer
        ])
        checkpoint_path = modelName+".ckpt"
        checkpoint_dir = os.path.dirname(checkpoint_path)
        # Create a callback that saves the model's weights
        cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                         save_weights_only=True,
                                                         verbose=1)
        if ft == "Fine":
            model.trainable = True
        else:
            model.trainable = False
        # NOTE(review): `lr=` is deprecated in newer TF in favour of
        # `learning_rate=`; kept for compatibility with the rest of the file.
        model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=lr),
                      loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                      metrics=['accuracy'])
        if ft == "Fine":
            history = model.fit(train_batch,
                                epochs=ne,
                                validation_data=test_batch,
                                callbacks=[cp_callback])
            # BUG FIX: the history dump used to live outside this branch,
            # so a non-"Fine" run crashed with NameError on `history`.
            hist_df = pd.DataFrame(history.history)
            hist_csv_file = modelName +'_history.csv'
            hist_df.to_csv(hist_csv_file)
        # ---- Feature export on the validation set -------------------
        # Activations of the chosen intermediate layer, averaged over the
        # two spatial dimensions, plus the patient/tooth ids.
        layer_output = base_model.get_layer(last_layers[stp]).output
        features = []
        ids = []
        tooths = []
        intermediate_model = tf.keras.models.Model(inputs=base_model.input, outputs=layer_output)
        for data in val_dataset:
            img, lbl, tid, pid = data
            intermediate_prediction = intermediate_model.predict(img.numpy())
            feature = tf.math.reduce_mean(intermediate_prediction, axis=2)
            feature = tf.math.reduce_mean(feature, axis=1).numpy()
            features.append(feature)
            ids.extend(pid.numpy())
            tooths.extend(tid.numpy())
        features = np.concatenate(features)
        features = pd.DataFrame(features)
        features = features.add_prefix('Feature_')
        ids = pd.DataFrame(ids)
        tooths = pd.DataFrame(tooths)
        features['Id'] = ids
        features['ToothId'] = tooths
        if ft == "Fine":
            features.to_csv(modelName+"_"+"feats.csv")
            ids.to_csv(modelName+"_"+"ids.csv")
            tooths.to_csv(modelName+"_"+"tooths.csv")
        else:
            features.to_csv(modelName+"_"+"feats_withoutfine.csv")
            ids.to_csv(modelName+"_"+"ids_withoutfine.csv")
            tooths.to_csv(modelName+"_"+"tooths_withoutfine.csv")
        stp += 1
| [
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.applications.Xception",
"tensorflow.image.decode_png",
"tensorflow.keras.Sequential",
"tensorflow.keras.optimizers.RMSprop",
"pandas.DataFrame",
"os.path.dirname",
"tensorflo... | [((342, 378), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (363, 378), True, 'import tensorflow as tf\n'), ((393, 428), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (414, 428), True, 'import tensorflow as tf\n'), ((446, 481), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (467, 481), True, 'import tensorflow as tf\n'), ((498, 533), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (519, 533), True, 'import tensorflow as tf\n'), ((579, 639), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['proto', 'image_feature_description'], {}), '(proto, image_feature_description)\n', (605, 639), True, 'import tensorflow as tf\n'), ((680, 727), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['img'], {'channels': 'NUM_CHANNELS'}), '(img, channels=NUM_CHANNELS)\n', (699, 727), True, 'import tensorflow as tf\n'), ((740, 779), 'tensorflow.image.resize', 'tf.image.resize', (['image', '[HEIGHT, WIDTH]'], {}), '(image, [HEIGHT, WIDTH])\n', (755, 779), True, 'import tensorflow as tf\n'), ((1080, 1151), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tooth NETWORK TRAINER AND TESTER"""'}), "(description='Tooth NETWORK TRAINER AND TESTER')\n", (1103, 1151), False, 'import argparse\n'), ((1476, 1517), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['"""train.tfrecord"""'], {}), "('train.tfrecord')\n", (1499, 1517), True, 'import tensorflow as tf\n'), ((1534, 1573), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['"""val.tfrecord"""'], {}), "('val.tfrecord')\n", (1557, 1573), True, 'import tensorflow as tf\n'), ((3546, 3586), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (3584, 3586), True, 
'import tensorflow as tf\n'), ((3674, 3698), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (3695, 3698), True, 'import tensorflow as tf\n'), ((3775, 3848), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['[base_model, global_average_layer, prediction_layer]'], {}), '([base_model, global_average_layer, prediction_layer])\n', (3794, 3848), True, 'import tensorflow as tf\n'), ((3944, 3976), 'os.path.dirname', 'os.path.dirname', (['checkpoint_path'], {}), '(checkpoint_path)\n', (3959, 3976), False, 'import os\n'), ((4051, 4150), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_path', 'save_weights_only': '(True)', 'verbose': '(1)'}), '(filepath=checkpoint_path,\n save_weights_only=True, verbose=1)\n', (4085, 4150), True, 'import tensorflow as tf\n'), ((5018, 5086), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'base_model.input', 'outputs': 'layer_output'}), '(inputs=base_model.input, outputs=layer_output)\n', (5039, 5086), True, 'import tensorflow as tf\n'), ((5538, 5562), 'numpy.concatenate', 'np.concatenate', (['features'], {}), '(features)\n', (5552, 5562), True, 'import numpy as np\n'), ((5578, 5600), 'pandas.DataFrame', 'pd.DataFrame', (['features'], {}), '(features)\n', (5590, 5600), True, 'import pandas as pd\n'), ((5658, 5675), 'pandas.DataFrame', 'pd.DataFrame', (['ids'], {}), '(ids)\n', (5670, 5675), True, 'import pandas as pd\n'), ((5689, 5709), 'pandas.DataFrame', 'pd.DataFrame', (['tooths'], {}), '(tooths)\n', (5701, 5709), True, 'import pandas as pd\n'), ((2250, 2349), 'tensorflow.keras.applications.vgg19.VGG19', 'tf.keras.applications.vgg19.VGG19', ([], {'input_shape': '(HEIGHT, WIDTH, NUM_CHANNELS)', 'include_top': '(False)'}), '(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS),\n include_top=False)\n', (2283, 2349), True, 'import tensorflow as tf\n'), ((2353, 2449), 'tensorflow.keras.applications.Xception', 
'tf.keras.applications.Xception', ([], {'input_shape': '(HEIGHT, WIDTH, NUM_CHANNELS)', 'include_top': '(False)'}), '(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS),\n include_top=False)\n', (2383, 2449), True, 'import tensorflow as tf\n'), ((2453, 2552), 'tensorflow.keras.applications.InceptionV3', 'tf.keras.applications.InceptionV3', ([], {'input_shape': '(HEIGHT, WIDTH, NUM_CHANNELS)', 'include_top': '(False)'}), '(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS),\n include_top=False)\n', (2486, 2552), True, 'import tensorflow as tf\n'), ((2567, 2666), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'input_shape': '(HEIGHT, WIDTH, NUM_CHANNELS)', 'include_top': '(False)'}), '(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS),\n include_top=False)\n', (2600, 2666), True, 'import tensorflow as tf\n'), ((2712, 2831), 'tensorflow.keras.applications.vgg19.VGG19', 'tf.keras.applications.vgg19.VGG19', ([], {'input_shape': '(HEIGHT, WIDTH, NUM_CHANNELS)', 'include_top': '(False)', 'weights': '"""imagenet"""'}), "(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS),\n include_top=False, weights='imagenet')\n", (2745, 2831), True, 'import tensorflow as tf\n'), ((2850, 2966), 'tensorflow.keras.applications.Xception', 'tf.keras.applications.Xception', ([], {'input_shape': '(HEIGHT, WIDTH, NUM_CHANNELS)', 'include_top': '(False)', 'weights': '"""imagenet"""'}), "(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS),\n include_top=False, weights='imagenet')\n", (2880, 2966), True, 'import tensorflow as tf\n'), ((2985, 3104), 'tensorflow.keras.applications.InceptionV3', 'tf.keras.applications.InceptionV3', ([], {'input_shape': '(HEIGHT, WIDTH, NUM_CHANNELS)', 'include_top': '(False)', 'weights': '"""imagenet"""'}), "(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS),\n include_top=False, weights='imagenet')\n", (3018, 3104), True, 'import tensorflow as tf\n'), ((3123, 3242), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'input_shape': '(HEIGHT, 
WIDTH, NUM_CHANNELS)', 'include_top': '(False)', 'weights': '"""imagenet"""'}), "(input_shape=(HEIGHT, WIDTH, NUM_CHANNELS),\n include_top=False, weights='imagenet')\n", (3156, 3242), True, 'import tensorflow as tf\n'), ((4753, 4782), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (4765, 4782), True, 'import pandas as pd\n'), ((5305, 5357), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['intermediate_prediction'], {'axis': '(2)'}), '(intermediate_prediction, axis=2)\n', (5324, 5357), True, 'import tensorflow as tf\n'), ((4373, 4407), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'lr': 'lr'}), '(lr=lr)\n', (4400, 4407), True, 'import tensorflow as tf\n'), ((4432, 4484), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4466, 4484), True, 'import tensorflow as tf\n'), ((5374, 5410), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['feature'], {'axis': '(1)'}), '(feature, axis=1)\n', (5393, 5410), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# coding: utf-8
# Notebook-exported script: cluster GloVe word embeddings with a
# Gaussian mixture model, then visualise the clusters in 2-D via PCA
# and t-SNE.
# NOTE(review): relies on get_ipython(), so it must run under
# IPython/Jupyter rather than plain `python`.
# In[1]:
from utils.glove import GloVe
from utils.word_sample import WordSample
import numpy as np
from sklearn.mixture import GaussianMixture
# In[2]:
# Sample words that have a GloVe vector, then stack their embeddings.
n_samples = 10000
glove = GloVe("glove/glove.6B.300d.txt")
words = WordSample("./words_alpha.txt", incl_words=glove.wordset, n_samples=n_samples).words
emb_vecs = glove.get_emb_vecs_of(words)
# build the data-matrix with shape = (n_samples, emb_dims)
X = np.array([emb_vecs[w] for w in words])
# normalize it (each row to unit L2 norm)
length = np.sqrt((X**2).sum(axis=1))[:, None]
X = X / length
# In[3]:
# Fit a 10-component Gaussian mixture on the normalized embeddings.
gmm = GaussianMixture(n_components=10, verbose=1)
gmm.fit(X)
# In[4]:
# Hard cluster assignment for each word.
labels = gmm.predict(X)
# In[5]:
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[14]:
# 2-D PCA projection for visualisation.
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca_result = pca.fit_transform(X)
# In[15]:
# Fraction of variance captured by the two components (notebook echo).
pca.explained_variance_ratio_
# In[16]:
import seaborn as sns
plt.figure(figsize=(16,10))
sns.scatterplot(
    x=pca_result[:, 0], y=pca_result[:, 1],
    hue=labels,
    palette=sns.color_palette("hls", 10),
    alpha=0.3
)
# In[10]:
# 2-D t-SNE projection (slower; timed below).
import time
from sklearn.manifold import TSNE
time_start = time.time()
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(X)
print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start))
# In[11]:
plt.figure(figsize=(16,10))
sns.scatterplot(
    x=tsne_results[:, 0], y=tsne_results[:, 1],
    hue=labels,
    palette=sns.color_palette("hls", 10),
    legend="full",
    alpha=0.3
)
# Markdown cells preserved by the notebook exporter:
# $e^{i\pi} + 1 = 0$
# $\kappa = \frac{f''}{(1+{f'}^{2})^{3/2}}$
# In[ ]:
| [
"sklearn.manifold.TSNE",
"utils.word_sample.WordSample",
"sklearn.mixture.GaussianMixture",
"time.time",
"utils.glove.GloVe",
"matplotlib.pyplot.figure",
"numpy.array",
"sklearn.decomposition.PCA",
"seaborn.color_palette"
] | [((224, 256), 'utils.glove.GloVe', 'GloVe', (['"""glove/glove.6B.300d.txt"""'], {}), "('glove/glove.6B.300d.txt')\n", (229, 256), False, 'from utils.glove import GloVe\n'), ((454, 492), 'numpy.array', 'np.array', (['[emb_vecs[w] for w in words]'], {}), '([emb_vecs[w] for w in words])\n', (462, 492), True, 'import numpy as np\n'), ((588, 631), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': '(10)', 'verbose': '(1)'}), '(n_components=10, verbose=1)\n', (603, 631), False, 'from sklearn.mixture import GaussianMixture\n'), ((836, 855), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (839, 855), False, 'from sklearn.decomposition import PCA\n'), ((970, 998), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (980, 998), True, 'import matplotlib.pyplot as plt\n'), ((1206, 1217), 'time.time', 'time.time', ([], {}), '()\n', (1215, 1217), False, 'import time\n'), ((1225, 1283), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'verbose': '(1)', 'perplexity': '(40)', 'n_iter': '(300)'}), '(n_components=2, verbose=1, perplexity=40, n_iter=300)\n', (1229, 1283), False, 'from sklearn.manifold import TSNE\n'), ((1412, 1440), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (1422, 1440), True, 'import matplotlib.pyplot as plt\n'), ((265, 343), 'utils.word_sample.WordSample', 'WordSample', (['"""./words_alpha.txt"""'], {'incl_words': 'glove.wordset', 'n_samples': 'n_samples'}), "('./words_alpha.txt', incl_words=glove.wordset, n_samples=n_samples)\n", (275, 343), False, 'from utils.word_sample import WordSample\n'), ((1087, 1115), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', '(10)'], {}), "('hls', 10)\n", (1104, 1115), True, 'import seaborn as sns\n'), ((1533, 1561), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', '(10)'], {}), "('hls', 10)\n", (1550, 1561), True, 'import 
seaborn as sns\n'), ((1373, 1384), 'time.time', 'time.time', ([], {}), '()\n', (1382, 1384), False, 'import time\n')] |
import numpy as np
import pandas as pd
import pdb
from sklearn import metrics
from .classifiers.lightgbm import Lgbm
from .classifiers.base import AbstractLearner
from .data import CadeData
from . import models
from .base import AbstractDensity
def auc(df):
fpr, tpr, _ = metrics.roc_curve(df.truth.values, df.pred.values, pos_label=1)
return metrics.auc(fpr, tpr)
class Cade(AbstractDensity):
''' Classifier-adjusted density estimation
Based on https://pdfs.semanticscholar.org/e4e6/033069a8569ba16f64da3061538bcb90bec6.pdf
:param initial_density:
:param sim_size:
'''
# A soft target for the number of instances to simulate when `sim_size` is "auto"
simulation_size_attractor = 10000
def __init__(self,
initial_density=None,
classifier=Lgbm(),
sim_size='auto',
verbose=False):
super().__init__()
if initial_density is None:
self.initial_density = models.JointDensity()
else:
self.initial_density = initial_density
assert isinstance(self.initial_density, AbstractDensity)
assert isinstance(classifier, AbstractLearner)
self.classifier = classifier
self.sim_size = sim_size
self.verbose = verbose
def compute_simulation_size(self, df):
''' Determine the number of synthetic data samples to simulate
If self.sim_size is 'auto', sets the simulation size as the geometric mean
between the data size and self.simulation_size_attractor
If self.sim_size is a positive number less than 100, simulation size is
round(self.sim_size)*df.shape[0]
Finally, if self.sim_size >= 100, simulation size is round(self.sim_size)
'''
n_real = df.shape[0]
if isinstance(self.sim_size, str):
assert self.sim_size=='auto'
sim_n = np.sqrt(n_real*self.simulation_size_attractor)
elif self.sim_size < 100:
assert self.sim_size > 0
sim_n = round(self.sim_size*n_real)
if sim_n < 10:
raise Exception("Simulation size is very small. Consider using a larger value of sim_size")
else:
sim_n = round(self.sim_size)
self.sim_rate = sim_n / df.shape[0]
return int(sim_n)
def _diagnostics(self, x, truth):
val_df = pd.DataFrame({
'pred': self.classifier.predict(x),
'truth': truth
})
self.diagnostics = {
'val_df': val_df,
'auc': auc(val_df),
}
def _validate_data(self, data):
try:
assert isinstance(data, pd.DataFrame)
except:
raise Exception("the data needs to be a pandas.DataFrame")
try:
assert isinstance(data.columns[0], str)
except:
raise Exception("the data column names need to be strings, not " + str(type(df.columns[0])))
def train(self, df, diagnostics=False):
''' Model the density of the data
:param df: (pandas DataFrame)
'''
self._validate_data(df)
self.vp('Training a generative density model on '
+ str(df.shape[0]) + ' samples')
self.initial_density.train(df)
sim_n = self.compute_simulation_size(df)
self.vp('Simulating ' + str(sim_n) +
' fake samples from the model and join it with the real data')
partially_synthetic_data = CadeData(
X=pd.concat([df, self.initial_density.rvs(sim_n)]),
y=np.concatenate([np.ones(df.shape[0]), np.zeros(sim_n)])
)
self.vp('Train the classifier to distinguish real from fake')
self.classifier.train(partially_synthetic_data)
if diagnostics:
self._diagnostics(partially_synthetic_data.X, partially_synthetic_data.y)
if self.verbose:
if not hasattr(self, 'diagnostics'):
self._diagnostics(partially_synthetic_data.X, partially_synthetic_data.y)
AUROC = str(round(self.diagnostics['auc'], 3))
print("In-sample, the classifier had AUROC = " + AUROC)
def density(self, X):
''' Predict the density at new points
Apply equation 2.1 in https://pdfs.semanticscholar.org/e4e6/033069a8569ba16f64da3061538bcb90bec6.pdf
:param X: (pd.DataFrame or numpy array) Must match the exact column order of the `df`
argument that was passed to self.train
'''
self._validate_data(X)
# Initial density estimate
synthetic_dens = self.initial_density.density(X)
# Classifier adjustment factor
p_real = self.classifier.predict(X)
odds_real = p_real/(1 - p_real)
classifier_adjustment=self.sim_rate*odds_real
return synthetic_dens*classifier_adjustment
| [
"sklearn.metrics.roc_curve",
"numpy.zeros",
"numpy.ones",
"sklearn.metrics.auc",
"numpy.sqrt"
] | [((278, 341), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['df.truth.values', 'df.pred.values'], {'pos_label': '(1)'}), '(df.truth.values, df.pred.values, pos_label=1)\n', (295, 341), False, 'from sklearn import metrics\n'), ((353, 374), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (364, 374), False, 'from sklearn import metrics\n'), ((1894, 1942), 'numpy.sqrt', 'np.sqrt', (['(n_real * self.simulation_size_attractor)'], {}), '(n_real * self.simulation_size_attractor)\n', (1901, 1942), True, 'import numpy as np\n'), ((3579, 3599), 'numpy.ones', 'np.ones', (['df.shape[0]'], {}), '(df.shape[0])\n', (3586, 3599), True, 'import numpy as np\n'), ((3601, 3616), 'numpy.zeros', 'np.zeros', (['sim_n'], {}), '(sim_n)\n', (3609, 3616), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
image_proc.py
Particle image velocimetry for simple shear flows
Handles the primary functions
"""
import sys
import argparse
import numpy as np
from PIL import Image
import os
import matplotlib.pyplot as plt
SUCCESS = 0
INVALID_DATA = 1
IO_ERROR = 2
NUMBER_OF_IMAGE = 3
DEF_IMAGE_NAME_A = 'sample_im1.bmp'
DEF_IMAGE_NAME_B = 'sample_im2.bmp'
def warning(*objs):
"""Writes a message to stderr."""
print("WARNING: ", *objs, file=sys.stderr)
def plot_piv(base_f_name, piv_results):
"""
Make a plot of the PIV results
:param base_f_name: str of base output name (without extension)
:param piv_results: piv results, numpy array, with shape (y_position, displacement)
:return: save a png file
"""
plt.plot(piv_results[:,0], piv_results[:,1], 'bs')
plt.title('PIV results')
plt.xlabel('Y position (pixel)')
plt.ylabel('Displacement (pixel)')
out_name = base_f_name + '.png'
plt.savefig(out_name)
print("Wrote file: {}".format(out_name))
def load_image( infilename ) :
"""
Load image into Numpy array
:param infilename: input file name
:return: image_data : image in the form of Numpy array
"""
try:
img = Image.open( infilename )
img.load()
image_data = np.asarray( img, dtype="int32" )
except OSError as e:
warning("Read invalid image:", e)
return None, e
return image_data, SUCCESS
def divid_image(image, division_pixel):
"""
Cut a image into horizontal stripes and compress them into 1D brighness fluctuation profile
Parameters
------------
image : image as a 2D Numpy array
division_pixel : height of individual stripes (unit, pixels)
Returns
------------
image_segments : a list a image segments
y_position : position of image stripes
"""
height = image.shape[0]
index_divid = np.arange(0, height-1, division_pixel)
image_segments = []
y_position = []
if index_divid[-1] != height - 1:
index_divid = np.append(index_divid, height - 1)
num_stripes = index_divid.size - 1
for i in range(num_stripes):
index_a = index_divid[i]
index_b = index_divid[i + 1]
y_position.append((index_a + index_b)/2.0)
stripe = np.mean(image[index_a:index_b, :], axis=0)
stripe -= stripe.mean(); stripe /= stripe.std()
image_segments.append(stripe)
return image_segments, y_position
def x_corr(image_a_segments, image_b_segments):
"""
Calculate the displacement profile.
:param image_a_segments: Horizontal stripes from image 1
:param image_b_segments: Horizontal stripes from image 2
:return: shift : displacement profile
"""
import warnings
warnings.filterwarnings("ignore")
from scipy.signal import correlate
shift = np.zeros(len(image_a_segments))
for i in range(len(image_a_segments)):
y1 = image_a_segments[i]
y2 = image_b_segments[i]
nsamples = y1.shape[0]
xcorr = correlate(y1, y2)
d_shift = np.arange(1-nsamples, nsamples)
shift[i] = -d_shift[xcorr.argmax()]
return shift
def piv_analysis(image_a_path, image_b_path, division_pixel):
"""
Calculate the 1D velocity profile based on a pair of images.
Horizontal direction: flow direction.
Vertical direction: velocity gradient direction.
Upper boundary of image: upper static wall.
Lower boundary of image: lower moving wall.
Parameters
----------
image_a_path : path of image 1
image_b_path : path of image 2
division_pixel : Thickness (number of pixels) of horizontal stripes
Returns
-------
piv_result : displacement profile (column 2) versus y position (column 1)
"""
image_a, ret_a = load_image(image_a_path)
image_b, ret_b = load_image(image_b_path)
if (ret_a!=SUCCESS) or (ret_b!=SUCCESS):
return IO_ERROR
if not image_a.shape == image_b.shape:
warning('Image 1 and image 2 have different sizes')
return INVALID_DATA
image_a_segments, y_position = divid_image(image_a, division_pixel)
y_position = np.asarray(y_position)
image_b_segments = divid_image(image_b, division_pixel)[0]
disp_profile = x_corr(image_a_segments, image_b_segments)
# print(disp_profile)
piv_results = np.vstack((y_position, disp_profile))
return piv_results.T
def parse_cmdline(argv):
"""
Returns the parsed argument list and return code.
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object:
parser = argparse.ArgumentParser()
# print([DEF_IMAGE_NAME_A, DEF_IMAGE_NAME_B])
parser.add_argument("-m", "--image_file", help="The location of the image files",
default=[DEF_IMAGE_NAME_A, DEF_IMAGE_NAME_B], nargs=2)
parser.add_argument("-d", "--division_pixel", type=int,help="Thickness (number of pixels) of horizontal stripes",
default=5)
# parser.add_argument("-n", "--no_attribution", help="Whether to include attribution",
# action='store_false')
args = None
args = parser.parse_args(argv)
image1_none = not os.path.isfile(args.image_file[0])
image2_none = not os.path.isfile(args.image_file[1])
if image1_none or image2_none:
warning("Either {} or {} does not exist".format(args.image_file[0], args.image_file[1]))
parser.print_help()
return args, IO_ERROR
return args, SUCCESS
def main(argv=None):
args, ret = parse_cmdline(argv)
if ret != SUCCESS:
return ret
image_a_path = args.image_file[0]
image_b_path = args.image_file[1]
division_pixel = args.division_pixel
piv_results = piv_analysis(image_a_path, image_b_path, division_pixel)
image_a_name = os.path.basename(image_a_path)
image_b_name = os.path.basename(image_b_path)
name_p1 = os.path.splitext(image_a_name)[0]
name_p2 = os.path.splitext(image_b_name)[0]
base_f_name = 'piv_results_' + name_p1 + '_' + name_p2
out_name = base_f_name + '.csv'
try:
np.savetxt(out_name, piv_results, delimiter=',')
print("Wrote file: {}".format(out_name))
except ValueError as e:
warning("Data cannot be written to file:", e)
return INVALID_DATA
plot_piv(base_f_name, piv_results)
return SUCCESS # success
if __name__ == "__main__":
status = main()
sys.exit(status)
| [
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"os.path.isfile",
"numpy.mean",
"numpy.arange",
"numpy.savetxt",
"numpy.append",
"os.path.basename",
"scipy.signal.correlate",
"numpy.asarray",
"matplotlib.pyplot.ylabel",
"sys.exit",
"numpy.vstack",
"matplotlib.pyplot.plot",
"warning... | [((787, 839), 'matplotlib.pyplot.plot', 'plt.plot', (['piv_results[:, 0]', 'piv_results[:, 1]', '"""bs"""'], {}), "(piv_results[:, 0], piv_results[:, 1], 'bs')\n", (795, 839), True, 'import matplotlib.pyplot as plt\n'), ((842, 866), 'matplotlib.pyplot.title', 'plt.title', (['"""PIV results"""'], {}), "('PIV results')\n", (851, 866), True, 'import matplotlib.pyplot as plt\n'), ((871, 903), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Y position (pixel)"""'], {}), "('Y position (pixel)')\n", (881, 903), True, 'import matplotlib.pyplot as plt\n'), ((908, 942), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Displacement (pixel)"""'], {}), "('Displacement (pixel)')\n", (918, 942), True, 'import matplotlib.pyplot as plt\n'), ((983, 1004), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_name'], {}), '(out_name)\n', (994, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1968), 'numpy.arange', 'np.arange', (['(0)', '(height - 1)', 'division_pixel'], {}), '(0, height - 1, division_pixel)\n', (1937, 1968), True, 'import numpy as np\n'), ((2786, 2819), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2809, 2819), False, 'import warnings\n'), ((4181, 4203), 'numpy.asarray', 'np.asarray', (['y_position'], {}), '(y_position)\n', (4191, 4203), True, 'import numpy as np\n'), ((4373, 4410), 'numpy.vstack', 'np.vstack', (['(y_position, disp_profile)'], {}), '((y_position, disp_profile))\n', (4382, 4410), True, 'import numpy as np\n'), ((4699, 4724), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4722, 4724), False, 'import argparse\n'), ((5927, 5957), 'os.path.basename', 'os.path.basename', (['image_a_path'], {}), '(image_a_path)\n', (5943, 5957), False, 'import os\n'), ((5977, 6007), 'os.path.basename', 'os.path.basename', (['image_b_path'], {}), '(image_b_path)\n', (5993, 6007), False, 'import os\n'), ((6545, 6561), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (6553, 
6561), False, 'import sys\n'), ((1252, 1274), 'PIL.Image.open', 'Image.open', (['infilename'], {}), '(infilename)\n', (1262, 1274), False, 'from PIL import Image\n'), ((1317, 1347), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': '"""int32"""'}), "(img, dtype='int32')\n", (1327, 1347), True, 'import numpy as np\n'), ((2071, 2105), 'numpy.append', 'np.append', (['index_divid', '(height - 1)'], {}), '(index_divid, height - 1)\n', (2080, 2105), True, 'import numpy as np\n'), ((2317, 2359), 'numpy.mean', 'np.mean', (['image[index_a:index_b, :]'], {'axis': '(0)'}), '(image[index_a:index_b, :], axis=0)\n', (2324, 2359), True, 'import numpy as np\n'), ((3059, 3076), 'scipy.signal.correlate', 'correlate', (['y1', 'y2'], {}), '(y1, y2)\n', (3068, 3076), False, 'from scipy.signal import correlate\n'), ((3095, 3128), 'numpy.arange', 'np.arange', (['(1 - nsamples)', 'nsamples'], {}), '(1 - nsamples, nsamples)\n', (3104, 3128), True, 'import numpy as np\n'), ((5308, 5342), 'os.path.isfile', 'os.path.isfile', (['args.image_file[0]'], {}), '(args.image_file[0])\n', (5322, 5342), False, 'import os\n'), ((5365, 5399), 'os.path.isfile', 'os.path.isfile', (['args.image_file[1]'], {}), '(args.image_file[1])\n', (5379, 5399), False, 'import os\n'), ((6022, 6052), 'os.path.splitext', 'os.path.splitext', (['image_a_name'], {}), '(image_a_name)\n', (6038, 6052), False, 'import os\n'), ((6070, 6100), 'os.path.splitext', 'os.path.splitext', (['image_b_name'], {}), '(image_b_name)\n', (6086, 6100), False, 'import os\n'), ((6216, 6264), 'numpy.savetxt', 'np.savetxt', (['out_name', 'piv_results'], {'delimiter': '""","""'}), "(out_name, piv_results, delimiter=',')\n", (6226, 6264), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_gp_signals
----------------------------------
Tests for GP signal modules.
"""
import unittest
import numpy as np
import scipy.linalg as sl
from enterprise.pulsar import Pulsar
from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils
from enterprise.signals.selections import Selection
from tests.enterprise_test_data import datadir
@signal_base.function
def create_quant_matrix(toas, dt=1):
U, _ = utils.create_quantization_matrix(toas, dt=dt, nmin=1)
avetoas = np.array([toas[idx.astype(bool)].mean() for idx in U.T])
# return value slightly different than 1 to get around ECORR columns
return U * 1.0000001, avetoas
@signal_base.function
def se_kernel(etoas, log10_sigma=-7, log10_lam=np.log10(30 * 86400)):
tm = np.abs(etoas[None, :] - etoas[:, None])
d = np.eye(tm.shape[0]) * 10 ** (2 * (log10_sigma - 1.5))
return 10 ** (2 * log10_sigma) * np.exp(-(tm ** 2) / 2 / 10 ** (2 * log10_lam)) + d
class TestGPSignals(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup the Pulsar object."""
# initialize Pulsar class
cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim")
def test_ecorr(self):
"""Test that ecorr signal returns correct values."""
# set up signal parameter
ecorr = parameter.Uniform(-10, -5)
ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr)
ecm = ec(self.psr)
# parameters
ecorr = -6.4
params = {"B1855+09_basis_ecorr_log10_ecorr": ecorr}
# basis matrix test
U = utils.create_quantization_matrix(self.psr.toas)[0]
msg = "U matrix incorrect for Basis Ecorr signal."
assert np.allclose(U, ecm.get_basis(params)), msg
# Jvec test
jvec = 10 ** (2 * ecorr) * np.ones(U.shape[1])
msg = "Prior vector incorrect for Basis Ecorr signal."
assert np.all(ecm.get_phi(params) == jvec), msg
# inverse Jvec test
msg = "Prior vector inverse incorrect for Basis Ecorr signal."
assert np.all(ecm.get_phiinv(params) == 1 / jvec), msg
# test shape
msg = "U matrix shape incorrect"
assert ecm.get_basis(params).shape == U.shape, msg
def test_ecorr_backend(self):
"""Test that ecorr-backend signal returns correct values."""
# set up signal parameter
ecorr = parameter.Uniform(-10, -5)
selection = Selection(selections.by_backend)
ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr, selection=selection)
ecm = ec(self.psr)
# parameters
ecorrs = [-6.1, -6.2, -6.3, -6.4]
params = {
"B1855+09_basis_ecorr_430_ASP_log10_ecorr": ecorrs[0],
"B1855+09_basis_ecorr_430_PUPPI_log10_ecorr": ecorrs[1],
"B1855+09_basis_ecorr_L-wide_ASP_log10_ecorr": ecorrs[2],
"B1855+09_basis_ecorr_L-wide_PUPPI_log10_ecorr": ecorrs[3],
}
# get the basis
bflags = self.psr.backend_flags
Umats = []
for flag in np.unique(bflags):
mask = bflags == flag
Umats.append(utils.create_quantization_matrix(self.psr.toas[mask])[0])
nepoch = sum(U.shape[1] for U in Umats)
U = np.zeros((len(self.psr.toas), nepoch))
jvec = np.zeros(nepoch)
netot = 0
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
nn = Umats[ct].shape[1]
U[mask, netot : nn + netot] = Umats[ct]
jvec[netot : nn + netot] = 10 ** (2 * ecorrs[ct])
netot += nn
# basis matrix test
msg = "U matrix incorrect for Basis Ecorr-backend signal."
assert np.allclose(U, ecm.get_basis(params)), msg
# Jvec test
msg = "Prior vector incorrect for Basis Ecorr backend signal."
assert np.all(ecm.get_phi(params) == jvec), msg
# inverse Jvec test
msg = "Prior vector inverse incorrect for Basis Ecorr backend signal."
assert np.all(ecm.get_phiinv(params) == 1 / jvec), msg
# test shape
msg = "U matrix shape incorrect"
assert ecm.get_basis(params).shape == U.shape, msg
def test_kernel(self):
log10_sigma = parameter.Uniform(-10, -5)
log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
basis = create_quant_matrix(dt=7 * 86400)
prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
se = gp_signals.BasisGP(prior, basis, name="se")
sem = se(self.psr)
# parameters
log10_lam, log10_sigma = 7.4, -6.4
params = {"B1855+09_se_log10_lam": log10_lam, "B1855+09_se_log10_sigma": log10_sigma}
# basis check
U, avetoas = create_quant_matrix(self.psr.toas, dt=7 * 86400)
msg = "Kernel Basis incorrect"
assert np.allclose(U, sem.get_basis(params)), msg
# kernel test
K = se_kernel(avetoas, log10_lam=log10_lam, log10_sigma=log10_sigma)
msg = "Kernel incorrect"
assert np.allclose(K, sem.get_phi(params)), msg
# inverse kernel test
Kinv = np.linalg.inv(K)
msg = "Kernel inverse incorrect"
assert np.allclose(Kinv, sem.get_phiinv(params)), msg
def test_kernel_backend(self):
# set up signal parameter
selection = Selection(selections.by_backend)
log10_sigma = parameter.Uniform(-10, -5)
log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
basis = create_quant_matrix(dt=7 * 86400)
prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
se = gp_signals.BasisGP(prior, basis, selection=selection, name="se")
sem = se(self.psr)
# parameters
log10_sigmas = [-7, -6, -6.4, -8.5]
log10_lams = [8.3, 7.4, 6.8, 5.6]
params = {
"B1855+09_se_430_ASP_log10_lam": log10_lams[0],
"B1855+09_se_430_ASP_log10_sigma": log10_sigmas[0],
"B1855+09_se_430_PUPPI_log10_lam": log10_lams[1],
"B1855+09_se_430_PUPPI_log10_sigma": log10_sigmas[1],
"B1855+09_se_L-wide_ASP_log10_lam": log10_lams[2],
"B1855+09_se_L-wide_ASP_log10_sigma": log10_sigmas[2],
"B1855+09_se_L-wide_PUPPI_log10_lam": log10_lams[3],
"B1855+09_se_L-wide_PUPPI_log10_sigma": log10_sigmas[3],
}
# get the basis
bflags = self.psr.backend_flags
Fmats, fs, phis = [], [], []
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
U, avetoas = create_quant_matrix(self.psr.toas[mask], dt=7 * 86400)
Fmats.append(U)
fs.append(avetoas)
phis.append(se_kernel(avetoas, log10_sigma=log10_sigmas[ct], log10_lam=log10_lams[ct]))
nf = sum(F.shape[1] for F in Fmats)
U = np.zeros((len(self.psr.toas), nf))
K = sl.block_diag(*phis)
Kinv = np.linalg.inv(K)
nftot = 0
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
nn = Fmats[ct].shape[1]
U[mask, nftot : nn + nftot] = Fmats[ct]
nftot += nn
msg = "Kernel basis incorrect for backend signal."
assert np.allclose(U, sem.get_basis(params)), msg
# spectrum test
msg = "Kernel incorrect for backend signal."
assert np.allclose(sem.get_phi(params), K), msg
# inverse spectrum test
msg = "Kernel inverse incorrect for backend signal."
assert np.allclose(sem.get_phiinv(params), Kinv), msg
def test_fourier_red_noise(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
rn = gp_signals.FourierBasisGP(spectrum=pl, components=30)
rnm = rn(self.psr)
# parameters
log10_A, gamma = -14.5, 4.33
params = {"B1855+09_red_noise_log10_A": log10_A, "B1855+09_red_noise_gamma": gamma}
# basis matrix test
F, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for GP Fourier signal."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
msg = "Spectrum incorrect for GP Fourier signal."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for GP Fourier signal."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_fourier_red_noise_pshift(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
rn = gp_signals.FourierBasisGP(spectrum=pl, components=30, pshift=True, pseed=42)
rnm = rn(self.psr)
# parameters
log10_A, gamma = -14.5, 4.33
params = {"B1855+09_red_noise_log10_A": log10_A, "B1855+09_red_noise_gamma": gamma}
# basis matrix test
F, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30, pshift=True, pseed=42)
msg = "F matrix incorrect for GP Fourier signal."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
msg = "Spectrum incorrect for GP Fourier signal."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for GP Fourier signal."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_fourier_red_user_freq_array(self):
"""Test that red noise signal returns correct values with user defined
frequency array."""
# set parameters
log10_A, gamma = -14.5, 4.33
params = {"B1855+09_red_noise_log10_A": log10_A, "B1855+09_red_noise_gamma": gamma}
F, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
# set up signal model. use list of frequencies to make basis
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
rn = gp_signals.FourierBasisGP(spectrum=pl, modes=f2[::2])
rnm = rn(self.psr)
# basis matrix test
msg = "F matrix incorrect for GP Fourier signal."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
msg = "Spectrum incorrect for GP Fourier signal."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for GP Fourier signal."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_fourier_red_noise_backend(self):
"""Test that red noise-backend signal returns correct values."""
# set up signal parameter
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
selection = Selection(selections.by_backend)
rn = gp_signals.FourierBasisGP(spectrum=pl, components=30, selection=selection)
rnm = rn(self.psr)
# parameters
log10_As = [-14, -14.4, -15, -14.8]
gammas = [2.3, 4.4, 1.8, 5.6]
params = {
"B1855+09_red_noise_430_ASP_gamma": gammas[0],
"B1855+09_red_noise_430_PUPPI_gamma": gammas[1],
"B1855+09_red_noise_L-wide_ASP_gamma": gammas[2],
"B1855+09_red_noise_L-wide_PUPPI_gamma": gammas[3],
"B1855+09_red_noise_430_ASP_log10_A": log10_As[0],
"B1855+09_red_noise_430_PUPPI_log10_A": log10_As[1],
"B1855+09_red_noise_L-wide_ASP_log10_A": log10_As[2],
"B1855+09_red_noise_L-wide_PUPPI_log10_A": log10_As[3],
}
# get the basis
bflags = self.psr.backend_flags
Fmats, fs, phis = [], [], []
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
F, f = utils.createfourierdesignmatrix_red(self.psr.toas[mask], 30)
Fmats.append(F)
fs.append(f)
phis.append(utils.powerlaw(f, log10_As[ct], gammas[ct]))
nf = sum(F.shape[1] for F in Fmats)
F = np.zeros((len(self.psr.toas), nf))
phi = np.hstack([p for p in phis])
nftot = 0
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
nn = Fmats[ct].shape[1]
F[mask, nftot : nn + nftot] = Fmats[ct]
nftot += nn
msg = "F matrix incorrect for GP Fourier backend signal."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
msg = "Spectrum incorrect for GP Fourier backend signal."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for GP Fourier backend signal."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_red_noise_add(self):
"""Test that red noise addition only returns independent columns."""
# set up signals
pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
cpl = utils.powerlaw(
log10_A=parameter.Uniform(-18, -12)("log10_Agw"), gamma=parameter.Uniform(1, 7)("gamma_gw")
)
# parameters
log10_A, gamma = -14.5, 4.33
log10_Ac, gammac = -15.5, 1.33
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"log10_Agw": log10_Ac,
"gamma_gw": gammac,
}
Tmax = self.psr.toas.max() - self.psr.toas.min()
tpars = [
(30, 20, Tmax, Tmax),
(20, 30, Tmax, Tmax),
(30, 30, Tmax, Tmax),
(30, 20, Tmax, 1.123 * Tmax),
(20, 30, Tmax, 1.123 * Tmax),
(30, 30, 1.123 * Tmax, Tmax),
]
for (nf1, nf2, T1, T2) in tpars:
rn = gp_signals.FourierBasisGP(spectrum=pl, components=nf1, Tspan=T1)
crn = gp_signals.FourierBasisGP(spectrum=cpl, components=nf2, Tspan=T2)
s = rn + crn
rnm = s(self.psr)
# set up frequencies
F1, f1 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=nf1, Tspan=T1)
F2, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=nf2, Tspan=T2)
# test power spectrum
p1 = utils.powerlaw(f1, log10_A, gamma)
p2 = utils.powerlaw(f2, log10_Ac, gammac)
if T1 == T2:
nf = max(2 * nf1, 2 * nf2)
phi = np.zeros(nf)
F = F1 if nf1 > nf2 else F2
phi[: 2 * nf1] = p1
phi[: 2 * nf2] += p2
F[
:,
] # noqa: E231
else:
phi = np.concatenate((p1, p2))
F = np.hstack((F1, F2))
msg = "Combined red noise PSD incorrect "
msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
assert np.all(rnm.get_phi(params) == phi), msg
msg = "Combined red noise PSD inverse incorrect "
msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
msg = "Combined red noise Fmat incorrect "
msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
assert np.allclose(F, rnm.get_basis(params)), msg
def test_red_noise_add_backend(self):
    """Test that red noise with backend addition only returns
    independent columns."""
    # set up signals: per-backend red noise plus a common (GW-like) red noise
    pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
    selection = Selection(selections.by_backend)
    cpl = utils.powerlaw(
        log10_A=parameter.Uniform(-18, -12)("log10_Agw"), gamma=parameter.Uniform(1, 7)("gamma_gw")
    )
    # parameters: one (log10_A, gamma) pair per backend, plus the common pair
    log10_As = [-14, -14.4, -15, -14.8]
    gammas = [2.3, 4.4, 1.8, 5.6]
    log10_Ac, gammac = -15.5, 1.33
    params = {
        "B1855+09_red_noise_430_ASP_gamma": gammas[0],
        "B1855+09_red_noise_430_PUPPI_gamma": gammas[1],
        "B1855+09_red_noise_L-wide_ASP_gamma": gammas[2],
        "B1855+09_red_noise_L-wide_PUPPI_gamma": gammas[3],
        "B1855+09_red_noise_430_ASP_log10_A": log10_As[0],
        "B1855+09_red_noise_430_PUPPI_log10_A": log10_As[1],
        "B1855+09_red_noise_L-wide_ASP_log10_A": log10_As[2],
        "B1855+09_red_noise_L-wide_PUPPI_log10_A": log10_As[3],
        "log10_Agw": log10_Ac,
        "gamma_gw": gammac,
    }
    Tmax = self.psr.toas.max() - self.psr.toas.min()
    # (nf1, nf2, T1, T2) combinations; T1=None lets the backend GP choose its span
    tpars = [
        (30, 20, Tmax, Tmax),
        (20, 30, Tmax, Tmax),
        (30, 30, Tmax, Tmax),
        (30, 20, Tmax, 1.123 * Tmax),
        (20, 30, Tmax, 1.123 * Tmax),
        (30, 30, 1.123 * Tmax, Tmax),
        (30, 20, None, Tmax),
    ]
    for (nf1, nf2, T1, T2) in tpars:
        rn = gp_signals.FourierBasisGP(spectrum=pl, components=nf1, Tspan=T1, selection=selection)
        crn = gp_signals.FourierBasisGP(spectrum=cpl, components=nf2, Tspan=T2)
        s = rn + crn
        rnm = s(self.psr)
        # build the reference basis/prior by hand, one backend at a time
        bflags = self.psr.backend_flags
        Fmats, fs, phis = [], [], []  # fs is collected but not used below
        F2, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nf2, Tspan=T2)
        p2 = utils.powerlaw(f2, log10_Ac, gammac)
        for ct, flag in enumerate(np.unique(bflags)):
            mask = bflags == flag
            # per-backend basis built only from that backend's TOAs
            F1, f1 = utils.createfourierdesignmatrix_red(self.psr.toas[mask], nf1, Tspan=T1)
            Fmats.append(F1)
            fs.append(f1)
            phis.append(utils.powerlaw(f1, log10_As[ct], gammas[ct]))
        Fmats.append(F2)
        phis.append(p2)
        # assemble the combined basis: backend blocks first, common basis last
        nf = sum(F.shape[1] for F in Fmats)
        F = np.zeros((len(self.psr.toas), nf))
        phi = np.hstack([p for p in phis])
        nftot = 0
        for ct, flag in enumerate(np.unique(bflags)):
            # scatter each backend block into its column range, rows masked by backend
            mask = bflags == flag
            nn = Fmats[ct].shape[1]
            F[mask, nftot : nn + nftot] = Fmats[ct]
            nftot += nn
        # the common red-noise basis occupies the last 2*nf2 columns for all TOAs
        F[:, -2 * nf2 :] = F2
        msg = "Combined red noise PSD incorrect "
        msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
        assert np.all(rnm.get_phi(params) == phi), msg
        msg = "Combined red noise PSD inverse incorrect "
        msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
        assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
        msg = "Combined red noise Fmat incorrect "
        msg += "for {} {} {} {}".format(nf1, nf2, T1, T2)
        assert np.allclose(F, rnm.get_basis(params)), msg
def test_gp_timing_model(self):
    """Test that the timing model signal returns correct values."""
    # instantiate the timing-model signal for this pulsar
    tmodel = gp_signals.TimingModel()(self.psr)
    params = {}
    # reference basis: the design matrix with columns scaled to unit norm
    design = self.psr.Mmat.copy()
    design /= np.sqrt(np.sum(design ** 2, axis=0))
    assert np.allclose(design, tmodel.get_basis(params)), "M matrix incorrect for Timing Model signal."
    # improper (effectively infinite, 1e40) prior variance per coefficient
    prior = 1e40 * np.ones(self.psr.Mmat.shape[1])
    assert np.all(tmodel.get_phi(params) == prior), "Prior vector incorrect for Timing Model signal."
    # inverse prior must be the elementwise reciprocal
    assert np.all(tmodel.get_phiinv(params) == 1 / prior), "Prior vector inverse incorrect for Timing Model signal."
    # basis shape must match the design matrix shape
    assert tmodel.get_basis(params).shape == self.psr.Mmat.shape, "M matrix shape incorrect"
def test_pshift_fourier(self):
    """Test Fourier basis with prescribed phase shifts."""
    # build a SignalCollection: timing model + red noise with a pseed parameter
    Tspan = self.psr.toas.max() - self.psr.toas.min()
    pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(0, 7))
    s = gp_signals.TimingModel() + gp_signals.FourierBasisGP(
        pl, components=5, Tspan=Tspan, pseed=parameter.Uniform(0, 32768)
    )
    m = s(self.psr)

    def reference(**kwargs):
        # hand-built Fourier basis, optionally with a phase-shift seed
        return utils.createfourierdesignmatrix_red(nmodes=5, Tspan=Tspan, **kwargs)("")(self.psr.toas)[0]

    shifted_params = {self.psr.name + "_red_noise_pseed": 5}
    # no-parameter call must match the unshifted reference
    assert np.all(m.signals[1].get_basis() == reference()), "Fourier bases incorrect (no phase shifts)"
    # ... and must differ from a seed-5 shifted reference
    assert not np.all(
        m.signals[1].get_basis() == reference(pseed=5)
    ), "Fourier bases incorrect (no-parameter call vs phase shift 5)"
    # passing pseed=5 through params must match the seed-5 reference
    assert np.all(
        m.signals[1].get_basis(params=shifted_params) == reference(pseed=5)
    ), "Fourier bases incorrect (phase shift 5)"
    # ... and must differ from the unshifted reference
    assert not np.all(
        m.signals[1].get_basis(params=shifted_params) == reference()
    ), "Fourier bases incorrect (phase-shift-5 call vs no phase shift)"
def test_gp_parameter(self):
    """Test GP basis model with parameterized basis."""
    # two BasisGP signals sharing a powerlaw prior: one with a parameterized
    # envelope basis, one with the standard red-noise Fourier basis
    pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(0, 7))
    basis_env = utils.createfourierdesignmatrix_env(
        log10_Amp=parameter.Uniform(-10, -5), t0=parameter.Uniform(4.3e9, 5e9), log10_Q=parameter.Uniform(0, 4)
    )
    basis_red = utils.createfourierdesignmatrix_red()
    model = (gp_signals.BasisGP(pl, basis_env, name="env") + gp_signals.BasisGP(pl, basis_red))(self.psr)
    # fixed parameter values for the check
    log10_A, gamma = -14.5, 4.33
    log10_A_env, gamma_env = -14.0, 2.5
    log10_Amp, log10_Q, t0 = -7.3, np.log10(345), 55000 * 86400
    params = {
        "B1855+09_log10_A": log10_A,
        "B1855+09_gamma": gamma,
        "B1855+09_env_log10_A": log10_A_env,
        "B1855+09_env_gamma": gamma_env,
        "B1855+09_env_log10_Q": log10_Q,
        "B1855+09_env_log10_Amp": log10_Amp,
        "B1855+09_env_t0": t0,
    }
    # hand-built reference basis: env columns first, then red columns
    Fred, f2_red = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
    Fenv, f2_env = utils.createfourierdesignmatrix_env(
        self.psr.toas, nmodes=30, log10_Amp=log10_Amp, log10_Q=log10_Q, t0=t0
    )
    expected_basis = np.hstack((Fenv, Fred))
    # reference prior: per-basis powerlaw spectra, concatenated in basis order
    expected_phi = np.concatenate(
        (
            utils.powerlaw(f2_env, log10_A=log10_A_env, gamma=gamma_env),
            utils.powerlaw(f2_red, log10_A=log10_A, gamma=gamma),
        )
    )
    assert np.allclose(expected_basis, model.get_basis(params)), "F matrix incorrect for GP Fourier signal."
    assert np.all(model.get_phi(params) == expected_phi), "Spectrum incorrect for GP Fourier signal."
    assert np.all(model.get_phiinv(params) == 1 / expected_phi), "Spectrum inverse incorrect for GP Fourier signal."
    assert model.get_basis(params).shape == expected_basis.shape, "F matrix shape incorrect"
def test_combine_signals(self):
    """Test for combining different signals."""
    # set up signal parameters: ECORR + red noise + SE-kernel GP + timing model
    ecorr = parameter.Uniform(-10, -5)
    ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr)
    pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
    rn = gp_signals.FourierBasisGP(spectrum=pl, components=30)
    log10_sigma = parameter.Uniform(-10, -5)
    log10_lam = parameter.Uniform(np.log10(86400), np.log10(1500 * 86400))
    # create_quant_matrix / se_kernel are module-level helpers defined earlier in this file
    basis = create_quant_matrix(dt=7 * 86400)
    prior = se_kernel(log10_sigma=log10_sigma, log10_lam=log10_lam)
    se = gp_signals.BasisGP(prior, basis, name="se")
    ts = gp_signals.TimingModel()
    s = ec + rn + ts + se
    m = s(self.psr)
    # fixed parameter values for the check (ecorr rebound from Parameter to float)
    ecorr = -6.4
    log10_A, gamma = -14.5, 4.33
    log10_lam, log10_sigma = 7.4, -6.4
    params = {
        "B1855+09_basis_ecorr_log10_ecorr": ecorr,
        "B1855+09_red_noise_log10_A": log10_A,
        "B1855+09_red_noise_gamma": gamma,
        "B1855+09_se_log10_lam": log10_lam,
        "B1855+09_se_log10_sigma": log10_sigma,
    }
    # combined basis matrix: [quantization | Fourier | normalized design | SE quantization]
    U = utils.create_quantization_matrix(self.psr.toas)[0]
    M = self.psr.Mmat.copy()
    norm = np.sqrt(np.sum(M ** 2, axis=0))
    M /= norm
    F, f2 = utils.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
    U2, avetoas = create_quant_matrix(self.psr.toas, dt=7 * 86400)
    T = np.hstack((U, F, M, U2))
    # combined prior: diagonal blocks (ECORR, red noise, timing model) plus
    # a dense SE-kernel block K in the last positions
    jvec = 10 ** (2 * ecorr) * np.ones(U.shape[1])
    phim = np.ones(self.psr.Mmat.shape[1]) * 1e40
    phi = utils.powerlaw(f2, log10_A=log10_A, gamma=gamma)
    K = se_kernel(avetoas, log10_lam=log10_lam, log10_sigma=log10_sigma)
    phivec = np.concatenate((jvec, phi, phim))
    phi = sl.block_diag(np.diag(phivec), K)
    phiinv = np.linalg.inv(phi)
    # basis matrix test
    msg = "Basis matrix incorrect for combined signal."
    assert np.allclose(T, m.get_basis(params)), msg
    # kernel test
    msg = "Prior matrix incorrect for combined signal."
    assert np.allclose(m.get_phi(params), phi), msg
    # inverse kernel test
    msg = "Prior matrix inverse incorrect for combined signal."
    assert np.allclose(m.get_phiinv(params), phiinv), msg
    # test shape
    msg = "Basis matrix shape incorrect size for combined signal."
    assert m.get_basis(params).shape == T.shape, msg
def test_gp_common_selection(self):
    """Test a per-telescope common (correlated) GP across two pulsars."""
    psr2 = Pulsar(datadir + "/B1937+21_NANOGrav_9yv1.gls.par", datadir + "/B1937+21_NANOGrav_9yv1.tim")
    mn = white_signals.MeasurementNoise()
    pl = utils.powerlaw(log10_A=parameter.Uniform(-20, -11), gamma=parameter.Uniform(0, 7))
    orf = utils.hd_orf()
    # common Tspan covering both pulsars' TOAs
    Tspan = max(self.psr.toas.max(), psr2.toas.max()) - min(self.psr.toas.min(), psr2.toas.min())
    prn = gp_signals.FourierBasisGP(pl, components=1, Tspan=Tspan)
    # common GP selected by telescope, so each telescope gets its own hyperparameters
    rn = gp_signals.FourierBasisCommonGP2(
        pl, orf, selection=Selection(selections.by_telescope), components=1, Tspan=Tspan
    )
    model = mn + prn + rn
    pta = signal_base.PTA([model(self.psr), model(psr2)])
    telescopes = sorted(np.unique(psr2.telescope))
    parnames = [par.name for par in pta.params]
    msg = "Per-telescope common-noise parameters not in PTA"
    assert all("common_fourier_{}_gamma".format(telescope) in parnames for telescope in telescopes), msg
    p0 = parameter.sample(pta.params)
    # will throw if there are problems
    pta.get_lnlikelihood(params=p0, phiinv_method="sparse")
    pta.get_lnprior(params=p0)
    # should throw since phiinv_method is not 'sparse'
    with self.assertRaises(NotImplementedError):
        pta.get_lnlikelihood(params=p0)
    # per-pulsar phi sizes: 2 for psr1, 6 for psr2 (per the asserted counts)
    msg = "Wrong nonzero element count in Phi matrices"
    assert len(pta.pulsarmodels[0].get_phi(p0)) == 2, msg
    assert len(pta.pulsarmodels[1].get_phi(p0)) == 6, msg
    Phi = pta.get_phi(p0)
    assert sum(sum(Phi != 0)) == 12, msg
    # determine order of GP components in psr2 from nonzero basis rows
    b0 = pta.pulsarmodels[1].get_basis()[:, 0] != 0
    b1 = pta.pulsarmodels[1].get_basis()[:, 2] != 0
    b2 = pta.pulsarmodels[1].get_basis()[:, 4] != 0
    # a0 is arecibo/ao since telescopes is sorted
    a0 = [np.all(b == (psr2.telescope == telescopes[0])) for b in [b0, b1, b2]]
    a1 = [np.all(b == (psr2.telescope == telescopes[1])) for b in [b0, b1, b2]]
    msg = "Wrong telescope masks for psr2"
    assert sum(a0) == sum(a1) == 1, msg
    # check cross-pulsar correlations are in the right place:
    # psr2's matching-telescope block starts after psr1's coefficients
    i = len(pta.pulsarmodels[0].get_phi(p0)) + 2 * a0.index(True)
    msg = "Wrong Phi cross terms"
    assert Phi[0, i] != 0 and Phi[0, i] == Phi[i, 0], msg
    assert Phi[1, i + 1] != 0 and Phi[1, i + 1] == Phi[i + 1, 1], msg
    # sparse phi-inverse must agree with the dense matrix inverse
    msg = "Discrepant Phi inverse"
    assert np.allclose(
        pta.get_phiinv(params=p0, method="sparse").toarray(), np.linalg.inv(pta.get_phi(params=p0))
    ), msg
class TestGPSignalsPint(TestGPSignals):
    """Re-run all TestGPSignals tests with a PINT-backed Pulsar object."""

    @classmethod
    def setUpClass(cls):
        """Setup the Pulsar object."""
        # initialize Pulsar class with the PINT timing package instead of tempo2
        cls.psr = Pulsar(
            datadir + "/B1855+09_NANOGrav_9yv1.gls.par",
            datadir + "/B1855+09_NANOGrav_9yv1.tim",
            ephem="DE430",
            timing_package="pint",
        )

    # won't work because one PSR will have telescope == 'arecibo', the other 'ao'
    def test_gp_common_selection(self):
        # deliberately overridden as a no-op to skip the base-class test
        pass
| [
"enterprise.signals.utils.create_quantization_matrix",
"numpy.abs",
"enterprise.signals.selections.Selection",
"numpy.sum",
"numpy.ones",
"numpy.exp",
"enterprise.signals.parameter.Uniform",
"enterprise.signals.utils.powerlaw",
"numpy.diag",
"numpy.unique",
"enterprise.signals.gp_signals.TimingM... | [((509, 562), 'enterprise.signals.utils.create_quantization_matrix', 'utils.create_quantization_matrix', (['toas'], {'dt': 'dt', 'nmin': '(1)'}), '(toas, dt=dt, nmin=1)\n', (541, 562), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((812, 832), 'numpy.log10', 'np.log10', (['(30 * 86400)'], {}), '(30 * 86400)\n', (820, 832), True, 'import numpy as np\n'), ((844, 883), 'numpy.abs', 'np.abs', (['(etoas[None, :] - etoas[:, None])'], {}), '(etoas[None, :] - etoas[:, None])\n', (850, 883), True, 'import numpy as np\n'), ((892, 911), 'numpy.eye', 'np.eye', (['tm.shape[0]'], {}), '(tm.shape[0])\n', (898, 911), True, 'import numpy as np\n'), ((1210, 1306), 'enterprise.pulsar.Pulsar', 'Pulsar', (["(datadir + '/B1855+09_NANOGrav_9yv1.gls.par')", "(datadir + '/B1855+09_NANOGrav_9yv1.tim')"], {}), "(datadir + '/B1855+09_NANOGrav_9yv1.gls.par', datadir +\n '/B1855+09_NANOGrav_9yv1.tim')\n", (1216, 1306), False, 'from enterprise.pulsar import Pulsar\n'), ((1441, 1467), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-10)', '(-5)'], {}), '(-10, -5)\n', (1458, 1467), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((1481, 1526), 'enterprise.signals.gp_signals.EcorrBasisModel', 'gp_signals.EcorrBasisModel', ([], {'log10_ecorr': 'ecorr'}), '(log10_ecorr=ecorr)\n', (1507, 1526), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((2501, 2527), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-10)', '(-5)'], {}), '(-10, -5)\n', (2518, 2527), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((2548, 2580), 'enterprise.signals.selections.Selection', 'Selection', (['selections.by_backend'], {}), '(selections.by_backend)\n', (2557, 2580), False, 'from 
enterprise.signals.selections import Selection\n'), ((2594, 2660), 'enterprise.signals.gp_signals.EcorrBasisModel', 'gp_signals.EcorrBasisModel', ([], {'log10_ecorr': 'ecorr', 'selection': 'selection'}), '(log10_ecorr=ecorr, selection=selection)\n', (2620, 2660), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((3163, 3180), 'numpy.unique', 'np.unique', (['bflags'], {}), '(bflags)\n', (3172, 3180), True, 'import numpy as np\n'), ((3413, 3429), 'numpy.zeros', 'np.zeros', (['nepoch'], {}), '(nepoch)\n', (3421, 3429), True, 'import numpy as np\n'), ((4356, 4382), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-10)', '(-5)'], {}), '(-10, -5)\n', (4373, 4382), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((4597, 4640), 'enterprise.signals.gp_signals.BasisGP', 'gp_signals.BasisGP', (['prior', 'basis'], {'name': '"""se"""'}), "(prior, basis, name='se')\n", (4615, 4640), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((5253, 5269), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (5266, 5269), True, 'import numpy as np\n'), ((5463, 5495), 'enterprise.signals.selections.Selection', 'Selection', (['selections.by_backend'], {}), '(selections.by_backend)\n', (5472, 5495), False, 'from enterprise.signals.selections import Selection\n'), ((5518, 5544), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-10)', '(-5)'], {}), '(-10, -5)\n', (5535, 5544), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((5760, 5824), 'enterprise.signals.gp_signals.BasisGP', 'gp_signals.BasisGP', (['prior', 'basis'], {'selection': 'selection', 'name': '"""se"""'}), "(prior, basis, selection=selection, name='se')\n", (5778, 5824), False, 'from enterprise.signals import gp_signals, white_signals, 
parameter, selections, signal_base, utils\n'), ((7038, 7058), 'scipy.linalg.block_diag', 'sl.block_diag', (['*phis'], {}), '(*phis)\n', (7051, 7058), True, 'import scipy.linalg as sl\n'), ((7074, 7090), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (7087, 7090), True, 'import numpy as np\n'), ((7964, 8017), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', ([], {'spectrum': 'pl', 'components': '(30)'}), '(spectrum=pl, components=30)\n', (7989, 8017), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((8241, 8302), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas'], {'nmodes': '(30)'}), '(self.psr.toas, nmodes=30)\n', (8276, 8302), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((8458, 8506), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f2'], {'log10_A': 'log10_A', 'gamma': 'gamma'}), '(f2, log10_A=log10_A, gamma=gamma)\n', (8472, 8506), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((9157, 9233), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', ([], {'spectrum': 'pl', 'components': '(30)', 'pshift': '(True)', 'pseed': '(42)'}), '(spectrum=pl, components=30, pshift=True, pseed=42)\n', (9182, 9233), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((9457, 9545), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas'], {'nmodes': '(30)', 'pshift': '(True)', 'pseed': '(42)'}), '(self.psr.toas, nmodes=30, pshift=True,\n pseed=42)\n', (9492, 9545), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((9697, 9745), 'enterprise.signals.utils.powerlaw', 
'utils.powerlaw', (['f2'], {'log10_A': 'log10_A', 'gamma': 'gamma'}), '(f2, log10_A=log10_A, gamma=gamma)\n', (9711, 9745), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((10469, 10530), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas'], {'nmodes': '(30)'}), '(self.psr.toas, nmodes=30)\n', (10504, 10530), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((10710, 10763), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', ([], {'spectrum': 'pl', 'modes': 'f2[::2]'}), '(spectrum=pl, modes=f2[::2])\n', (10735, 10763), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((10975, 11023), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f2'], {'log10_A': 'log10_A', 'gamma': 'gamma'}), '(f2, log10_A=log10_A, gamma=gamma)\n', (10989, 11023), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((11690, 11722), 'enterprise.signals.selections.Selection', 'Selection', (['selections.by_backend'], {}), '(selections.by_backend)\n', (11699, 11722), False, 'from enterprise.signals.selections import Selection\n'), ((11736, 11810), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', ([], {'spectrum': 'pl', 'components': '(30)', 'selection': 'selection'}), '(spectrum=pl, components=30, selection=selection)\n', (11761, 11810), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((12977, 13005), 'numpy.hstack', 'np.hstack', (['[p for p in phis]'], {}), '([p for p in phis])\n', (12986, 13005), True, 'import numpy as np\n'), ((16624, 16656), 'enterprise.signals.selections.Selection', 'Selection', (['selections.by_backend'], {}), '(selections.by_backend)\n', 
(16633, 16656), False, 'from enterprise.signals.selections import Selection\n'), ((19949, 19973), 'enterprise.signals.gp_signals.TimingModel', 'gp_signals.TimingModel', ([], {}), '()\n', (19971, 19973), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((21105, 21129), 'enterprise.signals.gp_signals.TimingModel', 'gp_signals.TimingModel', ([], {}), '()\n', (21127, 21129), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((21486, 21502), 'numpy.all', 'np.all', (['(b1 == b2)'], {}), '(b1 == b2)\n', (21492, 21502), True, 'import numpy as np\n'), ((22027, 22043), 'numpy.all', 'np.all', (['(b1 == b2)'], {}), '(b1 == b2)\n', (22033, 22043), True, 'import numpy as np\n'), ((22743, 22780), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', ([], {}), '()\n', (22778, 22780), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((22799, 22844), 'enterprise.signals.gp_signals.BasisGP', 'gp_signals.BasisGP', (['pl', 'basis_env'], {'name': '"""env"""'}), "(pl, basis_env, name='env')\n", (22817, 22844), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((22858, 22891), 'enterprise.signals.gp_signals.BasisGP', 'gp_signals.BasisGP', (['pl', 'basis_red'], {}), '(pl, basis_red)\n', (22876, 22891), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((23485, 23546), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas'], {'nmodes': '(30)'}), '(self.psr.toas, nmodes=30)\n', (23520, 23546), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((23570, 23681), 'enterprise.signals.utils.createfourierdesignmatrix_env', 
'utils.createfourierdesignmatrix_env', (['self.psr.toas'], {'nmodes': '(30)', 'log10_Amp': 'log10_Amp', 'log10_Q': 'log10_Q', 't0': 't0'}), '(self.psr.toas, nmodes=30, log10_Amp=\n log10_Amp, log10_Q=log10_Q, t0=t0)\n', (23605, 23681), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((23711, 23734), 'numpy.hstack', 'np.hstack', (['(Fenv, Fred)'], {}), '((Fenv, Fred))\n', (23720, 23734), True, 'import numpy as np\n'), ((23753, 23813), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f2_env'], {'log10_A': 'log10_A_env', 'gamma': 'gamma_env'}), '(f2_env, log10_A=log10_A_env, gamma=gamma_env)\n', (23767, 23813), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((23832, 23884), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f2_red'], {'log10_A': 'log10_A', 'gamma': 'gamma'}), '(f2_red, log10_A=log10_A, gamma=gamma)\n', (23846, 23884), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((23899, 23933), 'numpy.concatenate', 'np.concatenate', (['(phi_env, phi_red)'], {}), '((phi_env, phi_red))\n', (23913, 23933), True, 'import numpy as np\n'), ((24631, 24657), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-10)', '(-5)'], {}), '(-10, -5)\n', (24648, 24657), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((24671, 24716), 'enterprise.signals.gp_signals.EcorrBasisModel', 'gp_signals.EcorrBasisModel', ([], {'log10_ecorr': 'ecorr'}), '(log10_ecorr=ecorr)\n', (24697, 24716), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((24827, 24880), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', ([], {'spectrum': 'pl', 'components': '(30)'}), '(spectrum=pl, components=30)\n', (24852, 24880), False, 'from 
enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((24904, 24930), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-10)', '(-5)'], {}), '(-10, -5)\n', (24921, 24930), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((25145, 25188), 'enterprise.signals.gp_signals.BasisGP', 'gp_signals.BasisGP', (['prior', 'basis'], {'name': '"""se"""'}), "(prior, basis, name='se')\n", (25163, 25188), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((25203, 25227), 'enterprise.signals.gp_signals.TimingModel', 'gp_signals.TimingModel', ([], {}), '()\n', (25225, 25227), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((25898, 25959), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas'], {'nmodes': '(30)'}), '(self.psr.toas, nmodes=30)\n', (25933, 25959), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((26043, 26067), 'numpy.hstack', 'np.hstack', (['(U, F, M, U2)'], {}), '((U, F, M, U2))\n', (26052, 26067), True, 'import numpy as np\n'), ((26224, 26272), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f2'], {'log10_A': 'log10_A', 'gamma': 'gamma'}), '(f2, log10_A=log10_A, gamma=gamma)\n', (26238, 26272), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((26367, 26400), 'numpy.concatenate', 'np.concatenate', (['(jvec, phi, phim)'], {}), '((jvec, phi, phim))\n', (26381, 26400), True, 'import numpy as np\n'), ((26466, 26484), 'numpy.linalg.inv', 'np.linalg.inv', (['phi'], {}), '(phi)\n', (26479, 26484), True, 'import numpy as np\n'), ((27136, 27232), 'enterprise.pulsar.Pulsar', 'Pulsar', (["(datadir + 
'/B1937+21_NANOGrav_9yv1.gls.par')", "(datadir + '/B1937+21_NANOGrav_9yv1.tim')"], {}), "(datadir + '/B1937+21_NANOGrav_9yv1.gls.par', datadir +\n '/B1937+21_NANOGrav_9yv1.tim')\n", (27142, 27232), False, 'from enterprise.pulsar import Pulsar\n'), ((27243, 27275), 'enterprise.signals.white_signals.MeasurementNoise', 'white_signals.MeasurementNoise', ([], {}), '()\n', (27273, 27275), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((27387, 27401), 'enterprise.signals.utils.hd_orf', 'utils.hd_orf', ([], {}), '()\n', (27399, 27401), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((27519, 27575), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', (['pl'], {'components': '(1)', 'Tspan': 'Tspan'}), '(pl, components=1, Tspan=Tspan)\n', (27544, 27575), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((28116, 28144), 'enterprise.signals.parameter.sample', 'parameter.sample', (['pta.params'], {}), '(pta.params)\n', (28132, 28144), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((29914, 30048), 'enterprise.pulsar.Pulsar', 'Pulsar', (["(datadir + '/B1855+09_NANOGrav_9yv1.gls.par')", "(datadir + '/B1855+09_NANOGrav_9yv1.tim')"], {'ephem': '"""DE430"""', 'timing_package': '"""pint"""'}), "(datadir + '/B1855+09_NANOGrav_9yv1.gls.par', datadir +\n '/B1855+09_NANOGrav_9yv1.tim', ephem='DE430', timing_package='pint')\n", (29920, 30048), False, 'from enterprise.pulsar import Pulsar\n'), ((983, 1027), 'numpy.exp', 'np.exp', (['(-tm ** 2 / 2 / 10 ** (2 * log10_lam))'], {}), '(-tm ** 2 / 2 / 10 ** (2 * log10_lam))\n', (989, 1027), True, 'import numpy as np\n'), ((1699, 1746), 'enterprise.signals.utils.create_quantization_matrix', 'utils.create_quantization_matrix', (['self.psr.toas'], {}), 
'(self.psr.toas)\n', (1731, 1746), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((1923, 1942), 'numpy.ones', 'np.ones', (['U.shape[1]'], {}), '(U.shape[1])\n', (1930, 1942), True, 'import numpy as np\n'), ((3482, 3499), 'numpy.unique', 'np.unique', (['bflags'], {}), '(bflags)\n', (3491, 3499), True, 'import numpy as np\n'), ((4421, 4436), 'numpy.log10', 'np.log10', (['(86400)'], {}), '(86400)\n', (4429, 4436), True, 'import numpy as np\n'), ((4438, 4460), 'numpy.log10', 'np.log10', (['(1500 * 86400)'], {}), '(1500 * 86400)\n', (4446, 4460), True, 'import numpy as np\n'), ((5583, 5598), 'numpy.log10', 'np.log10', (['(86400)'], {}), '(86400)\n', (5591, 5598), True, 'import numpy as np\n'), ((5600, 5622), 'numpy.log10', 'np.log10', (['(1500 * 86400)'], {}), '(1500 * 86400)\n', (5608, 5622), True, 'import numpy as np\n'), ((6641, 6658), 'numpy.unique', 'np.unique', (['bflags'], {}), '(bflags)\n', (6650, 6658), True, 'import numpy as np\n'), ((7143, 7160), 'numpy.unique', 'np.unique', (['bflags'], {}), '(bflags)\n', (7152, 7160), True, 'import numpy as np\n'), ((12615, 12632), 'numpy.unique', 'np.unique', (['bflags'], {}), '(bflags)\n', (12624, 12632), True, 'import numpy as np\n'), ((12688, 12748), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas[mask]', '(30)'], {}), '(self.psr.toas[mask], 30)\n', (12723, 12748), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((13058, 13075), 'numpy.unique', 'np.unique', (['bflags'], {}), '(bflags)\n', (13067, 13075), True, 'import numpy as np\n'), ((14829, 14893), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', ([], {'spectrum': 'pl', 'components': 'nf1', 'Tspan': 'T1'}), '(spectrum=pl, components=nf1, Tspan=T1)\n', (14854, 14893), False, 'from enterprise.signals import gp_signals, white_signals, parameter, 
selections, signal_base, utils\n'), ((14912, 14977), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', ([], {'spectrum': 'cpl', 'components': 'nf2', 'Tspan': 'T2'}), '(spectrum=cpl, components=nf2, Tspan=T2)\n', (14937, 14977), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((15088, 15160), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas'], {'nmodes': 'nf1', 'Tspan': 'T1'}), '(self.psr.toas, nmodes=nf1, Tspan=T1)\n', (15123, 15160), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((15182, 15254), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas'], {'nmodes': 'nf2', 'Tspan': 'T2'}), '(self.psr.toas, nmodes=nf2, Tspan=T2)\n', (15217, 15254), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((15307, 15341), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f1', 'log10_A', 'gamma'], {}), '(f1, log10_A, gamma)\n', (15321, 15341), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((15359, 15395), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f2', 'log10_Ac', 'gammac'], {}), '(f2, log10_Ac, gammac)\n', (15373, 15395), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((17956, 18046), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', ([], {'spectrum': 'pl', 'components': 'nf1', 'Tspan': 'T1', 'selection': 'selection'}), '(spectrum=pl, components=nf1, Tspan=T1, selection=\n selection)\n', (17981, 18046), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((18060, 18125), 
'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', ([], {'spectrum': 'cpl', 'components': 'nf2', 'Tspan': 'T2'}), '(spectrum=cpl, components=nf2, Tspan=T2)\n', (18085, 18125), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((18316, 18381), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas', 'nf2'], {'Tspan': 'T2'}), '(self.psr.toas, nf2, Tspan=T2)\n', (18351, 18381), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((18399, 18435), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f2', 'log10_Ac', 'gammac'], {}), '(f2, log10_Ac, gammac)\n', (18413, 18435), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((18941, 18969), 'numpy.hstack', 'np.hstack', (['[p for p in phis]'], {}), '([p for p in phis])\n', (18950, 18969), True, 'import numpy as np\n'), ((20085, 20107), 'numpy.sum', 'np.sum', (['(M ** 2)'], {'axis': '(0)'}), '(M ** 2, axis=0)\n', (20091, 20107), True, 'import numpy as np\n'), ((20299, 20330), 'numpy.ones', 'np.ones', (['self.psr.Mmat.shape[1]'], {}), '(self.psr.Mmat.shape[1])\n', (20306, 20330), True, 'import numpy as np\n'), ((21746, 21762), 'numpy.all', 'np.all', (['(b1 == b2)'], {}), '(b1 == b2)\n', (21752, 21762), True, 'import numpy as np\n'), ((22326, 22342), 'numpy.all', 'np.all', (['(b1 == b2)'], {}), '(b1 == b2)\n', (22332, 22342), True, 'import numpy as np\n'), ((23082, 23095), 'numpy.log10', 'np.log10', (['(345)'], {}), '(345)\n', (23090, 23095), True, 'import numpy as np\n'), ((24969, 24984), 'numpy.log10', 'np.log10', (['(86400)'], {}), '(86400)\n', (24977, 24984), True, 'import numpy as np\n'), ((24986, 25008), 'numpy.log10', 'np.log10', (['(1500 * 86400)'], {}), '(1500 * 86400)\n', (24994, 25008), True, 'import numpy as np\n'), ((25733, 25780), 
'enterprise.signals.utils.create_quantization_matrix', 'utils.create_quantization_matrix', (['self.psr.toas'], {}), '(self.psr.toas)\n', (25765, 25780), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((25840, 25862), 'numpy.sum', 'np.sum', (['(M ** 2)'], {'axis': '(0)'}), '(M ** 2, axis=0)\n', (25846, 25862), True, 'import numpy as np\n'), ((26136, 26155), 'numpy.ones', 'np.ones', (['U.shape[1]'], {}), '(U.shape[1])\n', (26143, 26155), True, 'import numpy as np\n'), ((26171, 26202), 'numpy.ones', 'np.ones', (['self.psr.Mmat.shape[1]'], {}), '(self.psr.Mmat.shape[1])\n', (26178, 26202), True, 'import numpy as np\n'), ((26429, 26444), 'numpy.diag', 'np.diag', (['phivec'], {}), '(phivec)\n', (26436, 26444), True, 'import numpy as np\n'), ((27848, 27873), 'numpy.unique', 'np.unique', (['psr2.telescope'], {}), '(psr2.telescope)\n', (27857, 27873), True, 'import numpy as np\n'), ((28995, 29041), 'numpy.all', 'np.all', (['(b == (psr2.telescope == telescopes[0]))'], {}), '(b == (psr2.telescope == telescopes[0]))\n', (29001, 29041), True, 'import numpy as np\n'), ((29079, 29125), 'numpy.all', 'np.all', (['(b == (psr2.telescope == telescopes[1]))'], {}), '(b == (psr2.telescope == telescopes[1]))\n', (29085, 29125), True, 'import numpy as np\n'), ((7891, 7918), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (7908, 7918), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((7926, 7949), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(1)', '(7)'], {}), '(1, 7)\n', (7943, 7949), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((9084, 9111), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (9101, 9111), False, 'from enterprise.signals import gp_signals, 
white_signals, parameter, selections, signal_base, utils\n'), ((9119, 9142), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(1)', '(7)'], {}), '(1, 7)\n', (9136, 9142), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((10637, 10664), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (10654, 10664), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((10672, 10695), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(1)', '(7)'], {}), '(1, 7)\n', (10689, 10695), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((11610, 11637), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (11627, 11637), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((11645, 11668), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(1)', '(7)'], {}), '(1, 7)\n', (11662, 11668), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((12826, 12869), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f', 'log10_As[ct]', 'gammas[ct]'], {}), '(f, log10_As[ct], gammas[ct])\n', (12840, 12869), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((13959, 13986), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (13976, 13986), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((13994, 14017), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(1)', '(7)'], {}), '(1, 7)\n', (14011, 14017), False, 'from enterprise.signals import gp_signals, 
white_signals, parameter, selections, signal_base, utils\n'), ((15486, 15498), 'numpy.zeros', 'np.zeros', (['nf'], {}), '(nf)\n', (15494, 15498), True, 'import numpy as np\n'), ((15730, 15754), 'numpy.concatenate', 'np.concatenate', (['(p1, p2)'], {}), '((p1, p2))\n', (15744, 15754), True, 'import numpy as np\n'), ((15775, 15794), 'numpy.hstack', 'np.hstack', (['(F1, F2)'], {}), '((F1, F2))\n', (15784, 15794), True, 'import numpy as np\n'), ((16544, 16571), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (16561, 16571), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((16579, 16602), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(1)', '(7)'], {}), '(1, 7)\n', (16596, 16602), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((18474, 18491), 'numpy.unique', 'np.unique', (['bflags'], {}), '(bflags)\n', (18483, 18491), True, 'import numpy as np\n'), ((18557, 18628), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', (['self.psr.toas[mask]', 'nf1'], {'Tspan': 'T1'}), '(self.psr.toas[mask], nf1, Tspan=T1)\n', (18592, 18628), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((19030, 19047), 'numpy.unique', 'np.unique', (['bflags'], {}), '(bflags)\n', (19039, 19047), True, 'import numpy as np\n'), ((21031, 21058), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (21048, 21058), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((21066, 21089), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(0)', '(7)'], {}), '(0, 7)\n', (21083, 21089), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, 
utils\n'), ((21206, 21233), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(0)', '(32768)'], {}), '(0, 32768)\n', (21223, 21233), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((22479, 22506), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (22496, 22506), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((22514, 22537), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(0)', '(7)'], {}), '(0, 7)\n', (22531, 22537), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((22618, 22644), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-10)', '(-5)'], {}), '(-10, -5)\n', (22635, 22644), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((22649, 22694), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(4300000000.0)', '(5000000000.0)'], {}), '(4300000000.0, 5000000000.0)\n', (22666, 22694), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((22688, 22711), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(0)', '(4)'], {}), '(0, 4)\n', (22705, 22711), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((24754, 24781), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (24771, 24781), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((24789, 24812), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(1)', '(7)'], {}), '(1, 7)\n', (24806, 24812), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, 
signal_base, utils\n'), ((27313, 27340), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-20)', '(-11)'], {}), '(-20, -11)\n', (27330, 27340), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((27348, 27371), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(0)', '(7)'], {}), '(0, 7)\n', (27365, 27371), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((27654, 27688), 'enterprise.signals.selections.Selection', 'Selection', (['selections.by_telescope'], {}), '(selections.by_telescope)\n', (27663, 27688), False, 'from enterprise.signals.selections import Selection\n'), ((3241, 3294), 'enterprise.signals.utils.create_quantization_matrix', 'utils.create_quantization_matrix', (['self.psr.toas[mask]'], {}), '(self.psr.toas[mask])\n', (3273, 3294), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((14069, 14096), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (14086, 14096), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((14117, 14140), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(1)', '(7)'], {}), '(1, 7)\n', (14134, 14140), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((16707, 16734), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-12)'], {}), '(-18, -12)\n', (16724, 16734), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((16755, 16778), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(1)', '(7)'], {}), '(1, 7)\n', (16772, 16778), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, 
utils\n'), ((18720, 18764), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', (['f1', 'log10_As[ct]', 'gammas[ct]'], {}), '(f1, log10_As[ct], gammas[ct])\n', (18734, 18764), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((21332, 21390), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', ([], {'nmodes': '(5)', 'Tspan': 'Tspan'}), '(nmodes=5, Tspan=Tspan)\n', (21367, 21390), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((21560, 21627), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', ([], {'nmodes': '(5)', 'Tspan': 'Tspan', 'pseed': '(5)'}), '(nmodes=5, Tspan=Tspan, pseed=5)\n', (21595, 21627), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((21866, 21933), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', ([], {'nmodes': '(5)', 'Tspan': 'Tspan', 'pseed': '(5)'}), '(nmodes=5, Tspan=Tspan, pseed=5)\n', (21901, 21933), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n'), ((22147, 22205), 'enterprise.signals.utils.createfourierdesignmatrix_red', 'utils.createfourierdesignmatrix_red', ([], {'nmodes': '(5)', 'Tspan': 'Tspan'}), '(nmodes=5, Tspan=Tspan)\n', (22182, 22205), False, 'from enterprise.signals import gp_signals, white_signals, parameter, selections, signal_base, utils\n')] |
import cv2
import os
import numpy as np
from tqdm import tqdm
def check_dir(directory):
    """Ensure that *directory* exists, creating it when necessary.

    Prints a message stating whether the directory was created or
    already present.

    :param directory: path of the directory to check/create
    :return: None
    """
    if os.path.exists(directory):
        print('Directory exists -', directory)
    else:
        os.makedirs(directory)
        print('Creating directory -', directory)
def images_Normalization(path, size=(640, 480), rotate=0, color_space=0, img_show=True):
    """Normalize all images found below *path*.

    Each image in a leaf directory is resized, rotated, optionally
    converted to another color space, and written to a ``dst/``
    sub-directory next to the originals.

    :param path: root directory walked for image files
    :param size: target (width, height) for cv2.resize; (0, 0) keeps
                 the original size
    :param rotate: number of 90-degree counter-clockwise rotations
                   passed to np.rot90 (0-3)
    :param color_space: cv2 color-conversion code, or 0 for none
    :param img_show: if True, display each image; press ESC to abort
    :return: 0 if aborted via ESC, otherwise None
    """
    for root, dirs, files in os.walk(path):
        for i in tqdm(range(0, len(files))):
            name = files[i]
            if len(dirs) == 0:
                fname = os.path.join(root, name)
                print('fname', fname)
                print('name', name)
                # Load the source image.
                img_src = cv2.imread(fname)
                # Bug fix: the original used a module-level `global src`,
                # so when size == (0, 0) the loop rotated/converted a stale
                # image from a previous iteration (or raised NameError on
                # the first file). Always start from the freshly read image.
                src = img_src
                if size != (0, 0):
                    src = cv2.resize(img_src, size)
                # Rotate counter-clockwise by rotate * 90 degrees.
                src = np.rot90(src, rotate)
                if color_space != 0:
                    # e.g. cv2.COLOR_BGR2RGB
                    src = cv2.cvtColor(src, color_space)
                if img_show:
                    cv2.imshow('src_img', src)
                    k = cv2.waitKey() & 0xff
                    if k == 27: return 0
                # Create the dst directory and save the result there.
                src_dir = root + '/dst/'
                check_dir(src_dir)
                src_path = src_dir + name
                cv2.imwrite(src_path, src)
if __name__ == '__main__':
    # Root directory containing the images to normalize.
    img_path = '/home/linxu/Desktop/matlab/'
    # 0 disables color-space conversion (see images_Normalization).
    color_space = 0
    # color_space = cv2.COLOR_RGB2BGR
    # Number of 90-degree counter-clockwise rotations applied via np.rot90.
    rotate = 0
    # images_Normalization(path=img_path, size=(0, 0),
    #                      rotate=rotate, color_space=color_space,
    #                      img_show=False)
    # Resize every image to 640x425, no rotation, no color conversion,
    # writing results to dst/ sub-directories without displaying them.
    images_Normalization(path=img_path, size=(640, 425),
                         rotate=rotate, color_space=color_space,
                         img_show=False)
"os.makedirs",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.waitKey",
"os.walk",
"os.path.exists",
"cv2.imread",
"numpy.rot90",
"cv2.imshow",
"os.path.join",
"cv2.resize"
] | [((538, 551), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (545, 551), False, 'import os\n'), ((161, 186), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (175, 186), False, 'import os\n'), ((196, 218), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (207, 218), False, 'import os\n'), ((681, 705), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (693, 705), False, 'import os\n'), ((833, 850), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (843, 850), False, 'import cv2\n'), ((1031, 1052), 'numpy.rot90', 'np.rot90', (['src', 'rotate'], {}), '(src, rotate)\n', (1039, 1052), True, 'import numpy as np\n'), ((1566, 1592), 'cv2.imwrite', 'cv2.imwrite', (['src_path', 'src'], {}), '(src_path, src)\n', (1577, 1592), False, 'import cv2\n'), ((942, 967), 'cv2.resize', 'cv2.resize', (['img_src', 'size'], {}), '(img_src, size)\n', (952, 967), False, 'import cv2\n'), ((1207, 1237), 'cv2.cvtColor', 'cv2.cvtColor', (['src', 'color_space'], {}), '(src, color_space)\n', (1219, 1237), False, 'import cv2\n'), ((1288, 1314), 'cv2.imshow', 'cv2.imshow', (['"""src_img"""', 'src'], {}), "('src_img', src)\n", (1298, 1314), False, 'import cv2\n'), ((1339, 1352), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1350, 1352), False, 'import cv2\n')] |
"""
Tools for n-dimensional linear algebra
Vectors are just numpy arrays, as are dense matrices. Sparse matrices
are CSR matrices. Parallel vector and matrix are built on top of those
representations using PETSc.
.. inheritance-diagram:: proteus.LinearAlgebraTools
:parts: 1
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
import numpy
import math
import sys
from . import superluWrappers
from . import Comm
from .Comm import globalSum, globalMax
from .superluWrappers import *
from .Profiling import logEvent
from petsc4py import PETSc as p4pyPETSc
# PETSc Matrix Functions
def _petsc_view(obj, filename):
    """Write a PETSc object to disk in binary and ASCII form.

    Parameters
    ----------
    obj : PETSc obj
        PETSc4py object to be saved (e.g. vector, matrix, etc)
    filename : str
        String with PETSc filename; an ASCII copy is also written
        to ``filename + ".m"``.
    """
    binary_viewer = p4pyPETSc.Viewer().createBinary(filename, 'w')
    binary_viewer(obj)
    ascii_viewer = p4pyPETSc.Viewer().createASCII(filename + ".m", 'w')
    ascii_viewer.pushFormat(1)
    ascii_viewer(obj)
    ascii_viewer.popFormat()
def petsc_load_matrix(filename):
    """ This function loads a PETSc matrix from a binary format.
    (Eg. what is saved using the petsc_view function).
    Parameters
    ----------
    filename : str
        This is the name of the binary with the file stored.
    Returns
    -------
    matrix : petsc4py matrix
        The matrix that is stored in the binary file, or None if
        loading fails.
    """
    result = None
    try:
        binary_viewer = p4pyPETSc.Viewer().createBinary(filename, 'r')
        result = p4pyPETSc.Mat().load(binary_viewer)
    except:
        logEvent("Either you've entered an invalid file name or your object is not a matrix (try petsc_load_vector).")
        result = None
    return result
def petsc_load_vector(filename):
    """ This function loads a PETSc vector from a binary format.
    (Eg. what is saved using the petsc_view function).
    Parameters
    ----------
    filename : str
        This is the name of the binary with the file stored.
    Returns
    -------
    vector : petsc4py vector
        The vector that is stored in the binary file, or None if
        loading fails.
    """
    result = None
    try:
        binary_viewer = p4pyPETSc.Viewer().createBinary(filename, 'r')
        result = p4pyPETSc.Vec().load(binary_viewer)
    except:
        logEvent("Either you've entered an invalid file name or your object is not a vector (try petsc_load_matrix).")
        result = None
    return result
def petsc_load_IS(filename):
    """ This function loads a PETSc index-set from a binary format.
    (Eg. what is saved using the petsc_view function).
    Parameters
    ----------
    filename : str
        This is the name of the binary with the file stored.
    Returns
    -------
    index_set : petsc4py IS
        The index-set that is stored in the binary file, or None if
        loading fails.
    """
    result = None
    try:
        binary_viewer = p4pyPETSc.Viewer().createBinary(filename, 'r')
        result = p4pyPETSc.IS().load(binary_viewer)
    except:
        logEvent("Either you've entered an invalid file name or your object is not an index set.")
        result = None
    return result
def csr_2_petsc(size, csr):
    """ Create an petsc4py matrix from size and CSR information.
    Parameters:
    ----------
    size : tuple
        A 2-tuple with the number of matrix rows and columns.
    csr : tuple
        A 3-tuple with the sparse matrix csr information
        (row pointers, column indices, values).
    Returns:
    --------
    matrix : PETSc4py aij matrix
    """
    row_ptr, col_idx, values = csr
    petsc_mat = p4pyPETSc.Mat().create()
    petsc_mat.setSizes(size=size)
    petsc_mat.setType('aij')
    petsc_mat.setUp()
    petsc_mat.assemblyBegin()
    petsc_mat.setValuesCSR(row_ptr, col_idx, values)
    petsc_mat.assemblyEnd()
    return petsc_mat
def _pythonCSR_2_dense(rowptr,colptr,data,nr,nc,output=False):
""" Takes python CSR datatypes and makes a dense matrix """
dense_matrix = numpy.zeros(shape = (nr,nc), dtype='float')
for idx in range(len(rowptr)-1):
row_vals = data[rowptr[idx]:rowptr[idx+1]]
for val_idx,j in enumerate(colptr[rowptr[idx]:rowptr[idx+1]]):
dense_matrix[idx][j] = row_vals[val_idx]
if output is not False:
numpy.save(output,dense_matrix)
return dense_matrix
def superlu_get_rank(sparse_matrix):
    """ Returns the rank of a superluWrapper sparse matrix.
    Parameters
    ----------
    sparse_matrix : :class:`proteus.superluWrappers.SparseMatrix`
    Returns
    -------
    matrix_rank : int
        The rank of the sparse_matrix
    Notes
    -----
    This function is a tool for debugging and should only be used
    for small matrices (the matrix is densified first).
    """
    return numpy.linalg.matrix_rank(superlu_sparse_2_dense(sparse_matrix))
def petsc4py_get_rank(sparse_matrix):
    """ Returns the rank of a petsc4py sparse matrix.
    Parameters
    ----------
    sparse_matrix : :class:`p4pyPETSc.Mat`
    Returns
    -------
    matrix_rank : int
        The rank of the sparse_matrix
    Notes
    -----
    This function is a debugging tool and should only be used
    for small matrices (the matrix is densified first).
    """
    return numpy.linalg.matrix_rank(petsc4py_sparse_2_dense(sparse_matrix))
def superlu_has_pressure_null_space(sparse_matrix):
    """
    Checks whether a superluWrapper sparse matrix has a constant
    pressure null space.
    Parameters
    ----------
    sparse_matrix : :class:`proteus.superluWrappers.SparseMatrix`
    Returns
    -------
    does : bool
        Boolean variable indicating whether the pressure term
        creates a null space.
    Notes
    -----
    Assumes interwoven dof.
    This function was written mainly for debugging purposes and may be
    slow for large matrices.
    """
    return petsc4py_mat_has_pressure_null_space(superlu_2_petsc4py(sparse_matrix))
def petsc4py_mat_has_pressure_null_space(A):
    """
    Checks whether a PETSc4Py sparse matrix has a constant
    pressure null space.
    Parameters
    ----------
    A : :class:`p4pyPETSc.Mat`
    Returns
    -------
    does : bool
        Boolean variable indicating whether the pressure term
        creates a null space.
    Notes
    -----
    Assumes interwoven dof (pressure at every third unknown).
    This function was written mainly for debugging purposes and may be
    slow for large matrices.
    """
    num_cols = A.getSize()[1]
    # Build a candidate null vector: 1 at each pressure dof, 0 elsewhere.
    candidate = numpy.zeros(num_cols)
    candidate[::3] = 1
    product = numpy.zeros(num_cols)
    x_petsc = p4pyPETSc.Vec().createWithArray(candidate)
    y_petsc = p4pyPETSc.Vec().createWithArray(product)
    A.mult(x_petsc, y_petsc)
    # A constant-pressure vector in the null space maps to (numerically) zero.
    if y_petsc.norm() < 1e-15:
        return True
    else:
        return False
def superlu_sparse_2_dense(sparse_matrix, output=False):
    """ Converts a sparse superluWrapper into a dense matrix.
    Parameters
    ----------
    sparse_matrix : :class:`proteus.superluWrappers.SparseMatrix`
    output : str
        Out file name to store the matrix.
    Returns
    -------
    dense_matrix : numpy array
        A numpy array storing the dense matrix.
    Notes
    -----
    This function should not be used for large matrices.
    """
    # Fetch the CSR representation once instead of three separate times.
    rowptr, colptr, data = sparse_matrix.getCSRrepresentation()
    nr = sparse_matrix.shape[0]
    nc = sparse_matrix.shape[1]
    return _pythonCSR_2_dense(rowptr, colptr, data, nr, nc, output)
def petsc4py_sparse_2_dense(sparse_matrix, output=False):
    """ Converts a PETSc4Py matrix to a dense numpy array.
    Parameters
    ----------
    sparse_matrix : PETSc4py matrix
    output : str
        Output file name to store the matrix.
    Returns
    -------
    dense_matrix : numpy array
        A numpy array with the dense matrix.
    Notes
    -----
    This function is very inefficient for large matrices.
    """
    # Fetch the CSR data and sizes once instead of three separate calls each.
    rowptr, colptr, data = sparse_matrix.getValuesCSR()
    nr, nc = sparse_matrix.getSize()
    return _pythonCSR_2_dense(rowptr, colptr, data, nr, nc, output)
def superlu_2_petsc4py(sparse_superlu):
    """ Copy a sparse superlu matrix to a sparse petsc4py matrix
    Parameters
    ----------
    sparse_superlu : :class:`proteus.superluWrappers.SparseMatrix`
    Returns
    -------
    sparse_matrix : :class: `p4pyPETSc.Mat`
    """
    comm = Comm.get()
    if comm.size() > 1:
        # Parallel run: the ParMat constructor extracts the CSR data
        # itself, so the previously-unused local unpack was removed.
        A_petsc4py = ParMat_petsc4py.create_ParMat_from_OperatorConstructor(sparse_superlu)
    else:
        rowptr, colind, nzval = sparse_superlu.getCSRrepresentation()
        # Copy the CSR arrays so the PETSc matrix owns its own storage
        # independent of the superlu wrapper.
        A_rowptr = rowptr.copy()
        A_colind = colind.copy()
        A_nzval = nzval.copy()
        nr = sparse_superlu.shape[0]
        nc = sparse_superlu.shape[1]
        A_petsc4py = p4pyPETSc.Mat().createAIJWithArrays((nr, nc),
                                                         (A_rowptr,
                                                          A_colind,
                                                          A_nzval))
    return A_petsc4py
def petsc_create_diagonal_inv_matrix(sparse_petsc):
    """ Create an inverse diagonal petsc4py matrix from input matrix.
    Parameters
    ----------
    sparse_petsc : :class:`p4pyPETSc.Mat`
    Returns
    -------
    sparse_matrix : :class:`p4pyPETSc.Mat`
        An aij matrix whose diagonal is the elementwise reciprocal of
        the input matrix's diagonal.
    """
    inv_mat = p4pyPETSc.Mat().create()
    inv_mat.setSizes(sparse_petsc.getSizes())
    inv_mat.setType('aij')
    inv_mat.setUp()
    inv_mat.setDiagonal(old_div(1., sparse_petsc.getDiagonal()))
    return inv_mat
def dense_numpy_2_petsc4py(dense_numpy, eps=1.e-12):
    """ Create a sparse petsc4py matrix from a dense numpy matrix.
    Note - This routine has been built mainly to support testing.
    It would be rare for this routine to be useful for most applications.
    Parameters
    ----------
    dense_numpy :
    eps : float
        Tolerance for non-zero values.
    Returns
    -------
    sparse_matrix : PETSc4py matrix
    """
    values = []
    col_indices = []
    row_ptr = [0]
    nnz = 0
    for row in dense_numpy:
        for col, entry in enumerate(row):
            # Keep only entries whose magnitude exceeds the tolerance.
            if abs(entry) > eps:
                values.append(entry)
                col_indices.append(col)
                nnz += 1
        row_ptr.append(nnz)
    return p4pyPETSc.Mat().createAIJ(size=dense_numpy.shape,
                                     csr=(row_ptr, col_indices, values))
def csr_2_petsc_mpiaij(size, csr):
    """ Create an MPIaij petsc4py matrix from size and CSR information.
    Parameters:
    ----------
    size : tuple
        Two entires: (num_rows, num_cols)
    csr : tuple
        (row_idx, col_idx, vals)
    Returns:
    --------
    matrix : PETSc4py MPIaij matrix
    """
    row_ptr, col_idx, vals = csr
    petsc_mat = p4pyPETSc.Mat().create()
    petsc_mat.setSizes(size=size)
    petsc_mat.setType('mpiaij')
    petsc_mat.setUp()
    petsc_mat.assemblyBegin()
    petsc_mat.setValuesCSR(row_ptr, col_idx, vals)
    petsc_mat.assemblyEnd()
    return petsc_mat
def split_PETSc_Mat(mat):
    """ Decompose a PETSc matrix into a symmetric and skew-symmetric
    matrix
    Parameters:
    ----------
    mat : :class: `PETSc4py Matrix`
    Returns:
    --------
    H : :class: `PETSc4py Matrix`
        Symmetric (or Hermitian) component of mat
    S : :class: `PETSc4py Matrix`
        Skew-Symmetric (or skew-Hermitian) component of mat
    """
    # H = (mat + mat^T) / 2
    H = mat.copy()
    H.zeroEntries()
    H.axpy(1.0,mat)
    # NOTE(review): petsc4py's Mat.transpose() with no output argument
    # transposes the matrix in place and returns it — confirm that
    # mutating `mat` here (and again below) is intended.
    H.axpy(1.0,mat.transpose())
    H.scale(0.5)
    # S = (mat - mat^T) / 2
    S = mat.copy()
    S.zeroEntries()
    S.axpy(1.0,mat)
    S.aypx(-1.0,mat.transpose())
    S.scale(0.5)
    return H, S
class ParVec_petsc4py(p4pyPETSc.Vec):
    """
    Parallel vector using petsc4py's wrappers for PETSc
    Parameters
    ----------
    array : numpy_array
        A numpy array with size equal to the number of locally
        owned unknowns plus the number of local ghost cells.
    bs : int
        Block size.
    n : int
        The number of locally owned unknowns
    N : int
        The number of unknowns in the global system
    nghosts : int
        The number of ghost nodes for the process.
    subdomain2global : numpy array
        Map from the process unknowns to the global
        uknowns.
    blockVecType : str
    ghosts : numpy array
        A numpy array with the local process uknowns that are
        ghost nodes.
    proteus2petsc_subdomain : numpy array
        A numpy array that serves as a map from the proteus
        uknown ordering to the petsc uknown ordering
    petsc2proteus_subdomain : numpy array
        A numpy array that serves as a map from the petsc uknown
        ordering to the proteus unknown ordering
    """
    def __init__(self,array=None,bs=None,n=None,N=None,nghosts=None,subdomain2global=None,blockVecType="simple",ghosts=None,
                 proteus2petsc_subdomain=None,
                 petsc2proteus_subdomain=None):
        p4pyPETSc.Vec.__init__(self)
        if array is None:
            return#when duplicating for petsc usage
        self.proteus2petsc_subdomain=proteus2petsc_subdomain
        self.petsc2proteus_subdomain=petsc2proteus_subdomain
        # A falsy block size is treated as scalar unknowns (block size 1).
        blockSize = max(1,bs)
        self.dim_proc = n*blockSize
        self.nghosts = nghosts
        self.blockVecType = blockVecType
        assert self.blockVecType == "simple", "petsc4py wrappers require self.blockVecType=simple"
        self.proteus_array = array
        if nghosts is None:
            # No ghost nodes: wrap the local array directly in a PETSc Vec.
            if blockVecType == "simple":
                self.createWithArray(array,size=(blockSize*n,blockSize*N),bsize=1)
            else:
                self.createWithArray(array,size=(blockSize*n,blockSize*N),bsize=blockSize)
            self.subdomain2global=subdomain2global
            self.petsc_l2g = None
            self.setUp()
        else:
            assert nghosts >= 0, "The number of ghostnodes must be non-negative"
            assert subdomain2global.shape[0] == (n+nghosts), ("The subdomain2global map is the wrong length n=%i,nghosts=%i,shape=%i \n" % (n,n+nghosts,subdomain2global.shape[0]))
            assert len(array.flat) == (n+nghosts)*blockSize, "%i != (%i+%i)*%i \n" % (len(array.flat), n,nghosts,blockSize)
            if blockVecType == "simple":
                if ghosts is None:
                    # Expand the node-based ghost list to one entry per
                    # block component (interleaved dof ordering).
                    ghosts = numpy.zeros((blockSize*nghosts),'i')
                    for j in range(blockSize):
                        ghosts[j::blockSize]=subdomain2global[n:]*blockSize+j
                self.createGhostWithArray(ghosts,array,size=(blockSize*n,blockSize*N),bsize=1)
                if blockSize > 1: #have to build in block dofs
                    # Interleave the per-node map to cover every component.
                    subdomain2globalTotal = numpy.zeros((blockSize*subdomain2global.shape[0],),'i')
                    for j in range(blockSize):
                        subdomain2globalTotal[j::blockSize]=subdomain2global*blockSize+j
                    self.subdomain2global=subdomain2globalTotal
                else:
                    self.subdomain2global=subdomain2global
            else:
                #TODO need to debug
                ghosts = subdomain2global[n:]
                self.createGhostWithArray(ghosts,array,size=(blockSize*n,blockSize*N),bsize=blockSize)
                self.subdomain2global = subdomain2global
            self.setUp()
            #self.petsc_l2g = p4pyPETSc.LGMap()
            #self.petsc_l2g.create(self.subdomain2global)
            #self.setLGMap(self.petsc_l2g)
        self.setFromOptions()
    def scatter_forward_insert(self):
        # Push owned values into ghost copies on neighboring processes,
        # permuting the local array between proteus and petsc orderings
        # around the ghost update when permutation maps are available.
        if self.proteus2petsc_subdomain is not None:
            self.proteus_array[:] = self.proteus_array[self.petsc2proteus_subdomain]
        self.ghostUpdateBegin(p4pyPETSc.InsertMode.INSERT,p4pyPETSc.ScatterMode.FORWARD)
        self.ghostUpdateEnd(p4pyPETSc.InsertMode.INSERT,p4pyPETSc.ScatterMode.FORWARD)
        if self.proteus2petsc_subdomain is not None:
            self.proteus_array[:] = self.proteus_array[self.proteus2petsc_subdomain]
    def scatter_reverse_add(self):
        # Accumulate ghost contributions back onto the owning process,
        # permuting the local array between proteus and petsc orderings
        # around the ghost update when permutation maps are available.
        if self.proteus2petsc_subdomain is not None:
            self.proteus_array[:] = self.proteus_array[self.petsc2proteus_subdomain]
        self.ghostUpdateBegin(p4pyPETSc.InsertMode.ADD_VALUES,p4pyPETSc.ScatterMode.REVERSE)
        self.ghostUpdateEnd(p4pyPETSc.InsertMode.ADD_VALUES,p4pyPETSc.ScatterMode.REVERSE)
        if self.proteus2petsc_subdomain is not None:
            self.proteus_array[:] = self.proteus_array[self.proteus2petsc_subdomain]
    def save(self, filename):
        """Saves to disk using a PETSc binary viewer."""
        _petsc_view(self, filename)
class ParInfo_petsc4py(object):
    """
    ARB - this class is experimental. My idea is to store the
    information need to constructor parallel vectors and matrices
    here as static class values. Then ParVec and ParMat can
    use these values to create parallel objects later.
    """
    def __init__(self):
        # All fields default to None/False and are expected to be
        # populated externally before ParVec/ParMat construction.
        self.par_bs = None
        self.par_n = None
        self.par_n_lst = None
        self.par_N = None
        self.par_nghost = None
        self.par_nghost_lst = None
        self.petsc_subdomain2global_petsc = None
        self.subdomain2global = None
        self.proteus2petsc_subdomain = None
        self.petsc2proteus_subdomain = None
        self.nzval_proteus2petsc = None
        self.dim = None
        self.mixed = False
    def print_info(cls):
        # Log every stored parallel-layout field, prefixed with the MPI rank.
        # NOTE(review): the parameter is named `cls` but this is NOT a
        # classmethod — it receives whatever instance/class it is called
        # on; confirm intended usage at the call sites.
        from . import Comm
        comm = Comm.get()
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_bs = ' + repr(cls.par_bs))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_n = ' + repr(cls.par_n))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_n_lst = ' + repr(cls.par_n_lst))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_N = ' + repr(cls.par_N))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_nghost = ' + repr(cls.par_nghost))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' par_nghost_lst = ' + repr(cls.par_nghost_lst))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' petsc_subdomain2global_petsc = ' + repr(cls.petsc_subdomain2global_petsc))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' subdomain2global = ' + repr(cls.subdomain2global))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' proteus2petsc_subdomain = ' + repr(cls.proteus2petsc_subdomain))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' petsc2proteus_subomdain = ' + repr(cls.petsc2proteus_subdomain))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' dim = ' + repr(cls.dim))
        logEvent('comm.rank() = ' + repr(comm.rank()) + ' nzval_proteus2petsc = ' + repr(cls.nzval_proteus2petsc))
class ParMat_petsc4py(p4pyPETSc.Mat):
""" Parallel matrix based on petsc4py's wrappers for PETSc.
ghosted_csr_mat : :class:`proteus.superluWrappers.SparseMatrix`
Primary CSR information for the ParMat.
par_bs : int
The block size.
par_n : int
The number of locally owned unknowns.
par_N : int
The number of global unknowns.
par_nghost : int
The number of locally owned ghost unknowns.
subdomain2global : :class:`numpy.ndarray`
A map from the local unknown to the global unknown.
blockVecType : str
pde : :class:`proteus.Transport.OneLevelTransport`
The Transport class defining the problem.
par_nc : int
par_Nc : int
proteus_jacobian : :class:`proteus.superluWrappers.SparseMatrix`
Jacobian generated by Transport class's initializeJacobian.
nzval_proteus2petsc : :class:`numpy.ndarray`
Array with index permutations for mapping between
proteus and petsc degrees of freedom.
"""
    def __init__(self,
                 ghosted_csr_mat=None,
                 par_bs=None,
                 par_n=None,
                 par_N=None,
                 par_nghost=None,
                 subdomain2global=None,
                 blockVecType="simple",
                 pde=None,
                 par_nc=None,
                 par_Nc=None,
                 proteus_jacobian=None,
                 nzval_proteus2petsc=None):
        # Initialize a parallel PETSc matrix wrapped around a ghosted CSR matrix.
        p4pyPETSc.Mat.__init__(self)
        if ghosted_csr_mat is None:
            return#when duplicating for petsc usage
        self.pde = pde
        # Column sizes default to the row sizes (square operator).
        if par_nc is None:
            par_nc = par_n
        if par_Nc is None:
            par_Nc = par_N
        self.proteus_jacobian=proteus_jacobian
        self.nzval_proteus2petsc = nzval_proteus2petsc
        self.ghosted_csr_mat=ghosted_csr_mat
        self.blockVecType = blockVecType
        assert self.blockVecType == "simple", "petsc4py wrappers require self.blockVecType=simple"
        self.create(p4pyPETSc.COMM_WORLD)
        self.blockSize = max(1,par_bs)
        if self.blockSize > 1 and blockVecType != "simple":
            ## \todo fix block aij in ParMat_petsc4py
            # Block AIJ path: sizes are scaled by the block size and the
            # scalar local-to-global map is reused unchanged.
            self.setType('mpibaij')
            self.setSizes([[self.blockSize*par_n,self.blockSize*par_N],[self.blockSize*par_nc,self.blockSize*par_Nc]],bsize=self.blockSize)
            self.setBlockSize(self.blockSize)
            self.subdomain2global = subdomain2global #no need to include extra block dofs?
        else:
            self.setType('aij')
            self.setSizes([[par_n*self.blockSize,par_N*self.blockSize],[par_nc*self.blockSize,par_Nc*self.blockSize]],bsize=1)
            if self.blockSize > 1: #have to build in block dofs
                # Expand the scalar local-to-global map so every component of
                # each block dof gets its own (interleaved) global index.
                subdomain2globalTotal = numpy.zeros((self.blockSize*subdomain2global.shape[0],),'i')
                for j in range(self.blockSize):
                    subdomain2globalTotal[j::self.blockSize]=subdomain2global*self.blockSize+j
                self.subdomain2global=subdomain2globalTotal
            else:
                self.subdomain2global=subdomain2global
        from proteus import Comm
        comm = Comm.get()
        logEvent("ParMat_petsc4py comm.rank= %s blockSize = %s par_n= %s par_N=%s par_nghost=%s par_jacobian.getSizes()= %s "
                 % (comm.rank(),self.blockSize,par_n,par_N,par_nghost,self.getSizes()))
        # CSR view of the full ghosted matrix and of the locally owned rows.
        self.csr_rep = ghosted_csr_mat.getCSRrepresentation()
        if self.proteus_jacobian is not None:
            self.proteus_csr_rep = self.proteus_jacobian.getCSRrepresentation()
        if self.blockSize > 1:
            blockOwned = self.blockSize*par_n
            self.csr_rep_local = ghosted_csr_mat.getSubMatCSRrepresentation(0,blockOwned)
        else:
            self.csr_rep_local = ghosted_csr_mat.getSubMatCSRrepresentation(0,par_n)
        self.petsc_l2g = p4pyPETSc.LGMap()
        self.petsc_l2g.create(self.subdomain2global)
        self.setUp()
        self.setLGMap(self.petsc_l2g)
        #
        self.colind_global = self.petsc_l2g.apply(self.csr_rep_local[1]) #prealloc needs global indices
        self.setPreallocationCSR([self.csr_rep_local[0],self.colind_global,self.csr_rep_local[2]])
        self.setFromOptions()
    @classmethod
    def create_ParMat_from_OperatorConstructor(cls,
                                               operator):
        """ Build a ParMat consistent with the problem from an Operator
        constructor matrix.
        Arguments
        ---------
        operator : :class:`proteus.superluWrappers.SparseMatrix`
            Matrix to be turned into a parallel petsc matrix.
        """
        # Pull the parallel layout that was recorded in ParInfo_petsc4py.
        par_bs = ParInfo_petsc4py.par_bs
        par_n = ParInfo_petsc4py.par_n
        par_N = ParInfo_petsc4py.par_N
        par_nghost = ParInfo_petsc4py.par_nghost
        petsc_subdomain2global_petsc = ParInfo_petsc4py.petsc_subdomain2global_petsc
        subdomain2global = ParInfo_petsc4py.subdomain2global
        petsc2proteus_subdomain = ParInfo_petsc4py.petsc2proteus_subdomain
        proteus2petsc_subdomain = ParInfo_petsc4py.proteus2petsc_subdomain
        dim = ParInfo_petsc4py.dim
        # ARB - this is largely copied from Transport.py,
        # a refactor should be done to elimate this duplication
        rowptr, colind, nzval = operator.getCSRrepresentation()
        rowptr_petsc = rowptr.copy()
        colind_petsc = colind.copy()
        nzval_petsc = nzval.copy()
        nzval_proteus2petsc = colind.copy()
        nzval_petsc2proteus = colind.copy()
        rowptr_petsc[0] = 0
        # Re-order the proteus CSR data into petsc dof numbering, row by row,
        # sorting each row's columns and recording both nzval permutations.
        for i in range(par_n+par_nghost):
            start_proteus = rowptr[petsc2proteus_subdomain[i]]
            end_proteus = rowptr[petsc2proteus_subdomain[i]+1]
            nzrow = end_proteus - start_proteus
            rowptr_petsc[i+1] = rowptr_petsc[i] + nzrow
            start_petsc = rowptr_petsc[i]
            end_petsc = rowptr_petsc[i+1]
            petsc_cols_i = proteus2petsc_subdomain[colind[start_proteus:end_proteus]]
            j_sorted = petsc_cols_i.argsort()
            colind_petsc[start_petsc:end_petsc] = petsc_cols_i[j_sorted]
            nzval_petsc[start_petsc:end_petsc] = nzval[start_proteus:end_proteus][j_sorted]
            for j_petsc, j_proteus in zip(numpy.arange(start_petsc,end_petsc),
                                          numpy.arange(start_proteus,end_proteus)[j_sorted]):
                nzval_petsc2proteus[j_petsc] = j_proteus
                nzval_proteus2petsc[j_proteus] = j_petsc
        # Build dictionary views of both orderings and copy the values across
        # (this double loop re-checks the permutation entry by entry).
        proteus_a = {}
        petsc_a = {}
        for i in range(dim):
            for j,k in zip(colind[rowptr[i]:rowptr[i+1]],list(range(rowptr[i],rowptr[i+1]))):
                proteus_a[i,j] = nzval[k]
                petsc_a[proteus2petsc_subdomain[i],proteus2petsc_subdomain[j]] = nzval[k]
        for i in range(dim):
            for j,k in zip(colind_petsc[rowptr_petsc[i]:rowptr_petsc[i+1]],list(range(rowptr_petsc[i],rowptr_petsc[i+1]))):
                nzval_petsc[k] = petsc_a[i,j]
        #additional stuff needed for petsc par mat
        petsc_jacobian = SparseMat(dim,dim,nzval_petsc.shape[0], nzval_petsc, colind_petsc, rowptr_petsc)
        return cls(petsc_jacobian,
                   par_bs,
                   par_n,
                   par_N,
                   par_nghost,
                   petsc_subdomain2global_petsc,
                   proteus_jacobian = operator,
                   nzval_proteus2petsc=nzval_proteus2petsc)
    def save(self, filename):
        """Saves to disk using a PETSc binary viewer.

        Parameters
        ----------
        filename : str
            Base name of the output file passed to the binary viewer.
        """
        _petsc_view(self, filename)
def Vec(n):
    """Return a length-``n`` numpy vector of double-precision zeros.

    Parameters
    ----------
    n : int
        Number of entries.

    Returns
    -------
    :class:`numpy.ndarray`
        Zero-initialized 1-D array of dtype ``'d'``.
    """
    vec = numpy.zeros((n,), 'd')
    return vec
def Mat(m,n):
    """Return an ``m`` x ``n`` numpy matrix of double-precision zeros.

    Parameters
    ----------
    m : int
        Number of rows.
    n : int
        Number of columns.

    Returns
    -------
    :class:`numpy.ndarray`
        Zero-initialized 2-D array of dtype ``'d'``.
    """
    mat = numpy.zeros((m, n), 'd')
    return mat
def SparseMatFromDict(nr,nc,aDict):
    """
    Build a nr x nc sparse matrix from a dictionary representation

    Parameters
    ----------
    nr, nc : int
        Number of rows and columns.
    aDict : dict
        Maps ``(i, j)`` index tuples to nonzero values.

    Returns
    -------
    tuple
        ``(sparse_matrix, nzval)`` -- the superlu sparse matrix and its
        nonzero-value array (returned so callers can update values in place).
    """
    from . import superluWrappers
    indeces = list(aDict.keys())
    indeces.sort()
    nnz = len(indeces)
    nzval = numpy.zeros((nnz,),'d')
    rowptr = numpy.zeros((nr+1,),'i')
    colind = numpy.zeros((nnz,),'i')
    i = 0
    k = 0
    rowptr[0] = 0
    for ij in indeces:
        nzval[k] = aDict[ij]
        colind[k] = ij[1]
        # Advance the row pointer one row at a time (a 'while', not an 'if')
        # so that rows with no entries still receive a valid CSR offset.
        while ij[0] > i:
            i += 1
            rowptr[i] = k
        k += 1
    # Close out the last populated row and any trailing empty rows.
    rowptr[i+1:] = k
    return (SparseMat(nr,nc,nnz,nzval,colind,rowptr),nzval)
def SparseMat(nr,nc,nnz,nzval,colind,rowptr):
    """ Build a nr x nc sparse matrix from the CSR data structures
    Parameters
    ----------
    nr : int
        The number of rows.
    nc : int
        The number of columns.
    nnz : int
        The number of non-zero matrix entries.
    nzval : numpy array
        Array with non-zero matrix entries.
    colind : numpy array of 32bit integers
        CSR column array.
    rowptr : numpy array of 32bit integers
        CSR row pointer.
    Returns
    -------
    sparse_matrix : :class:`proteus.superluWrappers.SparseMatrix`
        superlu sparse matrix in CSR format.
    Note
    ----
    For the superluWrapper, both the colind and rowptr should use
    32-bit integer data types.
    """
    # superlu requires 32-bit index arrays; bail out loudly otherwise.
    index_arrays_ok = (colind.dtype == 'int32') and (rowptr.dtype == 'int32')
    if not index_arrays_ok:
        print('ERROR - colind and rowptr must be "int32" numpy arrays for superluWrappers')
        sys.exit(1)
    return superluWrappers.SparseMatrix(nr,nc,nnz,nzval,colind,rowptr)
class SparseMatShell(object):
    """ Build a parallel matrix shell from CSR data structures.
    Parameters
    ----------
    ghosted_csr_mat: :class: `proteus.superluWrappers.SparseMatrix`
    """
    def __init__(self,ghosted_csr_mat):
        self.ghosted_csr_mat=ghosted_csr_mat
        # par_b must be assigned by the caller before mult is used; its
        # layout is duplicated to build the ghosted work vectors.
        self.par_b = None
        # Lazily created ghosted work vectors (see mult).
        self.xGhosted = None
        self.yGhosted = None
    def create(self, A):
        # PETSc shell hook; no setup required here.
        pass
    def mult(self, A, x, y):
        """Apply the ghosted CSR matrix: y = M x.

        Parameters
        ----------
        A : matrix
            Dummy placeholder for PETSc compatibility.
        x : :class:`p4pyPETSc.Vec`
            Input vector.
        y : :class:`p4pyPETSc.Vec`
            Output vector (overwritten with the result).
        """
        assert self.par_b is not None, "The parallel RHS vector par_b must be " \
                        "initialized before using the mult function"
        logEvent("Using SparseMatShell in LinearSolver matrix multiply")
        if self.xGhosted is None:
            self.xGhosted = self.par_b.duplicate()
            self.yGhosted = self.par_b.duplicate()
        # Scatter x into the ghosted vector and update ghost entries before
        # the local matvec; the result is copied back into y afterwards.
        self.xGhosted.setArray(x.getArray())
        self.xGhosted.ghostUpdateBegin(p4pyPETSc.InsertMode.INSERT,p4pyPETSc.ScatterMode.FORWARD)
        self.xGhosted.ghostUpdateEnd(p4pyPETSc.InsertMode.INSERT,p4pyPETSc.ScatterMode.FORWARD)
        self.yGhosted.zeroEntries()
        with self.xGhosted.localForm() as xlf, self.yGhosted.localForm() as ylf:
            self.ghosted_csr_mat.matvec(xlf.getArray(),ylf.getArray())
        y.setArray(self.yGhosted.getArray())
class OperatorShell(object):
    """Common base class for PETSc operator shells."""
    def __init__(self):
        pass
    def create(self, A):
        # PETSc shell hook; subclasses may override, nothing to do here.
        pass
    def getSize(self):
        """Return the number of degrees of freedom for the operator.

        Raises
        ------
        NotImplementedError
            Always; subclasses must provide their own implementation.
        """
        message = 'You need to define a getSize method for your shell'
        raise NotImplementedError(message)
class ProductOperatorShell(OperatorShell):
    """ A base class for shell operators that apply multiplcation.
    Operators derived from this class should have working multiplication
    functions.
    """
    def __init__(self):
        pass
    def mult(self, A, x, y):
        """Compute y = A x; subclasses must override.

        Raises
        ------
        NotImplementedError
            Always raised by this base implementation.
        """
        message = 'You need to define a multiplyfunction for your shell'
        raise NotImplementedError(message)
class InvOperatorShell(OperatorShell):
    """ A base class for inverse operator shells
    Operators derived from this class should have working apply
    functions.
    """
    def __init__(self):
        pass
    @staticmethod
    def _create_tmp_vec(size):
        """ Creates an empty vector of given size.
        Arguments
        ---------
        size : int
            Size of the temporary vector.
        Returns
        -------
        vec : PETSc vector
        """
        tmp = p4pyPETSc.Vec().create()
        tmp.setType('mpi')
        tmp.setSizes(size)
        return tmp
    @staticmethod
    def _create_copy_vec(vec):
        """ Creates a copy of a petsc4py vector.
        Parameters
        ----------
        vec : :class:`petsc4py.Vec`
        Returns
        -------
        tmp : :class:`petsc4py.Vec`
        """
        tmp = p4pyPETSc.Vec().create()
        tmp.setType('mpi')
        # NOTE(review): the vector created above is immediately replaced by
        # vec.copy(); only the copy is returned.
        tmp = vec.copy()
        return tmp
    def apply(self, A, x, y):
        # Subclasses must provide the actual inverse application.
        raise NotImplementedError('You need to define an apply' \
                                  'method for your shell')
    def getSize(self):
        """ Returns the size of InvOperatorShell.
        Notes
        -----
        This acts a virtual method and must be implemented for
        all inherited classes.
        """
        raise NotImplementedError()
    def create_petsc_ksp_obj(self,
                             petsc_option_prefix,
                             matrix_operator,
                             constant_null_space = False):
        """ Create a PETSc4py KSP object.
        Arguments
        ---------
        petsc_option_prefix : str
            PETSc commandline option prefix.
        matrix_operator : mat
            PETSc matrix object for the ksp class.
        constant_null_space : bool
            True if the KSP object has a constant null space.
        Returns
        -------
        ksp_obj : PETSc ksp
        """
        ksp_obj = p4pyPETSc.KSP().create()
        ksp_obj.setOperators(matrix_operator,
                             matrix_operator)
        ksp_obj.setOptionsPrefix(petsc_option_prefix)
        if constant_null_space:
            # Register the constant null space both with the PETSc options
            # database (for the prefixed solver) and on the operator itself.
            const_nullspace_str = ''.join([petsc_option_prefix,
                                           'ksp_constant_null_space'])
            self.options.setValue(const_nullspace_str,'')
            matrix_operator.setNullSpace(self.const_null_space)
        ksp_obj.setFromOptions()
        ksp_obj.setUp()
        return ksp_obj
    def _create_constant_nullspace(self):
        """Initialize a constant null space. """
        self.const_null_space = p4pyPETSc.NullSpace().create(comm=p4pyPETSc.COMM_WORLD,
                                                             vectors = (),
                                                             constant = True)
    def _set_dirichlet_idx_set(self):
        """
        Initialize an index set of non-Dirichlet degrees of freedom.
        When the value of some degrees of freedom are known in
        advance it can be helfpul to remove these degrees of
        freedom from the inverse operator. This function
        creates a PETSc4py index set of unknown degrees of freedom.
        """
        comm = Comm.get()
        # Assign number of unknowns
        num_dof = self.getSize()
        # Drop Dirichlet indices that fall outside this operator's range.
        self.strong_dirichlet_DOF = [i for i in self.strong_dirichlet_DOF if i< num_dof]
        try:
            num_known_dof = len(self.strong_dirichlet_DOF)
        except AttributeError:
            print("ERROR - strong_dirichlet_DOF have not been " \
                  " assigned for this inverse operator object.")
            exit()
        num_unknown_dof = num_dof - num_known_dof
        # Use boolean mask to collect unknown DOF indices
        self.dof_indices = numpy.arange(num_dof,
                                        dtype = 'int32')
        known_dof_mask = numpy.ones(num_dof,
                                    dtype = bool)
        known_dof_mask[self.strong_dirichlet_DOF] = False
        self.unknown_dof_indices = self.dof_indices[known_dof_mask]
        self.known_dof_indices = self.dof_indices[~known_dof_mask]
        if comm.size() == 1:
            # Create PETSc4py index set of unknown DOF
            self.known_dof_is = p4pyPETSc.IS()
            self.known_dof_is.createGeneral(self.known_dof_indices,
                                            comm=p4pyPETSc.COMM_WORLD)
            self.unknown_dof_is = p4pyPETSc.IS()
            self.unknown_dof_is.createGeneral(self.unknown_dof_indices,
                                              comm=p4pyPETSc.COMM_WORLD)
        elif comm.size() > 1:
            # In parallel, translate the subdomain indices to global
            # numbering before building the index sets.
            self.global_known_dof_indices = [self.par_info.subdomain2global[i] for i in self.known_dof_indices]
            self.global_unknown_dof_indices = [self.par_info.subdomain2global[i] for i in self.unknown_dof_indices]
            self.known_dof_is = p4pyPETSc.IS()
            self.known_dof_is.createGeneral(self.global_known_dof_indices,
                                            comm=p4pyPETSc.COMM_WORLD)
            self.unknown_dof_is = p4pyPETSc.IS()
            self.unknown_dof_is.createGeneral(self.global_unknown_dof_indices,
                                              comm=p4pyPETSc.COMM_WORLD)
    def _converged_trueRes(self,ksp,its,rnorm):
        """ Function handle to feed to ksp's setConvergenceTest """
        # Convergence is tested on the true residual (recomputed into
        # self.r_work), not on the preconditioned residual norm PETSc passes
        # in as rnorm.
        ksp.buildResidual(self.r_work)
        truenorm = self.r_work.norm()
        if its == 0:
            # Remember the initial residual to form the relative tolerance.
            self.rnorm0 = truenorm
            # ARB - Leaving these log events in for future debugging purposes.
            # logEvent("NumericalAnalytics KSP_LSC_LaplaceResidual: %12.5e" %(truenorm) )
            # logEvent("NumericalAnalytics KSP_LSC_LaplaceResidual(relative): %12.5e" %(truenorm / self.rnorm0) )
            # logEvent("         KSP it %i norm(r) = %e  norm(r)/|b| = %e ; atol=%e rtol=%e " % (its,
            #                                                                                    truenorm,
            #                                                                                    (truenorm/ self.rnorm0),
            #                                                                                    ksp.atol,
            #                                                                                    ksp.rtol))
            return False
        else:
            # ARB - Leaving these log events in for future debugging purposes.
            # logEvent("NumericalAnalytics KSP_LSC_LaplaceResidual: %12.5e" %(truenorm) )
            # logEvent("NumericalAnalytics KSP_LSC_LaplaceResidual(relative): %12.5e" %(truenorm / self.rnorm0) )
            # logEvent("         KSP it %i norm(r) = %e  norm(r)/|b| = %e ; atol=%e rtol=%e " % (its,
            #                                                                                    truenorm,
            #                                                                                    (truenorm/ self.rnorm0),
            #                                                                                    ksp.atol,
            #                                                                                    ksp.rtol))
            if truenorm < self.rnorm0*ksp.rtol:
                return p4pyPETSc.KSP.ConvergedReason.CONVERGED_RTOL
            if truenorm < ksp.atol:
                return p4pyPETSc.KSP.ConvergedReason.CONVERGED_ATOL
            return False
class LSCInv_shell(InvOperatorShell):
    """ Shell class for the LSC Inverse Preconditioner
    This class creates a shell for the least-squares commutator (LSC)
    preconditioner, where
    :math:`M_{s}= (B \hat{Q^{-1}_{v}} B^{'}) (B \hat{Q^{-1}_{v}} F
    \hat{Q^{-1}_{v}} B^{'})^{-1} (B \hat{Q^{-1}_{v}} B^{'})`
    is used to approximate the Schur complement.
    """
    def __init__(self, Qv, B, Bt, F):
        """Initializes the LSC inverse operator.
        Parameters
        ----------
        Qv : petsc4py matrix object
            The diagonal elements of the velocity mass matrix.
        B : petsc4py matrix object
            The discrete divergence operator.
        Bt : petsc4py matrix object
            The discrete gradient operator.
        F : petsc4py matrix object
            The A-block of the linear system.
        """
        # TODO - Find a good way to assert that Qv is diagonal
        self.Qv = Qv
        self.B = B
        self.Bt = Bt
        self.F = F
        # Assemble B * Qv^{-1} * Bt once; it is reused by every apply call.
        self._constructBQinvBt()
        self._options = p4pyPETSc.Options()
        if self._options.hasName('innerLSCsolver_BTinvBt_ksp_constant_null_space'):
            self._create_constant_nullspace()
            self.BQinvBt.setNullSpace(self.const_null_space)
        self.kspBQinvBt = p4pyPETSc.KSP().create()
        self.kspBQinvBt.setOperators(self.BQinvBt,self.BQinvBt)
        self.kspBQinvBt.setOptionsPrefix('innerLSCsolver_BTinvBt_')
        self.kspBQinvBt.pc.setUp()
        self.kspBQinvBt.setFromOptions()
        self.kspBQinvBt.setUp()
        # initialize solver for Qv
        self.kspQv = p4pyPETSc.KSP().create()
        self.kspQv.setOperators(self.Qv,self.Qv)
        self.kspQv.setOptionsPrefix('innerLSCsolver_T_')
        self.kspQv.setFromOptions()
        convergenceTest = 'r-true'
        if convergenceTest == 'r-true':
            # Use the true-residual convergence test from InvOperatorShell.
            self.r_work = self.BQinvBt.getVecLeft()
            self.rnorm0 = None
            self.kspBQinvBt.setConvergenceTest(self._converged_trueRes)
        else:
            self.r_work = None
        self.kspBQinvBt.setUp()
    def apply(self,A,x,y):
        """ Apply the LSC inverse operator
        Parameters
        ----------
        A : NULL
            A placeholder for internal function PETSc functions.
        x : :class:`p4pyPETSc.Vec`
            Vector which LSC operator is being applied to.
        Returns
        --------
        y : :class:`p4pyPETSc.Vec`
            Result of LSC acting on x.
        """
        # create temporary vectors
        B_sizes = self.B.getSizes()
        x_tmp = p4pyPETSc.Vec().create()
        x_tmp = x.copy()
        tmp1 = self._create_tmp_vec(B_sizes[0])
        tmp2 = self._create_tmp_vec(B_sizes[1])
        tmp3 = self._create_tmp_vec(B_sizes[1])
        if self._options.hasName('innerLSCsolver_BTinvBt_ksp_constant_null_space'):
            self.const_null_space.remove(x_tmp)
        # First (BQinvBt)^{-1} solve.
        self.kspBQinvBt.solve(x_tmp,tmp1)
        # Middle factor: B Qv^{-1} F Qv^{-1} B' applied step by step.
        self.B.multTranspose(tmp1,tmp2)
        self.kspQv.solve(tmp2,tmp3)
        self.F.mult(tmp3,tmp2)
        self.kspQv.solve(tmp2,tmp3)
        self.B.mult(tmp3,tmp1)
        if self._options.hasName('innerLSCsolver_BTinvBt_ksp_constant_null_space'):
            self.const_null_space.remove(x_tmp)
        # Second (BQinvBt)^{-1} solve produces the final result in y.
        self.kspBQinvBt.solve(tmp1,y)
        assert numpy.isnan(y.norm())==False, "Applying the schur complement \
                resulted in not-a-number."
    def _constructBQinvBt(self):
        """ Private method repsonsible for building BQinvBt """
        self.Qv_inv = petsc_create_diagonal_inv_matrix(self.Qv)
        QinvBt = self.Qv_inv.matMult(self.Bt)
        self.BQinvBt = self.B.matMult(QinvBt)
class MatrixShell(ProductOperatorShell):
    """ A shell class for a matrix. """
    def __init__(self, A):
        """Store the petsc4py matrix that this shell wraps.

        Parameters
        ----------
        A : matrix
            A petsc4py matrix object
        """
        self.A = A
    def mult(self, A, x, y):
        """Compute y = self.A * x.

        Parameters
        ----------
        A : matrix
            Dummy place holder for PETSc compatibility
        x : vector
            Input vector.
        y : vector
            Output vector, overwritten with the product.
        """
        self.A.mult(x, y)
class MatrixInvShell(InvOperatorShell):
    """ A PETSc shell class for a inverse operator. """
    def __init__(self, A):
        """ Initializes operators and solvers for inverse operator.
        Parameters
        ----------
        A : PETSc matrix
            This is the matrix object used to construct the inverse.
        """
        self.A = A
        # A direct LU solve (superlu_dist) applied once per apply call.
        self.ksp = p4pyPETSc.KSP().create()
        self.ksp.setOperators(self.A,self.A)
        self.ksp.setType('preonly')
        self.ksp.pc.setType('lu')
        self.ksp.pc.setFactorSolverType('superlu_dist')
        self.ksp.setUp()
    def apply(self,A,x,y):
        """ Apply the inverse pressure mass matrix.
        Parameters
        ----------
        A : matrix
            Dummy place holder for PETSc compatibility
        x : vector
        Returns
        -------
        y : vector
        """
        self.ksp.solve(x,y)
class SpInv_shell(InvOperatorShell):
    r""" Shell class for the SIMPLE preconditioner which applies the
    following action:
    .. math::
        \hat{S}^{-1} = (A_{11} - A_{01} \text{diag}(A_{00}) A_{10})^{-1}
    where :math:`A_{ij}` are sub-blocks of the global saddle point system.
    Parameters
    ----------
    A00: :class:`p4pyPETSc.Mat`
        The A00 block of the global saddle point system.
    A01: :class:`p4pyPETSc.Mat`
        The A01 block of the global saddle point system.
    A10: :class:`p4pyPETSc.Mat`
        The A10 block of the global saddle point system.
    A11: :class:`p4pyPETSc.Mat`
        The A11 block of the global saddle point system.
    use_constant_null_space: bool
        Indicates whether a constant null space should be used. See
        note below.
    Notes
    -----
    For Stokes or Navier-Stokes systems, the :math:`S` operator
    resembles a Laplcian matrix on the pressure. In cases where the
    global saddle point system uses pure Dirichlet boundary
    conditions, the :math:`S^{-1}` operator has a constant null
    space. Since most saddle-point simulations of interest do not
    have pure Dirichlet conditions, the `constNullSpace` flag defaults
    to false. Having the null space set to false when the global
    problem uses pure Dirichlet boundary conditions will likely result
    in poor solver performance or failure.
    """
    def __init__(self, A00, A11, A01, A10, constNullSpace):
        self.A00 = A00
        self.A11 = A11
        self.A01 = A01
        self.A10 = A10
        self.constNullSpace = constNullSpace
        # Assemble the approximate Schur complement Sp once up front.
        self._create_Sp()
        self._options = p4pyPETSc.Options()
        self.kspSp = p4pyPETSc.KSP().create()
        self.kspSp.setOperators(self.Sp,self.Sp)
        self.kspSp.setOptionsPrefix('innerSpsolver_')
        self.kspSp.setFromOptions()
        if self.constNullSpace:
            self._create_constant_nullspace()
            self.Sp.setNullSpace(self.const_null_space)
        self.kspSp.setUp()
    def apply(self,A,x,y):
        """ Applies the :math:`S_{p}` operator
        Parameters
        ----------
        A : None
            Dummy argument for PETSc interface
        x : :class:`p4pyPETSc.Vec`
            Vector to which :math:`S` is applied
        Returns
        -------
        y : :class:`p4pyPETSc.Vec`
            Result of :math:`S^{-1}x`
        """
        tmp1 = p4pyPETSc.Vec().create()
        tmp1 = x.copy()
        if self.constNullSpace:
            # Project the constant mode out of the RHS before solving.
            self.const_null_space.remove(tmp1)
        self.kspSp.solve(tmp1,y)
        assert numpy.isnan(y.norm())==False, "Applying the schur complement \
        resulted in not-a-number."
    def _create_Sp(self):
        # Sp = A11 - A10 * diag(A00)^{-1} * A01, built via matrix products
        # followed by aypx (y = alpha*y + x with alpha = -1).
        self.A00_inv = petsc_create_diagonal_inv_matrix(self.A00)
        A00_invBt = self.A00_inv.matMult(self.A01)
        self.Sp = self.A10.matMult(A00_invBt)
        self.Sp.aypx(-1.,self.A11)
class TwoPhase_PCDInv_shell(InvOperatorShell):
    r""" Shell class for the two-phase PCD preconditioner.  The
    two-phase PCD_inverse shell applies the following operator.
    .. math::
       \hat{S}^{-1} = (Q^{(1 / \mu)})^{-1} + (A_{p}^{(1 / \rho)})^{-1}
       (N_{p}^{(\rho)} + \dfrac{\alpha}{\Delta t} Q^{(\rho)} )
       (Q^{(\rho)})^{-1}
    where :math:`Q^{(1 / \mu)}` and :math:`Q^{(\rho)}` denote the pressure
    mass matrix scaled by the inverse dynamic viscosity and density
    respectively, :math:`(A_{p}^{(1 / \rho)})^{-1}`
    denotes the pressure Laplacian scaled by inverse density, and
    :math:`N_{p}^{(\rho)}` denotes the pressure advection operator scaled by
    the density, and :math:`\alpha` is a binary operator indicating
    whether the problem is temporal or steady state.
    """
    def __init__(self,
                 Qp_visc,
                 Qp_dens,
                 Ap_rho,
                 Np_rho,
                 alpha = False,
                 delta_t = 0,
                 num_chebyshev_its = 0,
                 strong_dirichlet_DOF = [],
                 laplace_null_space = False,
                 par_info=None):
        """ Initialize the two-phase PCD inverse operator.
        Parameters
        ----------
        Qp_visc : petsc4py matrix
            The pressure mass matrix with dynamic viscocity
            scaling.
        Qp_dens : petsc4py matrix
            The pressure mass matrix with density scaling.
        Ap_rho : petsc4py matrix
            The pressure Laplacian scaled with density scaling.
        Np_rho : petsc4py matrix
            The pressure advection operator with inverse density
            scaling.
        alpha : binary
            True if problem is temporal, False if problem is steady
            state.
        delta_t : float
            Time step parameter.
        num_chebyshev_its : int
            Number of chebyshev iteration steps to take. (0 indicates
            the chebyshev semi iteration is not used)
        strong_dirichlet_DOF : lst
            List of DOF with known, strongly enforced values.
        laplace_null_space : binary
            Indicates whether the pressure Laplace matrix has a
            null space or not.
        par_info : ParInfoClass
            Provides parallel info.
        """
        from . import LinearSolvers as LS
        # Set attributes
        self.Qp_visc = Qp_visc
        self.Qp_dens = Qp_dens
        self.Ap_rho = Ap_rho
        self.Np_rho = Np_rho
        self.alpha = alpha
        self.delta_t = delta_t
        self.num_chebyshev_its = num_chebyshev_its
        self.strong_dirichlet_DOF = strong_dirichlet_DOF
        self.laplace_null_space = laplace_null_space
        self.par_info = par_info
        self.options = p4pyPETSc.Options()
        self._create_constant_nullspace()
        # Build index sets separating known (Dirichlet) and unknown DOF.
        self._set_dirichlet_idx_set()
        self.kspAp_rho = self.create_petsc_ksp_obj('innerTPPCDsolver_Ap_rho_',
                                                   self.Ap_rho,
                                                   self.laplace_null_space)
        # Zero out the rows of Dirichlet DOF in the Laplacian operator.
        self.kspAp_rho.getOperators()[0].zeroRows(self.known_dof_is)
        if self.num_chebyshev_its:
            # Replace the mass matrices with Chebyshev semi-iteration
            # wrappers (applied instead of a direct diagonal solve).
            self.Qp_visc = LS.ChebyshevSemiIteration(self.Qp_visc,
                                                     0.5,
                                                     2.0)
            self.Qp_dens = LS.ChebyshevSemiIteration(self.Qp_dens,
                                                     0.5,
                                                     2.0)
        else:
            pass
            # Using ksp objects for the lumped mass matrices is much
            # slower than pointwise division.
            # self.kspQp_visc = self.create_petsc_ksp_obj('innerTPPCDsolver_Qp_visc_',
            #                                             self.Qp_visc)
            # self.kspQp_dens = self.create_petsc_ksp_obj('innerTPPCDsolver_Qp_dens_',
            #                                             self.Qp_dens)
    def getSize(self):
        """ Return the total number of DOF for the shell problem. """
        return self.Ap_rho.getSizes()[0][0]
    def apply(self,A,x,y):
        """
        Applies the two-phase pressure-convection-diffusion
        Schur complement approximation.
        Parameters
        ----------
        A : None
            Dummy variabled needed to interface with PETSc
        x : petsc4py vector
            Vector to which operator is applied
        Returns
        -------
        y : petsc4py vector
            Result of operator acting on x.
        Notes
        -----
        When strong Dirichlet conditions are enforced on the pressure,
        the PCD operator is applied to the set of unknowns that do not
        have Dirichlet boundary conditions.  At the end, the solution
        is then loaded into the original y-vector.
        """
        comm = Comm.get()
        x_tmp = self._create_copy_vec(x)
        tmp1 = self._create_copy_vec(x_tmp)
        tmp2 = self._create_copy_vec(x_tmp)
        # Apply the two mass-matrix inverses: into y for Qp_visc and into
        # tmp1 for Qp_dens (Chebyshev iteration or pointwise division).
        if self.num_chebyshev_its:
            self.Qp_visc.apply(x_tmp,
                               y,
                               self.num_chebyshev_its)
            self.Qp_dens.apply(x_tmp,
                               tmp1,
                               self.num_chebyshev_its)
        else:
            y.pointwiseDivide(x_tmp,self.Qp_visc.getDiagonal())
            tmp1.pointwiseDivide(x_tmp,self.Qp_dens.getDiagonal())
            # Pointwise divide appears to be much faster than ksp.
            # self.kspQp_visc.solve(x_tmp,y)
            # self.kspQp_dens.solve(x_tmp,tmp1)
        self.Np_rho.mult(tmp1,tmp2)
        if self.alpha is True:
            # Temporal problem: add the (1/dt) * x term.
            tmp2.axpy(old_div(1.,self.delta_t),x_tmp)
        if self.options.hasName('innerTPPCDsolver_Ap_rho_ksp_constant_null_space'):
            self.const_null_space.remove(tmp2)
        # Zero the Dirichlet DOF of the RHS before the Laplacian solve.
        zero_array = numpy.zeros(len(self.known_dof_is.getIndices()))
        tmp2.setValues(self.known_dof_is.getIndices(),zero_array)
        tmp2.assemblyEnd()
        self.kspAp_rho.solve(tmp2, tmp1)
        y.axpy(1.,tmp1)
        # Dirichlet DOF are forced to zero in the final result as well.
        y.setValues(self.known_dof_is.getIndices(),zero_array)
        y.assemblyEnd()
        assert numpy.isnan(y.norm())==False, "Applying the schur complement \
        resulted in not-a-number."
def l2Norm(x):
    """Return the parallel :math:`l_2` (Euclidean) norm of ``x``."""
    sum_of_squares = globalSum(numpy.dot(x, x))
    return math.sqrt(sum_of_squares)
def l1Norm(x):
    r"""Return the parallel :math:`l_1` norm of ``x``.

    The :math:`l_1` norm of a vector :math:`\mathbf{x} \in
    \mathbb{R}^n` is

    .. math::

       \| \mathbf{x} \|_{1} = \sum_{i=0} |x_i|

    In parallel the sum runs over all components on all processors,
    so the input must not contain "ghost" entries (each component
    lives on exactly one processor).

    :param x: numpy array of length n
    :return: float
    """
    local_abs_sum = numpy.sum(numpy.abs(x))
    return globalSum(local_abs_sum)
def lInfNorm(x):
    r"""Return the parallel :math:`l_{\infty}` norm of ``x``.

    The :math:`l_{\infty}` norm of a vector :math:`\mathbf{x} \in
    \mathbb{R}^n` is

    .. math::

       \|x\|_{\infty} = \max_i |x_i|

    Works for a distributed array with no ghost components (each
    component must be on a single processor).

    :param x: numpy array of length n
    :return: float
    """
    local_max = numpy.linalg.norm(x, numpy.inf)
    return globalMax(local_max)
def wDot(x,y,h):
    r"""Return the parallel weighted dot product of ``x`` and ``y``
    with weight vector ``h``.

    The weighted dot product is defined for a weight vector
    :math:`\mathbf{h}` as

    .. math::

       (\mathbf{x},\mathbf{y})_h = \sum_{i} h_{i} x_{i} y_{i}

    All weight vector components should be positive.

    :param x,y,h: numpy arrays for vectors and weight
    :return: the weighted dot product
    """
    local_weighted_sum = numpy.sum(x * y * h)
    return globalSum(local_weighted_sum)
def wl2Norm(x,h):
    """Return the parallel weighted :math:`l_2` norm of ``x`` with weight ``h``."""
    weighted_sum = globalSum(wDot(x, x, h))
    return math.sqrt(weighted_sum)
def wl1Norm(x,h):
    """Return the parallel weighted :math:`l_1` norm of ``x`` with weight ``h``."""
    local_sum = numpy.sum(numpy.abs(h * x))
    return globalSum(local_sum)
def wlInfNorm(x,h):
    r"""Return the parallel weighted :math:`l_{\infty}` norm of ``x`` with weight ``h``."""
    local_max = numpy.linalg.norm(h * x, numpy.inf)
    return globalMax(local_max)
def energyDot(x,y,A):
    """Return the "energy" dot product ``x^t A y`` (not parallel)."""
    xA = numpy.dot(x, A)
    return numpy.dot(xA, y)
def energyNorm(x,A):
    """Return the "energy" norm ``sqrt(x^t A x)`` (not parallel)."""
    quadratic_form = energyDot(x, x, A)
    return math.sqrt(quadratic_form)
def l2NormAvg(x):
    """Return the arithmetic averaged :math:`l_2` norm (root mean squared norm)."""
    inv_global_len = old_div(1.0, globalSum(len(x.flat)))
    mean_square = inv_global_len * globalSum(numpy.dot(x, x))
    return math.sqrt(mean_square)
# Alias: the RMS norm is exactly the averaged l_2 norm.
rmsNorm = l2NormAvg
def l2Norm_local(x):
    """Return the :math:`l_2` norm of the local (processor) part of ``x``
    only (not parallel)."""
    local_sum_sq = numpy.dot(x, x)
    return math.sqrt(local_sum_sq)
class WeightedNorm(object):
    """
    Compute the weighted norm for time step control (not currently parallel)

    The per-component weight combines a relative tolerance applied to a
    reference vector (set via :meth:`setWeight`) and an absolute tolerance
    floor; :meth:`norm` measures a vector against that weight and averages
    by the dimension.

    Parameters
    ----------
    shape : tuple of int
        Shape of the vectors to be measured.
    atol : float
        Absolute tolerance.
    rtol : float
        Relative tolerance.
    """
    def __init__(self,shape,atol,rtol):
        self.shape = shape
        # NOTE(review): dim is the *sum* of the shape entries (matching the
        # original implementation), not the total element count.
        self.dim = sum(self.shape)
        self.atol= atol
        self.rtol= rtol
        self.weight = numpy.ones(shape,'d')
        self.tmp    = numpy.ones(shape,'d')
    def setWeight(self,y):
        """Set the per-component weight to ``rtol*|y| + atol``."""
        self.weight[:] = numpy.absolute(y)
        self.weight *= self.rtol
        self.weight += self.atol
    def norm(self,y,type):
        """Return the weighted norm of ``y`` divided by the dimension.

        Parameters
        ----------
        y : array-like
            Vector to measure; must match ``shape``.
        type : norm order
            Passed through as ``ord`` to :func:`numpy.linalg.norm`.
        """
        self.tmp[:] = y
        self.tmp /= self.weight
        value = numpy.linalg.norm(self.tmp.flat,type)
        # numpy.linalg.norm always returns a float, so plain (true) division
        # is exactly equivalent to the past.utils.old_div shim it replaces.
        return value / self.dim
if __name__ == '__main__':
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
# def test_MGV():
# n=2**8 + 1
# h =1.0/(n-1.0)
# freq=10
# u = numpy.random.uniform(0,1,(n))
# u[0]=0.0
# u[n-1]=0.0
# x = numpy.arange(0,1.0+h,h)
# AList=[]
# N=n
# pList=[]
# rList=[]
# resList=[]
# while N >= 3:
# resList.append(Vec(N-2))
# A = dict()#SparseMat(N-2,N-2,3*(N-2),sym=True)
# H = 1.0/(N-1.0)
# #beginAssembly(A)
# for i in range(N-2):
# A[(i,i)] = 2.0/H**2
# if i > 0:
# A[(i,i-1)] = -1.0/H**2
# if i < N-3:
# A[(i,i+1)] = -1.0/H**2
# #endAssembly(A)
# AList.append(SparseMatFromDict(N-2,N-2,A)[0])
# cN = (N - 1)/2 + 1
# r = dict()#SparseMat(cN-2,N-2,3*(N-2))
# p = dict()#SparseMat(N-2,cN-2,3*(N-2))
# for i in range(cN-2):
# r[(i,2*i)] = 1.0/4.0
# r[(i,2*i+1)] = 2.0/4.0
# r[(i,2*i+2)] = 1.0/4.0
# p[(2*i,i)] = 1.0/2.0
# p[(2*i+1,i)]= 2.0/2.0
# p[(2*i+2,i)]= 1.0/2.0
# #r.to_csr()
# print cN-2,N-2,r.keys()
# if cN-2 > 0:
# rList.append(SparseMatFromDict(cN-2,N-2,r)[0])
# else:
# rList.append(None)
# #p.to_csr()
# pList.append(SparseMatFromDict(N-2,cN-2,p)[0])
# N = cN
# class Jacobi:
# def __init__(self,A):
# self.A=A
# self.n=A.shape[0]
# self.M=Vec(self.n)
# for i in range(self.n):
# self.M[i]=1.0/A[i,i]
# self.res=Vec(self.n)
# self.dx=Vec(self.n)
# def apply(self,w,jits,b,x):
# self.A.matvec(x,self.res)
# self.res-=b
# for it in range(jits):
# self.dx[:] = self.M*self.res
# self.dx*=w
# x -= self.dx
# self.A.matvec(x,self.res)
# self.res -= b
# jacobiList=[]
# for A in AList:
# jacobiList.append(Jacobi(A))
# jits = 3
# w = 2.0/3.0
# class MGV:
# def __init__(self,smootherList,AList,pList,rList,resList):
# self.AList = AList
# self.pList = pList
# self.rList = rList
# self.resList = resList
# self.xList=[]
# self.vList=[]
# self.bList=[]
# self.gpList=[]
# for res in resList:
# self.xList.append(Vec(len(res)))
# self.vList.append(Vec(len(res)))
# self.bList.append(Vec(len(res)))
# self.smootherList = smootherList
# def apply(self,w,nsPre,nsPost,level,b,x):
# logEvent("Level = "+`level`)
# if level == len(self.AList)-1:
# self.smootherList[level].apply(1.0,1,b,x)
# else:
# #smooth
# self.smootherList[level].apply(w,nsPre,b,x)
# #restrict the defect
# self.rList[level].matvec(self.smootherList[level].res,self.bList[level+1])
# #V-cycle on the error equation
# self.xList[level+1][:]=0.0
# self.apply(w,nsPre,nsPost,level+1,self.bList[level+1],self.xList[level+1])
# #prolong
# self.pList[level].matvec(self.xList[level+1],self.vList[level])
# #correct
# x-=self.vList[level]
# #smooth
# self.smootherList[level].apply(w,nsPost,b,x)
# self.resList[level][:]=self.smootherList[level].res
# mgv = MGV(jacobiList,AList,pList,rList,resList)
# rnorm=1.0
# mgits = 0
# while rnorm > 1.0e-10 and mgits < 20:
# mgits +=1
# mgv.apply(w,jits,jits,0,f[1:n-1],u[1:n-1])
# rnorm = l2Norm(resList[0])
| [
"numpy.absolute",
"numpy.sum",
"numpy.abs",
"past.utils.old_div",
"numpy.ones",
"petsc4py.PETSc.NullSpace",
"numpy.arange",
"numpy.linalg.norm",
"builtins.range",
"doctest.testmod",
"petsc4py.PETSc.Viewer",
"petsc4py.PETSc.Vec",
"petsc4py.PETSc.Mat.__init__",
"petsc4py.PETSc.Vec.__init__",... | [((3925, 3967), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(nr, nc)', 'dtype': '"""float"""'}), "(shape=(nr, nc), dtype='float')\n", (3936, 3967), False, 'import numpy\n'), ((4729, 4756), 'numpy.linalg.matrix_rank', 'numpy.linalg.matrix_rank', (['A'], {}), '(A)\n', (4753, 4756), False, 'import numpy\n'), ((5188, 5215), 'numpy.linalg.matrix_rank', 'numpy.linalg.matrix_rank', (['A'], {}), '(A)\n', (5212, 5215), False, 'import numpy\n'), ((8346, 8356), 'proteus.Comm.get', 'Comm.get', ([], {}), '()\n', (8354, 8356), False, 'from proteus import Comm\n'), ((26520, 26542), 'numpy.zeros', 'numpy.zeros', (['(n,)', '"""d"""'], {}), "((n,), 'd')\n", (26531, 26542), False, 'import numpy\n'), ((26725, 26749), 'numpy.zeros', 'numpy.zeros', (['(m, n)', '"""d"""'], {}), "((m, n), 'd')\n", (26736, 26749), False, 'import numpy\n'), ((26996, 27020), 'numpy.zeros', 'numpy.zeros', (['(nnz,)', '"""d"""'], {}), "((nnz,), 'd')\n", (27007, 27020), False, 'import numpy\n'), ((27034, 27061), 'numpy.zeros', 'numpy.zeros', (['(nr + 1,)', '"""i"""'], {}), "((nr + 1,), 'i')\n", (27045, 27061), False, 'import numpy\n'), ((27073, 27097), 'numpy.zeros', 'numpy.zeros', (['(nnz,)', '"""i"""'], {}), "((nnz,), 'i')\n", (27084, 27097), False, 'import numpy\n'), ((55827, 55844), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (55842, 55844), False, 'import doctest\n'), ((4217, 4249), 'numpy.save', 'numpy.save', (['output', 'dense_matrix'], {}), '(output, dense_matrix)\n', (4227, 4249), False, 'import numpy\n'), ((13047, 13075), 'petsc4py.PETSc.Vec.__init__', 'p4pyPETSc.Vec.__init__', (['self'], {}), '(self)\n', (13069, 13075), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((17526, 17536), 'proteus.Comm.get', 'Comm.get', ([], {}), '()\n', (17534, 17536), False, 'from proteus import Comm\n'), ((20243, 20271), 'petsc4py.PETSc.Mat.__init__', 'p4pyPETSc.Mat.__init__', (['self'], {}), '(self)\n', (20265, 20271), True, 'from petsc4py import PETSc as 
p4pyPETSc\n'), ((21948, 21958), 'proteus.Comm.get', 'Comm.get', ([], {}), '()\n', (21956, 21958), False, 'from proteus import Comm\n'), ((22652, 22669), 'petsc4py.PETSc.LGMap', 'p4pyPETSc.LGMap', ([], {}), '()\n', (22667, 22669), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((24359, 24384), 'builtins.range', 'range', (['(par_n + par_nghost)'], {}), '(par_n + par_nghost)\n', (24364, 24384), False, 'from builtins import range\n'), ((25345, 25355), 'builtins.range', 'range', (['dim'], {}), '(dim)\n', (25350, 25355), False, 'from builtins import range\n'), ((25600, 25610), 'builtins.range', 'range', (['dim'], {}), '(dim)\n', (25605, 25610), False, 'from builtins import range\n'), ((28302, 28313), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (28310, 28313), False, 'import sys\n'), ((33705, 33715), 'proteus.Comm.get', 'Comm.get', ([], {}), '()\n', (33713, 33715), False, 'from proteus import Comm\n'), ((34263, 34299), 'numpy.arange', 'numpy.arange', (['num_dof'], {'dtype': '"""int32"""'}), "(num_dof, dtype='int32')\n", (34275, 34299), False, 'import numpy\n'), ((34367, 34398), 'numpy.ones', 'numpy.ones', (['num_dof'], {'dtype': 'bool'}), '(num_dof, dtype=bool)\n', (34377, 34398), False, 'import numpy\n'), ((38985, 39004), 'petsc4py.PETSc.Options', 'p4pyPETSc.Options', ([], {}), '()\n', (39002, 39004), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((44720, 44739), 'petsc4py.PETSc.Options', 'p4pyPETSc.Options', ([], {}), '()\n', (44737, 44739), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((48769, 48788), 'petsc4py.PETSc.Options', 'p4pyPETSc.Options', ([], {}), '()\n', (48786, 48788), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((50918, 50928), 'proteus.Comm.get', 'Comm.get', ([], {}), '()\n', (50926, 50928), False, 'from proteus import Comm\n'), ((53505, 53536), 'numpy.linalg.norm', 'numpy.linalg.norm', (['x', 'numpy.inf'], {}), '(x, numpy.inf)\n', (53522, 53536), False, 'import numpy\n'), ((53997, 54017), 'numpy.sum', 'numpy.sum', (['(x * y 
* h)'], {}), '(x * y * h)\n', (54006, 54017), False, 'import numpy\n'), ((54416, 54451), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(h * x)', 'numpy.inf'], {}), '(h * x, numpy.inf)\n', (54433, 54451), False, 'import numpy\n'), ((54570, 54585), 'numpy.dot', 'numpy.dot', (['x', 'A'], {}), '(x, A)\n', (54579, 54585), False, 'import numpy\n'), ((55082, 55097), 'numpy.dot', 'numpy.dot', (['x', 'x'], {}), '(x, x)\n', (55091, 55097), False, 'import numpy\n'), ((55393, 55415), 'numpy.ones', 'numpy.ones', (['shape', '"""d"""'], {}), "(shape, 'd')\n", (55403, 55415), False, 'import numpy\n'), ((55437, 55459), 'numpy.ones', 'numpy.ones', (['shape', '"""d"""'], {}), "(shape, 'd')\n", (55447, 55459), False, 'import numpy\n'), ((55511, 55528), 'numpy.absolute', 'numpy.absolute', (['y'], {}), '(y)\n', (55525, 55528), False, 'import numpy\n'), ((55698, 55736), 'numpy.linalg.norm', 'numpy.linalg.norm', (['self.tmp.flat', 'type'], {}), '(self.tmp.flat, type)\n', (55715, 55736), False, 'import numpy\n'), ((55751, 55775), 'past.utils.old_div', 'old_div', (['value', 'self.dim'], {}), '(value, self.dim)\n', (55758, 55775), False, 'from past.utils import old_div\n'), ((1042, 1060), 'petsc4py.PETSc.Viewer', 'p4pyPETSc.Viewer', ([], {}), '()\n', (1058, 1060), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((1119, 1137), 'petsc4py.PETSc.Viewer', 'p4pyPETSc.Viewer', ([], {}), '()\n', (1135, 1137), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((3580, 3595), 'petsc4py.PETSc.Mat', 'p4pyPETSc.Mat', ([], {}), '()\n', (3593, 3595), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((6434, 6449), 'petsc4py.PETSc.Vec', 'p4pyPETSc.Vec', ([], {}), '()\n', (6447, 6449), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((6483, 6498), 'petsc4py.PETSc.Vec', 'p4pyPETSc.Vec', ([], {}), '()\n', (6496, 6498), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((9386, 9401), 'petsc4py.PETSc.Mat', 'p4pyPETSc.Mat', ([], {}), '()\n', (9399, 9401), True, 'from petsc4py import PETSc as 
p4pyPETSc\n'), ((10353, 10368), 'petsc4py.PETSc.Mat', 'p4pyPETSc.Mat', ([], {}), '()\n', (10366, 10368), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((10797, 10812), 'petsc4py.PETSc.Mat', 'p4pyPETSc.Mat', ([], {}), '()\n', (10810, 10812), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((34748, 34762), 'petsc4py.PETSc.IS', 'p4pyPETSc.IS', ([], {}), '()\n', (34760, 34762), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((34936, 34950), 'petsc4py.PETSc.IS', 'p4pyPETSc.IS', ([], {}), '()\n', (34948, 34950), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((52448, 52463), 'numpy.dot', 'numpy.dot', (['x', 'x'], {}), '(x, x)\n', (52457, 52463), False, 'import numpy\n'), ((53056, 53068), 'numpy.abs', 'numpy.abs', (['x'], {}), '(x)\n', (53065, 53068), False, 'import numpy\n'), ((54276, 54292), 'numpy.abs', 'numpy.abs', (['(h * x)'], {}), '(h * x)\n', (54285, 54292), False, 'import numpy\n'), ((1644, 1662), 'petsc4py.PETSc.Viewer', 'p4pyPETSc.Viewer', ([], {}), '()\n', (1660, 1662), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((1707, 1722), 'petsc4py.PETSc.Mat', 'p4pyPETSc.Mat', ([], {}), '()\n', (1720, 1722), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((2314, 2332), 'petsc4py.PETSc.Viewer', 'p4pyPETSc.Viewer', ([], {}), '()\n', (2330, 2332), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((2377, 2392), 'petsc4py.PETSc.Vec', 'p4pyPETSc.Vec', ([], {}), '()\n', (2390, 2392), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((2982, 3000), 'petsc4py.PETSc.Viewer', 'p4pyPETSc.Viewer', ([], {}), '()\n', (2998, 3000), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((3045, 3059), 'petsc4py.PETSc.IS', 'p4pyPETSc.IS', ([], {}), '()\n', (3057, 3059), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((8828, 8843), 'petsc4py.PETSc.Mat', 'p4pyPETSc.Mat', ([], {}), '()\n', (8841, 8843), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((21563, 21626), 'numpy.zeros', 'numpy.zeros', (['(self.blockSize * subdomain2global.shape[0],)', 
'"""i"""'], {}), "((self.blockSize * subdomain2global.shape[0],), 'i')\n", (21574, 21626), False, 'import numpy\n'), ((21649, 21670), 'builtins.range', 'range', (['self.blockSize'], {}), '(self.blockSize)\n', (21654, 21670), False, 'from builtins import range\n'), ((25037, 25073), 'numpy.arange', 'numpy.arange', (['start_petsc', 'end_petsc'], {}), '(start_petsc, end_petsc)\n', (25049, 25073), False, 'import numpy\n'), ((30950, 30965), 'petsc4py.PETSc.Vec', 'p4pyPETSc.Vec', ([], {}), '()\n', (30963, 30965), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((31317, 31332), 'petsc4py.PETSc.Vec', 'p4pyPETSc.Vec', ([], {}), '()\n', (31330, 31332), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((32432, 32447), 'petsc4py.PETSc.KSP', 'p4pyPETSc.KSP', ([], {}), '()\n', (32445, 32447), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((33098, 33119), 'petsc4py.PETSc.NullSpace', 'p4pyPETSc.NullSpace', ([], {}), '()\n', (33117, 33119), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((35387, 35401), 'petsc4py.PETSc.IS', 'p4pyPETSc.IS', ([], {}), '()\n', (35399, 35401), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((35582, 35596), 'petsc4py.PETSc.IS', 'p4pyPETSc.IS', ([], {}), '()\n', (35594, 35596), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((39224, 39239), 'petsc4py.PETSc.KSP', 'p4pyPETSc.KSP', ([], {}), '()\n', (39237, 39239), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((39546, 39561), 'petsc4py.PETSc.KSP', 'p4pyPETSc.KSP', ([], {}), '()\n', (39559, 39561), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((40514, 40529), 'petsc4py.PETSc.Vec', 'p4pyPETSc.Vec', ([], {}), '()\n', (40527, 40529), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((42541, 42556), 'petsc4py.PETSc.KSP', 'p4pyPETSc.KSP', ([], {}), '()\n', (42554, 42556), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((44762, 44777), 'petsc4py.PETSc.KSP', 'p4pyPETSc.KSP', ([], {}), '()\n', (44775, 44777), True, 'from petsc4py import PETSc as p4pyPETSc\n'), 
((45482, 45497), 'petsc4py.PETSc.Vec', 'p4pyPETSc.Vec', ([], {}), '()\n', (45495, 45497), True, 'from petsc4py import PETSc as p4pyPETSc\n'), ((51747, 51773), 'past.utils.old_div', 'old_div', (['(1.0)', 'self.delta_t'], {}), '(1.0, self.delta_t)\n', (51754, 51773), False, 'from past.utils import old_div\n'), ((54908, 54923), 'numpy.dot', 'numpy.dot', (['x', 'x'], {}), '(x, x)\n', (54917, 54923), False, 'import numpy\n'), ((14425, 14462), 'numpy.zeros', 'numpy.zeros', (['(blockSize * nghosts)', '"""i"""'], {}), "(blockSize * nghosts, 'i')\n", (14436, 14462), False, 'import numpy\n'), ((14491, 14507), 'builtins.range', 'range', (['blockSize'], {}), '(blockSize)\n', (14496, 14507), False, 'from builtins import range\n'), ((14789, 14847), 'numpy.zeros', 'numpy.zeros', (['(blockSize * subdomain2global.shape[0],)', '"""i"""'], {}), "((blockSize * subdomain2global.shape[0],), 'i')\n", (14800, 14847), False, 'import numpy\n'), ((14874, 14890), 'builtins.range', 'range', (['blockSize'], {}), '(blockSize)\n', (14879, 14890), False, 'from builtins import range\n'), ((25116, 25156), 'numpy.arange', 'numpy.arange', (['start_proteus', 'end_proteus'], {}), '(start_proteus, end_proteus)\n', (25128, 25156), False, 'import numpy\n'), ((25419, 25450), 'builtins.range', 'range', (['rowptr[i]', 'rowptr[i + 1]'], {}), '(rowptr[i], rowptr[i + 1])\n', (25424, 25450), False, 'from builtins import range\n'), ((25692, 25735), 'builtins.range', 'range', (['rowptr_petsc[i]', 'rowptr_petsc[i + 1]'], {}), '(rowptr_petsc[i], rowptr_petsc[i + 1])\n', (25697, 25735), False, 'from builtins import range\n')] |
r""" Dataloader builder for few-shot semantic segmentation dataset """
from torchvision import transforms
from torch.utils.data import DataLoader
import albumentations as A
import albumentations.pytorch
from PIL import Image
import numpy as np
import cv2
from data.pascal import DatasetPASCAL
from data.coco import DatasetCOCO
from data.fss import DatasetFSS
class Compose(A.Compose):
def __init__(self, transforms, bbox_params=None, keypoint_params=None, additional_targets=None, p=1):
super().__init__(transforms, bbox_params=bbox_params, keypoint_params=keypoint_params, additional_targets=additional_targets, p=p)
def __call__(self, image, mask):
augmented = super().__call__(image=np.array(image), mask=np.array(mask))
return augmented['image'], augmented['mask']
class FSSDataset:
@classmethod
def initialize(cls, benchmark, img_size, datapath, use_original_imgsize, apply_cats_augmentation=False, apply_pfenet_augmentation=False):
cls.datasets = {
'pascal': DatasetPASCAL,
'coco': DatasetCOCO,
'fss': DatasetFSS,
}
cls.img_mean = [0.485, 0.456, 0.406]
cls.img_std = [0.229, 0.224, 0.225]
cls.datapath = datapath
cls.use_original_imgsize = use_original_imgsize
cats_augmentation = [
A.ToGray(p=0.2),
A.Posterize(p=0.2),
A.Equalize(p=0.2),
A.Sharpen(p=0.2),
A.RandomBrightnessContrast(p=0.2),
A.Solarize(p=0.2),
A.ColorJitter(p=0.2),
]
scale_limit = (0.9, 1.1) if benchmark == 'coco' else (0.8, 1.25)
pfenet_augmentation = [
A.RandomScale(scale_limit=scale_limit, p=1.),
A.Rotate(limit=10, p=1.),
A.GaussianBlur((5, 5), p=0.5),
A.HorizontalFlip(p=0.5),
A.PadIfNeeded(img_size, img_size, border_mode=cv2.BORDER_CONSTANT,
value=[x * 255 for x in cls.img_mean], mask_value=0),
A.RandomCrop(img_size, img_size),
]
cls.trn_transform = Compose([
*(cats_augmentation if apply_cats_augmentation else ()),
*(pfenet_augmentation if apply_pfenet_augmentation else ()),
A.Resize(img_size, img_size),
A.Normalize(cls.img_mean, cls.img_std),
A.pytorch.transforms.ToTensorV2(),
])
cls.transform = Compose([
A.Resize(img_size, img_size),
A.Normalize(cls.img_mean, cls.img_std),
A.pytorch.transforms.ToTensorV2(),
])
@classmethod
def build_dataloader(cls, benchmark, bsz, nworker, fold, split, shot=1):
# Force randomness during training for diverse episode combinations
# Freeze randomness during testing for reproducibility
shuffle = split == 'trn'
nworker = nworker if split == 'trn' else 0
transform = cls.trn_transform if split == 'trn' else cls.transform
dataset = cls.datasets[benchmark](cls.datapath, fold=fold, transform=transform, split=split, shot=shot, use_original_imgsize=cls.use_original_imgsize)
dataloader = DataLoader(dataset, batch_size=bsz, shuffle=shuffle, num_workers=nworker)
return dataloader | [
"albumentations.PadIfNeeded",
"torch.utils.data.DataLoader",
"albumentations.Solarize",
"albumentations.RandomScale",
"albumentations.Resize",
"albumentations.RandomBrightnessContrast",
"albumentations.Posterize",
"albumentations.Rotate",
"albumentations.Normalize",
"albumentations.ToGray",
"alb... | [((3163, 3236), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'bsz', 'shuffle': 'shuffle', 'num_workers': 'nworker'}), '(dataset, batch_size=bsz, shuffle=shuffle, num_workers=nworker)\n', (3173, 3236), False, 'from torch.utils.data import DataLoader\n'), ((1346, 1361), 'albumentations.ToGray', 'A.ToGray', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1354, 1361), True, 'import albumentations as A\n'), ((1375, 1393), 'albumentations.Posterize', 'A.Posterize', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1386, 1393), True, 'import albumentations as A\n'), ((1407, 1424), 'albumentations.Equalize', 'A.Equalize', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1417, 1424), True, 'import albumentations as A\n'), ((1438, 1454), 'albumentations.Sharpen', 'A.Sharpen', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1447, 1454), True, 'import albumentations as A\n'), ((1468, 1501), 'albumentations.RandomBrightnessContrast', 'A.RandomBrightnessContrast', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1494, 1501), True, 'import albumentations as A\n'), ((1515, 1532), 'albumentations.Solarize', 'A.Solarize', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1525, 1532), True, 'import albumentations as A\n'), ((1546, 1566), 'albumentations.ColorJitter', 'A.ColorJitter', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1559, 1566), True, 'import albumentations as A\n'), ((1697, 1742), 'albumentations.RandomScale', 'A.RandomScale', ([], {'scale_limit': 'scale_limit', 'p': '(1.0)'}), '(scale_limit=scale_limit, p=1.0)\n', (1710, 1742), True, 'import albumentations as A\n'), ((1755, 1780), 'albumentations.Rotate', 'A.Rotate', ([], {'limit': '(10)', 'p': '(1.0)'}), '(limit=10, p=1.0)\n', (1763, 1780), True, 'import albumentations as A\n'), ((1793, 1822), 'albumentations.GaussianBlur', 'A.GaussianBlur', (['(5, 5)'], {'p': '(0.5)'}), '((5, 5), p=0.5)\n', (1807, 1822), True, 'import albumentations as A\n'), ((1836, 1859), 'albumentations.HorizontalFlip', 'A.HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1852, 1859), True, 
'import albumentations as A\n'), ((1873, 1999), 'albumentations.PadIfNeeded', 'A.PadIfNeeded', (['img_size', 'img_size'], {'border_mode': 'cv2.BORDER_CONSTANT', 'value': '[(x * 255) for x in cls.img_mean]', 'mask_value': '(0)'}), '(img_size, img_size, border_mode=cv2.BORDER_CONSTANT, value=[(\n x * 255) for x in cls.img_mean], mask_value=0)\n', (1886, 1999), True, 'import albumentations as A\n'), ((2022, 2054), 'albumentations.RandomCrop', 'A.RandomCrop', (['img_size', 'img_size'], {}), '(img_size, img_size)\n', (2034, 2054), True, 'import albumentations as A\n'), ((718, 733), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (726, 733), True, 'import numpy as np\n'), ((740, 754), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (748, 754), True, 'import numpy as np\n'), ((2262, 2290), 'albumentations.Resize', 'A.Resize', (['img_size', 'img_size'], {}), '(img_size, img_size)\n', (2270, 2290), True, 'import albumentations as A\n'), ((2304, 2342), 'albumentations.Normalize', 'A.Normalize', (['cls.img_mean', 'cls.img_std'], {}), '(cls.img_mean, cls.img_std)\n', (2315, 2342), True, 'import albumentations as A\n'), ((2356, 2389), 'albumentations.pytorch.transforms.ToTensorV2', 'A.pytorch.transforms.ToTensorV2', ([], {}), '()\n', (2387, 2389), True, 'import albumentations as A\n'), ((2449, 2477), 'albumentations.Resize', 'A.Resize', (['img_size', 'img_size'], {}), '(img_size, img_size)\n', (2457, 2477), True, 'import albumentations as A\n'), ((2491, 2529), 'albumentations.Normalize', 'A.Normalize', (['cls.img_mean', 'cls.img_std'], {}), '(cls.img_mean, cls.img_std)\n', (2502, 2529), True, 'import albumentations as A\n'), ((2543, 2576), 'albumentations.pytorch.transforms.ToTensorV2', 'A.pytorch.transforms.ToTensorV2', ([], {}), '()\n', (2574, 2576), True, 'import albumentations as A\n')] |
# coding: utf-8
'''
# Author: <NAME>
# Date: 2021/08/11
# Email: <EMAIL>
# Description: 数据pipline
'''
import tensorflow as tf
from tensorflow.data import Dataset as tfd
import numpy as np
import cv2
import os
from imgaug import augmenters as iaa
aug = iaa.SomeOf((0, 6),
[
iaa.Affine(scale=[0.9, 1.0], cval=255, mode='constant'), # mode='edge'
iaa.Affine(rotate=(-1,1), cval=255, mode='constant'), # 旋转增强器
iaa.GaussianBlur(sigma=(0.0,0.5)), # 高斯模糊增强器
iaa.AdditiveGaussianNoise(scale=(0, 0.001*255)), # 高斯噪声增强器
iaa.JpegCompression(compression=[0, 10]),
iaa.PiecewiseAffine(scale=(0.004, 0.006)) # 扭曲增强器
])
START_TOKEN = 0
PAD_TOKEN = 1
END_TOKEN = 2
UNK_TOKEN = 3
STD = 1.0
MEAN = 0.0
def augment_func(image):
image = image.astype(np.uint8)
image = aug.augment_image(image)
image = image.astype(np.float32)
image = np.expand_dims(image, axis=-1)
return image
def tf_augment_func(image, label):
[image,] = tf.numpy_function(augment_func, [image], [tf.float32])
return image, label
def process_resize(img, target_size=(56, 512)):
h, w = img.shape
scale = target_size[0] / h
t_img = cv2.resize(img, (int(w * scale), int(h * scale)))
if t_img.shape[1] > target_size[1]:
return img, False
else:
img_mask = np.full((target_size[0], target_size[1]), fill_value=255, dtype=np.float32)
img_mask[:t_img.shape[0], :t_img.shape[1]] = t_img
return img_mask, True
def process_resize_(img, target_size=(56, 512)):
h, w = img.shape
scale = target_size[0 ] / h
t_img = cv2.resize(img, (int(w*scale//64*64),int( h*scale)))
return t_img, True
# In[4]:
def yield_func(img_dir, label_file, voc_file, max_length=160, image_size=(56, 512), filter_size=True, resize=True, is_training=True, shuffle=True, padsize=True):
img_dir = bytes.decode(img_dir, encoding='utf-8')
label_file = bytes.decode(label_file, encoding='utf-8')
img_lists = []
label_lists = []
with open(label_file, mode='r') as fr:
ff = fr.readlines()
for line in ff:
line = line.strip()
line = line.split(' ')
img_name = line[0]
if not img_name.endswith('g'):
img_name = img_name[:-1]
img_lists.append(os.path.join(str(img_dir), img_name))
# print(line)
label_lists.append(line[1:])
print('imgs num & labels num: {}-{}'.format(len(img_lists), len(label_lists)))
assert len(img_lists) == len(label_lists), 'train_labels != train_images'
assert len(img_lists) != 0, 'No file exists'
voc2id = {}
voc2id['START_TOKEN'] = 0
voc2id['PAD_TOKEN'] = 1
voc2id['END_TOKEN'] = 2
voc2id['UNK_TOKEN'] = 3
with open(voc_file, mode='r') as f:
ff = f.readlines()
for i, voc in enumerate(ff):
voc = voc.strip()
voc2id[voc] = i+4
index = 0
assert len(img_lists)==len(label_lists), 'check the inputs length!'
stop_nums = len(img_lists)
while index < stop_nums:
image = cv2.imread(img_lists[index], cv2.IMREAD_GRAYSCALE)
h, w = image.shape
if filter_size and not resize:
if w > image_size[1] or h > image_size[0]:
index += 1
continue
if resize:
if padsize:
image, flag = process_resize(image, target_size=image_size)
if not flag:
index += 1
continue
else:
try:
image, _ = process_resize_(image, target_size=image_size)
h, w = image.shape
except:
index += 1
continue
image = np.rot90(image, 3)
# image = (image/255. - MEAN) / STD
label_mask = np.full(shape=(max_length), fill_value=voc2id['PAD_TOKEN'], dtype=int)
label = label_lists[index]
label = [voc2id.get(i, voc2id['UNK_TOKEN']) for i in label]
label.insert(0,voc2id['START_TOKEN'])
label.append(voc2id['END_TOKEN'])
label_len = len(label) if len(label) < max_length-1 else max_length-1
label_mask[:label_len] = label[:label_len]
index += 1
yield image, label_mask
def DataSetPipline(img_dir, label_file, voc_file, max_length=160, batch_size=1, image_size=(56,512), filter_size=True, resize=True,
is_training=True, shuffle=True, padsize=True):
dataset = tfd.from_generator(yield_func,
output_types=(tf.float32, tf.int16),
output_shapes=((tf.TensorShape([None, None]),
tf.TensorShape([max_length]))),
args=(img_dir, label_file, voc_file, max_length, image_size, filter_size, resize,
is_training, shuffle, padsize)
)
if is_training:
dataset = dataset.map(tf_augment_func, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if shuffle:
dataset = dataset.shuffle(10000, reshuffle_each_iteration=True)
return dataset.batch(batch_size, drop_remainder=True)
# In[ ]:
if __name__ == '__main__':
voc_file = '/data/small_vocab.txt'
img_dir = '/data/images_train/'
label_file = '/data/train.formulas.norm.txt'
dataset = DataSetPipline(img_dir, label_file, voc_file, max_length=160, batch_size=1, image_size=(56,336),
resize=False, is_training=True, shuffle=True)
dataset_test = dataset.take(5)
print(len(list(dataset_test.as_numpy_iterator())))
print(np.array(list(dataset_test.as_numpy_iterator())[0][0]).shape)
print(np.array(list(dataset_test.as_numpy_iterator())[0][1]).shape)
print('...')
print(np.array(list(dataset_test.as_numpy_iterator())[1][0]).shape)
print(np.array(list(dataset_test.as_numpy_iterator())[1][1]).shape)
print('...')
print(np.array(list(dataset_test.as_numpy_iterator())[2][0]).shape)
print(np.array(list(dataset_test.as_numpy_iterator())[2][1]).shape)
print('...')
print(np.array(list(dataset_test.as_numpy_iterator())[3][0]).shape)
print(np.array(list(dataset_test.as_numpy_iterator())[3][1]).shape)
| [
"numpy.full",
"imgaug.augmenters.JpegCompression",
"tensorflow.TensorShape",
"numpy.expand_dims",
"imgaug.augmenters.Affine",
"cv2.imread",
"numpy.rot90",
"imgaug.augmenters.AdditiveGaussianNoise",
"tensorflow.numpy_function",
"imgaug.augmenters.PiecewiseAffine",
"imgaug.augmenters.GaussianBlur"... | [((872, 902), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(-1)'}), '(image, axis=-1)\n', (886, 902), True, 'import numpy as np\n'), ((971, 1025), 'tensorflow.numpy_function', 'tf.numpy_function', (['augment_func', '[image]', '[tf.float32]'], {}), '(augment_func, [image], [tf.float32])\n', (988, 1025), True, 'import tensorflow as tf\n'), ((287, 342), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': '[0.9, 1.0]', 'cval': '(255)', 'mode': '"""constant"""'}), "(scale=[0.9, 1.0], cval=255, mode='constant')\n", (297, 342), True, 'from imgaug import augmenters as iaa\n'), ((363, 416), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'rotate': '(-1, 1)', 'cval': '(255)', 'mode': '"""constant"""'}), "(rotate=(-1, 1), cval=255, mode='constant')\n", (373, 416), True, 'from imgaug import augmenters as iaa\n'), ((430, 464), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', ([], {'sigma': '(0.0, 0.5)'}), '(sigma=(0.0, 0.5))\n', (446, 464), True, 'from imgaug import augmenters as iaa\n'), ((480, 529), 'imgaug.augmenters.AdditiveGaussianNoise', 'iaa.AdditiveGaussianNoise', ([], {'scale': '(0, 0.001 * 255)'}), '(scale=(0, 0.001 * 255))\n', (505, 529), True, 'from imgaug import augmenters as iaa\n'), ((544, 584), 'imgaug.augmenters.JpegCompression', 'iaa.JpegCompression', ([], {'compression': '[0, 10]'}), '(compression=[0, 10])\n', (563, 584), True, 'from imgaug import augmenters as iaa\n'), ((590, 631), 'imgaug.augmenters.PiecewiseAffine', 'iaa.PiecewiseAffine', ([], {'scale': '(0.004, 0.006)'}), '(scale=(0.004, 0.006))\n', (609, 631), True, 'from imgaug import augmenters as iaa\n'), ((1309, 1384), 'numpy.full', 'np.full', (['(target_size[0], target_size[1])'], {'fill_value': '(255)', 'dtype': 'np.float32'}), '((target_size[0], target_size[1]), fill_value=255, dtype=np.float32)\n', (1316, 1384), True, 'import numpy as np\n'), ((3087, 3137), 'cv2.imread', 'cv2.imread', (['img_lists[index]', 'cv2.IMREAD_GRAYSCALE'], 
{}), '(img_lists[index], cv2.IMREAD_GRAYSCALE)\n', (3097, 3137), False, 'import cv2\n'), ((3778, 3796), 'numpy.rot90', 'np.rot90', (['image', '(3)'], {}), '(image, 3)\n', (3786, 3796), True, 'import numpy as np\n'), ((3875, 3943), 'numpy.full', 'np.full', ([], {'shape': 'max_length', 'fill_value': "voc2id['PAD_TOKEN']", 'dtype': 'int'}), "(shape=max_length, fill_value=voc2id['PAD_TOKEN'], dtype=int)\n", (3882, 3943), True, 'import numpy as np\n'), ((4697, 4725), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, None]'], {}), '([None, None])\n', (4711, 4725), True, 'import tensorflow as tf\n'), ((4776, 4804), 'tensorflow.TensorShape', 'tf.TensorShape', (['[max_length]'], {}), '([max_length])\n', (4790, 4804), True, 'import tensorflow as tf\n')] |
"""Utility functions and constants for signal class unit tests."""
from numbers import Number
import os.path
import random
import numpy as np
def test_init(args, defaults, cls, check):
for i in range(len(args)):
some_args = args[:i]
a = cls(*some_args)
expected = args[:i] + defaults[i:]
check(a, *expected)
def test_eq(cls, args, changes):
a = cls(*args)
b = cls(*args)
assert a == b
for i in range(len(changes)):
changed_args = args[:i] + (changes[i],) + args[i + 1:]
b = cls(*changed_args)
assert a != b
def test_indexing(x, expected, test_count):
# Before random indexing, try indexing with a single colon.
# This will elicit many possible bugs.
assert_arrays_equal(x[:], expected[:], strict=True)
shape = expected.shape
if _any_zero(shape):
return
for _ in range(test_count):
index_count = random.randrange(len(shape)) + 1
if index_count == 1:
key = _get_test_index(shape[0])
else:
key = tuple(_get_test_index(n) for n in shape[:index_count])
# print(key, x[key], expected[key])
assert_arrays_equal(x[key], expected[key], strict=True)
def _any_zero(x):
return not np.all(np.array(x))
def _get_test_index(n):
index_type = random.choice(('number', 'range', 'colon'))
if index_type == 'number':
return random.randrange(n)
elif index_type == 'range':
start = random.randrange(-n, n)
stop = random.randrange(-n, n)
return slice(start, stop)
else:
return slice(None, None, None)
# def test_mapping(a, forward_name, inverse_name, cases):
#
# for x, y in cases:
#
# method = getattr(a, forward_name)
# result = method(x)
# assert_numbers_or_arrays_equal(result, y)
#
# method = getattr(a, inverse_name)
# result = method(y)
# assert_numbers_or_arrays_equal(result, x)
def assert_numbers_or_arrays_equal(x, y):
if isinstance(x, Number):
assert x == y
else:
assert_arrays_equal(x, y)
def assert_arrays_equal(x, y, strict=False):
if strict:
assert x.dtype == y.dtype
assert np.alltrue(x == y)
def create_samples(shape, factor=100, dtype='int32'):
arrays = [
_create_samples_aux(shape, factor, dtype, i)
for i in range(len(shape))]
return sum(arrays)
def _create_samples_aux(shape, factor, dtype, i):
n = len(shape)
j = n - 1 - i
m = shape[i]
s = (factor ** j) * np.arange(m, dtype=dtype)
s.shape = (m,) + (1,) * j
return s
def create_test_audio_file_path(file_name):
dir_path = os.path.dirname(__file__)
return os.path.join(dir_path, 'data', 'Audio Files', file_name)
| [
"random.choice",
"numpy.arange",
"random.randrange",
"numpy.array",
"numpy.alltrue"
] | [((1429, 1472), 'random.choice', 'random.choice', (["('number', 'range', 'colon')"], {}), "(('number', 'range', 'colon'))\n", (1442, 1472), False, 'import random\n'), ((2398, 2416), 'numpy.alltrue', 'np.alltrue', (['(x == y)'], {}), '(x == y)\n', (2408, 2416), True, 'import numpy as np\n'), ((1524, 1543), 'random.randrange', 'random.randrange', (['n'], {}), '(n)\n', (1540, 1543), False, 'import random\n'), ((2738, 2763), 'numpy.arange', 'np.arange', (['m'], {'dtype': 'dtype'}), '(m, dtype=dtype)\n', (2747, 2763), True, 'import numpy as np\n'), ((1368, 1379), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1376, 1379), True, 'import numpy as np\n'), ((1597, 1620), 'random.randrange', 'random.randrange', (['(-n)', 'n'], {}), '(-n, n)\n', (1613, 1620), False, 'import random\n'), ((1636, 1659), 'random.randrange', 'random.randrange', (['(-n)', 'n'], {}), '(-n, n)\n', (1652, 1659), False, 'import random\n')] |
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
iris=datasets.load_iris()
X=iris.data[:,[0,3]]
y=iris.target
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)
#將樣本特徵進行標準化
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
sc.fit(X_train)
X_train_std=sc.transform(X_train)
X_test_std=sc.transform(X_test)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
acc_1=[]
acc_2=[]
for i in range(1,10):
knn=KNeighborsClassifier(n_neighbors=i,p=2,metric="minkowski")
knn.fit(X_train_std,y_train)
res_1=knn.predict(X_test_std)
acc_1.append(accuracy_score(y_test,res_1))
acc_1=np.round(acc_1,2)
#執行結果圖2[10,15,20~40]
for j in range(10,45,5):
knn = KNeighborsClassifier(n_neighbors=j, p=2, metric="minkowski")
knn.fit(X_train_std, y_train)
res_2 = knn.predict(X_test_std)
acc_2.append(accuracy_score(y_test, res_2))
acc_2 = np.round(acc_2, 2)
#結果圖1
plt.plot(range(1,10),acc_1)
plt.title("KNeighborsClassifier")
plt.xlabel("Number of nighbors")
plt.ylabel("Accuracy")
plt.yticks([0.93,0.96,0.98])
plt.grid("on")
plt.show()
#結果圖2
plt.plot(range(10,45,5),acc_2)
plt.title("KNeighborsClassifier")
plt.xlabel("Number of nighbors")
plt.ylabel("Accuracy")
plt.yticks([0.91,0.93,0.96,0.98])
plt.grid("on")
plt.show() | [
"matplotlib.pyplot.title",
"sklearn.datasets.load_iris",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.yticks",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.ylabel",
... | [((86, 106), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (104, 106), False, 'from sklearn import datasets\n'), ((226, 279), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (242, 279), False, 'from sklearn.model_selection import train_test_split\n'), ((342, 358), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (356, 358), False, 'from sklearn.preprocessing import StandardScaler\n'), ((763, 781), 'numpy.round', 'np.round', (['acc_1', '(2)'], {}), '(acc_1, 2)\n', (771, 781), True, 'import numpy as np\n'), ((1025, 1043), 'numpy.round', 'np.round', (['acc_2', '(2)'], {}), '(acc_2, 2)\n', (1033, 1043), True, 'import numpy as np\n'), ((1079, 1112), 'matplotlib.pyplot.title', 'plt.title', (['"""KNeighborsClassifier"""'], {}), "('KNeighborsClassifier')\n", (1088, 1112), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1145), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of nighbors"""'], {}), "('Number of nighbors')\n", (1123, 1145), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1168), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1156, 1168), True, 'import matplotlib.pyplot as plt\n'), ((1169, 1199), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.93, 0.96, 0.98]'], {}), '([0.93, 0.96, 0.98])\n', (1179, 1199), True, 'import matplotlib.pyplot as plt\n'), ((1198, 1212), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (1206, 1212), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1221, 1223), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1295), 'matplotlib.pyplot.title', 'plt.title', (['"""KNeighborsClassifier"""'], {}), "('KNeighborsClassifier')\n", (1271, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1328), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of nighbors"""'], {}), "('Number of nighbors')\n", (1306, 1328), True, 'import matplotlib.pyplot as plt\n'), ((1329, 1351), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1339, 1351), True, 'import matplotlib.pyplot as plt\n'), ((1352, 1388), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.91, 0.93, 0.96, 0.98]'], {}), '([0.91, 0.93, 0.96, 0.98])\n', (1362, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1386, 1400), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (1394, 1400), True, 'import matplotlib.pyplot as plt\n'), ((1401, 1411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1409, 1411), True, 'import matplotlib.pyplot as plt\n'), ((584, 644), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'i', 'p': '(2)', 'metric': '"""minkowski"""'}), "(n_neighbors=i, p=2, metric='minkowski')\n", (604, 644), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((838, 898), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'j', 'p': '(2)', 'metric': '"""minkowski"""'}), "(n_neighbors=j, p=2, metric='minkowski')\n", (858, 898), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((727, 756), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'res_1'], {}), '(y_test, res_1)\n', (741, 756), False, 'from sklearn.metrics import accuracy_score\n'), ((986, 1015), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'res_2'], {}), '(y_test, res_2)\n', (1000, 1015), False, 'from sklearn.metrics import accuracy_score\n')] |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from openmdao.api import ExplicitComponent
import warnings
def bdf3_cache_matrix(n, all_bdf=False):
    """
    Build the cached n x n integration matrix for the BDF3 multistep scheme.

    Any linear multistep method can be posed as [A] y = h [B] y'. Dropping the
    first column of each (the initial condition y1 is known) gives square
    (n-1) x (n-1) systems [a] and [b], and the unknown states follow from
        {y2..yN} = h inv([a]) [b] {y'2..y'N} + ones * y1
    BDF has no dependence on y'1, so the corresponding column is zero.
    The returned ``tri_mat`` is that inv([a])[b] block embedded in an n x n
    matrix with a zero first row and first column; ``repeat_mat`` is the last
    row of ``tri_mat`` tiled n times, used when chaining multiple subintervals
    (the first point of each subinterval equals the last point of the prior
    one).

    Parameters
    ----------
    n : int
        Number of nodes in the (sub)interval.
    all_bdf : bool
        If True, bootstrap the first two unknown points with BDF1/BDF2 instead
        of the default third-order offset finite-difference stencils.

    Returns
    -------
    tri_mat : scipy.sparse matrix
        n x n base Jacobian; multiply by the time step h to integrate.
    repeat_mat : scipy.sparse matrix
        n x n matrix repeating the final row of ``tri_mat``.
    """
    if all_bdf:
        # BDF1 / BDF2 bootstrap rows followed by the BDF3 stencil -18/11, 9/11, -2/11
        diag_p1 = np.zeros((n-1,))
        diag_0 = np.ones((n-1,))
        diag_m1 = np.ones((n-1,)) * -18/11
        diag_m1[0] = -4/3
        diag_m2 = np.ones((n-1,)) * 9/11
        diag_m3 = np.ones((n-1,)) * -2/11
        lhs = sp.diags([diag_p1, diag_0, diag_m1, diag_m2, diag_m3],
                       [1, 0, -1, -2, -3], shape=(n-1, n-1)).asformat('csc')
        rhs_diag = np.ones((n-1,)) * 6/11
        rhs_diag[0] = 1
        rhs_diag[1] = 2/3
    else:
        # third-order offset finite-difference stencils for the first two rows,
        # BDF3 from row three onward
        diag_p2 = np.zeros((n-1,))
        diag_p2[0] = -1/6
        diag_p1 = np.zeros((n-1,))
        diag_p1[0] = 1
        diag_p1[1] = 1/3
        diag_0 = np.ones((n-1,))
        diag_0[0] = -1/2
        diag_0[1] = 1/2
        diag_m1 = np.ones((n-1,)) * -18/11
        diag_m1[0] = -1
        diag_m2 = np.ones((n-1,)) * 9/11
        diag_m3 = np.ones((n-1,)) * -2/11
        lhs = sp.diags([diag_p2, diag_p1, diag_0, diag_m1, diag_m2, diag_m3],
                       [2, 1, 0, -1, -2, -3], shape=(n-1, n-1)).asformat('csc')
        rhs_diag = np.ones((n-1,)) * 6/11
        rhs_diag[0] = 1
        rhs_diag[1] = 1
    rhs = sp.diags([rhs_diag], [0])
    # base Jacobian inv([a]) [b], shifted down/right by one slot: the first row
    # stays zero (y1 is the known initial condition) and the first column stays
    # zero (this stiff method never uses y'1, i.e. [bv] = 0)
    base = sp.linalg.inv(lhs).dot(rhs).asformat('csr')
    rows, cols = base.nonzero()
    tri_mat = sp.csc_matrix((base.data, (rows + 1, cols + 1)))
    # dense block of the last row repeated n times, for multi-subinterval problems
    final_row = tri_mat.getrow(-1).toarray()
    repeat_mat = sp.csc_matrix(np.tile(final_row, n).reshape(n, n))
    return tri_mat, repeat_mat
def simpson_cache_matrix(n):
    """
    Build the cached n x n integration matrix for Simpson's rule.

    Simpson's rule expresses the increment over each half-interval as [B] dqdt
    using the repeating 3-point stencil (1/12) * [5, 8, -1; -1, 8, 5]. Posing
    the cumulative sum as [a] {q2..qN} = [b] dqdt + [bv] dqdt1 and solving
    yields the base Jacobian assembled here (same construction as in
    ``bdf3_cache_matrix``, but Simpson does use the initial rate dqdt1).

    Parameters
    ----------
    n : int
        Number of nodes; expected odd (2 * num_intervals + 1).

    Returns
    -------
    tri_mat : scipy.sparse matrix
        n x n base Jacobian; multiply by the time step h to integrate.
    repeat_mat : scipy.sparse matrix
        n x n matrix repeating the final row of ``tri_mat``.
    """
    # row pattern 0 0 0 1 1 1 2 2 2 ...; column pattern 0 1 2, 0 1 2, 2 3 4, 2 3 4, ...
    stencil_rows = np.repeat(np.arange(n - 1), 3)
    stencil_cols = np.repeat(np.arange(0, n - 1, 2), 6) + np.tile(np.arange(3), n - 1)
    stencil_vals = np.tile(np.array([5, 8, -1, -1, 8, 5]) / 12, (n - 1) // 2)
    quadrature = sp.csr_matrix((stencil_vals, (stencil_rows, stencil_cols)))
    # split off the first column: dependence on the rate at the initial node
    b_mat = quadrature[:, 1:]
    b_vec = quadrature[:, 0]
    # [a] is a backward-difference (running sum) operator
    a_mat = sp.diags([-1, 1], [-1, 0], shape=(n - 1, n - 1)).asformat('csc')
    a_inv = sp.linalg.inv(a_mat)
    # assemble the full n x n matrix: zero first row (initial value is given),
    # inv([a]) [bv] in the first column, inv([a]) [b] in the lower-right block
    zero_row = sp.csr_matrix(np.zeros((1, n - 1)))
    tri_mat = sp.bmat([[None, zero_row], [a_inv.dot(b_vec), a_inv.dot(b_mat)]])
    # dense block of the last row repeated n times, for multi-subinterval problems
    final_row = tri_mat.getrow(-1).toarray()
    repeat_mat = sp.csc_matrix(np.tile(final_row, n).reshape(n, n))
    return tri_mat, repeat_mat
def multistep_integrator(q0, dqdt, dts, tri_mat, repeat_mat, segment_names=None, segments_to_count=None, partials=True):
    """
    Integrate a rate vector using a precomputed multistep base Jacobian.

    Works with any cached matrix pair (e.g. from ``bdf3_cache_matrix`` or
    ``simpson_cache_matrix``). Segment j of ``dqdt`` is scaled by its own time
    step ``dts[j]``; ``tri_mat`` sits on the block diagonal and ``repeat_mat``
    fills the blocks below it so each segment continues from the end of the
    previous one.

    Parameters
    ----------
    q0 : float or ndarray
        Initial value added to the integrated result.
    dqdt : ndarray
        Rate vector of length n * len(dts), where n is nodes per segment.
    dts : list
        One time step per segment.
    tri_mat, repeat_mat : scipy.sparse matrices
        Cached n x n base Jacobian and its repeated-last-row companion.
    segment_names : list or None
        Names matching ``dts``; only used together with ``segments_to_count``.
    segments_to_count : list or None
        Segments to include; any named segment not listed contributes zero.
    partials : bool
        If False, return the integrated quantity Q.
        If True, return (dQ/d dqdt, [dQ/d dt_j for each segment j]).
    """
    n = int(len(dqdt) / len(dts))
    n_segments = len(dts)

    def _segment_counted(j):
        # a segment is skipped only when both name lists are provided and it is absent
        if segment_names is None or segments_to_count is None:
            return True
        return segment_names[j] in segments_to_count

    def _empty_block():
        return sp.csr_matrix(([], ([], [])), shape=(n, n))

    block_rows = []
    for i in range(n_segments):
        row_blocks = []
        for j in range(n_segments):
            if not _segment_counted(j):
                row_blocks.append(_empty_block())
            elif i > j:
                # below the diagonal: repeated last row carries the segment end forward
                row_blocks.append(repeat_mat * dts[j])
            elif i == j:
                row_blocks.append(tri_mat * dts[j])
            else:
                row_blocks.append(_empty_block())
        block_rows.append(row_blocks)
    dQdqdt = sp.bmat(block_rows).asformat('csr')
    if not partials:
        return dQdqdt.dot(dqdt) + q0
    # partials of Q with respect to each segment's time step
    dt_partials_list = []
    for j in range(n_segments):
        col_blocks = []
        for i in range(n_segments):
            if not _segment_counted(j):
                col_blocks.append([_empty_block()])
            elif i > j:
                col_blocks.append([repeat_mat])
            elif i == j:
                col_blocks.append([tri_mat])
            else:
                col_blocks.append([_empty_block()])
        dQddt = sp.bmat(col_blocks).dot(dqdt[j * n:(j + 1) * n])
        dt_partials_list.append(sp.csr_matrix(dQddt).transpose())
    return dQdqdt, dt_partials_list
# def three_point_lagrange_integration(dqdt, dts, num_segments=1, num_intervals=2,):
# """This method integrates a rate over time using a 3 point Lagrange interpolant
# Similar to Simpson's rule except extended to provide increments at every subinterval
# The number of points per segment nn_seg = (2 * num_intervals + 1)
# The total number of points is nn_tot = nn_seg * num_segments
# Inputs
# ------
# dqdt : float
# The rate dqdt to integrate into quantity q (vector, length nn_tot)
# dts : list
# A list of timesteps dt corresponding to each interval (length num_intervals)
# num_segments : int
# The number of segments to integrate with different dts
# num_intervals : int
# The number of Simpson / 3 point quadrature intervals per segment
# Returns
# -------
# delta_q : float
# Amount of q accumulated during each interval (vector, length num_segments * (nn_seg - 1)
# partials_wrt_dqdt : float
# The Jacobian of delta_q with respect to the input rate dqdt
# The result is a sparse matrix with num_segments * (nn_seg - 1) rows and nn_tot columns
# partials_wrt_dts : list
# A list of the Jacobians of delta_q with respect to the time steps
# There will be one sparse vector with num_segments * (nn_seg - 1) rows per segment
# But only (nn_seg - 1) rows will actually be populated
# """
# nn_seg = (2 * num_intervals + 1)
# ndelta_seg = 2 * num_intervals
# nn_tot = nn_seg * num_segments
# ndelta_tot = ndelta_seg * num_segments
# if len(dqdt) != nn_tot:
# raise ValueError('dqdt must be of the correct length. dqdt is of length ' + str(len(dqdt)) +
# ' the number of nodes should be' + str(nn_tot))
# if len(dts) != num_segments:
# raise ValueError('must provide same number of dts as segments')
# # first let us construct the basic three point quadrature jacobian which will be
# # multiplied by the timesteps to obtain the block matrices for the overall jacobian
# # the structure of this is (1/12) * the following:
# # 5 8 -1
# # -1 8 5
# # 5 8 -1
# # -1 8 5
# # 5 8 -1
# # -1 8 5 and so on
# # the row indices are basically 0 0 0 1 1 1 2 2 2 ....
# jacmat_rowidx = np.repeat(np.arange(ndelta_seg), 3)
# # the column indices are 0 1 2 0 1 2 2 3 4 2 3 4 4 5 6 and so on
# # so superimpose a 0 1 2 repeating pattern on a 0 0 0 0 0 0 2 2 2 2 2 2 2 repeating pattern
# jacmat_colidx = np.repeat(np.arange(0, ndelta_seg, 2), 6) + np.tile(np.arange(3), ndelta_seg)
# jacmat_data = np.tile(np.array([5, 8, -1, -1, 8, 5]) / 12, ndelta_seg // 2)
# jacmat_base = sp.csr_matrix((jacmat_data, (jacmat_rowidx, jacmat_colidx)))
# jacmats_list = []
# partials_wrt_dts = []
# for i_seg in range(num_segments):
# jacmats_list.append(jacmat_base * dts[i_seg])
# # get the vector of partials of q with respect to this time step
# dt_partials = jacmat_base.dot(dqdt[i_seg * nn_seg: (i_seg + 1) * nn_seg])
# # offset the sparse partials if not the first segment to make it work in OpenMDAO terms
# dt_partials_rowidxs = np.arange(i_seg * ndelta_seg, (i_seg + 1) * ndelta_seg)
# dt_partials_colidxs = np.zeros((ndelta_seg,), dtype=np.int32)
# partials_wrt_dts.append(sp.csr_matrix((dt_partials,
# (dt_partials_rowidxs, dt_partials_colidxs)),
# shape=(ndelta_tot, nn_tot)))
# # now assemble the overall sparse block diagonal matrix to obtain the final result
# partials_wrt_dqdt = sp.block_diag(jacmats_list)
# delta_q = partials_wrt_dqdt.dot(dqdt)
# return delta_q, partials_wrt_dqdt, partials_wrt_dts
# def trapezoid_integration(dqdt, dts, num_segments=1, num_intervals=2,):
# """This method integrates a rate over time using a 2 point Trapezoid rule
# For now this component is written to be interoperable with Simpson's rule,
# but the concept of subintervals is not strictly necessary.
# The number of points per segment nn_seg = (2 * num_intervals + 1)
# The total number of points is nn_tot = nn_seg * num_segments
# Inputs
# ------
# dqdt : float
# The rate dqdt to integrate into quantity q (vector, length nn_tot)
# dts : list
# A list of timesteps dt corresponding to each interval (length num_intervals)
# num_segments : int
# The number of segments to integrate with different dts
# num_intervals : int
# The number of Simpson / 3 point quadrature intervals per segment
# Returns
# -------
# delta_q : float
# Amount of q accumulated during each interval (vector, length num_segments * (nn_seg - 1)
# partials_wrt_dqdt : float
# The Jacobian of delta_q with respect to the input rate dqdt
# The result is a sparse matrix with num_segments * (nn_seg - 1) rows and nn_tot columns
# partials_wrt_dts : list
# A list of the Jacobians of delta_q with respect to the time steps
# There will be one sparse vector with num_segments * (nn_seg - 1) rows per segment
# But only (nn_seg - 1) rows will actually be populated
# """
# nn_seg = (2 * num_intervals + 1)
# ndelta_seg = 2 * num_intervals
# nn_tot = nn_seg * num_segments
# ndelta_tot = ndelta_seg * num_segments
# if len(dqdt) != nn_tot:
# raise ValueError('dqdt must be of the correct length. dqdt is of length ' + str(len(dqdt)) +
# ' the number of nodes should be' + str(nn_tot))
# if len(dts) != num_segments:
# raise ValueError('must provide same number of dts as segments')
# # the structure of this is (1/2) * the following:
# # 1 1
# # 1 1
# # 1 1 and so on
# # the row indices are basically 0 0 1 1 2 2 ....
# jacmat_rowidx = np.repeat(np.arange(ndelta_seg), 2)
# # the column indices are 0 1 1 2 2 3 3 4....
# # so superimpose a 0 1 repeating pattern on a 0 0 1 1 2 2 repeating pattern
# jacmat_colidx = np.tile(np.arange(2), ndelta_seg) + np.repeat(np.arange(0, ndelta_seg, 1), 2)
# jacmat_data = np.tile(np.array([1, 1]) / 2, ndelta_seg)
# jacmat_base = sp.csr_matrix((jacmat_data, (jacmat_rowidx, jacmat_colidx)))
# jacmats_list = []
# partials_wrt_dts = []
# for i_seg in range(num_segments):
# jacmats_list.append(jacmat_base * dts[i_seg])
# # get the vector of partials of q with respect to this time step
# dt_partials = jacmat_base.dot(dqdt[i_seg * nn_seg: (i_seg + 1) * nn_seg])
# # offset the sparse partials if not the first segment to make it work in OpenMDAO terms
# dt_partials_rowidxs = np.arange(i_seg * ndelta_seg, (i_seg + 1) * ndelta_seg)
# dt_partials_colidxs = np.zeros((ndelta_seg,), dtype=np.int32)
# partials_wrt_dts.append(sp.csr_matrix((dt_partials,
# (dt_partials_rowidxs, dt_partials_colidxs)),
# shape=(ndelta_tot, nn_tot)))
# # now assemble the overall sparse block diagonal matrix to obtain the final result
# partials_wrt_dqdt = sp.block_diag(jacmats_list)
# delta_q = partials_wrt_dqdt.dot(dqdt)
# return delta_q, partials_wrt_dqdt, partials_wrt_dts
# def backward_euler(dqdt, dts, num_segments=1, num_intervals=2,):
# """This method integrates a rate over time using a backward Euler method
# For now this component is written to be interoperable with Simpson's rule,
# but the concept of subintervals is not strictly necessary.
# The number of points per segment nn_seg = (2 * num_intervals + 1)
# The total number of points is nn_tot = nn_seg * num_segments
# Inputs
# ------
# dqdt : float
# The rate dqdt to integrate into quantity q (vector, length nn_tot)
# dts : list
# A list of timesteps dt corresponding to each interval (length num_intervals)
# num_segments : int
# The number of segments to integrate with different dts
# num_intervals : int
# The number of Simpson / 3 point quadrature intervals per segment
# Returns
# -------
# delta_q : float
# Amount of q accumulated during each interval (vector, length num_segments * (nn_seg - 1)
# partials_wrt_dqdt : float
# The Jacobian of delta_q with respect to the input rate dqdt
# The result is a sparse matrix with num_segments * (nn_seg - 1) rows and nn_tot columns
# partials_wrt_dts : list
# A list of the Jacobians of delta_q with respect to the time steps
# There will be one sparse vector with num_segments * (nn_seg - 1) rows per segment
# But only (nn_seg - 1) rows will actually be populated
# """
# nn_seg = (2 * num_intervals + 1)
# ndelta_seg = 2 * num_intervals
# nn_tot = nn_seg * num_segments
# ndelta_tot = ndelta_seg * num_segments
# if len(dqdt) != nn_tot:
# raise ValueError('dqdt must be of the correct length. dqdt is of length ' + str(len(dqdt)) +
# ' the number of nodes should be' + str(nn_tot))
# if len(dts) != num_segments:
# raise ValueError('must provide same number of dts as segments')
# # the structure of this is the following:
# # 0 1
# # 0 0 1
# # 0 0 0 1 1 and so on
# # the row indices are 0, 1, 2 ... n_delta seg
# jacmat_rowidx = np.arange(ndelta_seg)
# # the column indices are 1, 2, 3, ....
# jacmat_colidx = np.arange(1, ndelta_seg + 1, 1)
# jacmat_data = np.tile(np.array([1]), ndelta_seg)
# jacmat_base = sp.csr_matrix((jacmat_data, (jacmat_rowidx, jacmat_colidx)))
# jacmats_list = []
# partials_wrt_dts = []
# for i_seg in range(num_segments):
# jacmats_list.append(jacmat_base * dts[i_seg])
# # get the vector of partials of q with respect to this time step
# dt_partials = jacmat_base.dot(dqdt[i_seg * nn_seg: (i_seg + 1) * nn_seg])
# # offset the sparse partials if not the first segment to make it work in OpenMDAO terms
# dt_partials_rowidxs = np.arange(i_seg * ndelta_seg, (i_seg + 1) * ndelta_seg)
# dt_partials_colidxs = np.zeros((ndelta_seg,), dtype=np.int32)
# partials_wrt_dts.append(sp.csr_matrix((dt_partials,
# (dt_partials_rowidxs, dt_partials_colidxs)),
# shape=(ndelta_tot, nn_tot)))
# # now assemble the overall sparse block diagonal matrix to obtain the final result
# partials_wrt_dqdt = sp.block_diag(jacmats_list)
# delta_q = partials_wrt_dqdt.dot(dqdt)
# return delta_q, partials_wrt_dqdt, partials_wrt_dts
# def integrator_partials_wrt_deltas(num_segments, num_intervals):
# """
# This function computes partials of an integrated quantity with respect to the "delta quantity per interval"
# in the context of openConcept's Simpson's rule approximated integration technique.
# Inputs
# ------
# num_segments : float
# Number of mission segments to integrate (scalar)
# num_intervals : float
# Number of Simpson intervals per segment (scalar)
# Outputs
# -------
# partial_q_wrt_deltas : float
# A sparse (CSR) matrix representation of the partial derivatives of q
# with respect to the delta quantity per half-interval
# Dimension is nn * num_segments (rows) by (nn -1) * num_segments (cols)
# where nn = (2 * num_intervals + 1)
# """
# nn = num_intervals * 2 + 1
# # the basic structure of the jacobian is lower triangular (all late values depend on all early ones)
# jacmat = np.tril(np.ones((num_segments*(nn-1),num_segments*(nn-1))))
# # the first entry of q has no dependence on the deltas so insert a row of zeros
# jacmat = np.insert(jacmat,0,np.zeros(num_segments*(nn-1)),axis=0)
# for i in range(1,num_segments):
# # since the end of each segment is equal to the beginning of the next
# # duplicate the jacobian row once at the end of each segment
# duplicate_row = jacmat[nn*i-1,:]
# jacmat = np.insert(jacmat,nn*i,duplicate_row,axis=0)
# partials_q_wrt_deltas = sp.csr_matrix(jacmat)
# return partials_q_wrt_deltas
class Integrator(ExplicitComponent):
    """
    Integrates rate variables implicitly.

    Add new integrated quantities by using the add_integrand method.
    "q" inputs here are illustrative only.

    Inputs
    ------
    duration : float
        The duration of the integration interval (can also use dt) (scalar)
    dq_dt : float
        Rate to integrate (vector)
    q_initial : float
        Starting value of quantity (scalar)

    Outputs
    -------
    q : float
        The vector quantity corresponding integral of dqdt over time
        Will have units 'rate_units' / 'diff_units'
    q_final : float
        The final value of the vector (scalar)
        Useful for connecting the end of one integrator to beginning of another

    Options
    -------
    num_nodes : int
        num_nodes = 2N + 1 where N = num_intervals
        The total length of the vector q is 2N + 1
    diff_units : str
        The units of the integrand (none by default)
    method : str
        Numerical method (default 'bdf3'; alternatively, 'simpson')
    time_setup : str
        Time configuration (default 'dt')
        'dt' creates input 'dt'
        'duration' creates input 'duration'
        'bounds' creates inputs 't_initial', 't_final'
    """
    def __init__(self, **kwargs):
        super(Integrator, self).__init__(**kwargs)
        # registry of integrated quantities added via add_integrand
        self._state_vars = {}
        num_nodes = self.options['num_nodes']
        method = self.options['method']
        # both cached schemes require an odd node count (2N + 1)
        if (num_nodes - 1) % 2 > 0:
            raise ValueError('num_nodes is ' +str(num_nodes) + ' and must be odd')
        # cache the base integration matrices up front so add_integrand can use
        # them to determine the sparsity pattern of the partials
        if num_nodes > 1:
            if method == 'bdf3':
                self.tri_mat, self.repeat_mat = bdf3_cache_matrix(num_nodes)
            elif method == 'simpson':
                self.tri_mat, self.repeat_mat = simpson_cache_matrix(num_nodes)

    def initialize(self):
        self.options.declare('diff_units',default=None, desc="Units of the differential")
        self.options.declare('num_nodes',default=11, desc="Analysis points per segment")
        self.options.declare('method',default='bdf3', desc="Numerical method to use.")
        self.options.declare('time_setup',default='dt')

    def add_integrand(self, name, rate_name=None, start_name=None, end_name=None, val=0.0, start_val=0.0,
                      units=None, rate_units=None, zero_start=False, final_only=False, lower=-1e30, upper=1e30):
        """
        Add a new integrated variable q = integrate(dqdt) + q0
        This will add an output with the integrated quantity, an output with the final value,
        an input with the rate source, and an input for the initial quantity.

        Parameters
        ----------
        name : str
            The name of the integrated variable to be created.
        rate_name : str
            The name of the input rate (default name"_rate")
        start_name : str
            The name of the initial value input (default value name"_initial")
        end_name : str
            The name of the end value output (default value name"_final")
        units : str or None
            Units for the integrated quantity (or inferred automatically from rate_units)
        rate_units : str or None
            Units of the rate (can be inferred automatically from units)
        zero_start : bool
            If true, eliminates start value input and always begins from zero (default False)
        final_only : bool
            If true, only integrates final quantity, not all the intermediate points (default False)
        val : float
            Default value for the integrated output (default 0.0)
            Can be scalar or shape num_nodes
        start_val : float
            Default value for the initial value input (default 0.0)
        upper : float
            Upper bound on integrated quantity
        lower : float
            Lower bound on integrated quantity
        """
        num_nodes = self.options['num_nodes']
        diff_units = self.options['diff_units']
        time_setup = self.options['time_setup']

        if units and rate_units:
            raise ValueError('Specify either quantity units or rate units, but not both')
        if units:
            # infer rate units from diff units and quantity units
            if not diff_units:
                rate_units = units
                warnings.warn('You have specified a integral with respect to a unitless integrand. Be aware of this.')
            else:
                rate_units = '('+units+') / (' + diff_units +')'
        elif rate_units:
            # infer quantity units from rate units and diff units
            if not diff_units:
                units = rate_units
                warnings.warn('You have specified a integral with respect to a unitless integrand. Be aware of this.')
            else:
                units = '('+rate_units+') * (' + diff_units + ')'
        elif diff_units:
            # neither quantity nor rate units specified
            rate_units = '(' + diff_units +')** -1'

        # fill in default I/O names derived from the quantity name
        if not rate_name:
            rate_name = name + '_rate'
        if not start_name:
            start_name = name + '_initial'
        if not end_name:
            end_name = name + '_final'

        options = {'name': name,
                   'rate_name': rate_name,
                   'start_name': start_name,
                   'start_val': start_val,
                   'end_name': end_name,
                   'units': units,
                   'rate_units': rate_units,
                   'zero_start': zero_start,
                   'final_only': final_only,
                   'upper': upper,
                   'lower': lower}

        # TODO maybe later can pass kwargs
        self._state_vars[name] = options
        if not hasattr(val, '__len__'):
            # scalar
            default_final_val = val
        else:
            # vector
            default_final_val = val[-1]

        self.add_input(rate_name, val=0.0, shape=(num_nodes), units=rate_units)
        self.add_output(end_name, units=units, val=default_final_val, upper=options['upper'],lower=options['lower'])
        if not final_only:
            self.add_output(name, shape=(num_nodes), val=val, units=units, upper=options['upper'],lower=options['lower'])
        if not zero_start:
            self.add_input(start_name, val=start_val, units=units)
            if not final_only:
                # every node depends linearly (slope 1) on the initial value
                self.declare_partials([name], [start_name], rows=np.arange(num_nodes), cols=np.zeros((num_nodes,)), val=np.ones((num_nodes,)))
            self.declare_partials([end_name], [start_name], val=1)

        # set up sparse partial structure using the cached base Jacobian
        if num_nodes > 1:
            # single point analysis has no dqdt dependency since the outputs are equal to the inputs
            dQdrate, dQddtlist = multistep_integrator(0, np.ones((num_nodes,)), np.ones((1,)), self.tri_mat, self.repeat_mat,
                                                      segment_names=None, segments_to_count=None, partials=True)
            dQdrate_indices = dQdrate.nonzero()
            dQfdrate_indices = dQdrate.getrow(-1).nonzero()
            if not final_only:
                self.declare_partials([name], [rate_name], rows=dQdrate_indices[0], cols=dQdrate_indices[1])
            self.declare_partials([end_name], [rate_name], rows=dQfdrate_indices[0], cols=dQfdrate_indices[1]) # rows are zeros

            dQddt_seg = dQddtlist[0]
            dQddt_indices = dQddt_seg.nonzero()
            dQfddt_indices = dQddt_seg.getrow(-1).nonzero()
            if time_setup == 'dt':
                if not final_only:
                    self.declare_partials([name], ['dt'], rows=dQddt_indices[0], cols=dQddt_indices[1])
                self.declare_partials([end_name], ['dt'], rows=dQfddt_indices[0], cols=dQfddt_indices[1])
            elif time_setup == 'duration':
                if not final_only:
                    self.declare_partials([name], ['duration'], rows=dQddt_indices[0], cols=dQddt_indices[1])
                self.declare_partials([end_name], ['duration'], rows=dQfddt_indices[0], cols=dQfddt_indices[1])
            elif time_setup == 'bounds':
                if not final_only:
                    self.declare_partials([name], ['t_initial','t_final'], rows=dQddt_indices[0], cols=dQddt_indices[1])
                self.declare_partials([end_name], ['t_initial','t_final'], rows=dQfddt_indices[0], cols=dQfddt_indices[1])
            else:
                raise ValueError('Only dt, duration, and bounds are allowable values of time_setup')

    def setup(self):
        diff_units = self.options['diff_units']
        num_nodes = self.options['num_nodes']
        method = self.options['method']
        time_setup = self.options['time_setup']

        # branch logic here for the corner case of 0 segments
        # so point analysis can be run without breaking everything
        if num_nodes == 1:
            single_point = True
        else:
            single_point = False
        if not single_point:
            # recompute the cached matrices (also done in __init__) in case
            # options changed between instantiation and setup
            if method == 'bdf3':
                self.tri_mat, self.repeat_mat = bdf3_cache_matrix(num_nodes)
            elif method == 'simpson':
                self.tri_mat, self.repeat_mat = simpson_cache_matrix(num_nodes)

        if time_setup == 'dt':
            self.add_input('dt', units=diff_units, desc='Time step')
        elif time_setup == 'duration':
            self.add_input('duration', units=diff_units, desc='Time duration')
        elif time_setup == 'bounds':
            self.add_input('t_initial', units=diff_units, desc='Initial time')
            # BUGFIX: desc previously said 'Initial time' (copy-paste error)
            self.add_input('t_final', units=diff_units, desc='Final time')
        else:
            raise ValueError('Only dt, duration, and bounds are allowable values of time_setup')

    def compute(self, inputs, outputs):
        num_nodes = self.options['num_nodes']
        time_setup=self.options['time_setup']
        if num_nodes == 1:
            single_point = True
        else:
            single_point = False
        # NOTE(review): if time_setup were invalid, dts would never be assigned
        # here; setup() has already validated it, so no else branch is needed
        if time_setup == 'dt':
            dts = [inputs['dt'][0]]
        elif time_setup == 'duration':
            if num_nodes == 1:
                dts = [inputs['duration'][0]]
            else:
                dts = [inputs['duration'][0]/(num_nodes-1)]
        elif time_setup == 'bounds':
            delta_t = inputs['t_final'] - inputs['t_initial']
            dts = [delta_t[0]/(num_nodes-1)]
        for name, options in self._state_vars.items():
            if options['zero_start']:
                q0 = np.array([0.0])
            else:
                q0 = inputs[options['start_name']]
            if not single_point:
                Q = multistep_integrator(q0, inputs[options['rate_name']], dts, self.tri_mat, self.repeat_mat,
                                         segment_names=None, segments_to_count=None, partials=False)
            else:
                # single point case, no change, no dependence on time
                Q = q0
            if not options['final_only']:
                outputs[options['name']] = Q
            outputs[options['end_name']] = Q[-1]

    def compute_partials(self, inputs, J):
        num_nodes = self.options['num_nodes']
        time_setup = self.options['time_setup']

        if num_nodes == 1:
            single_point = True
        else:
            single_point = False
        # NOTE(review): in the single_point case dts and self.tri_mat are never
        # defined, so the loop below would raise if OpenMDAO called this with
        # num_nodes == 1 — presumably no partials fire then; confirm upstream
        if not single_point:
            if time_setup == 'dt':
                dts = [inputs['dt'][0]]
            elif time_setup == 'duration':
                dts = [inputs['duration'][0]/(num_nodes-1)]
            elif time_setup == 'bounds':
                delta_t = inputs['t_final'] - inputs['t_initial']
                dts = [delta_t[0]/(num_nodes-1)]
        for name, options in self._state_vars.items():
            start_name = options['start_name']
            end_name = options['end_name']
            qty_name = options['name']
            rate_name = options['rate_name']
            final_only = options['final_only']
            if options['zero_start']:
                q0 = 0
            else:
                q0 = inputs[start_name]
            dQdrate, dQddtlist = multistep_integrator(q0, inputs[rate_name], dts, self.tri_mat, self.repeat_mat,
                                                      segment_names=None, segments_to_count=None, partials=True)
            if not final_only:
                J[qty_name, rate_name] = dQdrate.data
            J[end_name, rate_name] = dQdrate.getrow(-1).data

            if time_setup == 'dt':
                # first row of the dt partial is the fixed initial value; drop it
                if not final_only:
                    J[qty_name, 'dt'] = np.squeeze(dQddtlist[0].toarray()[1:])
                J[end_name, 'dt'] = np.squeeze(dQddtlist[0].getrow(-1).toarray())
            elif time_setup == 'duration':
                # dt = duration / (num_nodes - 1), so chain rule divides through
                if not final_only:
                    J[qty_name, 'duration'] = np.squeeze(dQddtlist[0].toarray()[1:] / (num_nodes - 1))
                J[end_name, 'duration'] = np.squeeze(dQddtlist[0].getrow(-1).toarray() / (num_nodes - 1))
            elif time_setup == 'bounds':
                # dt = (t_final - t_initial) / (num_nodes - 1): equal and
                # opposite sensitivities; guard against an all-zero sparse row
                if not final_only:
                    if len(dQddtlist[0].data) == 0:
                        J[qty_name, 't_initial'] = np.zeros(J[qty_name, 't_initial'].shape)
                        J[qty_name, 't_final'] = np.zeros(J[qty_name, 't_final'].shape)
                    else:
                        J[qty_name, 't_initial'] = -dQddtlist[0].data / (num_nodes - 1)
                        J[qty_name, 't_final'] = dQddtlist[0].data / (num_nodes - 1)
                if len(dQddtlist[0].getrow(-1).data) == 0:
                    J[end_name, 't_initial'] = 0
                    J[end_name, 't_final'] = 0
                else:
                    J[end_name, 't_initial'] = -dQddtlist[0].getrow(-1).data / (num_nodes - 1)
                    J[end_name, 't_final'] = dQddtlist[0].getrow(-1).data / (num_nodes - 1)
class OldIntegrator(ExplicitComponent):
"""
This component integrates a vector using a BDF3 formulation
with 2nd order startup.
Inputs
------
dqdt : float
The vector quantity to integrate.
Length of the vector = (2 * num_intervals + 1) * num_segments
segment|dt : float
The timestep of "segment" (scalar)
1 per segment
q_initial : float
Starting value of the quantity (scalar)
Outputs
-------
q : float
The vector quantity corresponding integral of dqdt over time
Will have units 'rate_units' / 'diff_units'
q_final : float
The final value of the vector (scalar)
Useful for connecting the end of one integrator to beginning of another
Options
-------
segment_names : list
A list of str with the names of the individual segments
By default, if no segment_names are provided, one segment will be assumed and segment|dt will just be named "dt"
segments_to_count : list
A list of str with the names of segments to be included in the integration.
By default, ALL segments will be included.
num_nodes : int
num_nodes = 2N + 1 where N = num_intervals
The total length of the vector q is n_segments x (2N + 1)
quantity_units : str
The units of quantity being integrated (not the rate)
diff_units : str
The units of the integrand (none by default)
rate_units : str
The units of the rate being integrated
method : str
Numerical method (default 'bdf3'; alternatively, 'simpson)
zero_start : bool
If True, disables q_initial input (default False)
final_only : bool
If True, disables q output (q_final only) (default False)
time_setup : str
Time configuration (default 'dt')
'dt' creates input 'dt'
'duration' creates input 'duration'
'bounds' creates inputs 't_initial', 't_final'
"""
def initialize(self):
self.options.declare('segment_names', default=None, desc="Names of differentiation segments")
self.options.declare('segments_to_count', default=None, desc="Names of differentiation segments")
self.options.declare('quantity_units',default=None, desc="Units of the quantity being differentiated")
self.options.declare('diff_units',default=None, desc="Units of the differential")
self.options.declare('rate_units',default=None, desc="Units of the rate being integrated")
self.options.declare('num_nodes',default=11, desc="Analysis points per segment")
self.options.declare('method',default='bdf3', desc="Numerical method to use.")
self.options.declare('zero_start',default=False)
self.options.declare('final_only',default=False)
self.options.declare('lower',default=-1e30)
self.options.declare('upper',default=1e30)
self.options.declare('time_setup',default='dt')
    def setup(self):
        """Declare inputs, outputs, and sparse partials based on the options.

        Caches the integration matrices for the chosen scheme, derives the
        rate units from the quantity/differential units, and declares the
        time input(s) according to the ``time_setup`` option.
        """
        segment_names = self.options['segment_names']
        segments_to_count = self.options['segments_to_count']
        quantity_units = self.options['quantity_units']
        diff_units = self.options['diff_units']
        num_nodes = self.options['num_nodes']
        method = self.options['method']
        zero_start = self.options['zero_start']
        final_only = self.options['final_only']
        time_setup = self.options['time_setup']
        # check to make sure num nodes is OK
        if (num_nodes - 1) % 2 > 0:
            raise ValueError('num_nodes must be odd')
        # branch logic here for the corner case of 0 segments
        # so point analysis can be run without breaking everything
        if num_nodes == 1:
            single_point = True
        else:
            single_point = False
        if not single_point:
            # precompute and cache the integration matrices for the chosen scheme
            if method == 'bdf3':
                self.tri_mat, self.repeat_mat = bdf3_cache_matrix(num_nodes)
            elif method == 'simpson':
                self.tri_mat, self.repeat_mat = simpson_cache_matrix(num_nodes)
        if segment_names is None:
            n_segments = 1
        else:
            n_segments = len(segment_names)
        nn_tot = num_nodes * n_segments
        # TODO enable specifying rate units
        # rate units are derived as (quantity units) / (differential units)
        if quantity_units is None and diff_units is None:
            rate_units = None
        elif quantity_units is None:
            rate_units = '(' + diff_units +')** -1'
        elif diff_units is None:
            rate_units = quantity_units
            warnings.warn('You have specified a integral with respect to a unitless integrand. Be aware of this.')
        else:
            rate_units = '('+quantity_units+') / (' + diff_units +')'
        # the output of this function is of length nn - 1. NO partial for first row (initial value)
        # get the partials of the delta quantities WRT the rates dDelta / drate
        self.add_input('dqdt', val=0, units=rate_units, desc='Quantity to integrate',shape=(nn_tot,))
        self.add_output('q_final', units=quantity_units, desc='Final value of q',upper=self.options['upper'],lower=self.options['lower'])
        if not final_only:
            self.add_output('q', units=quantity_units, desc='Integral of dqdt', shape=(nn_tot,),upper=self.options['upper'],lower=self.options['lower'])
        if not zero_start:
            self.add_input('q_initial', val=0, units=quantity_units, desc='Initial value')
            # dq/dq_initial is identity: every node shifts with the initial value
            if not final_only:
                self.declare_partials(['q'], ['q_initial'], rows=np.arange(nn_tot), cols=np.zeros((nn_tot,)), val=np.ones((nn_tot,)))
            self.declare_partials(['q_final'], ['q_initial'], val=1)
        if not single_point:
            # single point analysis has no dqdt dependency since the outputs are equal to the inputs
            # probe the integrator once with dummy data to obtain the sparsity pattern
            dQdrate, dQddtlist = multistep_integrator(0, np.ones((nn_tot,)), np.ones((n_segments,)), self.tri_mat, self.repeat_mat,
                                                      segment_names=segment_names, segments_to_count=segments_to_count, partials=True)
            dQdrate_indices = dQdrate.nonzero()
            dQfdrate_indices = dQdrate.getrow(-1).nonzero()
            if not final_only:
                self.declare_partials(['q'], ['dqdt'], rows=dQdrate_indices[0], cols=dQdrate_indices[1])
            self.declare_partials(['q_final'], ['dqdt'], rows=dQfdrate_indices[0], cols=dQfdrate_indices[1]) # rows are zeros
            if segment_names is None:
                dQddt_seg = dQddtlist[0]
                dQddt_indices = dQddt_seg.nonzero()
                dQfddt_indices = dQddt_seg.getrow(-1).nonzero()
                if time_setup == 'dt':
                    self.add_input('dt', units=diff_units, desc='Time step')
                    if not final_only:
                        self.declare_partials(['q'], ['dt'], rows=dQddt_indices[0], cols=dQddt_indices[1])
                    self.declare_partials(['q_final'], ['dt'], rows=dQfddt_indices[0], cols=dQfddt_indices[1])
                elif time_setup == 'duration':
                    self.add_input('duration', units=diff_units, desc='Time duration')
                    if not final_only:
                        self.declare_partials(['q'], ['duration'], rows=dQddt_indices[0], cols=dQddt_indices[1])
                    self.declare_partials(['q_final'], ['duration'], rows=dQfddt_indices[0], cols=dQfddt_indices[1])
                elif time_setup == 'bounds':
                    self.add_input('t_initial', units=diff_units, desc='Initial time')
                    # NOTE(review): desc says 'Initial time' for t_final -- likely a copy-paste slip
                    self.add_input('t_final', units=diff_units, desc='Initial time')
                    if not final_only:
                        self.declare_partials(['q'], ['t_initial','t_final'], rows=dQddt_indices[0], cols=dQddt_indices[1])
                    self.declare_partials(['q_final'], ['t_initial','t_final'], rows=dQfddt_indices[0], cols=dQfddt_indices[1])
                else:
                    raise ValueError('Only dt, duration, and bounds are allowable values of time_setup')
            else:
                if time_setup != 'dt':
                    raise ValueError('dt is the only time_setup supported for multisegment integrations')
                for i_seg, segment_name in enumerate(segment_names):
                    self.add_input(segment_name +'|dt', units=diff_units, desc='Time step')
                    dQddt_seg = dQddtlist[i_seg]
                    dQddt_indices = dQddt_seg.nonzero()
                    dQfddt_indices = dQddt_seg.getrow(-1).nonzero()
                    if not final_only:
                        self.declare_partials(['q'], [segment_name +'|dt'], rows=dQddt_indices[0], cols=dQddt_indices[1])
                    self.declare_partials(['q_final'], [segment_name +'|dt'], rows=dQfddt_indices[0], cols=dQfddt_indices[1])
        else:
            # single point: only declare the time input(s); no partials are needed
            if time_setup == 'dt':
                self.add_input('dt', units=diff_units, desc='Time step')
            elif time_setup == 'duration':
                self.add_input('duration', units=diff_units, desc='Time duration')
            elif time_setup == 'bounds':
                self.add_input('t_initial', units=diff_units, desc='Initial time')
                self.add_input('t_final', units=diff_units, desc='Initial time')
            else:
                raise ValueError('Only dt, duration, and bounds are allowable values of time_setup')
def compute(self, inputs, outputs):
segment_names = self.options['segment_names']
num_nodes = self.options['num_nodes']
segments_to_count = self.options['segments_to_count']
zero_start = self.options['zero_start']
final_only = self.options['final_only']
time_setup=self.options['time_setup']
if num_nodes == 1:
single_point = True
else:
single_point = False
if segment_names is None:
n_segments = 1
if time_setup == 'dt':
dts = [inputs['dt'][0]]
elif time_setup == 'duration':
if num_nodes == 1:
dts = [inputs['duration'][0]]
else:
dts = [inputs['duration'][0]/(num_nodes-1)]
elif time_setup == 'bounds':
delta_t = inputs['t_final'] - inputs['t_initial']
dts = [delta_t[0]/(num_nodes-1)]
else:
n_segments = len(segment_names)
dts = []
for i_seg, segment_name in enumerate(segment_names):
input_name = segment_name+'|dt'
dts.append(inputs[input_name][0])
if zero_start:
q0 = 0
else:
q0 = inputs['q_initial']
if not single_point:
Q = multistep_integrator(q0, inputs['dqdt'], dts, self.tri_mat, self.repeat_mat,
segment_names=segment_names, segments_to_count=segments_to_count, partials=False)
else:
# single point case, no change, no dependence on time
Q = q0
if not final_only:
outputs['q'] = Q
outputs['q_final'] = Q[-1]
    def compute_partials(self, inputs, J):
        """Populate ``J`` with the sparse partials of ``q`` / ``q_final``.

        Re-derives the per-segment time steps from the inputs, asks
        ``multistep_integrator`` for the analytic partials, and scatters
        them into the Jacobian using the sparsity declared in ``setup``.
        """
        segment_names = self.options['segment_names']
        quantity_units = self.options['quantity_units']
        diff_units = self.options['diff_units']
        num_nodes = self.options['num_nodes']
        segments_to_count = self.options['segments_to_count']
        zero_start = self.options['zero_start']
        final_only = self.options['final_only']
        time_setup = self.options['time_setup']
        if num_nodes == 1:
            single_point = True
        else:
            single_point = False
        # NOTE(review): ``single_point`` guards only the nn_tot computation; the
        # integrator call below runs regardless -- presumably this method is never
        # invoked for num_nodes == 1 (no partials are declared then). Confirm.
        if not single_point:
            if segment_names is None:
                n_segments = 1
            else:
                n_segments = len(segment_names)
            nn_tot = num_nodes * n_segments
        if segment_names is None:
            n_segments = 1
            if time_setup == 'dt':
                dts = [inputs['dt'][0]]
            elif time_setup == 'duration':
                dts = [inputs['duration'][0]/(num_nodes-1)]
            elif time_setup == 'bounds':
                delta_t = inputs['t_final'] - inputs['t_initial']
                dts = [delta_t[0]/(num_nodes-1)]
        else:
            n_segments = len(segment_names)
            dts = []
            for i_seg, segment_name in enumerate(segment_names):
                input_name = segment_name+'|dt'
                dts.append(inputs[input_name][0])
        if zero_start:
            q0 = 0
        else:
            q0 = inputs['q_initial']
        # partials=True returns dQ/drate (sparse) and a per-segment list of dQ/ddt
        dQdrate, dQddtlist = multistep_integrator(q0, inputs['dqdt'], dts, self.tri_mat, self.repeat_mat,
                                                  segment_names=segment_names, segments_to_count=segments_to_count, partials=True)
        if not final_only:
            J['q','dqdt'] = dQdrate.data
        J['q_final', 'dqdt'] = dQdrate.getrow(-1).data
        if segment_names is None:
            if time_setup == 'dt':
                if not final_only:
                    # if len(dQddtlist[0].data) == 0:
                    #     J['q','dt'] = np.zeros(J['q','dt'].shape)
                    # else:
                    #     J['q','dt'] = dQddtlist[0].data
                    J['q','dt'] = np.squeeze(dQddtlist[0].toarray()[1:])
                # if len(dQddtlist[0].getrow(-1).data) == 0:
                #     J['q_final','dt'] = 0
                # else:
                #     J['q_final','dt'] = dQddtlist[0].getrow(-1).data
                J['q_final','dt'] = np.squeeze(dQddtlist[0].getrow(-1).toarray())
            elif time_setup == 'duration':
                if not final_only:
                    # if len(dQddtlist[0].data) == 0:
                    #     J['q','duration'] = np.zeros(J['q','duration'].shape)
                    # else:
                    #     J['q','duration'] = dQddtlist[0].data / (num_nodes - 1)
                    J['q','duration'] = np.squeeze(dQddtlist[0].toarray()[1:] / (num_nodes - 1))
                # if len(dQddtlist[0].getrow(-1).data) == 0:
                #     J['q_final','duration'] = 0
                # else:
                #     J['q_final','duration'] = dQddtlist[0].getrow(-1).data / (num_nodes - 1)
                J['q_final','duration'] = np.squeeze(dQddtlist[0].getrow(-1).toarray() / (num_nodes - 1))
            elif time_setup == 'bounds':
                if not final_only:
                    if len(dQddtlist[0].data) == 0:
                        J['q','t_initial'] = np.zeros(J['q','t_initial'].shape)
                        J['q','t_final'] = np.zeros(J['q','t_final'].shape)
                    else:
                        # dt depends on the bounds as (t_final - t_initial)/(N-1)
                        J['q','t_initial'] = -dQddtlist[0].data / (num_nodes - 1)
                        J['q','t_final'] = dQddtlist[0].data / (num_nodes - 1)
                if len(dQddtlist[0].getrow(-1).data) == 0:
                    J['q_final','t_initial'] = 0
                    J['q_final','t_final'] = 0
                else:
                    J['q_final','t_initial'] = -dQddtlist[0].getrow(-1).data / (num_nodes - 1)
                    J['q_final','t_final'] = dQddtlist[0].getrow(-1).data / (num_nodes - 1)
        else:
            # multi-segment: one dt partial per named segment
            for i_seg, segment_name in enumerate(segment_names):
                if not final_only:
                    J['q',segment_name+'|dt'] = dQddtlist[i_seg].data
J['q_final',segment_name+'|dt'] = dQddtlist[i_seg].getrow(-1).data | [
"scipy.sparse.diags",
"numpy.zeros",
"numpy.ones",
"scipy.sparse.bmat",
"scipy.sparse.csc_matrix",
"scipy.sparse.csr_matrix",
"scipy.sparse.linalg.inv",
"numpy.arange",
"numpy.array",
"numpy.tile",
"warnings.warn"
] | [((4649, 4672), 'scipy.sparse.diags', 'sp.diags', (['[b_diag]', '[0]'], {}), '([b_diag], [0])\n', (4657, 4672), True, 'import scipy.sparse as sp\n'), ((5121, 5178), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(C.data, (indices[0] + 1, indices[1] + 1))'], {}), '((C.data, (indices[0] + 1, indices[1] + 1)))\n', (5134, 5178), True, 'import scipy.sparse as sp\n'), ((6251, 6311), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(jacmat_data, (jacmat_rowidx, jacmat_colidx))'], {}), '((jacmat_data, (jacmat_rowidx, jacmat_colidx)))\n', (6264, 6311), True, 'import scipy.sparse as sp\n'), ((6441, 6457), 'scipy.sparse.linalg.inv', 'sp.linalg.inv', (['a'], {}), '(a)\n', (6454, 6457), True, 'import scipy.sparse as sp\n'), ((6565, 6608), 'scipy.sparse.bmat', 'sp.bmat', (['[[None, first_row_zeros], [cv, c]]'], {}), '([[None, first_row_zeros], [cv, c]])\n', (6572, 6608), True, 'import scipy.sparse as sp\n'), ((3405, 3423), 'numpy.zeros', 'np.zeros', (['(n - 1,)'], {}), '((n - 1,))\n', (3413, 3423), True, 'import numpy as np\n'), ((3468, 3485), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (3475, 3485), True, 'import numpy as np\n'), ((4031, 4049), 'numpy.zeros', 'np.zeros', (['(n - 1,)'], {}), '((n - 1,))\n', (4039, 4049), True, 'import numpy as np\n'), ((4094, 4112), 'numpy.zeros', 'np.zeros', (['(n - 1,)'], {}), '((n - 1,))\n', (4102, 4112), True, 'import numpy as np\n'), ((4180, 4197), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (4187, 4197), True, 'import numpy as np\n'), ((5884, 5900), 'numpy.arange', 'np.arange', (['(n - 1)'], {}), '(n - 1)\n', (5893, 5900), True, 'import numpy as np\n'), ((6532, 6552), 'numpy.zeros', 'np.zeros', (['(1, n - 1)'], {}), '((1, n - 1))\n', (6540, 6552), True, 'import numpy as np\n'), ((4716, 4732), 'scipy.sparse.linalg.inv', 'sp.linalg.inv', (['A'], {}), '(A)\n', (4729, 4732), True, 'import scipy.sparse as sp\n'), ((6100, 6122), 'numpy.arange', 'np.arange', (['(0)', '(n - 1)', '(2)'], {}), '(0, n - 1, 2)\n', 
(6109, 6122), True, 'import numpy as np\n'), ((6137, 6149), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (6146, 6149), True, 'import numpy as np\n'), ((6184, 6214), 'numpy.array', 'np.array', (['[5, 8, -1, -1, 8, 5]'], {}), '([5, 8, -1, -1, 8, 5])\n', (6192, 6214), True, 'import numpy as np\n'), ((6373, 6421), 'scipy.sparse.diags', 'sp.diags', (['[-1, 1]', '[-1, 0]'], {'shape': '(n - 1, n - 1)'}), '([-1, 1], [-1, 0], shape=(n - 1, n - 1))\n', (6381, 6421), True, 'import scipy.sparse as sp\n'), ((8147, 8164), 'scipy.sparse.bmat', 'sp.bmat', (['row_list'], {}), '(row_list)\n', (8154, 8164), True, 'import scipy.sparse as sp\n'), ((3552, 3569), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (3559, 3569), True, 'import numpy as np\n'), ((3623, 3640), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (3630, 3640), True, 'import numpy as np\n'), ((3665, 3682), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (3672, 3682), True, 'import numpy as np\n'), ((3701, 3807), 'scipy.sparse.diags', 'sp.diags', (['[a_diag_1, a_diag_2, a_diag_3, a_diag_4, a_diag_5]', '[1, 0, -1, -2, -3]'], {'shape': '(n - 1, n - 1)'}), '([a_diag_1, a_diag_2, a_diag_3, a_diag_4, a_diag_5], [1, 0, -1, -2,\n -3], shape=(n - 1, n - 1))\n', (3709, 3807), True, 'import scipy.sparse as sp\n'), ((3848, 3865), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (3855, 3865), True, 'import numpy as np\n'), ((4268, 4285), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (4275, 4285), True, 'import numpy as np\n'), ((4337, 4354), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (4344, 4354), True, 'import numpy as np\n'), ((4379, 4396), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (4386, 4396), True, 'import numpy as np\n'), ((4415, 4535), 'scipy.sparse.diags', 'sp.diags', (['[a_diag_0, a_diag_1, a_diag_2, a_diag_3, a_diag_4, a_diag_5]', '[2, 1, 0, -1, -2, -3]'], {'shape': '(n - 1, n - 1)'}), '([a_diag_0, a_diag_1, 
a_diag_2, a_diag_3, a_diag_4, a_diag_5], [2, \n 1, 0, -1, -2, -3], shape=(n - 1, n - 1))\n', (4423, 4535), True, 'import scipy.sparse as sp\n'), ((4576, 4593), 'numpy.ones', 'np.ones', (['(n - 1,)'], {}), '((n - 1,))\n', (4583, 4593), True, 'import numpy as np\n'), ((5404, 5424), 'numpy.tile', 'np.tile', (['last_row', 'n'], {}), '(last_row, n)\n', (5411, 5424), True, 'import numpy as np\n'), ((6838, 6858), 'numpy.tile', 'np.tile', (['last_row', 'n'], {}), '(last_row, n)\n', (6845, 6858), True, 'import numpy as np\n'), ((8973, 8990), 'scipy.sparse.bmat', 'sp.bmat', (['row_list'], {}), '(row_list)\n', (8980, 8990), True, 'import scipy.sparse as sp\n'), ((26069, 26181), 'warnings.warn', 'warnings.warn', (['"""You have specified a integral with respect to a unitless integrand. Be aware of this."""'], {}), "(\n 'You have specified a integral with respect to a unitless integrand. Be aware of this.'\n )\n", (26082, 26181), False, 'import warnings\n'), ((28770, 28791), 'numpy.ones', 'np.ones', (['(num_nodes,)'], {}), '((num_nodes,))\n', (28777, 28791), True, 'import numpy as np\n'), ((28793, 28806), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (28800, 28806), True, 'import numpy as np\n'), ((32575, 32590), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (32583, 32590), True, 'import numpy as np\n'), ((41946, 41964), 'numpy.ones', 'np.ones', (['(nn_tot,)'], {}), '((nn_tot,))\n', (41953, 41964), True, 'import numpy as np\n'), ((41966, 41988), 'numpy.ones', 'np.ones', (['(n_segments,)'], {}), '((n_segments,))\n', (41973, 41988), True, 'import numpy as np\n'), ((9046, 9066), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['dQddt'], {}), '(dQddt)\n', (9059, 9066), True, 'import scipy.sparse as sp\n'), ((26456, 26568), 'warnings.warn', 'warnings.warn', (['"""You have specified a integral with respect to a unitless integrand. Be aware of this."""'], {}), "(\n 'You have specified a integral with respect to a unitless integrand. 
Be aware of this.'\n )\n", (26469, 26568), False, 'import warnings\n'), ((40616, 40728), 'warnings.warn', 'warnings.warn', (['"""You have specified a integral with respect to a unitless integrand. Be aware of this."""'], {}), "(\n 'You have specified a integral with respect to a unitless integrand. Be aware of this.'\n )\n", (40629, 40728), False, 'import warnings\n'), ((8059, 8102), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['([], ([], []))'], {'shape': '(n, n)'}), '(([], ([], [])), shape=(n, n))\n', (8072, 8102), True, 'import scipy.sparse as sp\n'), ((28378, 28398), 'numpy.arange', 'np.arange', (['num_nodes'], {}), '(num_nodes)\n', (28387, 28398), True, 'import numpy as np\n'), ((28405, 28427), 'numpy.zeros', 'np.zeros', (['(num_nodes,)'], {}), '((num_nodes,))\n', (28413, 28427), True, 'import numpy as np\n'), ((28433, 28454), 'numpy.ones', 'np.ones', (['(num_nodes,)'], {}), '((num_nodes,))\n', (28440, 28454), True, 'import numpy as np\n'), ((41620, 41637), 'numpy.arange', 'np.arange', (['nn_tot'], {}), '(nn_tot)\n', (41629, 41637), True, 'import numpy as np\n'), ((41644, 41663), 'numpy.zeros', 'np.zeros', (['(nn_tot,)'], {}), '((nn_tot,))\n', (41652, 41663), True, 'import numpy as np\n'), ((41669, 41687), 'numpy.ones', 'np.ones', (['(nn_tot,)'], {}), '((nn_tot,))\n', (41676, 41687), True, 'import numpy as np\n'), ((8915, 8958), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['([], ([], []))'], {'shape': '(n, n)'}), '(([], ([], [])), shape=(n, n))\n', (8928, 8958), True, 'import scipy.sparse as sp\n'), ((35351, 35391), 'numpy.zeros', 'np.zeros', (["J[qty_name, 't_initial'].shape"], {}), "(J[qty_name, 't_initial'].shape)\n", (35359, 35391), True, 'import numpy as np\n'), ((35445, 35483), 'numpy.zeros', 'np.zeros', (["J[qty_name, 't_final'].shape"], {}), "(J[qty_name, 't_final'].shape)\n", (35453, 35483), True, 'import numpy as np\n'), ((50896, 50931), 'numpy.zeros', 'np.zeros', (["J['q', 't_initial'].shape"], {}), "(J['q', 't_initial'].shape)\n", (50904, 50931), 
True, 'import numpy as np\n'), ((50978, 51011), 'numpy.zeros', 'np.zeros', (["J['q', 't_final'].shape"], {}), "(J['q', 't_final'].shape)\n", (50986, 51011), True, 'import numpy as np\n')] |
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import argparse
import cv2
import pickle
import random
import torch.nn as nn
# RGB palette scaled to [0, 1]; one row per label index (used by assign_color)
COLORS = np.array([
    [255,255,255], [66,135,245], [245,90,66], [245,206,66],
    [209,245,66], [105,245,66], [129,66,245], [245,66,203],
]) / 255.0  # white, blue, red-orange, yellow, lime, green, purple, pink
def downsampling(im, sx, sy):
    """Average-pool ``im`` down to a (round(sx), round(sy)) spatial grid."""
    target_size = (round(sx), round(sy))
    return nn.functional.adaptive_avg_pool2d(im, target_size)
def upsampling(im, sx, sy):
    """Bilinearly resize ``im`` to a (round(sx), round(sy)) spatial grid."""
    return nn.functional.interpolate(
        im, size=[round(sx), round(sy)], mode='bilinear', align_corners=True)
def assign_color(mask, n_labels):
    """Map an integer label mask to an RGB image using the module palette.

    Parameters:
        mask (Tensor)  -- integer label map of shape (N, H, W); a 4-D
                          (N, 1, H, W) tensor is accepted and its channel
                          dimension is removed first
        n_labels (int) -- number of labels to colorize (at most len(COLORS))

    Returns:
        Tensor of shape (N, 3, H, W) with each label painted in its color.
    """
    if len(mask.size()) == 4:
        # BUG FIX: the original called the non-existent ``sequeeze()``
        # (AttributeError); also squeeze only the channel dim so a batch of
        # size 1 is not collapsed as plain squeeze() would do.
        mask = mask.squeeze(1)
    N, H, W = mask.size()
    ret = []
    for i in range(n_labels):
        curr_parse = []
        for j in range(3):
            # binary mask of label i scaled by the j-th color channel
            curr = (mask == i).float() * COLORS[i, j]
            curr_parse.append(curr.unsqueeze(1))
        ret += [torch.cat(curr_parse, 1)]
    return sum(ret)
def generate_zeros(min_h, max_h, min_w, max_w):
    """Return a (1, 1, h, w) zero tensor with h in [min_h, max_h) and
    w in [min_w, max_w), sampled uniformly at random."""
    height = random.randrange(min_h, max_h)
    width = random.randrange(min_w, max_w)
    return torch.zeros((1, 1, height, width))
def inject_zeros(img, margin=0.2, min_pad_size=0.2, max_pad_size=0.4):
    """Overwrite a random rectangular patch of ``img`` with zeros, in place.

    The patch extent is drawn between ``min_pad_size`` and ``max_pad_size``
    (fractions of H and W) and its position keeps a ``margin`` border.
    Returns the mutated tensor.
    """
    N, C, H, W = img.size()
    # sample the patch extent (at least 1 px per side)
    lo_h, lo_w = max(1, int(min_pad_size * H)), max(1, int(min_pad_size * W))
    hi_h, hi_w = int(max_pad_size * H), int(max_pad_size * W)
    patch = generate_zeros(lo_h, hi_h, lo_w, hi_w).to(img.device)
    _, _, ph, pw = patch.size()
    # sample the patch position inside the margins
    left = random.randint(int(margin * W), int(W - margin * W - pw) - 1)
    top = random.randint(int(margin * H), int(H - margin * H - ph) - 1)
    img[:, :, top:top + ph, left:left + pw] = patch.expand(N, C, ph, pw)
    return img
class StoreDictKeyPair(argparse.Action):
    """argparse action parsing "k1=v1,k2=v2" into a dict of int values."""
    def __call__(self, parser, namespace, values, option_string=None):
        pairs = (item.split("=") for item in values.split(","))
        parsed = {key: int(val) for key, val in pairs}
        setattr(namespace, self.dest, parsed)
class StoreList(argparse.Action):
    """argparse action parsing a comma-separated string into a list of ints."""
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, [int(tok) for tok in values.split(',')])
#
def tensor2im(input_image, imtype=np.uint8, max_n=4):
    """Convert a Tensor into a numpy image array.

    Parameters:
        input_image (tensor) -- the input image tensor array
        imtype (type)        -- the desired type of the converted numpy array
        max_n (int)          -- at most this many batch entries are stacked

    A numpy array is only dtype-converted; a 4-D tensor has up to ``max_n``
    batch entries concatenated along the height axis; any other input is
    returned unchanged.
    """
    if isinstance(input_image, np.ndarray):
        # already a numpy image: only the dtype conversion is applied
        return input_image.astype(imtype)
    if not isinstance(input_image, torch.Tensor):
        # anything else is passed through untouched
        return input_image
    tensor = input_image.data
    if tensor.dim() == 4:
        # stack up to max_n batch entries along the H axis of CHW
        tensor = torch.cat([tensor[idx] for idx in range(min(tensor.size(0), max_n))], 1)
    arr = tensor.cpu().float().numpy()
    if arr.shape[0] == 1:  # grayscale to RGB
        arr = np.tile(arr, (3, 1, 1))
    # CHW -> HWC, then map [-1, 1] to [0, 255]
    return ((np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0).astype(imtype)
def diagnose_network(net, name='network'):
    """Calculate and print the mean of average absolute(gradients)

    Parameters:
        net (torch network) -- Torch network
        name (str)          -- the name of the network
    """
    grad_means = [torch.mean(torch.abs(p.grad.data))
                  for p in net.parameters() if p.grad is not None]
    # average over parameters that actually have a gradient; 0.0 otherwise
    avg = sum(grad_means) / len(grad_means) if grad_means else 0.0
    print(name)
    print(avg)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to the disk, optionally rescaling by aspect ratio.

    Parameters:
        image_numpy (numpy array) -- input numpy array of shape (H, W, C)
        image_path (str)          -- the path of the image
    """
    pil_image = Image.fromarray(image_numpy)
    h, w, _ = image_numpy.shape
    # NOTE(review): PIL's resize expects (width, height); passing (h, ...)
    # reproduces the original behavior -- confirm the intended axis order.
    if aspect_ratio > 1.0:
        pil_image = pil_image.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    elif aspect_ratio < 1.0:
        pil_image = pil_image.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    pil_image.save(image_path)
def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths
    """
    # a bare string (or any non-list) is treated as a single path
    if isinstance(paths, str) or not isinstance(paths, list):
        mkdir(paths)
    else:
        for path in paths:
            mkdir(path)
def mkdir(path):
    """create a single empty directory if it didn't exist

    Parameters:
        path (str) -- a single directory path
    """
    import errno
    try:
        # attempt unconditionally instead of exists()+makedirs() to avoid the
        # TOCTOU race where the path appears between the check and the call
        os.makedirs(path)
    except OSError as exc:
        # preserve the original silent no-op when the path already exists
        if exc.errno != errno.EEXIST:
            raise
def load_pickle_file(pkl_path):
    """Load and return the object stored in a pickle file (latin1 encoding)."""
    with open(pkl_path, 'rb') as handle:
        return pickle.load(handle, encoding='latin1')
def write_pickle_file(pkl_path, data_dict):
    """Serialize ``data_dict`` to ``pkl_path`` using pickle protocol 2."""
    with open(pkl_path, 'wb') as handle:
        pickle.dump(data_dict, handle, protocol=2)
class ImageTransformer(object):
    """Resize sample images, scale them to [-1, 1], and transpose HWC -> CHW."""
    def __init__(self, output_size):
        """
        Args:
            output_size (tuple or int): Desired output size. If tuple, output is matched to output_size.
                If int, smaller of image edges is matched to output_size keeping aspect ratio the same.
        """
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def _prepare(self, image):
        # resize, normalize to [-1, 1], then convert HWC -> CHW
        resized = cv2.resize(image, (self.output_size, self.output_size))
        resized = resized.astype(np.float32)
        resized /= 255.0
        resized = resized * 2 - 1
        return np.transpose(resized, (2, 0, 1))

    def __call__(self, sample):
        sample['images'] = np.stack(
            [self._prepare(img) for img in sample['images']], axis=0)
        return sample
class ToTensor(object):
    """Convert the 'images' and 'smpls' entries of a sample to float Tensors."""
    def __call__(self, sample):
        for key in ('images', 'smpls'):
            sample[key] = torch.Tensor(sample[key]).float()
        return sample
| [
"numpy.stack",
"pickle.dump",
"random.randint",
"os.makedirs",
"os.path.exists",
"torch.cat",
"numpy.transpose",
"PIL.Image.fromarray",
"pickle.load",
"numpy.array",
"numpy.tile",
"torch.Tensor",
"torch.zeros",
"torch.abs",
"cv2.resize"
] | [((241, 384), 'numpy.array', 'np.array', (['[[255, 255, 255], [66, 135, 245], [245, 90, 66], [245, 206, 66], [209, 245,\n 66], [105, 245, 66], [129, 66, 245], [245, 66, 203]]'], {}), '([[255, 255, 255], [66, 135, 245], [245, 90, 66], [245, 206, 66], [\n 209, 245, 66], [105, 245, 66], [129, 66, 245], [245, 66, 203]])\n', (249, 384), True, 'import numpy as np\n'), ((1075, 1107), 'random.randint', 'random.randint', (['min_h', '(max_h - 1)'], {}), '(min_h, max_h - 1)\n', (1089, 1107), False, 'import random\n'), ((1116, 1148), 'random.randint', 'random.randint', (['min_w', '(max_w - 1)'], {}), '(min_w, max_w - 1)\n', (1130, 1148), False, 'import random\n'), ((1161, 1186), 'torch.zeros', 'torch.zeros', (['(1, 1, h, w)'], {}), '((1, 1, h, w))\n', (1172, 1186), False, 'import torch\n'), ((1726, 1764), 'random.randint', 'random.randint', (['min_left', '(max_left - 1)'], {}), '(min_left, max_left - 1)\n', (1740, 1764), False, 'import random\n'), ((1775, 1811), 'random.randint', 'random.randint', (['min_top', '(max_top - 1)'], {}), '(min_top, max_top - 1)\n', (1789, 1811), False, 'import random\n'), ((4427, 4455), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (4442, 4455), False, 'from PIL import Image\n'), ((5191, 5211), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5205, 5211), False, 'import os\n'), ((5221, 5238), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (5232, 5238), False, 'import os\n'), ((5324, 5357), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (5335, 5357), False, 'import pickle\n'), ((5466, 5504), 'pickle.dump', 'pickle.dump', (['data_dict', 'fp'], {'protocol': '(2)'}), '(data_dict, fp, protocol=2)\n', (5477, 5504), False, 'import pickle\n'), ((6422, 6454), 'numpy.stack', 'np.stack', (['resized_images'], {'axis': '(0)'}), '(resized_images, axis=0)\n', (6430, 6454), True, 'import numpy as np\n'), ((964, 988), 'torch.cat', 'torch.cat', 
(['curr_parse', '(1)'], {}), '(curr_parse, 1)\n', (973, 988), False, 'import torch\n'), ((3105, 3128), 'torch.cat', 'torch.cat', (['all_image', '(1)'], {}), '(all_image, 1)\n', (3114, 3128), False, 'import torch\n'), ((3413, 3444), 'numpy.tile', 'np.tile', (['image_numpy', '(3, 1, 1)'], {}), '(image_numpy, (3, 1, 1))\n', (3420, 3444), True, 'import numpy as np\n'), ((6140, 6195), 'cv2.resize', 'cv2.resize', (['image', '(self.output_size, self.output_size)'], {}), '(image, (self.output_size, self.output_size))\n', (6150, 6195), False, 'import cv2\n'), ((6323, 6353), 'numpy.transpose', 'np.transpose', (['image', '(2, 0, 1)'], {}), '(image, (2, 0, 1))\n', (6335, 6353), True, 'import numpy as np\n'), ((4043, 4069), 'torch.abs', 'torch.abs', (['param.grad.data'], {}), '(param.grad.data)\n', (4052, 4069), False, 'import torch\n'), ((6664, 6694), 'torch.Tensor', 'torch.Tensor', (["sample['images']"], {}), "(sample['images'])\n", (6676, 6694), False, 'import torch\n'), ((6729, 6758), 'torch.Tensor', 'torch.Tensor', (["sample['smpls']"], {}), "(sample['smpls'])\n", (6741, 6758), False, 'import torch\n'), ((3468, 3504), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (3480, 3504), True, 'import numpy as np\n')] |
''' fasta
Module for reading and writing PHYLIP files.
'''
import os
import re
from typing import Tuple, Union
import numpy
import pyckmeans.distance
# Pre-compiled pattern matching any run of whitespace characters.
WHITESPACE_RE = re.compile(r'\s+')
class InvalidPhylipAlignmentError(Exception):
    '''InvalidPhylipAlignmentError

    Raised when a PHYLIP alignment file is malformed (bad header, fewer
    than two sequences, or an entry count that contradicts the header).
    '''
def read_phylip_alignment(
    phylip_file: str,
    dtype: Union[str, numpy.dtype] = 'U',
) -> Tuple[numpy.ndarray, numpy.ndarray]:
    '''read_phylip_alignment

    Read phylip alignment file. This function expects the phylip to be a valid alignment,
    meaning that it should contain at least 2 sequences of the same length, including
    gaps.

    WARNING: whitespace characters in entry names are NOT supported.

    Parameters
    ----------
    phylip_file : str
        Path to a phylip file.
    dtype: Union[str, numpy.dtype]
        Data type to use for the sequence array.

    Returns
    -------
    Tuple[numpy.ndarray, numpy.ndarray]
        Tuple of sequences and names, each as numpy array.

    Raises
    ------
    InvalidPhylipAlignmentError
        Raised if header is malformed.
    InvalidPhylipAlignmentError
        Raised if less than 2 entries are present in phylip_file.
    InvalidPhylipAlignmentError
        Raised if number of entries does not match header.
    '''
    names = []
    seqs = []
    with open(phylip_file) as phylip_f:
        # header: "<n_entries> <n_sites>"
        header_str = next(phylip_f)
        try:
            n_entries, n_sites = [int(s) for s in header_str.split()]
        except ValueError as err:
            # BUG FIX: narrowed from a bare ``except`` (which would also have
            # swallowed KeyboardInterrupt etc.) and chain the original cause.
            raise InvalidPhylipAlignmentError('Malformed header.') from err

        for line in phylip_f:
            # drop ALL whitespace; equivalent to re.sub(r'\s+', '', line)
            _line = ''.join(line.split())
            if not _line:
                continue
            # the trailing n_sites characters are the sequence, the rest the name
            l_len = len(_line)
            start = l_len - n_sites
            names.append(_line[:start])
            seqs.append(list(_line[start:].upper()))

    # check alignment validity
    n_seq = len(seqs)
    if n_seq < 2:
        msg = f'Expected at least 2 entries but found only {n_seq}.'
        raise InvalidPhylipAlignmentError(msg)
    if n_seq != n_entries:
        msg = f'Expected {n_entries} entries but found {n_seq} instead.'
        raise InvalidPhylipAlignmentError(msg)

    # construct output
    seqs = numpy.array(seqs, dtype=dtype)
    names = numpy.array(names)
    return seqs, names
class InvalidPhylipMatrixError(Exception):
    '''InvalidPhylipMatrixError

    Raised when a PHYLIP distance-matrix file is malformed.
    '''
def read_phylip_distmat(phylip_file: str) -> 'pyckmeans.distance.DistanceMatrix':
    '''read_phylip_distmat

    Read distance matrix in PHYLIP format.
    Supports full and lower-triangle matrices.

    Parameters
    ----------
    phylip_file : str
        Path to distance file in phylip format.

    Returns
    -------
    pyckmeans.distance.DistanceMatrix
        Distance matrix as pyckmeans.distance DistanceMatrix object.

    Raises
    ------
    InvalidPhylipMatrixError
        Raised if the header is malformed.
    InvalidPhylipMatrixError
        Raised if an empty line is encountered as second line.
    InvalidPhylipMatrixError
        Raised if file format can neither be inferred as full nor
        as lower-triangle matrix.
    InvalidPhylipMatrixError
        Raised if an empty line is encountered.
    InvalidPhylipMatrixError
        Raised if expecting a full matrix but number of values
        does not match the header.
    InvalidPhylipMatrixError
        Raised if expecting lower-triangle matrix but number of values
        does not match the expected number of values for that entry.
    InvalidPhylipMatrixError
        Raised if number of names does not match number of entries
        stated in the header.
    '''
    with open(phylip_file) as phylip_f:
        # == header: a single integer giving the number of entries
        header_str = next(phylip_f)
        try:
            n_entries = int(header_str.strip())
        except ValueError as err:
            # BUG FIX: narrowed from a bare ``except`` and chain the cause
            raise InvalidPhylipMatrixError('Malformed header.') from err

        dist_mat = numpy.zeros((n_entries, n_entries))
        names = []

        # == detect matrix type (full, lower-triangle) from the first data row
        line = next(phylip_f)
        _line = line.strip()
        if not _line:
            msg = 'Line 2: Empty lines are not allowed.'
            raise InvalidPhylipMatrixError(msg)
        name, *mat_entries = _line.split()
        names.append(name)
        # lower-triangle matrix: first row carries the name only
        if len(mat_entries) == 0:
            mat_type = 'lower-triangle'
        # full matrix
        elif len(mat_entries) == n_entries:
            mat_type = 'full'
            dist_mat[0,] = numpy.array(mat_entries, dtype=float)
        # error
        else:
            msg = 'Line 2: Expected either 0 values for a lower-triangle ' +\
                f'matrix or {n_entries} values for a full matrix; found ' +\
                f'{len(mat_entries)} values instead.'
            raise InvalidPhylipMatrixError(msg)

        # == full matrix
        if mat_type == 'full':
            for i, line in enumerate(phylip_f):
                l_num = i + 3  # 1-based line number: header + first line already read
                _line = line.strip()
                if not _line:
                    # last line can be empty
                    if i + 2 == n_entries:
                        continue
                    msg = f'Line {l_num}: Empty lines are not allowed.'
                    raise InvalidPhylipMatrixError(msg)
                name, *mat_entries = _line.split()
                names.append(name)
                if len(mat_entries) != n_entries:
                    msg = f'Line {l_num}: Expected {n_entries} values for a full matrix but ' +\
                        f'found {len(mat_entries)} values instead.'
                    raise InvalidPhylipMatrixError(msg)
                dist_mat[i+1,] = numpy.array(mat_entries, dtype=float)
        # == lower-triangle matrix
        elif mat_type == 'lower-triangle':
            for i, line in enumerate(phylip_f):
                l_num = i + 3  # 1-based line number: header + first line already read
                _line = line.strip()
                if not _line:
                    # last line can be empty
                    if i + 2 == n_entries:
                        continue
                    msg = f'Line {l_num}: Empty lines are not allowed.'
                    raise InvalidPhylipMatrixError(msg)
                name, *mat_entries = _line.split()
                names.append(name)
                if len(mat_entries) != i+1:
                    msg = f'Line {l_num}: Expected {i+1} values for a lower-triangle ' +\
                        f'matrix but found {len(mat_entries)} values instead.'
                    raise InvalidPhylipMatrixError(msg)
                dist_mat[i+1, :i+1] = numpy.array(mat_entries, dtype=float)
            # BUG FIX: mirror the lower triangle into the upper triangle ONLY
            # for lower-triangle input. The original applied this addition
            # unconditionally, which doubled every off-diagonal value of an
            # already-full matrix.
            dist_mat = dist_mat + dist_mat.T

    # check validity
    if len(names) != n_entries:
        msg = f'Expected {n_entries} entries but found {len(names)}.'
        raise InvalidPhylipMatrixError(msg)

    return pyckmeans.distance.DistanceMatrix(dist_mat, names)
class IncompatibleNamesError(Exception):
    '''Raised when the number of names does not match the distance matrix.'''
# Column width the entry names are left-padded to when writing PHYLIP output.
NAME_PADDING = 64
def write_phylip_distmat(
dist: 'pyckmeans.distance.DistanceMatrix',
file_path: str,
force: bool = False,
) -> None:
'''write_phylip_distmat
Write distance matrix to file in PHYLIP matrix format.
Parameters
----------
dist : pyckmeans.distance.DistanceMatrix
Distance matrix as pyckmeans.distance DistanceMatrix object.
file_path : str
Output file path.
force : bool, optional
Force overwrite if file exists, by default False
Raises
------
FileExistsError
Raised if file at file_path already exists and force is False.
FileExistsError
Raised if file_path points to an existing directory.
IncompatibleNamesError
Raised if names are incompatible with dist_mat.
'''
if os.path.exists(file_path):
if os.path.isfile(file_path) and not force:
msg = f'File {file_path} already exists. If you want to overwrite ' +\
'it run the function with force=True.'
raise FileExistsError(msg)
else:
msg = f'A directory exists at path {file_path}.'
raise FileExistsError(msg)
dist_mat = dist.dist_mat
names = dist.names
n_entries = dist_mat.shape[0]
if len(names) != n_entries:
msg = f'Expected {n_entries} names but got {len(names)} instead.'
raise IncompatibleNamesError(msg)
with open(file_path, 'w') as phylip_f:
# header
phylip_f.write(f'{n_entries}\n')
# body
for name, dists in zip(names, dist_mat):
nm_str = f'{name: <{NAME_PADDING}}'
dst_str = '\t'.join(dists.astype(str))
phylip_f.write(f'{nm_str} {dst_str}\n')
| [
"numpy.zeros",
"os.path.exists",
"os.path.isfile",
"numpy.array",
"re.sub",
"re.compile"
] | [((175, 193), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (185, 193), False, 'import re\n'), ((2290, 2320), 'numpy.array', 'numpy.array', (['seqs'], {'dtype': 'dtype'}), '(seqs, dtype=dtype)\n', (2301, 2320), False, 'import numpy\n'), ((2333, 2351), 'numpy.array', 'numpy.array', (['names'], {}), '(names)\n', (2344, 2351), False, 'import numpy\n'), ((8118, 8143), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (8132, 8143), False, 'import os\n'), ((4056, 4091), 'numpy.zeros', 'numpy.zeros', (['(n_entries, n_entries)'], {}), '((n_entries, n_entries))\n', (4067, 4091), False, 'import numpy\n'), ((1627, 1658), 're.sub', 're.sub', (['WHITESPACE_RE', '""""""', 'line'], {}), "(WHITESPACE_RE, '', line)\n", (1633, 1658), False, 'import re\n'), ((8156, 8181), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (8170, 8181), False, 'import os\n'), ((4653, 4690), 'numpy.array', 'numpy.array', (['mat_entries'], {'dtype': 'float'}), '(mat_entries, dtype=float)\n', (4664, 4690), False, 'import numpy\n'), ((5902, 5939), 'numpy.array', 'numpy.array', (['mat_entries'], {'dtype': 'float'}), '(mat_entries, dtype=float)\n', (5913, 5939), False, 'import numpy\n'), ((6889, 6926), 'numpy.array', 'numpy.array', (['mat_entries'], {'dtype': 'float'}), '(mat_entries, dtype=float)\n', (6900, 6926), False, 'import numpy\n')] |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torchvision import transforms
from torchvision.transforms import Normalize as norm
import trimesh
from sklearn.preprocessing import normalize
import kaolin as kal
from PIL import Image
from collections import defaultdict
import numpy as np
from kaolin.rep import TriangleMesh
import kaolin as kal
# Input image pipeline: resize to 224 and convert a PIL image to a [0, 1]
# float tensor. Note: Normalize is imported above but not applied here.
preprocess = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor()
])
def get_pooling_index( positions, cam_mat, cam_pos, dims):
    """Project 3D vertex positions into image space and precompute bilinear
    sampling weights and corner indices for every feature-map size in ``dims``.

    Args:
        positions: (N, 3) vertex positions in world space.
        cam_mat: (3, 3) camera rotation matrix.
        cam_pos: (3,) camera position (subtracted before rotation).
        dims: iterable of feature-map side lengths (square maps assumed).

    Returns:
        dict mapping 'A','B','G','H' (bilinear weights) and
        'x1s','x2s','y1s','y2s' (corner indices) to tensors stacked over
        ``dims`` along dim 0, consumed by ``pooling``.
    """
    #project points into 2D
    positions = positions * .57 # accounting for recaling in 3Dr2n
    positions = positions - cam_pos
    positions = torch.mm(positions,cam_mat.permute(1,0))
    positions_xs = positions[:, 1] / positions[:, 2]
    positions_ys = -positions[:, 0] / positions[:, 2]
    # do bilinear interpolation over pixel coordiantes
    data_meta = defaultdict(list)
    for dim in dims:
        # focal length rescaled from the 250px/224px reference to this map size
        focal_length = 250./224. * dim
        xs = positions_xs * focal_length + dim/2.
        ys = positions_ys * focal_length + dim/2.
        cur_xs = torch.clamp(xs , 0, dim - 1)
        cur_ys = torch.clamp(ys , 0, dim - 1)
        # img = np.zeros((dim,dim))
        # for x,y in zip (cur_xs, cur_ys):
        #     img[x.int(), y.int()] = 255
        # from PIL import Image
        # Image.fromarray(img).show()
        x1s, y1s, x2s, y2s = torch.floor(cur_xs), torch.floor(cur_ys), torch.ceil(cur_xs), torch.ceil(cur_ys)
        # bilinear weights: distance to the opposite corner in each axis
        A = x2s - cur_xs
        B = cur_xs - x1s
        G = y2s - cur_ys
        H = cur_ys - y1s
        # offset y indices by vertex_index * dim so that, after the x-select
        # flattens the map in `pooling`, each vertex addresses its own row
        y1s = y1s + torch.arange(positions.shape[0]).float().to(positions.device)*dim
        y2s = y2s + torch.arange(positions.shape[0]).float().to(positions.device)*dim
        data_meta['A'].append(A.float().unsqueeze(0))
        data_meta['B'].append(B.float().unsqueeze(0))
        data_meta['G'].append(G.float().unsqueeze(0))
        data_meta['H'].append(H.float().unsqueeze(0))
        data_meta['x1s'].append(x1s.long().unsqueeze(0))
        data_meta['x2s'].append(x2s.long().unsqueeze(0))
        data_meta['y1s'].append(y1s.long().unsqueeze(0))
        data_meta['y2s'].append(y2s.long().unsqueeze(0))
    for key in data_meta:
        data_meta[key] = torch.cat(data_meta[key], dim=0)
    return data_meta
def pooling(blocks, pooling_indices):
    """Bilinearly sample per-vertex features from a list of CNN feature maps.

    Args:
        blocks: list of feature maps; block i must match the i-th entry of
            ``dims`` used in ``get_pooling_index`` (first dim is channels).
        pooling_indices: dict produced by ``get_pooling_index``.

    Returns:
        (N, total_channels) tensor with the pooled features of all blocks
        concatenated per vertex.
    """
    full_features = None
    for i_block, block in enumerate(blocks):
        # bilinear weights (A,B,G,H) and corner indices for this resolution
        A = pooling_indices['A'][i_block]
        B = pooling_indices['B'][i_block]
        G = pooling_indices['G'][i_block]
        H = pooling_indices['H'][i_block]
        x1s = pooling_indices['x1s'][i_block]
        x2s = pooling_indices['x2s'][i_block]
        y1s = pooling_indices['y1s'][i_block]
        y2s = pooling_indices['y2s'][i_block]
        # gather the four neighbouring pixels (x-select, flatten, y-select)
        C =torch.index_select(block, 1, x1s).view(block.shape[0], -1 )
        C = torch.index_select(C, 1, y1s)
        D =torch.index_select(block, 1, x1s).view(block.shape[0], -1 )
        D = torch.index_select(D, 1, y2s)
        E =torch.index_select(block, 1, x2s).view(block.shape[0], -1 )
        E = torch.index_select(E, 1, y1s)
        F =torch.index_select(block, 1, x2s).view(block.shape[0], -1 )
        F = torch.index_select(F, 1, y2s)
        # blend the four corners with the bilinear weights
        features = (A*C*G + H*D*A + G*E*B + B*F*H).permute(1,0)
        if full_features is None: full_features = features
        else: full_features = torch.cat((full_features, features), dim = 1)
    return full_features
# Shared one-sided nearest-neighbour operator, instantiated once at module
# level and reused by chamfer_normal.
norm_distance = kal.metrics.point.SidedDistance()
def chamfer_normal(pred_mesh, gt_points,gt_norms):
    """Normal-consistency loss between a predicted mesh and ground-truth points.

    For every predicted vertex the closest ground-truth point is found; the
    loss is the mean absolute dot product between that point's normal and the
    (masked) neighbour positions of the vertex.

    NOTE(review): the dot product below uses absolute neighbour positions,
    not edge vectors (neighbour - vertex) -- confirm this is intended.

    Args:
        pred_mesh: TriangleMesh with adjacency info (uses .vertices, .ve, .vv).
        gt_points: (M, 3) ground-truth surface samples.
        gt_norms: (M, 3) normals of the ground-truth samples.

    Returns:
        Scalar loss averaged over all valid (non-padded) neighbour slots.
    """
    # find closest gt points
    gt_indices = norm_distance(pred_mesh.vertices.unsqueeze(0), gt_points.unsqueeze(0))[0]
    # select norms from closest points and exand to match edges lengths
    gt_norm_selections = gt_norms[gt_indices]
    new_dimensions = (gt_norm_selections.shape[0],pred_mesh.ve.shape[1], 3 )
    vertex_norms = gt_norm_selections.view(-1,1,3).expand(new_dimensions)
    # get all nieghbor positions
    neighbor_indecies = pred_mesh.vv.clone()
    # negative entries in .vv pad vertices that have fewer neighbours
    empty_indecies = (neighbor_indecies >=0)
    other_indecies = (neighbor_indecies <0)
    neighbor_indecies[other_indecies] = 0
    empty_indecies = (empty_indecies).float().unsqueeze(-1)
    neighbor_indecies = neighbor_indecies.view(-1)
    vertex_neighbors = pred_mesh.vertices[neighbor_indecies].view(new_dimensions)
    # mask both tensors
    vertex_norms = vertex_norms * empty_indecies
    vertex_norms = vertex_norms.contiguous().view(-1,3)
    vertex_neighbors = vertex_neighbors * empty_indecies
    vertex_neighbors = vertex_neighbors.contiguous().view(-1,3)
    # calculate normal loss, devide by number of unmasked elements to get mean
    normal_loss = (torch.abs(torch.sum(vertex_norms * vertex_neighbors, dim = 1)))
    normal_loss = normal_loss.sum() / float(empty_indecies.sum())
    return normal_loss
def setup_meshes(filename='meshes/156.obj', device="cuda" ):
    """Load a base mesh and build a three-level subdivision hierarchy.

    Each level keeps a frozen template copy ('init'), a deformable copy
    ('update'), a row-normalized full adjacency matrix, and the split
    matrices that map one level's vertices/features to the next.

    Args:
        filename: path to the template .obj mesh.
        device: "cuda" moves the base mesh to GPU; any other value keeps CPU.

    Returns:
        dict with keys 'init', 'update', 'adjs', 'split_mxs'.
    """
    mesh_1 = kal.rep.TriangleMesh.from_obj(filename, enable_adjacency=True)
    if device == 'cuda':
        mesh_1.cuda()
    adj_1 = mesh_1.compute_adjacency_matrix_full().clone()
    adj_1 = normalize_adj(adj_1)
    mesh_1_i = kal.rep.TriangleMesh.from_tensors(mesh_1.vertices.clone(), mesh_1.faces.clone())
    mesh_2, split_mx_1 = split_mesh(mesh_1)
    adj_2 = mesh_2.compute_adjacency_matrix_full().clone()
    adj_2 = normalize_adj(adj_2)
    mesh_2_i = kal.rep.TriangleMesh.from_tensors(mesh_2.vertices.clone(), mesh_2.faces.clone())
    mesh_3, split_mx_2 = split_mesh(mesh_2)
    adj_3 = mesh_3.compute_adjacency_matrix_full().clone()
    adj_3 = normalize_adj(adj_3)
    mesh_3_i = kal.rep.TriangleMesh.from_tensors(mesh_3.vertices.clone(), mesh_3.faces.clone())
    initial_meshes = [mesh_1_i, mesh_2_i, mesh_3_i]
    updated_meshes = [mesh_1, mesh_2, mesh_3]
    adjs = [adj_1, adj_2, adj_3]
    split_mxs = [split_mx_1, split_mx_2]
    mesh_info = {'init':initial_meshes, 'update':updated_meshes , 'adjs': adjs, 'split_mxs': split_mxs}
    return mesh_info
def normalize_adj(mx):
    """Row-normalize a dense adjacency matrix.

    Every row of ``mx`` is divided by its row sum so that rows sum to 1.
    Rows whose sum is zero are left as all zeros.

    Args:
        mx: (N, N) dense adjacency matrix (float tensor).

    Returns:
        (N, N) row-normalized matrix on the same device as ``mx``.
    """
    rowsum = mx.sum(dim=1).view(-1)
    r_inv = 1. / rowsum
    # Zero out non-finite entries. The original only zeroed NaN
    # (r_inv != r_inv), but 1/0 yields inf in torch, which would turn
    # zero-degree rows into NaN in the multiply below.
    r_inv[~torch.isfinite(r_inv)] = 0.
    # Row-scale directly instead of building a dense N x N diagonal matrix
    # and matmul-ing it (same result, O(N^2) memory saved).
    return r_inv.unsqueeze(1) * mx
def split(meshes, features, index):
    """Advance to the next subdivision level.

    Side effect: overwrites the next level's template vertices
    (meshes['init'][index+1]) with the current deformed vertices expanded by
    the split matrix. Returns ``features`` expanded to the new vertex count.
    """
    meshes['init'][index+1].vertices = split_features(meshes['split_mxs'][index], meshes['update'][index].vertices)
    new_features = split_features(meshes['split_mxs'][index],features)
    return new_features
def split_mesh(mesh):
    """Subdivide a triangle mesh: each face becomes four faces by inserting a
    midpoint vertex on every edge.

    Returns:
        (updated_mesh, split_mx): ``split_mx`` has shape (V, E_new) with two
        0.5 entries per column, so new vertex positions/features are the
        average of the two edge endpoints (see ``split_features``).
    """
    faces = mesh.faces.clone()
    tracker = dict()  # canonical edge key -> index of its midpoint vertex
    vertex_count = mesh.vertices.shape[0]
    constant_vertex_count = vertex_count
    # NOTE(review): columns grows via np.concatenate once per new edge, which
    # is quadratic in the number of edges -- acceptable for small meshes.
    columns = np.zeros((vertex_count, 0))
    new_faces = []
    for face in faces:
        x,y,z = face.int()
        new_verts = []
        edges = [[x,y], [y,z], [z, x]]
        for a,b in edges:
            # sorted key so both faces sharing an edge reuse one midpoint
            key = [a,b]
            key.sort()
            key = str(key)
            if key in tracker:
                new_verts.append(tracker[key])
            else:
                new_verts.append(vertex_count)
                column = np.zeros((constant_vertex_count, 1))
                column[a] = .5
                column[b] = .5
                columns = np.concatenate((columns, column), axis = 1)
                tracker[key] = vertex_count
                vertex_count += 1
        v1,v2,v3 = new_verts
        # one face -> three corner faces plus the central face
        new_faces.append([x,v1,v3])
        new_faces.append([v1,y,v2])
        new_faces.append([v2,z,v3])
        new_faces.append([v1,v2,v3])
    # 'face' here is the last loop variable; only its .device is used
    split_mx = torch.FloatTensor(columns).to(face.device)
    new_faces = torch.LongTensor(new_faces).to(face.device)
    new_verts = split_features(split_mx, mesh.vertices)
    updated_mesh = TriangleMesh.from_tensors(new_verts, new_faces, enable_adjacency=True)
    return updated_mesh, split_mx
def split_features(split_mx, features):
    """Append averaged features for newly created (midpoint) vertices.

    ``split_mx`` has shape (V, E) with two 0.5 entries per column, so every
    appended row is the mean of the two parent-vertex rows.

    Args:
        split_mx: (V, E) averaging matrix (dense tensor).
        features: (V, F) per-vertex feature tensor.

    Returns:
        (V + E, F) tensor: the original rows followed by the averaged rows.
    """
    transposed = features.permute(1, 0)
    midpoint_feats = torch.mm(transposed, split_mx)
    combined = torch.cat((transposed, midpoint_feats), dim=1)
    return combined.permute(1, 0)
def loss_surf(meshes, tgt_points):
    """Surface (chamfer) loss: sum of the chamfer distance between each of
    the three refinement levels' vertices and the target point cloud."""
    total = None
    for level_mesh in meshes['update'][:3]:
        term = kal.metrics.point.chamfer_distance(
            level_mesh.vertices, tgt_points, w1=1.0, w2=0.55
        )
        total = term if total is None else total + term
    return total
def loss_edge(meshes):
    """Edge-length regularizer summed over the three refinement levels."""
    levels = meshes['update'][:3]
    return sum(kal.metrics.mesh.edge_length(m) for m in levels)
def loss_lap(meshes):
    """Laplacian regularizer plus vertex-displacement penalty, summed over
    the three refinement levels.

    The first level's terms carry an extra 0.1 factor and the displacement
    terms a 0.0666 factor -- presumably tuned training weights; verify
    against the original training recipe before changing.
    """
    loss = .1* kal.metrics.mesh.laplacian_loss(meshes['init'][0],meshes['update'][0])
    loss += kal.metrics.mesh.laplacian_loss(meshes['init'][1],meshes['update'][1])
    loss += kal.metrics.mesh.laplacian_loss(meshes['init'][2],meshes['update'][2])
    loss += torch.sum((meshes['init'][0].vertices-meshes['update'][0].vertices)**2, 1).mean() * .0666 * .1
    loss += torch.sum((meshes['init'][1].vertices-meshes['update'][1].vertices)**2, 1).mean() * .0666
    loss += torch.sum((meshes['init'][2].vertices-meshes['update'][2].vertices)**2, 1).mean() * .0666
    return loss
def loss_norm(meshes, tgt_points, tgt_norms):
    """Normal-consistency loss summed over the three refinement levels."""
    total = None
    for level_mesh in meshes['update'][:3]:
        term = chamfer_normal(level_mesh, tgt_points, tgt_norms)
        total = term if total is None else total + term
    return total
| [
"torch.eye",
"torch.mm",
"torch.cat",
"collections.defaultdict",
"torch.ceil",
"torch.arange",
"torch.FloatTensor",
"kaolin.rep.TriangleMesh.from_obj",
"torch.clamp",
"torch.floor",
"torch.sum",
"numpy.concatenate",
"torchvision.transforms.Resize",
"torch.LongTensor",
"kaolin.metrics.mes... | [((3718, 3751), 'kaolin.metrics.point.SidedDistance', 'kal.metrics.point.SidedDistance', ([], {}), '()\n', (3749, 3751), True, 'import kaolin as kal\n'), ((1426, 1443), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1437, 1443), False, 'from collections import defaultdict\n'), ((5120, 5182), 'kaolin.rep.TriangleMesh.from_obj', 'kal.rep.TriangleMesh.from_obj', (['filename'], {'enable_adjacency': '(True)'}), '(filename, enable_adjacency=True)\n', (5149, 5182), True, 'import kaolin as kal\n'), ((6296, 6319), 'torch.mm', 'torch.mm', (['r_mat_inv', 'mx'], {}), '(r_mat_inv, mx)\n', (6304, 6319), False, 'import torch\n'), ((6730, 6757), 'numpy.zeros', 'np.zeros', (['(vertex_count, 0)'], {}), '((vertex_count, 0))\n', (6738, 6757), True, 'import numpy as np\n'), ((7577, 7647), 'kaolin.rep.TriangleMesh.from_tensors', 'TriangleMesh.from_tensors', (['new_verts', 'new_faces'], {'enable_adjacency': '(True)'}), '(new_verts, new_faces, enable_adjacency=True)\n', (7602, 7647), False, 'from kaolin.rep import TriangleMesh\n'), ((7772, 7800), 'torch.mm', 'torch.mm', (['features', 'split_mx'], {}), '(features, split_mx)\n', (7780, 7800), False, 'import torch\n'), ((7934, 8031), 'kaolin.metrics.point.chamfer_distance', 'kal.metrics.point.chamfer_distance', (["meshes['update'][0].vertices", 'tgt_points'], {'w1': '(1.0)', 'w2': '(0.55)'}), "(meshes['update'][0].vertices, tgt_points,\n w1=1.0, w2=0.55)\n", (7968, 8031), True, 'import kaolin as kal\n'), ((8038, 8135), 'kaolin.metrics.point.chamfer_distance', 'kal.metrics.point.chamfer_distance', (["meshes['update'][1].vertices", 'tgt_points'], {'w1': '(1.0)', 'w2': '(0.55)'}), "(meshes['update'][1].vertices, tgt_points,\n w1=1.0, w2=0.55)\n", (8072, 8135), True, 'import kaolin as kal\n'), ((8142, 8239), 'kaolin.metrics.point.chamfer_distance', 'kal.metrics.point.chamfer_distance', (["meshes['update'][2].vertices", 'tgt_points'], {'w1': '(1.0)', 'w2': '(0.55)'}), 
"(meshes['update'][2].vertices, tgt_points,\n w1=1.0, w2=0.55)\n", (8176, 8239), True, 'import kaolin as kal\n'), ((8284, 8333), 'kaolin.metrics.mesh.edge_length', 'kal.metrics.mesh.edge_length', (["meshes['update'][0]"], {}), "(meshes['update'][0])\n", (8312, 8333), True, 'import kaolin as kal\n'), ((8343, 8392), 'kaolin.metrics.mesh.edge_length', 'kal.metrics.mesh.edge_length', (["meshes['update'][1]"], {}), "(meshes['update'][1])\n", (8371, 8392), True, 'import kaolin as kal\n'), ((8402, 8451), 'kaolin.metrics.mesh.edge_length', 'kal.metrics.mesh.edge_length', (["meshes['update'][2]"], {}), "(meshes['update'][2])\n", (8430, 8451), True, 'import kaolin as kal\n'), ((8582, 8653), 'kaolin.metrics.mesh.laplacian_loss', 'kal.metrics.mesh.laplacian_loss', (["meshes['init'][1]", "meshes['update'][1]"], {}), "(meshes['init'][1], meshes['update'][1])\n", (8613, 8653), True, 'import kaolin as kal\n'), ((8662, 8733), 'kaolin.metrics.mesh.laplacian_loss', 'kal.metrics.mesh.laplacian_loss', (["meshes['init'][2]", "meshes['update'][2]"], {}), "(meshes['init'][2], meshes['update'][2])\n", (8693, 8733), True, 'import kaolin as kal\n'), ((965, 987), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224)'], {}), '(224)\n', (982, 987), False, 'from torchvision import transforms\n'), ((992, 1013), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1011, 1013), False, 'from torchvision import transforms\n'), ((1597, 1624), 'torch.clamp', 'torch.clamp', (['xs', '(0)', '(dim - 1)'], {}), '(xs, 0, dim - 1)\n', (1608, 1624), False, 'import torch\n'), ((1637, 1664), 'torch.clamp', 'torch.clamp', (['ys', '(0)', '(dim - 1)'], {}), '(ys, 0, dim - 1)\n', (1648, 1664), False, 'import torch\n'), ((2617, 2649), 'torch.cat', 'torch.cat', (['data_meta[key]'], {'dim': '(0)'}), '(data_meta[key], dim=0)\n', (2626, 2649), False, 'import torch\n'), ((3160, 3189), 'torch.index_select', 'torch.index_select', (['C', '(1)', 'y1s'], {}), '(C, 1, y1s)\n', (3178, 3189), 
False, 'import torch\n'), ((3261, 3290), 'torch.index_select', 'torch.index_select', (['D', '(1)', 'y2s'], {}), '(D, 1, y2s)\n', (3279, 3290), False, 'import torch\n'), ((3362, 3391), 'torch.index_select', 'torch.index_select', (['E', '(1)', 'y1s'], {}), '(E, 1, y1s)\n', (3380, 3391), False, 'import torch\n'), ((3463, 3492), 'torch.index_select', 'torch.index_select', (['F', '(1)', 'y2s'], {}), '(F, 1, y2s)\n', (3481, 3492), False, 'import torch\n'), ((4903, 4952), 'torch.sum', 'torch.sum', (['(vertex_norms * vertex_neighbors)'], {'dim': '(1)'}), '(vertex_norms * vertex_neighbors, dim=1)\n', (4912, 4952), False, 'import torch\n'), ((8502, 8573), 'kaolin.metrics.mesh.laplacian_loss', 'kal.metrics.mesh.laplacian_loss', (["meshes['init'][0]", "meshes['update'][0]"], {}), "(meshes['init'][0], meshes['update'][0])\n", (8533, 8573), True, 'import kaolin as kal\n'), ((1856, 1875), 'torch.floor', 'torch.floor', (['cur_xs'], {}), '(cur_xs)\n', (1867, 1875), False, 'import torch\n'), ((1877, 1896), 'torch.floor', 'torch.floor', (['cur_ys'], {}), '(cur_ys)\n', (1888, 1896), False, 'import torch\n'), ((1898, 1916), 'torch.ceil', 'torch.ceil', (['cur_xs'], {}), '(cur_xs)\n', (1908, 1916), False, 'import torch\n'), ((1918, 1936), 'torch.ceil', 'torch.ceil', (['cur_ys'], {}), '(cur_ys)\n', (1928, 1936), False, 'import torch\n'), ((3631, 3674), 'torch.cat', 'torch.cat', (['(full_features, features)'], {'dim': '(1)'}), '((full_features, features), dim=1)\n', (3640, 3674), False, 'import torch\n'), ((7405, 7431), 'torch.FloatTensor', 'torch.FloatTensor', (['columns'], {}), '(columns)\n', (7422, 7431), False, 'import torch\n'), ((7462, 7489), 'torch.LongTensor', 'torch.LongTensor', (['new_faces'], {}), '(new_faces)\n', (7478, 7489), False, 'import torch\n'), ((7813, 7855), 'torch.cat', 'torch.cat', (['(features, new_features)'], {'dim': '(1)'}), '((features, new_features), dim=1)\n', (7822, 7855), False, 'import torch\n'), ((3094, 3127), 'torch.index_select', 'torch.index_select', 
(['block', '(1)', 'x1s'], {}), '(block, 1, x1s)\n', (3112, 3127), False, 'import torch\n'), ((3195, 3228), 'torch.index_select', 'torch.index_select', (['block', '(1)', 'x1s'], {}), '(block, 1, x1s)\n', (3213, 3228), False, 'import torch\n'), ((3296, 3329), 'torch.index_select', 'torch.index_select', (['block', '(1)', 'x2s'], {}), '(block, 1, x2s)\n', (3314, 3329), False, 'import torch\n'), ((3397, 3430), 'torch.index_select', 'torch.index_select', (['block', '(1)', 'x2s'], {}), '(block, 1, x2s)\n', (3415, 3430), False, 'import torch\n'), ((6244, 6269), 'torch.eye', 'torch.eye', (['r_inv.shape[0]'], {}), '(r_inv.shape[0])\n', (6253, 6269), False, 'import torch\n'), ((7057, 7093), 'numpy.zeros', 'np.zeros', (['(constant_vertex_count, 1)'], {}), '((constant_vertex_count, 1))\n', (7065, 7093), True, 'import numpy as np\n'), ((7146, 7187), 'numpy.concatenate', 'np.concatenate', (['(columns, column)'], {'axis': '(1)'}), '((columns, column), axis=1)\n', (7160, 7187), True, 'import numpy as np\n'), ((8847, 8925), 'torch.sum', 'torch.sum', (["((meshes['init'][1].vertices - meshes['update'][1].vertices) ** 2)", '(1)'], {}), "((meshes['init'][1].vertices - meshes['update'][1].vertices) ** 2, 1)\n", (8856, 8925), False, 'import torch\n'), ((8946, 9024), 'torch.sum', 'torch.sum', (["((meshes['init'][2].vertices - meshes['update'][2].vertices) ** 2)", '(1)'], {}), "((meshes['init'][2].vertices - meshes['update'][2].vertices) ** 2, 1)\n", (8955, 9024), False, 'import torch\n'), ((8743, 8821), 'torch.sum', 'torch.sum', (["((meshes['init'][0].vertices - meshes['update'][0].vertices) ** 2)", '(1)'], {}), "((meshes['init'][0].vertices - meshes['update'][0].vertices) ** 2, 1)\n", (8752, 8821), False, 'import torch\n'), ((2029, 2061), 'torch.arange', 'torch.arange', (['positions.shape[0]'], {}), '(positions.shape[0])\n', (2041, 2061), False, 'import torch\n'), ((2110, 2142), 'torch.arange', 'torch.arange', (['positions.shape[0]'], {}), '(positions.shape[0])\n', (2122, 2142), False, 
'import torch\n')] |
"""core module for preprocessing
This module wraps the pyintorg interfaces into xr.apply_ufunc.
"""
import xarray as xr
import numpy as np
import warnings
# Preserve variable/dataset attributes across xarray operations globally.
xr.set_options(keep_attrs=True)
try:
    from pyintorg import interface as intf
except ImportError:
    # pyintorg is required for the actual preprocessing; warn at import time
    # so the module stays importable, but fail loudly on anything other than
    # a missing package (the original bare `except:` hid all errors).
    warnings.warn(
        "could not find pyintorg, you need this for preprocessing. Please consider installing it from https://git.gerics.de/python/pyintorg.git"
    )
# Vertical dimension names used throughout this module.
lev_i = "lev_i"  # interface (half) levels of the target vertical coordinate
lev = "lev"  # full levels of the target vertical coordinate
lev_gm = "lev_gm"  # full levels of the global (driving) model
class const:
    """Physical constants used for unit conversion."""

    # Gravitational acceleration [m s-2] used to convert surface height to
    # geopotential; value kept from the original configuration.
    grav_const = 9.806805923
    # Offset between degrees Celsius and Kelvin (0 degC == 273.15 K).
    # The original value 273.5 was a typo.
    absolute_zero = 273.15
def pbl_index(akgm, bkgm):
    """Return the planetary-boundary-layer level index for the hybrid
    coefficients ``akgm``/``bkgm`` (thin wrapper around pyintorg)."""
    return intf.pbl_index(akgm, bkgm)
def open_mfdataset(
    files,
    use_cftime=True,
    parallel=True,
    data_vars="minimal",
    chunks={"time": 1},
    coords="minimal",
    compat="override",
    drop=None,
    **kwargs
):
    """optimized function for opening CMIP6 6hrLev 3d datasets

    based on https://github.com/pydata/xarray/issues/1385#issuecomment-561920115

    Parameters
    ----------
    files : str or list
        Path(s) forwarded to ``xr.open_mfdataset``.
    use_cftime : bool
        Decode times to cftime objects.
    parallel : bool
        Open files in parallel via dask.
    data_vars, chunks, coords, compat :
        Forwarded to ``xr.open_mfdataset``. (The original hard-coded
        coords/compat in the call, silently ignoring these arguments.)
    drop : list, optional
        Variable names to drop from each file before combining.

    Returns
    -------
    xr.Dataset
    """

    def drop_all_coords(ds):
        # optionally drop unwanted variables, then demote all coordinates to
        # plain variables so the fast minimal/override combine works
        if drop is not None:
            ds = ds.drop_vars(drop, errors="ignore")
        return ds.reset_coords(drop=True)

    ds = xr.open_mfdataset(
        files,
        parallel=parallel,
        decode_times=False,
        combine="by_coords",
        preprocess=drop_all_coords,
        decode_cf=False,
        chunks=chunks,
        data_vars=data_vars,
        coords=coords,
        compat=compat,
        **kwargs
    )
    return xr.decode_cf(ds, use_cftime=use_cftime)
def get_akbkem(vc):
    """create vertical coordinate dataset

    Builds the target model's hybrid coefficients from a vertical-coordinate
    table: ak/bk on interface levels (dim ``lev_i``) and their midpoints
    akh/bkh on full levels (dim ``lev``), all indexed 1-based.

    Parameters
    ----------
    vc : object with ``to_xarray()`` yielding ``ak``/``bk`` over ``index``
        (presumably a pandas DataFrame) -- TODO confirm exact type.

    Returns
    -------
    xr.Dataset with variables ak, bk, akh, bkh.
    """
    akbk = vc.to_xarray().drop("index")
    # bkem = pr.tables.vc.tables['vc_27lev']
    akem = akbk.ak.swap_dims({"index": lev_i})
    bkem = akbk.bk.swap_dims({"index": lev_i})
    # full-level (midpoint) coefficients: average neighbouring interfaces
    akhem = (0.5 * (akbk.ak[:-1] + akbk.ak[1:])).swap_dims({"index": lev})
    bkhem = (0.5 * (akbk.bk[:-1] + akbk.bk[1:])).swap_dims({"index": lev})
    akem[lev_i] = xr.DataArray(np.arange(1, akem.size + 1), dims=lev_i)
    bkem[lev_i] = xr.DataArray(np.arange(1, bkem.size + 1), dims=lev_i)
    akhem[lev] = xr.DataArray(np.arange(1, akhem.size + 1), dims=lev, name="akh")
    bkhem[lev] = xr.DataArray(np.arange(1, bkhem.size + 1), dims=lev, name="bkh")
    akhem.name = "akh"
    bkhem.name = "bkh"
    return xr.merge([akem, bkem, akhem, bkhem])
    # return akem, bkem, akhem, bkhem
def horizontal_dims(da):
    """Return the horizontal dimension names of ``da`` as ``(lon_dim, lat_dim)``.

    Scans ``da.dims`` for names containing "lon" and "lat" (handles e.g.
    ``lon``/``lat`` as well as rotated ``rlon``/``rlat``).

    Raises
    ------
    ValueError
        If no longitude or latitude dimension is found (the original code
        raised an opaque UnboundLocalError in that case).
    """
    lon_dim = None
    lat_dim = None
    for dim in da.dims:
        if "lon" in dim:
            lon_dim = dim
        if "lat" in dim:
            lat_dim = dim
    if lon_dim is None or lat_dim is None:
        raise ValueError(
            "could not identify horizontal (lon/lat) dimensions in {}".format(
                tuple(da.dims)
            )
        )
    return (lon_dim, lat_dim)
def intersect(lamgm, phigm, lamem, phiem):
    """Find, for each regional grid point, the enclosing global grid cell.

    Wraps ``intf.intersection_points`` with ``xr.apply_ufunc``; all
    coordinates are converted from degrees to radians (factor 1/57.296).

    Parameters
    ----------
    lamgm, phigm : global-model lon/lat fields (degrees).
    lamem, phiem : regional-model lon/lat fields with a ``pos`` dimension
        (degrees).

    Returns
    -------
    (indii, indjj) : index arrays on the regional grid (incl. ``pos``).
    """
    gcm_dims = list(horizontal_dims(lamgm))
    rcm_dims = list(horizontal_dims(lamem))
    rcm_dims.append("pos")
    out_dims = rcm_dims
    result = xr.apply_ufunc(
        intf.intersection_points,  # first the function
        lamgm * 1.0 / 57.296,  # now arguments in the order expected by 'druint'
        phigm * 1.0 / 57.296,
        lamem * 1.0 / 57.296,
        phiem * 1.0 / 57.296,
        input_core_dims=[
            gcm_dims,
            gcm_dims,
            rcm_dims,
            rcm_dims,
        ],  # list with one entry per arg
        output_core_dims=[out_dims, out_dims],  # returned data has 3 dimensions
        dask="parallelized",
        output_dtypes=[lamgm.dtype],
    )
    return result
def interpolate_horizontal(
    da, lamem, phiem, lamgm, phigm, name=None, igr=None, blagm=None, blaem=None
):
    """Horizontally interpolate a global field onto the regional grid.

    Parameters
    ----------
    da : field on the global grid.
    lamem, phiem : regional lon/lat with a ``pos`` dimension (grid position;
        presumably 0 = mass, 1 = u, 2 = v points -- see rotate_uv).
    lamgm, phigm : global lon/lat (degrees).
    name : output variable name; defaults to ``da.name``.
    igr : grid-position index selected from ``pos``; defaults to 0.
    blagm, blaem : optional global/regional land-sea masks. If both are
        given, the mask-aware interpolation (``interp_horiz_cm``) is used.
    """
    if name is None:
        name = da.name
    if igr is None:
        igr = 0
    indii, indjj = intersect(lamgm, phigm, lamem, phiem)
    if blagm is None or blaem is None:
        return interp_horiz(
            da,
            lamgm,
            phigm,
            lamem.isel(pos=igr),
            phiem.isel(pos=igr),
            indii.isel(pos=igr),
            indjj.isel(pos=igr),
            name,
        )
    else:
        return interp_horiz_cm(
            da,
            lamgm,
            phigm,
            lamem.isel(pos=igr),
            phiem.isel(pos=igr),
            indii.isel(pos=igr),
            indjj.isel(pos=igr),
            name,
            blagm,
            blaem,
        )
# def interp_horiz_2d(field, lamgm, phigm, lamem, phiem, indii, indjj, name):
# """interpolates 2d global data horizontally.
# Interpolates 2d data from the global grid to the regional grid.
# """
# #from intorg import intorg
# return intf.hiobla(field, lamgm, phigm, lamem, phiem, indii, indjj, name)
# def interp_horiz_2d_cm(field, lamgm, phigm, lamem, phiem, indii, indjj, name):
# """interpolates 2d global data horizontally.
# Interpolates 2d data from the global grid to the regional grid.
# """
# from intorg import intorg
# return intorg.hiobla(field, lamgm, phigm, lamem, phiem, indii, indjj, name)
def interp_horiz(da, lamgm, phigm, lamem, phiem, indii, indjj, name, keep_attrs=False):
    """main interface

    Horizontal interpolation of a global field onto the regional grid via
    ``intf.interp_horiz_2d``, wrapped in ``xr.apply_ufunc`` (vectorized over
    all non-horizontal dims, dask-parallelized). Lon/lat are converted from
    degrees to radians; ``indii``/``indjj`` come from ``intersect``.
    """
    gcm_dims = list(horizontal_dims(lamgm))
    rcm_dims = list(horizontal_dims(lamem))
    input_core_dims = [
        gcm_dims,
        gcm_dims,
        gcm_dims,
        rcm_dims,
        rcm_dims,
        rcm_dims,
        rcm_dims,
        [],
    ]
    result = xr.apply_ufunc(
        intf.interp_horiz_2d,  # first the function
        da,  # now arguments in the order expected
        lamgm * 1.0 / 57.296,
        phigm * 1.0 / 57.296,
        lamem * 1.0 / 57.296,
        phiem * 1.0 / 57.296,
        indii,
        indjj,
        name,
        input_core_dims=input_core_dims,  # list with one entry per arg
        output_core_dims=[rcm_dims],  # returned data has 3 dimensions
        vectorize=True,  # loop over non-core dims, in this case: time
        # exclude_dims=set(("lev",)),  # dimensions allowed to change size. Must be a set!
        dask="parallelized",
        dask_gufunc_kwargs={"allow_rechunk": True},
        output_dtypes=[da.dtype],
    )
    result.name = name
    # result = result.to_dataset()
    if keep_attrs:
        result.attrs = da.attrs
    # result = result.transpose(..., *spatial_dims(da)[::-1])
    return result
def interp_horiz_cm(
    da, lamgm, phigm, lamem, phiem, indii, indjj, name, blagm, blaem, keep_attrs=False
):
    """main interface

    Mask-aware variant of ``interp_horiz``: additionally takes the global
    (``blagm``) and regional (``blaem``) land-sea masks and delegates to
    ``intf.interp_horiz_2d_cm``.
    """
    gcm_dims = list(horizontal_dims(lamgm))
    rcm_dims = list(horizontal_dims(lamem))
    input_core_dims = [
        gcm_dims,
        gcm_dims,
        rcm_dims,
        gcm_dims,
        gcm_dims,
        rcm_dims,
        rcm_dims,
        rcm_dims,
        rcm_dims,
        [],
    ]
    result = xr.apply_ufunc(
        intf.interp_horiz_2d_cm,  # first the function
        da,  # now arguments in the order expected
        blagm,
        blaem,
        lamgm * 1.0 / 57.296,
        phigm * 1.0 / 57.296,
        lamem * 1.0 / 57.296,
        phiem * 1.0 / 57.296,
        indii,
        indjj,
        name,
        # dataset_fill_value=1.e20,
        input_core_dims=input_core_dims,  # list with one entry per arg
        output_core_dims=[rcm_dims],  # returned data has 3 dimensions
        vectorize=True,  # loop over non-core dims, in this case: time
        # exclude_dims=set(("lev",)),  # dimensions allowed to change size. Must be a set!
        dask="parallelized",
        dask_gufunc_kwargs={"allow_rechunk": True},
        output_dtypes=[da.dtype],
    )
    result.name = name
    # result = result.to_dataset()
    if keep_attrs:
        result.attrs = da.attrs
    # result = result.transpose(..., *spatial_dims(da)[::-1])
    return result
def geopotential(fibgm, tgm, qdgm, psgm, akgm, bkgm):
    """main interface

    Computes geopotential from surface geopotential (``fibgm``), temperature
    (``tgm``), specific humidity (``qdgm``) and surface pressure (``psgm``)
    on the hybrid levels given by ``akgm``/``bkgm`` via ``intf.geopotential``.

    NOTE(review): output_core_dims is the 2D horizontal dims (the 3D variant
    is commented out), so the result appears to be a single-level field --
    confirm against the pyintorg signature.
    """
    # gcm_dims = list(spatial_dims(lamgm))
    twoD_dims = list(horizontal_dims(fibgm))
    threeD_dims = list(horizontal_dims(fibgm))
    threeD_dims.append(lev_gm)
    # lev_dims.append("lev")
    # plev_dims = list(spatial_dims(da))
    # plev_dims.append("plev")
    # nlev = a.dims[0]
    input_core_dims = [
        twoD_dims,
        threeD_dims,
        threeD_dims,
        twoD_dims,
        ["lev_2"],
        ["lev_2"],
        # [],
        # []
    ]
    # print(input_core_dims)
    result = xr.apply_ufunc(
        intf.geopotential,  # first the function
        fibgm,  # now arguments in the order expected
        tgm,
        qdgm,
        psgm,
        akgm,
        bkgm,
        input_core_dims=input_core_dims,  # list with one entry per arg
        # output_core_dims=[threeD_dims],  # returned data has 3 dimensions
        output_core_dims=[twoD_dims],  # returned data has 3 dimensions
        vectorize=True,  # loop over non-core dims, in this case: time
        # exclude_dims=set(("lev",)),  # dimensions allowed to change size. Must be a set!
        dask="parallelized",
        dask_gufunc_kwargs={"allow_rechunk": True},
        output_dtypes=[fibgm.dtype],
    )
    return result
def relative_humidity(qdgm, tgm, psgm, akgm, bkgm, qwgm=None):
    """main interface

    Computes relative humidity from specific humidity (``qdgm``),
    temperature (``tgm``) and surface pressure (``psgm``) on the hybrid
    levels ``akgm``/``bkgm`` via ``intf.relative_humidity``. Cloud water
    (``qwgm``) defaults to zero everywhere.
    """
    if qwgm is None:
        qwgm = xr.zeros_like(qdgm)
    twoD_dims = list(horizontal_dims(qdgm))
    threeD_dims = list(horizontal_dims(qdgm)) + ["lev_gm"]
    # print(twoD_dims)
    # threeD_dims.append("lev")
    input_core_dims = [
        threeD_dims,
        threeD_dims,
        twoD_dims,
        [akgm.dims[0]],
        [bkgm.dims[0]],
        threeD_dims,
    ]
    result = xr.apply_ufunc(
        intf.relative_humidity,  # first the function
        qdgm,  # now arguments in the order expected
        tgm,
        psgm,
        akgm,
        bkgm,
        qwgm,
        input_core_dims=input_core_dims,  # list with one entry per arg
        # output_core_dims=[threeD_dims],  # returned data has 3 dimensions
        output_core_dims=[threeD_dims],  # returned data has 3 dimensions
        vectorize=True,  # loop over non-core dims, in this case: time
        # exclude_dims=set(("lev",)),  # dimensions allowed to change size. Must be a set!
        dask="parallelized",
        dask_gufunc_kwargs={"allow_rechunk": True},
        output_dtypes=[qdgm.dtype],
    )
    return result
def geo_coords(domain_info, rlon, rlat):
    """Compute geographical lon/lat for a rotated-pole regional domain.

    Calls ``intf.geo_coords`` (working in radians) for the grid enlarged by
    one boundary cell in each direction (nlam+2, nphi+2) and returns lon/lat
    DataArrays in degrees with dims (rlon, rlat, pos).

    NOTE(review): the returned arrays are assigned the given rlon/rlat as
    coordinates -- they must match the enlarged sizes; verify with callers.
    """
    import numpy as np  # shadows the module-level import; kept as-is
    ll_lam = domain_info["ll_lon"]  # * 1.0/57.296
    ll_phi = domain_info["ll_lat"]  # * 1.0/57.296
    dlam = domain_info["dlon"]
    dphi = domain_info["dlat"]
    nlam = domain_info["nlon"]
    nphi = domain_info["nlat"]
    pollam = domain_info["pollon"]
    polphi = domain_info["pollat"]
    lamem, phiem = intf.geo_coords(
        ll_lam, ll_phi, dlam, dphi, pollam, polphi, nlam + 2, nphi + 2
    )
    lamda = xr.DataArray(
        np.rad2deg(lamem),
        dims=("rlon", "rlat", "pos"),
        coords={"rlon": rlon, "rlat": rlat},
    )
    phida = xr.DataArray(
        np.rad2deg(phiem),
        dims=("rlon", "rlat", "pos"),
        coords={"rlon": rlon, "rlat": rlat},
    )
    return lamda, phida
def get_vc(ds):
    """Reads the vertical hybrid coordinate from a dataset.

    Looks for the hybrid coefficient bounds (``ap_bnds``/``a_bnds`` and
    ``b_bnds``) and converts them into interface arrays ``ak``/``bk``,
    flipped when the ``lev`` coordinate is oriented ``positive: down``.

    Parameters
    ----------
    ds : xr.Dataset
        Dataset containing the coefficient bounds and a ``lev`` coordinate
        with a ``positive`` attribute.

    Returns
    -------
    (xr.DataArray, xr.DataArray)
        ``ak`` and ``bk`` on the interface dimension ``lev_2``.

    Raises
    ------
    KeyError
        If no known coefficient-bounds variables are found (the original
        code crashed later with an opaque AttributeError on None).
    """
    ak_valid = ["ap_bnds", "a_bnds"]
    bk_valid = ["b_bnds"]
    ak_bnds = None
    bk_bnds = None
    for ak_name in ak_valid:
        if ak_name in ds:
            ak_bnds = ds[ak_name]
            print("using {} for akgm".format(ak_name))
    for bk_name in bk_valid:
        if bk_name in ds:
            bk_bnds = ds[bk_name]
            print("using {} for bkgm".format(bk_name))
    if ak_bnds is None or bk_bnds is None:
        # reinstated sanity check (it was commented out in the original)
        raise KeyError(
            "could not identify vertical coordinate, tried: {}, {}".format(
                ak_valid, bk_valid
            )
        )
    nlev = ak_bnds.shape[0]
    ak = np.zeros([nlev + 1], dtype=np.float64)
    bk = np.ones([nlev + 1], dtype=np.float64)
    if ds.lev.positive == "down":
        ak[:-1] = np.flip(ak_bnds[:, 1])
        bk[:-1] = np.flip(bk_bnds[:, 1])
    else:
        ak[1:] = np.flip(ak_bnds[:, 1])
        bk[1:] = np.flip(bk_bnds[:, 1])
    return xr.DataArray(ak, dims="lev_2"), xr.DataArray(bk, dims="lev_2")
def map_sst(tos, ref_ds, resample="6H", regrid=True):
    """Prepare SST (``tos``) as forcing aligned to a reference dataset.

    Restricts ``tos`` to the reference time range (padded by one day on each
    side), resamples/interpolates linearly to ``resample`` frequency, selects
    exactly ``ref_ds.time`` and optionally regrids with xesmf nearest-
    neighbour onto the reference grid. Requires the optional ``xesmf``
    dependency when ``regrid`` is True.
    """
    from datetime import timedelta as td
    import xesmf as xe
    # tos_res = tos
    attrs = tos.attrs
    tos_times = (ref_ds.time.min() - td(days=1), ref_ds.time.max() + td(days=1))
    tos = tos.sel(time=slice(tos_times[0], tos_times[1]))
    # return tos_res
    tos = tos.resample(time=resample).interpolate("linear").chunk({"time": 1})
    tos = tos.sel(time=ref_ds.time)
    if regrid:
        regridder = xe.Regridder(tos, ref_ds, "nearest_s2d")
        tos = regridder(tos)
    tos.attrs.update(attrs)
    return tos
def convert_units(ds):
    """convert units for use in the preprocessor

    Converts, in place on ``ds``:
      * ``sftlf`` from percent to fraction,
      * ``tos`` from degrees Celsius to Kelvin,
      * ``orog`` from meters to geopotential (m2 s-2).

    Missing variables or missing ``units`` attributes only trigger a warning
    (conversion stays best effort). Unlike the original bare ``except:``,
    genuine unrelated errors are no longer silently mislabelled.
    """
    try:
        if ds.sftlf.units == "%":
            print("converting sftlf units to fractional")
            attrs = ds.sftlf.attrs
            ds["sftlf"] = ds.sftlf * 0.01
            attrs["units"] = 1
            ds.sftlf.attrs = attrs
    except (AttributeError, KeyError):
        warnings.warn("sftlf has no units attribute, must be fractional.")
    try:
        if ds.tos.units == "degC":
            print("converting tos units to K")
            attrs = ds.tos.attrs
            ds["tos"] = ds.tos + const.absolute_zero
            attrs["units"] = "K"
            ds.tos.attrs = attrs
    except (AttributeError, KeyError):
        warnings.warn("tos has no units attribute, must be Kelvin!")
    try:
        if ds.orog.units == "m":
            print("converting orography to geopotential")
            attrs = ds.orog.attrs
            ds["orog"] = ds.orog * const.grav_const
            attrs["units"] = "m2 s-2"
            ds.orog.attrs = attrs
    except (AttributeError, KeyError):
        warnings.warn("orog has no units attribute, must be m2 s-2")
    return ds
def gfile(datasets, ref_ds=None, tos=None, time_range=None):
    """Creates a virtual gfile

    Merges the variables in ``datasets`` into one global-forcing dataset:
    aligns horizontal coordinates to ``ref_ds``, flips vertical axes that
    are ordered top-down, optionally maps in SST, attaches the hybrid
    coefficients (akgm/bkgm) and converts units.

    Parameters
    ----------
    datasets : dict
        Mapping variable name -> input file path(s).
    ref_ds : xr.Dataset, optional
        Reference dataset for coordinates/time; defaults to opening
        ``datasets['ta']``.
    tos : optional
        SST field, passed through ``map_sst``.
    time_range : optional
        Time selection; defaults to ``ref_ds.time``.
    """
    if ref_ds is None:
        try:
            ref_ds = open_mfdataset(datasets["ta"])
        except:
            raise Exception("ta is required in the datasets dict if no ref_ds is given")
    lon, lat = horizontal_dims(ref_ds)
    if time_range is None:
        time_range = ref_ds.time
    dsets = []
    for var, f in datasets.items():
        try:
            da = open_mfdataset(f, chunks={"time": 1})[var]
            da = da.sel(time=time_range)
        except:
            # best effort: time-invariant fields (e.g. orography) have no
            # time axis and fail the time selection above
            da = open_mfdataset(f, chunks={})[var]
        try:
            if da.lev.positive == "down":
                da = da.reindex(lev=da.lev[::-1])
        except:
            # no vertical axis or no 'positive' attribute -- leave as is
            pass
        # print(var)
        # print(da)
        da[lon] = ref_ds[lon]
        da[lat] = ref_ds[lat]
        dsets.append(da)
    ds = xr.merge(dsets)
    if tos is not None:
        ds["tos"] = map_sst(tos, ref_ds.sel(time=time_range))
    ds["akgm"], ds["bkgm"] = get_vc(ref_ds)
    ds = ds.rename({"lev": lev_gm})
    ds = convert_units(ds)
    if "sftlf" in ds:
        # round land fraction to a binary land-sea mask
        ds["sftlf"] = np.around(ds.sftlf)
    ds.attrs = ref_ds.attrs
    return ds
def rotate_uv(uge, vge, uvge, vuge, lamem, phiem, pollam, polphi):
    """Rotate wind components onto the rotated-pole grid via ``intf.rotate_uv``.

    ``lamem``/``phiem`` carry per-position coordinates; ``pos=1`` selects the
    u-staggered points and ``pos=2`` the v-staggered points (presumably the
    Arakawa staggering -- confirm against the grid definition).  Coordinates
    are converted from degrees to radians before the kernel is called.
    Returns the rotated ``(uge_rot, vge_rot)`` pair with ``lev_gm`` as a core
    dimension.
    """
    ulamem, uphiem = lamem.isel(pos=1), phiem.isel(pos=1)
    vlamem, vphiem = lamem.isel(pos=2), phiem.isel(pos=2)
    twoD_dims = list(horizontal_dims(uge))
    # Core dims: 4 x 3D wind fields, 4 x 2D coordinate fields, 2 scalars.
    input_core_dims = 4 * [twoD_dims + [lev_gm]] + 4 * [twoD_dims] + 2 * [[]]
    uge_rot, vge_rot = xr.apply_ufunc(
        intf.rotate_uv,  # first the function
        uge,  # now arguments in the order expected
        vge,
        uvge,
        vuge,
        ulamem * 1.0 / 57.296,  # 1/57.296 == pi/180 (deg2rad)
        uphiem * 1.0 / 57.296,
        vlamem * 1.0 / 57.296,
        vphiem * 1.0 / 57.296,
        pollam,
        polphi,
        input_core_dims=input_core_dims,  # list with one entry per arg
        # output_core_dims=[threeD_dims],  # returned data has 3 dimensions
        output_core_dims=2 * [twoD_dims + [lev_gm]],  # returned data has 3 dimensions
        vectorize=True,  # loop over non-core dims, in this case: time
        # exclude_dims=set(("lev",)),  # dimensions allowed to change size. Must be a set!
        dask="parallelized",
        dask_gufunc_kwargs={"allow_rechunk": True},
        output_dtypes=(uge.dtype, vge.dtype),
    )
    return uge_rot, vge_rot
def pressure_correction_em(psge, tge, arfge, fibge, fibem, akgm, bkgm, kpbl):
    """Surface-pressure correction for the GM->EM orography difference.

    Wraps ``intf.pressure_correction_em`` with :func:`xarray.apply_ufunc`.
    ``psge``/``fibge``/``fibem`` are 2D surface fields; ``tge``/``arfge`` are
    3D fields on the hybrid levels defined by ``akgm``/``bkgm``; ``kpbl`` is
    a scalar (boundary-layer level index from ``intf.pbl_index`` -- confirm).
    Returns a 2D field with the dtype of ``psge``.
    """
    twoD_dims = list(horizontal_dims(psge))
    threeD_dims = list(horizontal_dims(psge)) + [lev_gm]
    # One core-dim spec per positional argument, in call order:
    # psge(2D), tge(3D), arfge(3D), fibge(2D), fibem(2D), akgm, bkgm, kpbl.
    input_core_dims = (
        [twoD_dims]
        + 2 * [threeD_dims]
        + 2 * [twoD_dims]
        + [[akgm.dims[0]], [bkgm.dims[0]], []]
    )
    # print(input_core_dims)
    result = xr.apply_ufunc(
        intf.pressure_correction_em,  # first the function
        psge,  # now arguments in the order expected
        tge,
        arfge,
        fibge,
        fibem,
        akgm,
        bkgm,
        kpbl,
        input_core_dims=input_core_dims,  # list with one entry per arg
        output_core_dims=[twoD_dims],  # returned data has 3 dimensions
        vectorize=True,  # loop over non-core dims, in this case: time
        dask="parallelized",
        dask_gufunc_kwargs={"allow_rechunk": True},
        output_dtypes=[psge.dtype],
    )
    return result
def interpolate_vertical(xge, psge, ps1em, akhgm, bkhgm, akhem, bkhem, varname, kpbl):
    """Interpolate ``xge`` from GM hybrid levels to EM hybrid levels.

    Wraps ``intf.interp_vert``: the source grid is described by the surface
    pressure ``psge`` and coefficients ``akhgm``/``bkhgm``, the target grid
    by ``ps1em`` and ``akhem``/``bkhem``.  ``varname`` and ``kpbl`` are
    passed through as scalars.  The result keeps the horizontal dims and
    gains the EM vertical axis (``akhem``'s dimension); its name is set to
    ``varname``.
    """
    twoD_dims = list(horizontal_dims(psge))
    threeD_dims = list(horizontal_dims(psge)) + [lev_gm]
    # One core-dim spec per positional argument, in call order:
    # xge(3D), psge(2D), ps1em(2D), 4 coefficient axes, 2 scalars.
    input_core_dims = (
        [threeD_dims]
        + 2 * [twoD_dims]
        + [[akhgm.dims[0]], [bkhgm.dims[0]], [akhem.dims[0]], [bkhem.dims[0]], [], []]
    )
    output_core_dims = [twoD_dims + [akhem.dims[0]]]
    # print(output_core_dims)
    result = xr.apply_ufunc(
        intf.interp_vert,  # first the function
        xge,  # now arguments in the order expected
        psge,
        ps1em,
        akhgm,
        bkhgm,
        akhem,
        bkhem,
        varname,
        kpbl,
        input_core_dims=input_core_dims,  # list with one entry per arg
        output_core_dims=output_core_dims,  # returned data has 3 dimensions
        # exclude_dims=set(("index",)),
        vectorize=True,  # loop over non-core dims, in this case: time
        dask="parallelized",
        dask_gufunc_kwargs={"allow_rechunk": True},
        output_dtypes=[xge.dtype],
    )
    result.name = varname
    return result
def pressure_correction_ge(ps1em, tem, arfem, ficge, fibem, akem, bkem):
    """Pressure correction on the regional grid via ``intf.pressure_correction_ge``.

    Surface fields (``ps1em``, ``ficge``, ``fibem``) carry only the
    horizontal dims as core dims; the 3D fields (``tem``, ``arfem``)
    additionally carry the vertical coordinate ``lev``.  Returns a 2D
    field with the dtype of ``ps1em``.
    """
    surface_dims = list(horizontal_dims(ps1em))
    volume_dims = list(horizontal_dims(ps1em)) + [lev]
    # One core-dim spec per positional argument, in call order.
    core_dims = [
        surface_dims,     # ps1em
        volume_dims,      # tem
        volume_dims,      # arfem
        surface_dims,     # ficge
        surface_dims,     # fibem
        [akem.dims[0]],   # akem
        [bkem.dims[0]],   # bkem
    ]
    return xr.apply_ufunc(
        intf.pressure_correction_ge,
        ps1em,
        tem,
        arfem,
        ficge,
        fibem,
        akem,
        bkem,
        input_core_dims=core_dims,
        output_core_dims=[surface_dims],
        vectorize=True,  # loop over the non-core dims (time)
        dask="parallelized",
        output_dtypes=[ps1em.dtype],
    )
def correct_uv(uem, vem, psem, akem, bkem, lamem, phiem, ll_lam, dlam, dphi):
    """Correct the wind components on the EM grid via ``intf.correct_uv``.

    Parameters
    ----------
    uem, vem : xarray.DataArray
        3D wind components on the EM hybrid levels (``lev``).
    psem : xarray.DataArray
        2D surface pressure.
    akem, bkem : xarray.DataArray
        Hybrid level coefficients.
    lamem, phiem
        Unused by the kernel; retained for interface compatibility with
        existing callers (the previous version derived staggered coordinates
        from them but never passed them on).
    ll_lam, dlam, dphi : scalar
        Lower-left longitude and the grid increments.

    Returns
    -------
    tuple of xarray.DataArray
        Corrected winds, named "U" and "V", with ``uem``/``vem`` dtypes.
    """
    twoD_dims = list(horizontal_dims(uem))
    # Core dims: 2 x 3D winds, 1 x 2D pressure, two coefficient axes,
    # and three scalars (ll_lam, dlam, dphi).
    input_core_dims = (
        2 * [twoD_dims + [lev]]
        + 1 * [twoD_dims]
        + [[akem.dims[0]], [bkem.dims[0]]]
        + 3 * [[]]
    )
    uge_corr, vge_corr = xr.apply_ufunc(
        intf.correct_uv,  # first the function
        uem,  # now arguments in the order expected
        vem,
        psem,
        akem,
        bkem,
        ll_lam,
        dlam,
        dphi,
        input_core_dims=input_core_dims,  # list with one entry per arg
        output_core_dims=2 * [twoD_dims + [lev]],  # two 3D outputs
        vectorize=True,  # loop over non-core dims, in this case: time
        dask="parallelized",
        # dask_gufunc_kwargs = {'allow_rechunk':True},
        output_dtypes=(uem.dtype, vem.dtype),
    )
    uge_corr.name = "U"
    vge_corr.name = "V"
    return uge_corr, vge_corr
| [
"xarray.decode_cf",
"numpy.flip",
"xesmf.Regridder",
"numpy.zeros",
"numpy.ones",
"xarray.merge",
"xarray.zeros_like",
"numpy.rad2deg",
"xarray.set_options",
"pyintorg.interface.geo_coords",
"xarray.open_mfdataset",
"numpy.arange",
"xarray.apply_ufunc",
"xarray.DataArray",
"warnings.warn... | [((159, 190), 'xarray.set_options', 'xr.set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (173, 190), True, 'import xarray as xr\n'), ((622, 648), 'pyintorg.interface.pbl_index', 'intf.pbl_index', (['akgm', 'bkgm'], {}), '(akgm, bkgm)\n', (636, 648), True, 'from pyintorg import interface as intf\n'), ((1112, 1333), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['files'], {'parallel': 'parallel', 'decode_times': '(False)', 'combine': '"""by_coords"""', 'preprocess': 'drop_all_coords', 'decode_cf': '(False)', 'chunks': 'chunks', 'data_vars': 'data_vars', 'coords': '"""minimal"""', 'compat': '"""override"""'}), "(files, parallel=parallel, decode_times=False, combine=\n 'by_coords', preprocess=drop_all_coords, decode_cf=False, chunks=chunks,\n data_vars=data_vars, coords='minimal', compat='override', **kwargs)\n", (1129, 1333), True, 'import xarray as xr\n'), ((1430, 1469), 'xarray.decode_cf', 'xr.decode_cf', (['ds'], {'use_cftime': 'use_cftime'}), '(ds, use_cftime=use_cftime)\n', (1442, 1469), True, 'import xarray as xr\n'), ((2231, 2267), 'xarray.merge', 'xr.merge', (['[akem, bkem, akhem, bkhem]'], {}), '([akem, bkem, akhem, bkhem])\n', (2239, 2267), True, 'import xarray as xr\n'), ((2686, 2974), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['intf.intersection_points', '(lamgm * 1.0 / 57.296)', '(phigm * 1.0 / 57.296)', '(lamem * 1.0 / 57.296)', '(phiem * 1.0 / 57.296)'], {'input_core_dims': '[gcm_dims, gcm_dims, rcm_dims, rcm_dims]', 'output_core_dims': '[out_dims, out_dims]', 'dask': '"""parallelized"""', 'output_dtypes': '[lamgm.dtype]'}), "(intf.intersection_points, lamgm * 1.0 / 57.296, phigm * 1.0 /\n 57.296, lamem * 1.0 / 57.296, phiem * 1.0 / 57.296, input_core_dims=[\n gcm_dims, gcm_dims, rcm_dims, rcm_dims], output_core_dims=[out_dims,\n out_dims], dask='parallelized', output_dtypes=[lamgm.dtype])\n", (2700, 2974), True, 'import xarray as xr\n'), ((5120, 5454), 'xarray.apply_ufunc', 'xr.apply_ufunc', 
(['intf.interp_horiz_2d', 'da', '(lamgm * 1.0 / 57.296)', '(phigm * 1.0 / 57.296)', '(lamem * 1.0 / 57.296)', '(phiem * 1.0 / 57.296)', 'indii', 'indjj', 'name'], {'input_core_dims': 'input_core_dims', 'output_core_dims': '[rcm_dims]', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'dask_gufunc_kwargs': "{'allow_rechunk': True}", 'output_dtypes': '[da.dtype]'}), "(intf.interp_horiz_2d, da, lamgm * 1.0 / 57.296, phigm * 1.0 /\n 57.296, lamem * 1.0 / 57.296, phiem * 1.0 / 57.296, indii, indjj, name,\n input_core_dims=input_core_dims, output_core_dims=[rcm_dims], vectorize\n =True, dask='parallelized', dask_gufunc_kwargs={'allow_rechunk': True},\n output_dtypes=[da.dtype])\n", (5134, 5454), True, 'import xarray as xr\n'), ((6463, 6815), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['intf.interp_horiz_2d_cm', 'da', 'blagm', 'blaem', '(lamgm * 1.0 / 57.296)', '(phigm * 1.0 / 57.296)', '(lamem * 1.0 / 57.296)', '(phiem * 1.0 / 57.296)', 'indii', 'indjj', 'name'], {'input_core_dims': 'input_core_dims', 'output_core_dims': '[rcm_dims]', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'dask_gufunc_kwargs': "{'allow_rechunk': True}", 'output_dtypes': '[da.dtype]'}), "(intf.interp_horiz_2d_cm, da, blagm, blaem, lamgm * 1.0 / \n 57.296, phigm * 1.0 / 57.296, lamem * 1.0 / 57.296, phiem * 1.0 / \n 57.296, indii, indjj, name, input_core_dims=input_core_dims,\n output_core_dims=[rcm_dims], vectorize=True, dask='parallelized',\n dask_gufunc_kwargs={'allow_rechunk': True}, output_dtypes=[da.dtype])\n", (6477, 6815), True, 'import xarray as xr\n'), ((8020, 8275), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['intf.geopotential', 'fibgm', 'tgm', 'qdgm', 'psgm', 'akgm', 'bkgm'], {'input_core_dims': 'input_core_dims', 'output_core_dims': '[twoD_dims]', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'dask_gufunc_kwargs': "{'allow_rechunk': True}", 'output_dtypes': '[fibgm.dtype]'}), "(intf.geopotential, fibgm, tgm, qdgm, psgm, akgm, bkgm,\n input_core_dims=input_core_dims, 
output_core_dims=[twoD_dims],\n vectorize=True, dask='parallelized', dask_gufunc_kwargs={\n 'allow_rechunk': True}, output_dtypes=[fibgm.dtype])\n", (8034, 8275), True, 'import xarray as xr\n'), ((9211, 9471), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['intf.relative_humidity', 'qdgm', 'tgm', 'psgm', 'akgm', 'bkgm', 'qwgm'], {'input_core_dims': 'input_core_dims', 'output_core_dims': '[threeD_dims]', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'dask_gufunc_kwargs': "{'allow_rechunk': True}", 'output_dtypes': '[qdgm.dtype]'}), "(intf.relative_humidity, qdgm, tgm, psgm, akgm, bkgm, qwgm,\n input_core_dims=input_core_dims, output_core_dims=[threeD_dims],\n vectorize=True, dask='parallelized', dask_gufunc_kwargs={\n 'allow_rechunk': True}, output_dtypes=[qdgm.dtype])\n", (9225, 9471), True, 'import xarray as xr\n'), ((10311, 10390), 'pyintorg.interface.geo_coords', 'intf.geo_coords', (['ll_lam', 'll_phi', 'dlam', 'dphi', 'pollam', 'polphi', '(nlam + 2)', '(nphi + 2)'], {}), '(ll_lam, ll_phi, dlam, dphi, pollam, polphi, nlam + 2, nphi + 2)\n', (10326, 10390), True, 'from pyintorg import interface as intf\n'), ((11482, 11520), 'numpy.zeros', 'np.zeros', (['[nlev + 1]'], {'dtype': 'np.float64'}), '([nlev + 1], dtype=np.float64)\n', (11490, 11520), True, 'import numpy as np\n'), ((11530, 11567), 'numpy.ones', 'np.ones', (['[nlev + 1]'], {'dtype': 'np.float64'}), '([nlev + 1], dtype=np.float64)\n', (11537, 11567), True, 'import numpy as np\n'), ((14415, 14430), 'xarray.merge', 'xr.merge', (['dsets'], {}), '(dsets)\n', (14423, 14430), True, 'import xarray as xr\n'), ((15060, 15438), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['intf.rotate_uv', 'uge', 'vge', 'uvge', 'vuge', '(ulamem * 1.0 / 57.296)', '(uphiem * 1.0 / 57.296)', '(vlamem * 1.0 / 57.296)', '(vphiem * 1.0 / 57.296)', 'pollam', 'polphi'], {'input_core_dims': 'input_core_dims', 'output_core_dims': '(2 * [twoD_dims + [lev_gm]])', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'dask_gufunc_kwargs': 
"{'allow_rechunk': True}", 'output_dtypes': '(uge.dtype, vge.dtype)'}), "(intf.rotate_uv, uge, vge, uvge, vuge, ulamem * 1.0 / 57.296,\n uphiem * 1.0 / 57.296, vlamem * 1.0 / 57.296, vphiem * 1.0 / 57.296,\n pollam, polphi, input_core_dims=input_core_dims, output_core_dims=2 * [\n twoD_dims + [lev_gm]], vectorize=True, dask='parallelized',\n dask_gufunc_kwargs={'allow_rechunk': True}, output_dtypes=(uge.dtype,\n vge.dtype))\n", (15074, 15438), True, 'import xarray as xr\n'), ((16324, 16603), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['intf.pressure_correction_em', 'psge', 'tge', 'arfge', 'fibge', 'fibem', 'akgm', 'bkgm', 'kpbl'], {'input_core_dims': 'input_core_dims', 'output_core_dims': '[twoD_dims]', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'dask_gufunc_kwargs': "{'allow_rechunk': True}", 'output_dtypes': '[psge.dtype]'}), "(intf.pressure_correction_em, psge, tge, arfge, fibge, fibem,\n akgm, bkgm, kpbl, input_core_dims=input_core_dims, output_core_dims=[\n twoD_dims], vectorize=True, dask='parallelized', dask_gufunc_kwargs={\n 'allow_rechunk': True}, output_dtypes=[psge.dtype])\n", (16338, 16603), True, 'import xarray as xr\n'), ((17359, 17641), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['intf.interp_vert', 'xge', 'psge', 'ps1em', 'akhgm', 'bkhgm', 'akhem', 'bkhem', 'varname', 'kpbl'], {'input_core_dims': 'input_core_dims', 'output_core_dims': 'output_core_dims', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'dask_gufunc_kwargs': "{'allow_rechunk': True}", 'output_dtypes': '[xge.dtype]'}), "(intf.interp_vert, xge, psge, ps1em, akhgm, bkhgm, akhem,\n bkhem, varname, kpbl, input_core_dims=input_core_dims, output_core_dims\n =output_core_dims, vectorize=True, dask='parallelized',\n dask_gufunc_kwargs={'allow_rechunk': True}, output_dtypes=[xge.dtype])\n", (17373, 17641), True, 'import xarray as xr\n'), ((18385, 18616), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['intf.pressure_correction_ge', 'ps1em', 'tem', 'arfem', 'ficge', 'fibem', 'akem', 
'bkem'], {'input_core_dims': 'input_core_dims', 'output_core_dims': '[twoD_dims]', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'output_dtypes': '[ps1em.dtype]'}), "(intf.pressure_correction_ge, ps1em, tem, arfem, ficge, fibem,\n akem, bkem, input_core_dims=input_core_dims, output_core_dims=[\n twoD_dims], vectorize=True, dask='parallelized', output_dtypes=[ps1em.\n dtype])\n", (18399, 18616), True, 'import xarray as xr\n'), ((19348, 19589), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['intf.correct_uv', 'uem', 'vem', 'psem', 'akem', 'bkem', 'll_lam', 'dlam', 'dphi'], {'input_core_dims': 'input_core_dims', 'output_core_dims': '(2 * [twoD_dims + [lev]])', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'output_dtypes': '(uem.dtype, vem.dtype)'}), "(intf.correct_uv, uem, vem, psem, akem, bkem, ll_lam, dlam,\n dphi, input_core_dims=input_core_dims, output_core_dims=2 * [twoD_dims +\n [lev]], vectorize=True, dask='parallelized', output_dtypes=(uem.dtype,\n vem.dtype))\n", (19362, 19589), True, 'import xarray as xr\n'), ((252, 413), 'warnings.warn', 'warnings.warn', (['"""could not find pyintorg, you need this for preprocessing. Please consider installing it from https://git.gerics.de/python/pyintorg.git"""'], {}), "(\n 'could not find pyintorg, you need this for preprocessing. 
Please consider installing it from https://git.gerics.de/python/pyintorg.git'\n )\n", (265, 413), False, 'import warnings\n'), ((1897, 1924), 'numpy.arange', 'np.arange', (['(1)', '(akem.size + 1)'], {}), '(1, akem.size + 1)\n', (1906, 1924), True, 'import numpy as np\n'), ((1969, 1996), 'numpy.arange', 'np.arange', (['(1)', '(bkem.size + 1)'], {}), '(1, bkem.size + 1)\n', (1978, 1996), True, 'import numpy as np\n'), ((2040, 2068), 'numpy.arange', 'np.arange', (['(1)', '(akhem.size + 1)'], {}), '(1, akhem.size + 1)\n', (2049, 2068), True, 'import numpy as np\n'), ((2122, 2150), 'numpy.arange', 'np.arange', (['(1)', '(bkhem.size + 1)'], {}), '(1, bkhem.size + 1)\n', (2131, 2150), True, 'import numpy as np\n'), ((8859, 8878), 'xarray.zeros_like', 'xr.zeros_like', (['qdgm'], {}), '(qdgm)\n', (8872, 8878), True, 'import xarray as xr\n'), ((10439, 10456), 'numpy.rad2deg', 'np.rad2deg', (['lamem'], {}), '(lamem)\n', (10449, 10456), True, 'import numpy as np\n'), ((10581, 10598), 'numpy.rad2deg', 'np.rad2deg', (['phiem'], {}), '(phiem)\n', (10591, 10598), True, 'import numpy as np\n'), ((11620, 11642), 'numpy.flip', 'np.flip', (['ak_bnds[:, 1]'], {}), '(ak_bnds[:, 1])\n', (11627, 11642), True, 'import numpy as np\n'), ((11661, 11683), 'numpy.flip', 'np.flip', (['bk_bnds[:, 1]'], {}), '(bk_bnds[:, 1])\n', (11668, 11683), True, 'import numpy as np\n'), ((11711, 11733), 'numpy.flip', 'np.flip', (['ak_bnds[:, 1]'], {}), '(ak_bnds[:, 1])\n', (11718, 11733), True, 'import numpy as np\n'), ((11751, 11773), 'numpy.flip', 'np.flip', (['bk_bnds[:, 1]'], {}), '(bk_bnds[:, 1])\n', (11758, 11773), True, 'import numpy as np\n'), ((11786, 11816), 'xarray.DataArray', 'xr.DataArray', (['ak'], {'dims': '"""lev_2"""'}), "(ak, dims='lev_2')\n", (11798, 11816), True, 'import xarray as xr\n'), ((11818, 11848), 'xarray.DataArray', 'xr.DataArray', (['bk'], {'dims': '"""lev_2"""'}), "(bk, dims='lev_2')\n", (11830, 11848), True, 'import xarray as xr\n'), ((12322, 12362), 'xesmf.Regridder', 
'xe.Regridder', (['tos', 'ref_ds', '"""nearest_s2d"""'], {}), "(tos, ref_ds, 'nearest_s2d')\n", (12334, 12362), True, 'import xesmf as xe\n'), ((14668, 14687), 'numpy.around', 'np.around', (['ds.sftlf'], {}), '(ds.sftlf)\n', (14677, 14687), True, 'import numpy as np\n'), ((12049, 12059), 'datetime.timedelta', 'td', ([], {'days': '(1)'}), '(days=1)\n', (12051, 12059), True, 'from datetime import timedelta as td\n'), ((12081, 12091), 'datetime.timedelta', 'td', ([], {'days': '(1)'}), '(days=1)\n', (12083, 12091), True, 'from datetime import timedelta as td\n'), ((12776, 12842), 'warnings.warn', 'warnings.warn', (['"""sftlf has no units attribute, must be fractional."""'], {}), "('sftlf has no units attribute, must be fractional.')\n", (12789, 12842), False, 'import warnings\n'), ((13106, 13166), 'warnings.warn', 'warnings.warn', (['"""tos has no units attribute, must be Kelvin!"""'], {}), "('tos has no units attribute, must be Kelvin!')\n", (13119, 13166), False, 'import warnings\n'), ((13445, 13505), 'warnings.warn', 'warnings.warn', (['"""orog has no units attribute, must be m2 s-2"""'], {}), "('orog has no units attribute, must be m2 s-2')\n", (13458, 13505), False, 'import warnings\n')] |
import networkx as nx
import numpy as np
from networkx.algorithms import tree_all_pairs_lowest_common_ancestor
def obtain_ranks(fitting_scores: np.ndarray, labels: np.ndarray):
    """Rank of the ground-truth anchor's score within each row.

    Parameters
    ----------
    fitting_scores : ndarray of shape (batch_size, seed_size)
        Calculated fitting scores.
    labels : ndarray of shape (batch_size, seed_size)
        One-hot labels; exactly one ``1`` per row marks the ground truth.

    Returns
    -------
    rankings : ndarray of shape (batch_size,)
        1-based rank of the ground-truth score per row (rank 1 == highest
        score; on ties the ground truth is ranked first).
    """
    gt_score = fitting_scores[labels == 1]
    assert gt_score.shape[0] == fitting_scores.shape[0], 'Each node should have one and only one parent'
    for i in range(fitting_scores.shape[0]):
        # np.isin replaces the deprecated np.in1d.
        assert np.isin(gt_score[i], fitting_scores[i, ...].squeeze())
    # Rank = number of strictly larger scores in the row, plus one.
    # (Dead debug prints and the redundant rankings2 computation removed.)
    rankings = np.sum(fitting_scores > gt_score[..., np.newaxis], axis=1) + 1
    return rankings
def micro_mr(all_ranks: np.ndarray):
    """Micro-averaged mean rank over all queries (lower is better)."""
    return all_ranks.mean()
def hit_at_1(all_ranks: np.ndarray):
    """Hits@1: fraction of queries whose ground truth is ranked first."""
    return np.mean(all_ranks <= 1)
def hit_at_3(all_ranks: np.ndarray):
    """Hits@3: fraction of queries whose ground truth ranks in the top 3."""
    return np.mean(all_ranks <= 3)
def hit_at_5(all_ranks: np.ndarray):
    """Hits@5: fraction of queries whose ground truth ranks in the top 5."""
    return np.mean(all_ranks <= 5)
def mrr(all_ranks: np.ndarray):
    """Mean reciprocal rank of the ground-truth anchors."""
    reciprocal_ranks = 1.0 / all_ranks
    return reciprocal_ranks.mean()
def mrr_scaled_10(all_ranks: np.ndarray):
    """MRR over ranks bucketed into groups of 10.

    Scaled MRR score; see eq. (2) in the PinSAGE paper:
    https://arxiv.org/pdf/1806.01973.pdf
    """
    buckets = np.ceil(all_ranks / 10)
    return (1.0 / buckets).mean()
def wu_palmer(fitting_scores: np.ndarray, labels: np.ndarray, seed: nx.DiGraph):
    """Mean Wu-Palmer similarity between predicted and ground-truth anchors.

    For each row the highest-scoring seed node is paired with the labelled
    node; similarity is 2*depth(LCA) / (depth(y) + depth(y*)), read from the
    nodes' ``level`` attribute (assumed to be the tree depth -- TODO confirm
    against the taxonomy node class).  The epsilon in the denominator guards
    against division by zero when both nodes are at level 0.
    """
    seed_nodes = list(seed.nodes)
    pred_indices = fitting_scores.argmax(axis=1)
    gt_indices = labels.argmax(axis=1)
    node_pairs = [(seed_nodes[pred_idx], seed_nodes[gt_idx]) for pred_idx, gt_idx in zip(pred_indices, gt_indices)]
    lcas = tree_all_pairs_lowest_common_ancestor(seed, pairs=node_pairs)
    def calc_wu_palmer(y, y_star, lca):
        # Wu-Palmer: twice the LCA depth over the summed depths of both nodes.
        return 2.0 * lca.level / (y.level + y_star.level + 0.000001)
    ret = [calc_wu_palmer(*pair, lca) for pair, lca in lcas]
    return np.array(ret).mean()
def combined_metrics(all_ranks):
    """Combined score for early stopping (lower is better).

    Mean rank inflated by the inverses of MRR@10, Hits@3 and Hits@1; each
    metric is floored at 1e-4 so a zero metric cannot divide by zero.
    """
    floor = 0.0001
    mrr_penalty = 1.0 / max(mrr_scaled_10(all_ranks), floor)
    hit3_penalty = 1.0 / max(hit_at_3(all_ranks), floor)
    hit1_penalty = 1.0 / max(hit_at_1(all_ranks), floor)
    return micro_mr(all_ranks) * mrr_penalty * hit3_penalty * hit1_penalty
| [
"numpy.sum",
"numpy.ceil",
"numpy.mean",
"numpy.array",
"networkx.algorithms.tree_all_pairs_lowest_common_ancestor"
] | [((797, 832), 'numpy.array', 'np.array', (['[fitting_scores.shape[1]]'], {}), '([fitting_scores.shape[1]])\n', (805, 832), True, 'import numpy as np\n'), ((1038, 1056), 'numpy.mean', 'np.mean', (['all_ranks'], {}), '(all_ranks)\n', (1045, 1056), True, 'import numpy as np\n'), ((1372, 1396), 'numpy.mean', 'np.mean', (['(1.0 / all_ranks)'], {}), '(1.0 / all_ranks)\n', (1379, 1396), True, 'import numpy as np\n'), ((1920, 1981), 'networkx.algorithms.tree_all_pairs_lowest_common_ancestor', 'tree_all_pairs_lowest_common_ancestor', (['seed'], {'pairs': 'node_pairs'}), '(seed, pairs=node_pairs)\n', (1957, 1981), False, 'from networkx.algorithms import tree_all_pairs_lowest_common_ancestor\n'), ((716, 774), 'numpy.sum', 'np.sum', (['(fitting_scores > gt_score[..., np.newaxis])'], {'axis': '(1)'}), '(fitting_scores > gt_score[..., np.newaxis], axis=1)\n', (722, 774), True, 'import numpy as np\n'), ((1107, 1129), 'numpy.sum', 'np.sum', (['(all_ranks <= 1)'], {}), '(all_ranks <= 1)\n', (1113, 1129), True, 'import numpy as np\n'), ((1197, 1219), 'numpy.sum', 'np.sum', (['(all_ranks <= 3)'], {}), '(all_ranks <= 3)\n', (1203, 1219), True, 'import numpy as np\n'), ((1287, 1309), 'numpy.sum', 'np.sum', (['(all_ranks <= 5)'], {}), '(all_ranks <= 5)\n', (1293, 1309), True, 'import numpy as np\n'), ((863, 922), 'numpy.sum', 'np.sum', (['(fitting_scores <= gt_score[..., np.newaxis])'], {'axis': '(1)'}), '(fitting_scores <= gt_score[..., np.newaxis], axis=1)\n', (869, 922), True, 'import numpy as np\n'), ((1563, 1586), 'numpy.ceil', 'np.ceil', (['(all_ranks / 10)'], {}), '(all_ranks / 10)\n', (1570, 1586), True, 'import numpy as np\n'), ((2165, 2178), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (2173, 2178), True, 'import numpy as np\n')] |
import numpy as np
from scipy.integrate import odeint
import matplotlib
import matplotlib.pyplot as plt
# Render all plot text in Arial via the sans-serif font family.
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
def ode(s, t):
    """Right-hand side of the Lorenz system.

    Parameters
    ----------
    s : sequence of 3 floats
        Current state (x, y, z).
    t : float
        Time; unused because the system is autonomous, but required by
        ``scipy.integrate.odeint``'s callback signature.

    Returns
    -------
    list of float
        [dx/dt, dy/dt, dz/dt] with the classic chaotic parameters
        sigma=10, beta=2.667, rho=28.
    """
    x, y, z = s
    sigma, beta, rho = 10, 2.667, 28
    dxdt = sigma * (y - x)
    dydt = x * (rho - z) - y
    dzdt = x * y - beta * z
    return [dxdt, dydt, dzdt]
# Integrate the Lorenz system over 0..20 and plot the three state variables.
t = np.arange(0, 20, 0.1)
y0 = np.array([0, 1, 1.05])  # initial state (x, y, z)
y = odeint(ode, y0, t)

fig = plt.figure(figsize=(8, 4))
plt.plot(t, y)
plt.xlabel('Time', fontsize=24, labelpad=10)
# NOTE(review): y-axis labelled 'X(t)' although all three components are
# plotted -- confirm the intended label.
plt.ylabel('X(t)', fontsize=24, labelpad=10)
plt.legend(["A", "B", "C"], fontsize=14, loc="upper right")
plt.tight_layout()
# Bug fix: save BEFORE show().  plt.show() blocks and, on close, leaves the
# current figure empty, so calling savefig afterwards wrote a blank image.
plt.savefig("lorenz-time-series.png", dpi=300)
plt.show()
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.odeint",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] | [((385, 406), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(0.1)'], {}), '(0, 20, 0.1)\n', (394, 406), True, 'import numpy as np\n'), ((412, 434), 'numpy.array', 'np.array', (['[0, 1, 1.05]'], {}), '([0, 1, 1.05])\n', (420, 434), True, 'import numpy as np\n'), ((439, 457), 'scipy.integrate.odeint', 'odeint', (['ode', 'y0', 't'], {}), '(ode, y0, t)\n', (445, 457), False, 'from scipy.integrate import odeint\n'), ((466, 492), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (476, 492), True, 'import matplotlib.pyplot as plt\n'), ((493, 507), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y'], {}), '(t, y)\n', (501, 507), True, 'import matplotlib.pyplot as plt\n'), ((508, 552), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(24)', 'labelpad': '(10)'}), "('Time', fontsize=24, labelpad=10)\n", (518, 552), True, 'import matplotlib.pyplot as plt\n'), ((553, 597), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X(t)"""'], {'fontsize': '(24)', 'labelpad': '(10)'}), "('X(t)', fontsize=24, labelpad=10)\n", (563, 597), True, 'import matplotlib.pyplot as plt\n'), ((598, 657), 'matplotlib.pyplot.legend', 'plt.legend', (["['A', 'B', 'C']"], {'fontsize': '(14)', 'loc': '"""upper right"""'}), "(['A', 'B', 'C'], fontsize=14, loc='upper right')\n", (608, 657), True, 'import matplotlib.pyplot as plt\n'), ((658, 676), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (674, 676), True, 'import matplotlib.pyplot as plt\n'), ((677, 687), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (685, 687), True, 'import matplotlib.pyplot as plt\n'), ((688, 734), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""lorenz-time-series.png"""'], {'dpi': '(300)'}), "('lorenz-time-series.png', dpi=300)\n", (699, 734), True, 'import matplotlib.pyplot as plt\n')] |
"""
Basic actor critic algorithm based on Pytorch tutorial in
https://github.com/pytorch/examples/blob/master/reinforcement_learning/actor_critic.py.
"""
import argparse
import gym
import numpy as np
from itertools import count
from ac import Policy, select_action
import torch
import torch.nn.functional as F
import torch.optim as optim
# Command-line options for the actor-critic training script.
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
                    help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
                    help='random seed (default: 543)')
parser.add_argument('--render', action='store_true',
                    help='render the environment')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='interval between training status logs (default: 10)')
args = parser.parse_args()
# env = gym.make('maze2d-open-v0')
env = gym.make('CartPole-v0')
# Seed both the environment and torch for reproducible rollouts.
env.seed(args.seed)
torch.manual_seed(args.seed)
# Module-level model/optimizer shared by finish_episode() and main().
model = Policy()
optimizer = optim.Adam(model.parameters(), lr=3e-2)
# Smallest float32 increment; stabilizes the return normalization below.
eps = np.finfo(np.float32).eps.item()
def finish_episode():
    """
    Training code. Calculates actor and critic loss and performs backprop.

    Uses the module-level ``model``, ``optimizer``, ``args`` and ``eps``:
    reads the episode's rewards and saved (log_prob, value) pairs from the
    model's buffers, computes normalized discounted returns, performs one
    optimizer step on the summed actor + critic loss, and clears the buffers.
    """
    R = 0
    saved_actions = model.saved_actions
    policy_losses = []  # list to save actor (policy) loss
    value_losses = []  # list to save critic (value) loss
    returns = []  # list to save the true values
    # calculate the true value using rewards returned from the environment
    for r in model.rewards[::-1]:
        # calculate the discounted value (iterating rewards back-to-front)
        R = r + args.gamma * R
        returns.insert(0, R)
    # Normalize returns to zero mean / unit variance; eps avoids div-by-zero.
    returns = torch.tensor(returns)
    returns = (returns - returns.mean()) / (returns.std() + eps)
    for (log_prob, value), R in zip(saved_actions, returns):
        # .item() detaches the critic's value, so the advantage is treated
        # as a constant in the policy loss gradient.
        advantage = R - value.item()
        # calculate actor (policy) loss
        policy_losses.append(-log_prob * advantage)
        # calculate critic (value) loss using L1 smooth loss
        value_losses.append(F.smooth_l1_loss(value, torch.tensor([R])))
    # reset gradients
    optimizer.zero_grad()
    # sum up all the values of policy_losses and value_losses
    loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
    # perform backprop
    loss.backward()
    optimizer.step()
    # reset rewards and action buffer
    del model.rewards[:]
    del model.saved_actions[:]
def main():
    """Train the actor-critic agent until the environment is solved.

    Runs episodes indefinitely (each capped at 9999 steps), updates the
    policy after every episode via finish_episode(), tracks an exponential
    moving average of the episode reward, and saves the model to 'a2c.pt'
    once that average exceeds the environment's reward threshold.
    """
    running_reward = 10
    # run infinitely many episodes
    for i_episode in count(1):

        # reset environment and episode reward
        state = env.reset()
        ep_reward = 0

        # for each episode, only run 9999 steps so that we don't
        # infinite loop while learning
        for t in range(1, 10000):

            # select action from policy
            action = select_action(state, model)

            # take the action
            state, reward, done, _ = env.step(action)

            if args.render:
                env.render()

            model.rewards.append(reward)
            ep_reward += reward
            if done:
                break

        # update cumulative reward (exponential moving average, alpha=0.05)
        running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward

        # perform backprop
        finish_episode()

        # log results
        if i_episode % args.log_interval == 0:
            print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format(
                i_episode, ep_reward, running_reward))

        # check if we have "solved" the cart pole problem
        if running_reward > env.spec.reward_threshold:
            print("Solved! Running reward is now {} and "
                  "the last episode runs to {} time steps!".format(running_reward, t))
            torch.save(model.state_dict(), 'a2c.pt')
            break
if __name__ == '__main__':
main() | [
"ac.select_action",
"ac.Policy",
"gym.make",
"argparse.ArgumentParser",
"torch.stack",
"torch.manual_seed",
"itertools.count",
"numpy.finfo",
"torch.tensor"
] | [((352, 419), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch actor-critic example"""'}), "(description='PyTorch actor-critic example')\n", (375, 419), False, 'import argparse\n'), ((997, 1020), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (1005, 1020), False, 'import gym\n'), ((1041, 1069), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1058, 1069), False, 'import torch\n'), ((1080, 1088), 'ac.Policy', 'Policy', ([], {}), '()\n', (1086, 1088), False, 'from ac import Policy, select_action\n'), ((1733, 1754), 'torch.tensor', 'torch.tensor', (['returns'], {}), '(returns)\n', (1745, 1754), False, 'import torch\n'), ((2593, 2601), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (2598, 2601), False, 'from itertools import count\n'), ((1147, 1167), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1155, 1167), True, 'import numpy as np\n'), ((2903, 2930), 'ac.select_action', 'select_action', (['state', 'model'], {}), '(state, model)\n', (2916, 2930), False, 'from ac import Policy, select_action\n'), ((2127, 2144), 'torch.tensor', 'torch.tensor', (['[R]'], {}), '([R])\n', (2139, 2144), False, 'import torch\n'), ((2270, 2296), 'torch.stack', 'torch.stack', (['policy_losses'], {}), '(policy_losses)\n', (2281, 2296), False, 'import torch\n'), ((2305, 2330), 'torch.stack', 'torch.stack', (['value_losses'], {}), '(value_losses)\n', (2316, 2330), False, 'import torch\n')] |
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: <NAME>
# @Date: 2017-04-28 11:34:06
# @Last modified by: <NAME>
# @Last Modified time: 2017-07-30 19:46:40
from __future__ import print_function, division, absolute_import
import pytest
from marvin.web import create_app
from marvin.web.settings import TestConfig, CustomConfig
from marvin.api.api import Interaction
from marvin import marvindb, config
from marvin.web.extensions import limiter
from flask import template_rendered, templating
from contextlib import contextmanager
import os
import numpy as np
try:
from urllib.parse import urlparse, urljoin
except ImportError:
from urlparse import urlparse, urljoin
# @pytest.fixture(scope='session')
# def drpver(release):
# drpver, dapver = config.lookUpVersions(release)
# return drpver
# @pytest.fixture(scope='session')
# def dapver(release):
# drpver, dapver = config.lookUpVersions(release)
# return dapver
@pytest.fixture(scope='session')
def app():
    """Session-scoped Flask test app.

    Mixes TestConfig and CustomConfig into a throwaway config class, creates
    the marvin web app in debug/local mode, and disables rate limiting so
    tests are not throttled.
    """
    object_config = type('Config', (TestConfig, CustomConfig), dict())
    app = create_app(debug=True, local=True, object_config=object_config)
    limiter.enabled = False
    return app
# def set_sasurl(loc='local', port=None):
# if not port:
# port = int(os.environ.get('LOCAL_MARVIN_PORT', 5000))
# istest = True if loc == 'utah' else False
# config.switchSasUrl(loc, test=istest, port=port)
# response = Interaction('api/general/getroutemap', request_type='get')
# config.urlmap = response.getRouteMap()
# @pytest.fixture()
# def saslocal():
# set_sasurl(loc='local')
def test_db_stuff():
    """Sanity check that the local marvin database and its sub-dbs loaded."""
    assert marvindb is not None
    assert marvindb.datadb is not None
    assert marvindb.sampledb is not None
    assert marvindb.dapdb is not None
    assert 'local' == marvindb.dbtype
@pytest.fixture(scope='function')
def init_web(monkeypatch, set_config):
    """Per-test web setup: force the db on and stub out template rendering."""
    config.forceDbOn()

    # monkeypatch Flask's template rendering to a no-op that still fires the
    # template_rendered signal, so tests can inspect template name/context
    # without actually rendering HTML.
    def _empty_render(template, context, app):
        template_rendered.send(app, template=template, context=context)
        return ""

    monkeypatch.setattr(templating, '_render', _empty_render)
@pytest.fixture(scope='function', autouse=True)
def inspection(monkeypatch):
    """Autouse fixture: patch marvin's Inspection with the brain-core stub.

    Patching is best-effort: if the dotted target does not exist in this
    marvin install, the fixture silently does nothing.
    """
    from brain.core.inspection import Inspection
    try:
        monkeypatch.setattr('inspection.marvin.Inspection', Inspection)
    except Exception:
        # Target attribute may be absent in some versions; unused ``as e``
        # binding removed.
        pass
@pytest.mark.usefixtures('app, get_templates')
class Page(object):
''' Object representing a Web Page '''
    def __init__(self, client, blue, endpoint):
        # NOTE(review): calls the module-level ``app`` fixture function
        # directly instead of receiving the fixture -- confirm intended.
        self.app = app()
        self.url = self.get_url(blue, endpoint)
        # Response state; populated by load_page()/load_data().
        self.json = None
        self.data = None
        self.response = None
        self.client = client
    def get_url(self, blue, endpoint):
        """Look up the URL for a blueprint endpoint in the marvin urlmap."""
        return config.urlmap[blue][endpoint]['url']
def load_page(self, reqtype, page, params=None):
if reqtype == 'get':
self.response = self.client.get(page, query_string=params)
elif reqtype == 'post':
self.response = self.client.post(page, data=params, content_type='application/x-www-form-urlencoded')
self.load_data()
def load_data(self):
try:
self.json = self.response.json
except ValueError as e:
self.json = None
self.data = self.json['data'] if self.json and 'data' in self.json else ''
def assert_webjson_success(self, expdata):
self.assert200(message='response status should be 200 for ok')
if isinstance(expdata, str):
assert expdata in self.json['result']
elif isinstance(expdata, dict):
assert self.json['result']['status'] == 1
elif isinstance(expdata, list):
self.assertListIn(expdata, self.json['result'])
def route_no_valid_webparams(self, template, context, noparam, reqtype='get', params=None, errmsg=None):
self.assert422(message='response status should be 422 for invalid params')
assert 'errors/unprocessable_entity.html' == template.name, 'template name should be unprocessable_entity'
noparam = [noparam] if not isinstance(noparam, list) else noparam
invalid = {p: [errmsg] for p in noparam}
assert context['data'] == invalid, 'response should contain validation error dictionary'
# Assert definitions from Flask-Testing
def assertListIn(self, a, b):
''' assert all items in list a are in b '''
for item in a:
assert item in b
@staticmethod
def _compare_values_is_subset(aa, bb):
"""Checks if one value or list is a subset of other."""
if not hasattr(aa, '__iter__') and not hasattr(aa, '__getitem__'):
if aa != bb and not np.isclose(aa, bb):
return False
else:
# Checks whether the elements are a list of lists. If so, recursively calls itself.
try:
if not set(aa).issubset(set(bb)):
return False
except Exception:
if len(aa) > len(bb):
return False
else:
for ii in range(len(aa)):
return Page._compare_values_is_subset(aa[ii], bb[ii])
return True
def assert_dict_contains_subset(self, subset, dictionary):
"""Asserts whether a dictionary is a subset of other."""
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif not self._compare_values_is_subset(value, dictionary[key]):
mismatched.append((key, (value, dictionary[key])))
assert not (missing or mismatched), \
'{0} dictionary should be subset of {1}'.format(subset, dictionary)
def assert_status(self, status_code, message=None):
message = message or 'HTTP Status {0} expected but got {1}'.format(status_code, self.response.status_code)
assert self.response.status_code == status_code, message
def assert200(self, message=None):
self.assert_status(200, message)
def assert400(self, message=None):
self.assert_status(400, message)
def assert401(self, message=None):
self.assert_status(401, message)
def assert403(self, message=None):
self.assert_status(403, message)
def assert404(self, message=None):
self.assert_status(404, message)
def assert405(self, message=None):
self.assert_status(405, message)
def assert422(self, message=None):
self.assert_status(422, message)
def assert500(self, message=None):
self.assert_status(500, message)
def assert_redirects(self, location, message=None):
parts = urlparse(location)
if parts.netloc:
expected_location = location
else:
server_name = self.app.config.get('SERVER_NAME') or 'localhost'
expected_location = urljoin('http://{0}'.format(server_name), location)
valid_status_codes = (301, 302, 303, 305, 307)
valid_status_code_str = ', '.join(str(code) for code in valid_status_codes)
not_redirect = "HTTP Status {0} expected but got {1}".format(valid_status_code_str, self.response.status_code)
assert self.response.status_code in valid_status_codes, message or not_redirect
assert self.response.location == expected_location, message
@pytest.fixture()
def page(client, config, request, init_web):
    """Yield a Page wired to the (blueprint, endpoint) given by the fixture param."""
    blueprint, endpoint = request.param
    yield Page(client, blueprint, endpoint)
@contextmanager
def captured_templates(app):
    ''' Records which templates are used

    Yields a list that accumulates (template, context) pairs for every
    template rendered while the context manager is active.
    '''
    recorded = []

    def record(app, template, context, **extra):
        recorded.append((template, context))

    template_rendered.connect(record)
    try:
        yield recorded
    finally:
        # Fix: always disconnect, even if the with-body raises; previously an
        # exception left the signal handler attached for the rest of the run.
        template_rendered.disconnect(record)
@pytest.fixture()
def get_templates(app):
    ''' Fixture that returns which jinja template used '''
    with captured_templates(app) as recorded:
        yield recorded
| [
"flask.template_rendered.disconnect",
"pytest.fixture",
"marvin.web.create_app",
"flask.template_rendered.send",
"numpy.isclose",
"urlparse.urlparse",
"marvin.config.forceDbOn",
"flask.template_rendered.connect",
"pytest.mark.usefixtures"
] | [((1003, 1034), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1017, 1034), False, 'import pytest\n'), ((1869, 1901), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1883, 1901), False, 'import pytest\n'), ((2217, 2263), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""', 'autouse': '(True)'}), "(scope='function', autouse=True)\n", (2231, 2263), False, 'import pytest\n'), ((2466, 2511), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""app, get_templates"""'], {}), "('app, get_templates')\n", (2489, 2511), False, 'import pytest\n'), ((7516, 7532), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (7530, 7532), False, 'import pytest\n'), ((7975, 7991), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (7989, 7991), False, 'import pytest\n'), ((1127, 1190), 'marvin.web.create_app', 'create_app', ([], {'debug': '(True)', 'local': '(True)', 'object_config': 'object_config'}), '(debug=True, local=True, object_config=object_config)\n', (1137, 1190), False, 'from marvin.web import create_app\n'), ((1945, 1963), 'marvin.config.forceDbOn', 'config.forceDbOn', ([], {}), '()\n', (1961, 1963), False, 'from marvin import marvindb, config\n'), ((7878, 7911), 'flask.template_rendered.connect', 'template_rendered.connect', (['record'], {}), '(record)\n', (7903, 7911), False, 'from flask import template_rendered, templating\n'), ((7935, 7971), 'flask.template_rendered.disconnect', 'template_rendered.disconnect', (['record'], {}), '(record)\n', (7963, 7971), False, 'from flask import template_rendered, templating\n'), ((2070, 2133), 'flask.template_rendered.send', 'template_rendered.send', (['app'], {'template': 'template', 'context': 'context'}), '(app, template=template, context=context)\n', (2092, 2133), False, 'from flask import template_rendered, templating\n'), ((6839, 6857), 'urlparse.urlparse', 'urlparse', (['location'], {}), '(location)\n', 
(6847, 6857), False, 'from urlparse import urlparse, urljoin\n'), ((4792, 4810), 'numpy.isclose', 'np.isclose', (['aa', 'bb'], {}), '(aa, bb)\n', (4802, 4810), True, 'import numpy as np\n')] |
# Use python 3.0 syntax
from __future__ import division, print_function
import numpy
from math import sqrt, pow
from scipy.special import kv as kv
import itertools
import logging
import const
besselValues = {}
def besselFunction(chromosomeA, chromosomeB):
    """Interaction energy between two chromosomes: the modified Bessel
    function K0 of their distance scaled by const.lammda, 0 for identical
    positions.

    Results are memoized in the module-level ``besselValues`` dict, keyed by
    the squared distance (avoids recomputing the expensive K0).
    """
    if numpy.array_equal(chromosomeA, chromosomeB):
        return 0
    distanceSqr = (chromosomeA[0] - chromosomeB[0]) ** 2 + \
                  (chromosomeA[1] - chromosomeB[1]) ** 2
    try:
        solution = besselValues[distanceSqr]
    except KeyError:
        # Fix: catch only the cache miss; the bare ``except:`` also hid real
        # errors (e.g. NameError/TypeError) behind a silent recomputation.
        distance = sqrt(distanceSqr) / const.lammda
        solution = kv(0, distance)
        besselValues[distanceSqr] = solution
    return solution
def pinningForce(chromosome, anclaje):
    """Pinning energy between a chromosome and an anchor point.

    Linear in the separation while within const.min_distance of the anchor,
    zero otherwise.
    """
    dx = chromosome[0] - anclaje[0]
    dy = chromosome[1] - anclaje[1]
    separation = sqrt(dx ** 2 + dy ** 2)
    if separation < const.min_distance:
        return 1.0 / 200 * separation
    return 0
def calculateBesselValue(geometry, elementList, anclajeListNumpy, anclajesInfluence):
    """Total energy of the configuration, scaled by const.f_0.

    Sums the pairwise Bessel interaction over every unordered chromosome pair
    and, when ``anclajesInfluence`` is set, the pinning energy between every
    chromosome and every anchor.  ``geometry`` is accepted for interface
    compatibility but unused here.
    """
    total = sum(besselFunction(a, b)
                for a, b in itertools.combinations(elementList, 2))
    if anclajesInfluence:
        total += sum(pinningForce(oneChromosome, oneAnclaje)
                     for oneChromosome in elementList
                     for oneAnclaje in anclajeListNumpy)
    return total * const.f_0
| [
"numpy.array_equal",
"scipy.special.kv",
"math.sqrt"
] | [((269, 312), 'numpy.array_equal', 'numpy.array_equal', (['chromosomeA', 'chromosomeB'], {}), '(chromosomeA, chromosomeB)\n', (286, 312), False, 'import numpy\n'), ((717, 792), 'math.sqrt', 'sqrt', (['((chromosome[0] - anclaje[0]) ** 2 + (chromosome[1] - anclaje[1]) ** 2)'], {}), '((chromosome[0] - anclaje[0]) ** 2 + (chromosome[1] - anclaje[1]) ** 2)\n', (721, 792), False, 'from math import sqrt, pow\n'), ((580, 595), 'scipy.special.kv', 'kv', (['(0)', 'distance'], {}), '(0, distance)\n', (582, 595), True, 'from scipy.special import kv as kv\n'), ((528, 545), 'math.sqrt', 'sqrt', (['distanceSqr'], {}), '(distanceSqr)\n', (532, 545), False, 'from math import sqrt, pow\n')] |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for HDF5 file"""
import os
import glob
import shutil
import tempfile
import numpy as np
import h5py
import pytest
import tensorflow as tf
import tensorflow_io as tfio
def test_hdf5():
    """test_hdf5: GitHub issue 841"""

    def create_datasets(dirpath, count=10):
        # Write `count` HDF5 files, each holding random-length
        # "features" (N, 60) and "targets" (N, 3) datasets.
        os.makedirs(dirpath, exist_ok=True)
        for idx in range(count):
            n_samples = np.random.randint(50000, 100000)
            with h5py.File(f"{dirpath}/file_{idx}.h5", "w") as h5file:
                h5file.create_dataset("features", data=np.random.random((n_samples, 60)))
                h5file.create_dataset("targets", data=np.random.random((n_samples, 3)))

    rundir = tempfile.mkdtemp()
    create_datasets(rundir)
    # Two passes to make sure files can be re-opened and re-read.
    for _ in range(2):
        total = 0
        for path in glob.glob(f"{rundir}/*.h5"):
            try:
                features = tfio.IODataset.from_hdf5(path, "/features")
                targets = tfio.IODataset.from_hdf5(path, "/targets")
                for item in tf.data.Dataset.zip((features, targets)):
                    total += item[0].shape[0]
            except Exception as err:
                print(f"Failed going through {path}")
                raise err
            print(f"Success going through {path}")
        print(f"Iterated {total} items")
    shutil.rmtree(rundir)
def test_hdf5_grouped():
    """test_hdf5 with grouped data: https://github.com/tensorflow/io/issues/1161"""

    def create_datasets(dirpath, count=10):
        # Same as test_hdf5, but datasets live under the HDF5 group "sample_group".
        os.makedirs(dirpath, exist_ok=True)
        for idx in range(count):
            n_samples = np.random.randint(50000, 100000)
            with h5py.File(f"{dirpath}/file_{idx}.h5", "w") as h5file:
                group = h5file.create_group("sample_group")
                group.create_dataset("features", data=np.random.random((n_samples, 60)))
                group.create_dataset("targets", data=np.random.random((n_samples, 3)))

    rundir = tempfile.mkdtemp()
    create_datasets(rundir)
    # Two passes to make sure files can be re-opened and re-read.
    for _ in range(2):
        total = 0
        for path in glob.glob(f"{rundir}/*.h5"):
            try:
                features = tfio.IODataset.from_hdf5(path, "/sample_group/features")
                targets = tfio.IODataset.from_hdf5(path, "/sample_group/targets")
                for item in tf.data.Dataset.zip((features, targets)):
                    total += item[0].shape[0]
            except Exception as err:
                print(f"Failed going through {path}")
                raise err
            print(f"Success going through {path}")
        print(f"Iterated {total} items")
    shutil.rmtree(rundir)
def test_hdf5_graph():
    """test_hdf5_graph: GitHub issue 898"""

    def create_datasets(runpath, cnt=10):
        # Create `cnt` files with a known per-file sample count so the
        # graph-mode row counts can be verified against `samples`.
        filenames = [f"{runpath}/file_{i}.h5" for i in range(cnt)]
        samples = [np.random.randint(50000, 100000) for _ in range(cnt)]
        os.makedirs(runpath, exist_ok=True)
        for filename, sample in zip(filenames, samples):
            f = h5py.File(filename, "w")
            f.create_dataset("features", data=np.random.random((sample, 60)))
            f.create_dataset("targets", data=np.random.random((sample, 3)))
            f.close()
        return filenames, samples

    runpath = tempfile.mkdtemp()
    filenames, samples = create_datasets(runpath)

    @tf.function(autograph=False)
    def f(filename):
        # Graph-mode read: return the number of rows in "/features".
        spec = {"/features": tf.float64, "/targets": tf.float64}
        hdf5 = tfio.IOTensor.from_hdf5(filename, spec=spec)
        return tf.shape(hdf5("/features").to_tensor())[0]

    dataset = tf.data.Dataset.from_tensor_slices(filenames)
    dataset = dataset.map(f, num_parallel_calls=4)
    entries = [entry.numpy() for entry in dataset]
    print("Iterated items")
    for filename in filenames:
        # Fix: print the actual filename; the loop variable was unused and a
        # hard-coded placeholder was printed instead.
        print(f"File: {filename}")
    print(f"Samples: {samples}")
    print(f"Entries: {entries}")
    assert np.array_equal(entries, samples)
    shutil.rmtree(runpath)
def test_hdf5_bool():
    """test_hdf5_bool: GitHub issue 1144"""
    runpath = tempfile.mkdtemp()

    # Plain boolean dataset: must round-trip through IOTensor unchanged.
    boolean_data = np.asarray(
        [True, False, True, False, True, False, True, False, True, False]
    )
    with h5py.File(f"{runpath}/my_data.h5", "w") as h5_obj:
        h5_obj["my_bool_data"] = boolean_data
    with h5py.File(f"{runpath}/my_data.h5", "r") as h5_obj:
        print(h5_obj["my_bool_data"].shape, h5_obj["my_bool_data"].dtype)
    spec = {"/my_bool_data": tf.TensorSpec(shape=(None,), dtype=tf.bool)}
    h5_tensors = tfio.IOTensor.from_hdf5(f"{runpath}/my_data.h5", spec=spec)
    print("H5 DATA: ", h5_tensors("/my_bool_data").to_tensor())
    assert np.array_equal(boolean_data, h5_tensors("/my_bool_data").to_tensor())

    # Enum dataset: enums are unsupported and must raise a clear error.
    mapping = {"SOLID": 0, "LIQUID": 1, "GAS": 2, "PLASMA": 3}
    dtype = h5py.special_dtype(enum=(np.int16, mapping))
    enum_data = np.asarray([0, 1, 2, 3])
    with h5py.File(f"{runpath}/my_enum_data.h5", "w") as h5_obj:
        dset = h5_obj.create_dataset("my_enum_data", [4], dtype=dtype)
        # Fix: actually write the data into the dataset; the original
        # `dset = enum_data` only rebound the local name, leaving the file
        # contents unwritten.
        dset[...] = enum_data
    with h5py.File(f"{runpath}/my_enum_data.h5", "r") as h5_obj:
        print(h5_obj["my_enum_data"].shape, h5_obj["my_enum_data"].dtype)
    spec = {"/my_enum_data": tf.TensorSpec(shape=(None,), dtype=tf.bool)}
    with pytest.raises(
        tf.errors.InvalidArgumentError, match=r".*unsupported data class for enum.*"
    ):
        h5_tensors = tfio.IOTensor.from_hdf5(f"{runpath}/my_enum_data.h5", spec=spec)
    shutil.rmtree(runpath)
if __name__ == "__main__":
test.main()
| [
"h5py.File",
"numpy.array_equal",
"os.makedirs",
"h5py.special_dtype",
"numpy.asarray",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow_io.IODataset.from_hdf5",
"tensorflow_io.IOTensor.from_hdf5",
"pytest.raises",
"tempfile.mkdtemp",
"numpy.random.randint",
"tensorflow.data.Dataset.z... | [((1365, 1383), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1381, 1383), False, 'import tempfile\n'), ((1994, 2016), 'shutil.rmtree', 'shutil.rmtree', (['runpath'], {}), '(runpath)\n', (2007, 2016), False, 'import shutil\n'), ((2620, 2638), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2636, 2638), False, 'import tempfile\n'), ((3275, 3297), 'shutil.rmtree', 'shutil.rmtree', (['runpath'], {}), '(runpath)\n', (3288, 3297), False, 'import shutil\n'), ((3917, 3935), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3933, 3935), False, 'import tempfile\n'), ((3992, 4020), 'tensorflow.function', 'tf.function', ([], {'autograph': '(False)'}), '(autograph=False)\n', (4003, 4020), True, 'import tensorflow as tf\n'), ((4240, 4285), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['filenames'], {}), '(filenames)\n', (4274, 4285), True, 'import tensorflow as tf\n'), ((4561, 4593), 'numpy.array_equal', 'np.array_equal', (['entries', 'samples'], {}), '(entries, samples)\n', (4575, 4593), True, 'import numpy as np\n'), ((4599, 4621), 'shutil.rmtree', 'shutil.rmtree', (['runpath'], {}), '(runpath)\n', (4612, 4621), False, 'import shutil\n'), ((4704, 4722), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4720, 4722), False, 'import tempfile\n'), ((4743, 4820), 'numpy.asarray', 'np.asarray', (['[True, False, True, False, True, False, True, False, True, False]'], {}), '([True, False, True, False, True, False, True, False, True, False])\n', (4753, 4820), True, 'import numpy as np\n'), ((5169, 5228), 'tensorflow_io.IOTensor.from_hdf5', 'tfio.IOTensor.from_hdf5', (['f"""{runpath}/my_data.h5"""'], {'spec': 'spec'}), "(f'{runpath}/my_data.h5', spec=spec)\n", (5192, 5228), True, 'import tensorflow_io as tfio\n'), ((5452, 5496), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'enum': '(np.int16, mapping)'}), '(enum=(np.int16, mapping))\n', (5470, 5496), False, 'import 
h5py\n'), ((5513, 5537), 'numpy.asarray', 'np.asarray', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (5523, 5537), True, 'import numpy as np\n'), ((6122, 6144), 'shutil.rmtree', 'shutil.rmtree', (['runpath'], {}), '(runpath)\n', (6135, 6144), False, 'import shutil\n'), ((977, 1012), 'os.makedirs', 'os.makedirs', (['runpath'], {'exist_ok': '(True)'}), '(runpath, exist_ok=True)\n', (988, 1012), False, 'import os\n'), ((1470, 1498), 'glob.glob', 'glob.glob', (['f"""{runpath}/*.h5"""'], {}), "(f'{runpath}/*.h5')\n", (1479, 1498), False, 'import glob\n'), ((2179, 2214), 'os.makedirs', 'os.makedirs', (['runpath'], {'exist_ok': '(True)'}), '(runpath, exist_ok=True)\n', (2190, 2214), False, 'import os\n'), ((2725, 2753), 'glob.glob', 'glob.glob', (['f"""{runpath}/*.h5"""'], {}), "(f'{runpath}/*.h5')\n", (2734, 2753), False, 'import glob\n'), ((3558, 3593), 'os.makedirs', 'os.makedirs', (['runpath'], {'exist_ok': '(True)'}), '(runpath, exist_ok=True)\n', (3569, 3593), False, 'import os\n'), ((4122, 4166), 'tensorflow_io.IOTensor.from_hdf5', 'tfio.IOTensor.from_hdf5', (['filename'], {'spec': 'spec'}), '(filename, spec=spec)\n', (4145, 4166), True, 'import tensorflow_io as tfio\n'), ((4845, 4884), 'h5py.File', 'h5py.File', (['f"""{runpath}/my_data.h5"""', '"""w"""'], {}), "(f'{runpath}/my_data.h5', 'w')\n", (4854, 4884), False, 'import h5py\n'), ((4952, 4991), 'h5py.File', 'h5py.File', (['f"""{runpath}/my_data.h5"""', '"""r"""'], {}), "(f'{runpath}/my_data.h5', 'r')\n", (4961, 4991), False, 'import h5py\n'), ((5107, 5150), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None,)', 'dtype': 'tf.bool'}), '(shape=(None,), dtype=tf.bool)\n', (5120, 5150), True, 'import tensorflow as tf\n'), ((5548, 5592), 'h5py.File', 'h5py.File', (['f"""{runpath}/my_enum_data.h5"""', '"""w"""'], {}), "(f'{runpath}/my_enum_data.h5', 'w')\n", (5557, 5592), False, 'import h5py\n'), ((5710, 5754), 'h5py.File', 'h5py.File', (['f"""{runpath}/my_enum_data.h5"""', '"""r"""'], {}), 
"(f'{runpath}/my_enum_data.h5', 'r')\n", (5719, 5754), False, 'import h5py\n'), ((5870, 5913), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None,)', 'dtype': 'tf.bool'}), '(shape=(None,), dtype=tf.bool)\n', (5883, 5913), True, 'import tensorflow as tf\n'), ((5924, 6019), 'pytest.raises', 'pytest.raises', (['tf.errors.InvalidArgumentError'], {'match': '""".*unsupported data class for enum.*"""'}), "(tf.errors.InvalidArgumentError, match=\n '.*unsupported data class for enum.*')\n", (5937, 6019), False, 'import pytest\n'), ((6052, 6116), 'tensorflow_io.IOTensor.from_hdf5', 'tfio.IOTensor.from_hdf5', (['f"""{runpath}/my_enum_data.h5"""'], {'spec': 'spec'}), "(f'{runpath}/my_enum_data.h5', spec=spec)\n", (6075, 6116), True, 'import tensorflow_io as tfio\n'), ((1058, 1098), 'h5py.File', 'h5py.File', (['f"""{runpath}/file_{i}.h5"""', '"""w"""'], {}), "(f'{runpath}/file_{i}.h5', 'w')\n", (1067, 1098), False, 'import h5py\n'), ((1127, 1159), 'numpy.random.randint', 'np.random.randint', (['(50000)', '(100000)'], {}), '(50000, 100000)\n', (1144, 1159), True, 'import numpy as np\n'), ((2260, 2300), 'h5py.File', 'h5py.File', (['f"""{runpath}/file_{i}.h5"""', '"""w"""'], {}), "(f'{runpath}/file_{i}.h5', 'w')\n", (2269, 2300), False, 'import h5py\n'), ((2329, 2361), 'numpy.random.randint', 'np.random.randint', (['(50000)', '(100000)'], {}), '(50000, 100000)\n', (2346, 2361), True, 'import numpy as np\n'), ((3496, 3528), 'numpy.random.randint', 'np.random.randint', (['(50000)', '(100000)'], {}), '(50000, 100000)\n', (3513, 3528), True, 'import numpy as np\n'), ((3667, 3691), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (3676, 3691), False, 'import h5py\n'), ((1544, 1584), 'tensorflow_io.IODataset.from_hdf5', 'tfio.IODataset.from_hdf5', (['p', '"""/features"""'], {}), "(p, '/features')\n", (1568, 1584), True, 'import tensorflow_io as tfio\n'), ((1611, 1650), 'tensorflow_io.IODataset.from_hdf5', 'tfio.IODataset.from_hdf5', (['p', 
'"""/targets"""'], {}), "(p, '/targets')\n", (1635, 1650), True, 'import tensorflow_io as tfio\n'), ((1677, 1717), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(features, targets)'], {}), '((features, targets))\n', (1696, 1717), True, 'import tensorflow as tf\n'), ((2799, 2852), 'tensorflow_io.IODataset.from_hdf5', 'tfio.IODataset.from_hdf5', (['p', '"""/sample_group/features"""'], {}), "(p, '/sample_group/features')\n", (2823, 2852), True, 'import tensorflow_io as tfio\n'), ((2879, 2931), 'tensorflow_io.IODataset.from_hdf5', 'tfio.IODataset.from_hdf5', (['p', '"""/sample_group/targets"""'], {}), "(p, '/sample_group/targets')\n", (2903, 2931), True, 'import tensorflow_io as tfio\n'), ((2958, 2998), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(features, targets)'], {}), '((features, targets))\n', (2977, 2998), True, 'import tensorflow as tf\n'), ((1206, 1243), 'numpy.random.random', 'np.random.random', (['(total_samples, 60)'], {}), '((total_samples, 60))\n', (1222, 1243), True, 'import numpy as np\n'), ((1290, 1326), 'numpy.random.random', 'np.random.random', (['(total_samples, 3)'], {}), '((total_samples, 3))\n', (1306, 1326), True, 'import numpy as np\n'), ((2459, 2496), 'numpy.random.random', 'np.random.random', (['(total_samples, 60)'], {}), '((total_samples, 60))\n', (2475, 2496), True, 'import numpy as np\n'), ((2545, 2581), 'numpy.random.random', 'np.random.random', (['(total_samples, 3)'], {}), '((total_samples, 3))\n', (2561, 2581), True, 'import numpy as np\n'), ((3738, 3768), 'numpy.random.random', 'np.random.random', (['(sample, 60)'], {}), '((sample, 60))\n', (3754, 3768), True, 'import numpy as np\n'), ((3815, 3844), 'numpy.random.random', 'np.random.random', (['(sample, 3)'], {}), '((sample, 3))\n', (3831, 3844), True, 'import numpy as np\n')] |
from collections import defaultdict
import noise
import numpy as np
from scipy import spatial
from typing import Optional, List
from tales.worldmap.dataclasses import Adjacency, MapParameters, ndarr, IntListDict
class Mesh:
    """Voronoi mesh built from random, Lloyd-relaxed center points.

    Exposes the Voronoi diagram itself plus adjacency lookup tables, an
    edge mask for boundary vertices, and a noise-perturbed copy of the
    region vertices for later map generation.
    """

    def __init__(self, map_params: MapParameters):
        self.map_params = map_params
        self.number_points = self.map_params.number_points
        self.center_points = self._generate_points(self.map_params.point_smoothing)
        # scipy.spatial.Voronoi attributes used below:
        # points
        #   Coordinates of input points.
        # vertices
        #   Coordinates of the Voronoi vertices.
        # ridge_points
        #   Indices of the points between which each Voronoi ridge lies.
        # ridge_vertices
        #   Indices of the Voronoi vertices forming each Voronoi ridge.
        # regions
        #   Indices of the Voronoi vertices forming each Voronoi region.
        #   -1 indicates vertex outside the Voronoi diagram.
        # point_region
        #   Index of the Voronoi region for each input point.
        #   If qhull option "Qc" was not specified, the list will contain -1
        #   for points that are not associated with a Voronoi region.
        self.vor = spatial.Voronoi(self.center_points)
        # v_regions map the index of a point in self.center_points to a region
        self.v_regions: List[List[int]] = [self.vor.regions[idx] for idx in self.vor.point_region]
        self.v_number_vertices = self.vor.vertices.shape[0]
        # adjacencies give us maps that we can use to quickly look up nodes that belong together
        self.v_adjacencies = self._calculate_adjacencies()
        # all the vertices we need, aka the points that separate one region (based on center points) from others
        self.v_vertices = self._remove_outliers(self.vor.vertices)
        self.v_vertice_noise = self._vertice_noise(self.v_vertices)

    def _generate_points(self, iterations: int) -> ndarr:
        """Generate random points in [0, 1)^2 and Lloyd-relax them.

        Each iteration moves every point towards the centroid of its Voronoi
        region, making tile sizes more uniform.
        """
        points = np.random.random((self.number_points, 2))
        # Moves points a little further away from each other to make all 'tiles' more equal in size and spread
        for _ in range(iterations):
            vor = spatial.Voronoi(points)
            newpts = []
            for idx in range(len(vor.points)):
                pt = vor.points[idx, :]
                region = vor.regions[vor.point_region[idx]]
                if -1 in region:
                    # Region touches the diagram boundary; leave the point alone.
                    newpts.append(pt)
                else:
                    vxs = np.asarray([vor.vertices[i, :] for i in region])
                    # Clamp region vertices into the unit square before averaging.
                    vxs[vxs < 0] = 0
                    vxs[vxs > 1] = 1
                    newpt = np.mean(vxs, 0)
                    newpts.append(newpt)
            points = np.asarray(newpts)
        return points

    def _calculate_adjacencies(self) -> Adjacency:
        """Build point-point and vertex-vertex adjacency lookups.

        Returns an Adjacency holding: neighbouring points per point,
        neighbouring vertices per vertex, a region-vertex-index -> point-index
        map, a dense (n_vertices, 3) neighbour map (-1 padded), and the
        boundary-edge mask.
        """
        adjacent_points: IntListDict = defaultdict(list)
        adjacent_vertices: IntListDict = defaultdict(list)
        region_idx_to_point_idx: IntListDict = defaultdict(list)
        adjacency_map = np.zeros((self.v_number_vertices, 3), np.int32) - 1

        # find all points that are neighbouring a different point
        for p1, p2 in self.vor.ridge_points:
            adjacent_points[p1].append(p2)
            adjacent_points[p2].append(p1)

        # find all ridge vertices that are neighbouring a different ridge vertice
        for v1, v2 in self.vor.ridge_vertices:
            adjacent_vertices[v1].append(v2)
            adjacent_vertices[v2].append(v1)

        for k, v in adjacent_vertices.items():
            if k != -1:
                adjacency_map[k, :] = v

        # build a region-point-index to point-index map
        for point_idx in range(self.number_points):
            region = self.v_regions[point_idx]
            for region_point_idx in region:
                if region_point_idx == -1:
                    continue
                region_idx_to_point_idx[region_point_idx].append(point_idx)

        return Adjacency(
            adjacent_points,
            adjacent_vertices,
            region_idx_to_point_idx,
            adjacency_map,
            self._calculate_edges(adjacent_vertices),
        )

    def _remove_outliers(self, vertices: ndarr) -> ndarr:
        """Pull boundary vertices back towards their region's center points.

        The Voronoi algorithm will create points outside of [0, 1] at the very
        edges; we replace each vertex by the mean of its adjacent center
        points so the map does not extend far beyond its borders.
        """
        vertices = vertices.copy()
        for vertex_idx in range(self.v_number_vertices):
            point = self.center_points[
                self.v_adjacencies.region_idx_to_point_idx[vertex_idx]
            ]
            vertices[vertex_idx, :] = np.mean(point, 0)
        return vertices

    def _calculate_edges(self, adjacent_vertices: IntListDict) -> ndarr:
        """Boolean mask marking vertices that touch the diagram boundary (-1 neighbour)."""
        n = self.v_number_vertices
        # Fix: np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        edges = np.zeros(n, bool)
        for vertex_idx in range(n):
            adjs = adjacent_vertices[vertex_idx]
            if -1 in adjs:
                edges[vertex_idx] = True
        return edges

    def _vertice_noise(self, vertices: ndarr) -> ndarr:
        """Return a copy of ``vertices`` with Perlin noise added to both axes."""
        assert vertices.shape[1] == 2
        # Fix: copy the parameter rather than self.v_vertices, so the method
        # actually operates on what it was given (same value at the call site).
        vertice_noise = vertices.copy()
        # perlin noise on vertices; random base decorrelates successive meshes
        base = np.random.randint(1000)
        noises = np.array(
            [
                noise.pnoise2(x, y, lacunarity=1.7, octaves=3, base=base)
                for x, y in vertices
            ]
        )
        vertice_noise[:, 0] += noises
        vertice_noise[:, 1] += noises
        return vertice_noise
| [
"numpy.asarray",
"numpy.zeros",
"scipy.spatial.Voronoi",
"collections.defaultdict",
"numpy.random.randint",
"numpy.random.random",
"numpy.mean",
"noise.pnoise2"
] | [((1219, 1254), 'scipy.spatial.Voronoi', 'spatial.Voronoi', (['self.center_points'], {}), '(self.center_points)\n', (1234, 1254), False, 'from scipy import spatial\n'), ((1978, 2019), 'numpy.random.random', 'np.random.random', (['(self.number_points, 2)'], {}), '((self.number_points, 2))\n', (1994, 2019), True, 'import numpy as np\n'), ((2862, 2879), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2873, 2879), False, 'from collections import defaultdict\n'), ((2921, 2938), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2932, 2938), False, 'from collections import defaultdict\n'), ((2986, 3003), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2997, 3003), False, 'from collections import defaultdict\n'), ((4826, 4846), 'numpy.zeros', 'np.zeros', (['n', 'np.bool'], {}), '(n, np.bool)\n', (4834, 4846), True, 'import numpy as np\n'), ((5213, 5236), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (5230, 5236), True, 'import numpy as np\n'), ((2186, 2209), 'scipy.spatial.Voronoi', 'spatial.Voronoi', (['points'], {}), '(points)\n', (2201, 2209), False, 'from scipy import spatial\n'), ((2729, 2747), 'numpy.asarray', 'np.asarray', (['newpts'], {}), '(newpts)\n', (2739, 2747), True, 'import numpy as np\n'), ((3028, 3075), 'numpy.zeros', 'np.zeros', (['(self.v_number_vertices, 3)', 'np.int32'], {}), '((self.v_number_vertices, 3), np.int32)\n', (3036, 3075), True, 'import numpy as np\n'), ((4659, 4676), 'numpy.mean', 'np.mean', (['point', '(0)'], {}), '(point, 0)\n', (4666, 4676), True, 'import numpy as np\n'), ((5294, 5351), 'noise.pnoise2', 'noise.pnoise2', (['x', 'y'], {'lacunarity': '(1.7)', 'octaves': '(3)', 'base': 'base'}), '(x, y, lacunarity=1.7, octaves=3, base=base)\n', (5307, 5351), False, 'import noise\n'), ((2500, 2548), 'numpy.asarray', 'np.asarray', (['[vor.vertices[i, :] for i in region]'], {}), '([vor.vertices[i, :] for i in region])\n', (2510, 2548), True, 
'import numpy as np\n'), ((2651, 2666), 'numpy.mean', 'np.mean', (['vxs', '(0)'], {}), '(vxs, 0)\n', (2658, 2666), True, 'import numpy as np\n')] |
import numpy as np
import pylab as pl
from ourgui import openFile
def plotline(maxx, minx=0, value=0, style="k-", plotfunc=pl.plot):
    """Draw a horizontal line at height ``value`` spanning x in [minx, maxx]."""
    xs = [minx, maxx]
    ys = [value, value]
    plotfunc(xs, ys, style)
def quickplot(filename):
    """Load a one-column voltage log and plot the trace plus its histogram.

    Top panel: raw samples with max (green), min (red), mean (black) and
    mean +- one standard deviation (blue) reference lines.
    Bottom panel: normalized distribution of the samples.
    """
    data = np.loadtxt(filename, comments="#")
    maxdata, mindata, stddata, meandata = np.max(data), np.min(data), np.std(data), np.mean(data)
    n = len(data)
    pl.subplot(211)
    pl.plot(data, 'k.')
    plotline(n, value=maxdata, style="g-")
    plotline(n, value=mindata, style="r-")
    plotline(n, value=meandata, style="k-")
    plotline(n, value=(meandata + stddata), style="b-")
    plotline(n, value=(meandata - stddata), style="b-")
    pl.xlabel('data points')
    pl.ylabel('voltage (V)')
    pl.title("Voltage: %f (+- %f) V" % (meandata, stddata))
    pl.subplot(212)
    # Fix: `normed` was removed in matplotlib 3.x; `density=True` is the
    # equivalent normalization argument.
    n, bins, patches = pl.hist(data, 100, density=True, facecolor='green', alpha=0.75)
    pl.xlabel('voltage')
    pl.ylabel('distribution')
    pl.show()
filename = openFile("log")
if filename:
quickplot(filename)
| [
"pylab.hist",
"pylab.title",
"pylab.show",
"ourgui.openFile",
"numpy.std",
"pylab.ylabel",
"pylab.subplot",
"numpy.max",
"numpy.min",
"numpy.mean",
"numpy.loadtxt",
"pylab.xlabel",
"pylab.plot"
] | [((989, 1004), 'ourgui.openFile', 'openFile', (['"""log"""'], {}), "('log')\n", (997, 1004), False, 'from ourgui import openFile\n'), ((229, 263), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'comments': '"""#"""'}), "(filename, comments='#')\n", (239, 263), True, 'import numpy as np\n'), ((389, 404), 'pylab.subplot', 'pl.subplot', (['(211)'], {}), '(211)\n', (399, 404), True, 'import pylab as pl\n'), ((410, 429), 'pylab.plot', 'pl.plot', (['data', '"""k."""'], {}), "(data, 'k.')\n", (417, 429), True, 'import pylab as pl\n'), ((679, 703), 'pylab.xlabel', 'pl.xlabel', (['"""data points"""'], {}), "('data points')\n", (688, 703), True, 'import pylab as pl\n'), ((709, 733), 'pylab.ylabel', 'pl.ylabel', (['"""voltage (V)"""'], {}), "('voltage (V)')\n", (718, 733), True, 'import pylab as pl\n'), ((739, 794), 'pylab.title', 'pl.title', (["('Voltage: %f (+- %f) V' % (meandata, stddata))"], {}), "('Voltage: %f (+- %f) V' % (meandata, stddata))\n", (747, 794), True, 'import pylab as pl\n'), ((801, 816), 'pylab.subplot', 'pl.subplot', (['(212)'], {}), '(212)\n', (811, 816), True, 'import pylab as pl\n'), ((841, 900), 'pylab.hist', 'pl.hist', (['data', '(100)'], {'normed': '(1)', 'facecolor': '"""green"""', 'alpha': '(0.75)'}), "(data, 100, normed=1, facecolor='green', alpha=0.75)\n", (848, 900), True, 'import pylab as pl\n'), ((906, 926), 'pylab.xlabel', 'pl.xlabel', (['"""voltage"""'], {}), "('voltage')\n", (915, 926), True, 'import pylab as pl\n'), ((932, 957), 'pylab.ylabel', 'pl.ylabel', (['"""distribution"""'], {}), "('distribution')\n", (941, 957), True, 'import pylab as pl\n'), ((965, 974), 'pylab.show', 'pl.show', ([], {}), '()\n', (972, 974), True, 'import pylab as pl\n'), ((307, 319), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (313, 319), True, 'import numpy as np\n'), ((321, 333), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (327, 333), True, 'import numpy as np\n'), ((335, 347), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (341, 347), 
True, 'import numpy as np\n'), ((349, 362), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (356, 362), True, 'import numpy as np\n')] |
# This code is heavily based on Phil Tabor Q learning:
# https://github.com/philtabor/Deep-Q-Learning-Paper-To-Code/blob/master/q_learning/q_learning_agent.py
# Here it were added a DeepQ network also based on <NAME>abor's code:
# https://github.com/philtabor/Deep-Q-Learning-Paper-To-Code/blob/master/DQN/deep_q_network.py
# Some hints on how to train the Neural Networks were obtained in this example from begooboi Reddit
# user (based on Keras):
# https://www.reddit.com/r/learnmachinelearning/comments/9vasqm/review_my_code_dqn_for_gym_frozenlake/
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch as T
from collections import deque
import random
class LinearDeepQNetwork(nn.Module):
    """Two-layer fully connected Q-value network.

    Maps a one-hot state vector of width ``input`` to one Q-value per
    action.  Owns its own Adam optimizer and MSE loss so the agent can
    train it directly.
    """
    def __init__(self, lr, input, n_actions):
        super(LinearDeepQNetwork, self).__init__()
        # Single 128-unit hidden layer between state encoding and Q-values.
        self.fc1 = nn.Linear(input, 128)
        self.fc2 = nn.Linear(128, n_actions)
        # self.parameters() comes from the inherited nn.Module.
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.loss = nn.MSELoss()
        # PyTorch keeps distinct tensor types per device, so record where
        # this model lives and move its parameters there.
        if T.cuda.is_available():
            self.device = T.device('cuda:0')
        else:
            self.device = T.device('cpu')
        self.to(self.device)

    def forward(self, state):
        """Return raw (un-activated) Q-values for *state*."""
        hidden = F.relu(self.fc1(state))
        # No output activation: MSELoss operates on the raw action values.
        return self.fc2(hidden)
class Agent():
    # DQN agent for the 4x4 FrozenLake grid: states are one-hot encoded
    # before being fed to LinearDeepQNetwork, action selection is
    # epsilon-greedy with a linearly decaying epsilon.
    def __init__(self, lr, state_space, action_space, gamma=0.90,
                 epsilon=1.0, eps_dec=1e-5, eps_min=0.01):
        """ Agent init takes:
        --
        lr - alpha learning rate factor
        state_space - environment state space dimension
        action_space - environment actions space dimension
        gamma - discount factor on MDP rewards
        epsilon - Epsilon Greedy initial value (exploration threshold)
        eps_dec - Epsilon Greedy decrease factor
        eps_min - Epsilon Greedy minimum, final value (must be > 0)
        """
        self.lr = lr
        self.input_dims = state_space
        self.n_actions = action_space
        self.gamma = gamma
        self.epsilon = epsilon
        self.eps_dec = eps_dec
        self.eps_min = eps_min
        # Discrete action ids 0..n_actions-1 used for random exploration.
        self.action_space = [i for i in range(self.n_actions)]
        # Pre-compute the one-hot encoding of every state once up front.
        self.np_arrays = []
        for i in range(self.input_dims):
            self.np_arrays.append(self.one_hot_state(i))
        # Replay buffer of (state, action, reward, next_state, done) tuples.
        self.memory = deque(maxlen=2000)
        self.Q = LinearDeepQNetwork(self.lr, self.input_dims, self.n_actions)
        # print_snapshot constants: glyphs for actions and the 4x4 map layout
        # (S=start, F=frozen, H=hole, G=goal).
        self.action_str = ['<', '.', '>', '^']
        self.map_str = []
        self.map_str.append(['S', 'F', 'F', 'F'])
        self.map_str.append(['F', 'H', 'F', 'H'])
        self.map_str.append(['F', 'F', 'F', 'H'])
        self.map_str.append(['H', 'F', 'F', 'G'])
    def one_hot_state(self, state):
        # Return a (1, input_dims) one-hot row vector for the given state id.
        state_m = np.zeros((1, self.input_dims))
        state_m[0][state] = 1
        return state_m
    def choose_action(self, observation):
        ''' Choose Epsilon Greedy action for a given state '''
        rand_ = np.random.random()
        if rand_ > self.epsilon:
            # Exploit: pick the argmax action from the Q-network.
            state = T.tensor(self.np_arrays[observation], dtype=T.float).to(self.Q.device)
            # https://stackoverflow.com/questions/64192810/runtime-error-both-arguments-to-matmul-need-to-be-at-least-1d-but-they-are-0d
            actions = self.Q.forward(state.unsqueeze(dim=0))
            action = T.argmax(actions).item()
        else:
            # Explore: uniformly random action.
            action = np.random.choice(self.action_space)
        return action
    def decrement_epsilon(self):
        ''' Epsilon decrease function (linear) '''
        # Linear decay clipped at eps_min (a C-style ternary in Python terms).
        self.epsilon = self.epsilon - self.eps_dec \
            if self.epsilon > self.eps_min else self.eps_min
    def learn(self, state, action, reward, state_, done):
        """ Off Policy (always Greedy) Learn function
        --
        Here defined as plain Bellman equation, state_ is state'
        """
        self.Q.optimizer.zero_grad()
        state_T = T.tensor(self.np_arrays[state_], dtype=T.float).to(self.Q.device)
        if not done:
            # Bootstrapped Bellman target: r + gamma * max_a Q(s', a).
            actions_T = self.Q.forward(state_T.unsqueeze(dim=0))
            rewardT = reward + self.gamma * T.max(actions_T)
        else:
            # Terminal state: target is the bare reward.
            rewardT = T.tensor(reward, dtype=T.float).to(self.Q.device)
        stateT = T.tensor(self.np_arrays[state], dtype=T.float).to(self.Q.device)
        q_pred = self.Q.forward(stateT.unsqueeze(dim=0))
        # q_target equals q_pred except at the taken action, which gets the
        # Bellman target, so the MSE only penalises that action's value.
        q_target = self.Q.forward(stateT.unsqueeze(dim=0))
        q_target[0][0][action] = rewardT
        loss = self.Q.loss(q_pred, q_target).to(self.Q.device)
        # Backpropagate cost and take a step on the optimizer.
        # These two calls are critical for the learn loop.
        loss.backward()
        self.Q.optimizer.step()
        self.decrement_epsilon()
    def batch_learn(self, batch_size):
        # Replay a random minibatch from memory.
        # NOTE(review): assumes self.memory already holds >= batch_size
        # transitions; random.sample raises ValueError otherwise.
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            self.learn(state, action, reward, next_state, done)
    def print_learn_snapshot(self):
        """
        Print a snapshot of learning situation.
        Sample:
        --
        Learn snapshot:
        |S(<) 0.061||F(^) 0.095||F(<)-0.036||F(<) 0.049|
        |F(^) 0.074||H(0) ~~~~ ||F(>)-0.018||H(0) ~~~~ |
        |F(<) 0.000||F(^) 0.043||F(>) 0.037||H(0) ~~~~ |
        |H(0) ~~~~ ||F(<) 0.066||F(<) 0.072||G(1) \o/  |
        --
        Cell format: <status>(<best_action>)<best_action_value>
        status - 'S'=start, 'G'=goal, 'F'=frozen, 'H'=hole
        best_action - '<'=start, '.'=down, '>'=right, '^'=up
                    - '1'=reward
        best_value - Extracted value for best_action from NN tensor
        End cells:
        bad ending - ~~~~ (water)
        good ending - \o/  (happy)
        """
        print('--\nLearn snapshot: ')
        for line in range(4):
            for col in range(4):
                # Query the network for this grid cell's Q-values.
                stateT = T.tensor(self.np_arrays[line * 4 + col], dtype=T.float).to(self.Q.device)
                actionsT = self.Q.forward(stateT.unsqueeze(dim=0))
                if self.map_str[line][col] == 'F' or self.map_str[line][col] == 'S':
                    action_max = self.action_str[T.argmax(actionsT).item()]
                    action_max_value = f'{T.max(actionsT).item(): 4.3f}'
                elif self.map_str[line][col] == 'H':
                    action_max = ' '
                    action_max_value = ' ~~~~ '
                else:
                    action_max = '1'
                    action_max_value = ' \o/ '
                print(f'|{self.map_str[line][col]}({action_max}){action_max_value}|', end='')
            print('')
        print('--\n')
| [
"numpy.random.choice",
"torch.nn.MSELoss",
"random.sample",
"torch.argmax",
"numpy.zeros",
"numpy.random.random",
"torch.cuda.is_available",
"torch.max",
"torch.nn.Linear",
"torch.tensor",
"collections.deque"
] | [((877, 898), 'torch.nn.Linear', 'nn.Linear', (['input', '(128)'], {}), '(input, 128)\n', (886, 898), True, 'import torch.nn as nn\n'), ((918, 943), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'n_actions'], {}), '(128, n_actions)\n', (927, 943), True, 'import torch.nn as nn\n'), ((1091, 1103), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1101, 1103), True, 'import torch.nn as nn\n'), ((2491, 2509), 'collections.deque', 'deque', ([], {'maxlen': '(2000)'}), '(maxlen=2000)\n', (2496, 2509), False, 'from collections import deque\n'), ((2953, 2983), 'numpy.zeros', 'np.zeros', (['(1, self.input_dims)'], {}), '((1, self.input_dims))\n', (2961, 2983), True, 'import numpy as np\n'), ((3159, 3177), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3175, 3177), True, 'import numpy as np\n'), ((5043, 5081), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (5056, 5081), False, 'import random\n'), ((3582, 3617), 'numpy.random.choice', 'np.random.choice', (['self.action_space'], {}), '(self.action_space)\n', (3598, 3617), True, 'import numpy as np\n'), ((1147, 1168), 'torch.cuda.is_available', 'T.cuda.is_available', ([], {}), '()\n', (1166, 1168), True, 'import torch as T\n'), ((4151, 4198), 'torch.tensor', 'T.tensor', (['self.np_arrays[state_]'], {'dtype': 'T.float'}), '(self.np_arrays[state_], dtype=T.float)\n', (4159, 4198), True, 'import torch as T\n'), ((4469, 4515), 'torch.tensor', 'T.tensor', (['self.np_arrays[state]'], {'dtype': 'T.float'}), '(self.np_arrays[state], dtype=T.float)\n', (4477, 4515), True, 'import torch as T\n'), ((3232, 3284), 'torch.tensor', 'T.tensor', (['self.np_arrays[observation]'], {'dtype': 'T.float'}), '(self.np_arrays[observation], dtype=T.float)\n', (3240, 3284), True, 'import torch as T\n'), ((3522, 3539), 'torch.argmax', 'T.argmax', (['actions'], {}), '(actions)\n', (3530, 3539), True, 'import torch as T\n'), ((4348, 4364), 'torch.max', 'T.max', (['actions_T'], {}), 
'(actions_T)\n', (4353, 4364), True, 'import torch as T\n'), ((4401, 4432), 'torch.tensor', 'T.tensor', (['reward'], {'dtype': 'T.float'}), '(reward, dtype=T.float)\n', (4409, 4432), True, 'import torch as T\n'), ((6142, 6197), 'torch.tensor', 'T.tensor', (['self.np_arrays[line * 4 + col]'], {'dtype': 'T.float'}), '(self.np_arrays[line * 4 + col], dtype=T.float)\n', (6150, 6197), True, 'import torch as T\n'), ((6417, 6435), 'torch.argmax', 'T.argmax', (['actionsT'], {}), '(actionsT)\n', (6425, 6435), True, 'import torch as T\n'), ((6486, 6501), 'torch.max', 'T.max', (['actionsT'], {}), '(actionsT)\n', (6491, 6501), True, 'import torch as T\n')] |
import scipy.linalg as linalg
import numpy as np
from math import cos, sin
class LinearDynamic:
    """Discrete-time linear motion model: x' = x + 2*ux, y' = y + uy."""
    # TODO: change the API
    def jacobi_wrt_state(self, state, controls):
        """d(next state)/d(state): identity (next state is linear in state)."""
        return np.array([
            [1.0, 0.0],
            [0.0, 1.0],
        ])

    def jacobi_wrt_controls(self, state, controls):
        """d(next state)/d(controls)."""
        return np.array([
            [2.0, 0.0],
            [0.0, 1.0],
        ])

    def f_function(self, state, controls):
        """Advance (x, y) one step under controls (ux, uy)."""
        x, y = state
        ux, uy = controls
        next_x = x + 2 * ux
        next_y = y + uy
        return np.array([next_x, next_y])
class NonlinearDynamic:
    """Discrete-time model with a nonlinear x update: x' = x + sin(ux)."""
    # TODO: change the API
    def jacobi_wrt_state(self, state, controls):
        """d(next state)/d(state): identity."""
        return np.array([
            [1.0, 0.0],
            [0.0, 1.0],
        ])

    def jacobi_wrt_controls(self, state, controls):
        """d(next state)/d(controls); d(sin ux)/d(ux) = cos(ux)."""
        ux, uy = controls
        return np.array([
            [cos(ux), 0.0],
            [0.0, 1.0],
        ])

    def f_function(self, state, controls):
        """Advance (x, y) one step under controls (ux, uy)."""
        x, y = state
        ux, uy = controls
        next_x = x + sin(ux)
        next_y = y + uy
        return np.array([next_x, next_y])
class TargetCost:
    """Quadratic cost pulling a 2-D state toward a fixed prior point."""
    def __init__(self, state, prior):
        # Copy so later mutation of the caller's arrays cannot change the cost.
        self.state = state.copy()
        self.prior = prior.copy()

    def residual(self):
        """2x1 column vector of state minus prior."""
        sx, sy = self.state
        px, py = self.prior
        return np.array([
            [sx - px],
            [sy - py],
        ])

    def cost(self):
        """Squared residual norm, as a 1x1 matrix."""
        res = self.residual()
        return res.T @ res

    def jacobi(self):
        """Jacobian of the residual w.r.t. the state (identity)."""
        return np.identity(2)

    def weight(self):
        """Weighting matrix of the cost (identity)."""
        return np.identity(2)

    def quad_weight(self):
        """Gauss-Newton quadratic weight J^T W J."""
        jac = self.jacobi()
        wgt = self.weight()
        return jac.T @ wgt @ jac

    def quad_mean(self):
        """Point the quadratic cost is centered on."""
        return self.prior
| [
"numpy.array",
"numpy.identity",
"math.sin",
"math.cos"
] | [((189, 217), 'numpy.array', 'np.array', (['[[1, 0], [0, 1.0]]'], {}), '([[1, 0], [0, 1.0]])\n', (197, 217), True, 'import numpy as np\n'), ((320, 348), 'numpy.array', 'np.array', (['[[2, 0], [0, 1.0]]'], {}), '([[2, 0], [0, 1.0]])\n', (328, 348), True, 'import numpy as np\n'), ((490, 520), 'numpy.array', 'np.array', (['[x + 2 * ux, y + uy]'], {}), '([x + 2 * ux, y + uy])\n', (498, 520), True, 'import numpy as np\n'), ((663, 691), 'numpy.array', 'np.array', (['[[1, 0], [0, 1.0]]'], {}), '([[1, 0], [0, 1.0]])\n', (671, 691), True, 'import numpy as np\n'), ((1272, 1302), 'numpy.array', 'np.array', (['[[x - px], [y - py]]'], {}), '([[x - px], [y - py]])\n', (1280, 1302), True, 'import numpy as np\n'), ((1448, 1462), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (1459, 1462), True, 'import numpy as np\n'), ((1501, 1515), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (1512, 1515), True, 'import numpy as np\n'), ((844, 851), 'math.cos', 'cos', (['ux'], {}), '(ux)\n', (847, 851), False, 'from math import cos, sin\n'), ((1009, 1016), 'math.sin', 'sin', (['ux'], {}), '(ux)\n', (1012, 1016), False, 'from math import cos, sin\n')] |
import numpy as np
# Canonical plot colors per algorithm.
alg_colors = dict(
    unconstrained="#AA5D1F",
    LR="#BA2DC1",
    RSPO="#6C2896",
    SQRL="#D43827",
    RP="#4899C5",
    RCPO="#34539C",
    RRL_MF="#60CC38",
    RRL_MB="#349C26",
)

# Human-readable legend labels per algorithm.
alg_names = dict(
    unconstrained="Unconstrained",
    LR="LR",
    RSPO="RSPO",
    SQRL="SQRL",
    RP="RP",
    RCPO="RCPO",
    RRL_MF="Ours: Recovery RL (MF Recovery)",
    RRL_MB="Ours: Recovery RL (MB Recovery)",
)


def get_color(algname, alt_color_map={}):
    """Color for *algname*: known names first, then the caller's map,
    else a random RGB triple."""
    color = alg_colors.get(algname)
    if color is not None:
        return color
    if algname in alt_color_map:
        return alt_color_map[algname]
    return np.random.rand(3)


def get_legend_name(algname, alt_name_map={}):
    """Legend label for *algname*, falling back to the raw name."""
    if algname in alg_names:
        return alg_names[algname]
    return alt_name_map.get(algname, algname)
| [
"numpy.random.rand"
] | [((680, 697), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (694, 697), True, 'import numpy as np\n')] |
import numpy as np
from .simulator import Simulator
def get_final_score(simulator):
    """Score a finished simulation in [0, 1].

    Starts from a perfect score of 20 points per tile and deducts for
    remaining contamination (0.5x), fuel spent (2x), robots deployed (15x)
    and robots stranded off-station (50x); never returns below 0.
    """
    tiles = simulator.shape[0] * simulator.shape[1]
    dirt = np.sum(simulator.contamination)
    # A robot is stranded if it did not finish on a station.
    stranded = 0
    for robot in simulator.robots.values():
        if robot.pos not in simulator.stations:
            stranded += 1
    penalty = (
        0.5 * dirt
        + 2 * simulator.fuel_expended
        + 15 * len(simulator.robots)
        + 50 * stranded
    )
    return max((20 * tiles - penalty) / (20 * tiles), 0)
def evaluate(problem, solution):
    """Run *solution* through a fresh Simulator for *problem* and score it."""
    world = Simulator(problem, solution.robots)
    world.simulate(solution.actions)
    return get_final_score(world)
| [
"numpy.sum"
] | [((163, 194), 'numpy.sum', 'np.sum', (['simulator.contamination'], {}), '(simulator.contamination)\n', (169, 194), True, 'import numpy as np\n')] |
"""
SCRIPT FOR TRAINING 2DCNN MODELS
Run with two arguments - arg1=region, arg2=model type
"""
import os, sys
import torch
import numpy as np
import time
from CNN import *
from Training import *
from Data_maker_loader import *
from random import randint, uniform, choice
# Pick the network implementation matching the requested model type (argv[2]).
if sys.argv[2] == "2D":
    from CNN import *
else:
    from ConvRNN import *
# Fix the hyperparameter sampling sequence so array jobs are reproducible.
# NOTE(review): only randint/uniform/choice are imported from `random` above;
# the `random` module itself is presumably re-exported by one of the star
# imports (CNN/ConvRNN/Training) -- confirm, otherwise this is a NameError.
random.seed(200)
# Region to train on comes from argv[1]; falls back to Junin.
if sys.argv[1]:
    region = sys.argv[1]
else:
    region = "Junin"
print("REGION: ", region)
# Model type ("2D"/"3D") comes from argv[2].
if sys.argv[2]:
    modeltype = sys.argv[2]
else:
    modeltype = "3D"
print("MODEL TYPE: ", modeltype)
# Wall-clock start; total runtime is reported at the end of the script.
start = time.time()
server = "/rds/general/user/jgb116/home/satellite/satellite/junin"
# server = '/rds/general/project/aandedemand/live/satellite/junin'
# WHERE TO IMPORT DATA FROM
# wherepath = server + '/data_reduced/tensors'
# savepath = server + '/data_reduced/tensors'
wherepath = server + "/data/" + region
savepath = server + "/data/" + region + "/out"
if not os.path.exists(savepath):
    os.makedirs(savepath)
# WHERE TO SAVE MODEL CHECKPOINT
modelpath = server + "/models/" + region + "_models/" + modeltype
if not os.path.exists(modelpath):
    os.makedirs(modelpath)
# WHERE TO SAVE IMAGES TRACKING TRAINING PROCESS
picspath = server + "/models/" + region + "_models/" + modeltype + "/pics"
if not os.path.exists(picspath):
    os.makedirs(picspath)
# WHERE TO SAVE MODEL PERFORMANCE OF EACH JOB FOR TRAIN, VAL AND TEST DATA
file = (
    server
    + "/models/"
    + region
    + "_models/"
    + modeltype
    + "/grid_summary/"
    + modeltype
    + ".txt"
)
if not os.path.exists(os.path.dirname(file)):
    os.makedirs(os.path.dirname(file))
if __name__ == "__main__":
    # ---- Training period: features from 20{start}-20{end} predict
    # deforestation in the following year. ----
    start_year = 14
    end_year = 17

    # ---- CNN architecture parameters ----
    # size = 45
    sizes = [45, 49, 55, 59]  # candidate patch widths (kept for reference)
    DSM = False  # whether the DSM (elevation) channel is stacked in
    # Input channel count depends on the DSM flag: (2,8) vs (3,8) stacks.
    if DSM:
        input_dim = 11
    else:
        input_dim = 10
    # Need 4 for 2DCNN
    hidden_dim = [64, 128, 128]
    # Different for 2D/3D models
    # kernel_size = [(3, 3), (2, 3, 3), (3, 3)]
    kernel_size = [(5, 5), (5, 5), (3, 3), (3, 3)]
    stride = [(2, 2), (1, 1), (1, 1), (1, 1)]
    padding = [0, 0, 0, 0]
    # dropout = 0.4
    dropouts = [0.2, 0.3, 0.4, 0.5]  # candidate dropouts (kept for reference)
    levels = [10]

    # Ratios of 0:1 labels in the Train and Validation data sets.
    train_times = 4
    test_times = 4

    # Criteria for Early Stopping (exactly one should drive stopping).
    AUC = True
    BCE_Wloss = False
    FNcond = False

    # Parameters for the cost of the confusion matrix.
    w = 10  # weight on the False Negative Rate
    perc = (100 * train_times) / (
        train_times + 1
    )  # percentile for threshold selection; advisable to be 100*times/(times+1)

    # Weight parameter for the weighted BCE/CE loss.
    pos_weight = 2

    # Adam optimiser parameters.
    lrs = [0.000005, 0.00001, 0.00003, 0.00006, 0.00008, 0.0001, 0.0003, 0.001]
    weight_decay = 0

    # Early Stopping / training-loop parameters.
    n_splits = 5
    n_epochs = 20
    patience = 7
    # Time in hours (needs to be less than 24 for GPUs in Imperial HPC).
    training_time = 23.5

    # train_model parameters for debugging and time regulation.
    stop_batch = None
    print_batch = 1000
    batch_size = 32

    # PBS array jobs explore different hyperparameters in parallel;
    # default to job 3 when running outside the scheduler.
    if "PBS_ARRAY_INDEX" in os.environ:
        job = int(os.environ["PBS_ARRAY_INDEX"])
    else:
        job = 3
    if "PBS_JOBID" in os.environ:
        job_id = str(os.environ["PBS_JOBID"])
    else:
        job_id = "1"

    if job == 1:
        print("default params")
    # Every array job draws lr / patch size / dropout from the same
    # distributions, so the original 12 identical per-job branches collapse
    # into a single draw (the random call sequence per run is unchanged, and
    # out-of-range job ids now get valid parameters instead of a NameError).
    lr = choice(lrs)
    size = (2 * randint(20, 40)) + 1  # odd patch width in [41, 81]
    dropout = round(uniform(0.1, 0.8), 1)

    # Build the network and spread batches across available GPUs.
    model = CNNmodel(
        input_dim=input_dim,
        hidden_dim=hidden_dim,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dropout=dropout,
        levels=levels,
    )
    model = torch.nn.DataParallel(model)

    # Loss criterion and optimiser.
    # criterion = torch.nn.CrossEntropyLoss(reduction='mean', weight = pos_weight)
    criterion = torch.nn.BCEWithLogitsLoss(
        reduction="mean", pos_weight=torch.tensor(pos_weight)
    )
    optimiser = torch.optim.Adam(
        params=model.parameters(), lr=lr, weight_decay=weight_decay
    )

    # Load data.
    Data = with_DSM(
        size=int(size / 2),
        start_year=start_year,
        end_year=end_year,
        wherepath=wherepath,
        DSM=DSM,
        type=modeltype,
    )

    # Reuse a cached stratified train/test split when one exists on disk so
    # every array job sees identical folds; otherwise create and persist it.
    if not (
        os.path.isfile(wherepath + "/" + "Train_idx%d.npy" % (end_year))
        and os.path.isfile(wherepath + "/" + "Test_idx%d.npy" % (end_year))
    ):
        print("Creating indexes split")
        train_idx, test_idx = train_test_split(
            np.arange(len(Data.labels)),
            test_size=0.2,
            random_state=42,
            shuffle=True,
            stratify=Data.labels,
        )
        np.save(wherepath + "/" + "Train_idx%d.npy" % (end_year), train_idx)
        np.save(wherepath + "/" + "Test_idx%d.npy" % (end_year), test_idx)
    else:
        print("loading: " + wherepath + "/" + "Train_idx%d.npy" % (end_year))
        train_idx = np.load(wherepath + "/" + "Train_idx%d.npy" % (end_year))
        test_idx = np.load(wherepath + "/" + "Test_idx%d.npy" % (end_year))

    # Under-sample the majority (non-deforested) class to the requested ratios.
    train_sampler = ImbalancedDatasetUnderSampler(
        labels=Data.labels, indices=train_idx, times=train_times
    )
    test_sampler = ImbalancedDatasetUnderSampler(
        labels=Data.labels, indices=test_idx, times=test_times
    )

    # Print model and training details.
    print(
        "Model:",
        str(type(model))[8:-2],
        "\nPeriod 20%d-20%d -> 20%d" % (start_year, end_year, end_year + 1),
    )
    print(
        "\t% deforested pixels in train:",
        train_sampler.count[1] / sum(train_sampler.count),
    )
    print(
        "\t% deforested pixels in val:", test_sampler.count[1] / sum(test_sampler.count)
    )
    print("Job: ", job_id)
    print("DSM:", DSM)
    print("\nHyperparameters: ")
    print("\tImage size: %d" % (size))
    print("\tHidden dim: ", hidden_dim)
    print("\tDropout: ", dropout)
    print(
        "\tTrain and Val ratios of 0:1 labels: 1:%d ; 1:%d " % (train_times, test_times)
    )
    print(
        "\tADAM optimizer parameters: lr=%.7f, weight decay=%.2f, batch size=%d"
        % (lr, weight_decay, batch_size)
    )
    print("\tBCEWithLogitsLoss pos_weights = %.2f" % (pos_weight))
    print("\tn_epochs = %d with patience of %d epochs" % (n_epochs, patience))
    print("\tCross Validation with n_splits = %d " % (n_splits))
    print(
        "\tIf to use BCEWithLogitsLoss as an early stop criterion :",
        ((not AUC) & (not FNcond)),
    )
    print("\tIf to use AUC as an early stop criterion :", AUC)
    print("\tIf to use cost = FP+w*FN / TP+FP+w*FN+TN as an early stop criterion")
    print(
        "\twith w = %d and treshhold = the %d percentile of the output" % (w, perc),
        FNcond,
    )
    print("\nModel: \n", model)
    print("\nCriterion: \n", criterion)
    print("\nOptimiser: \n", optimiser)

    # Initiate the training routine (checkpoints land in modelpath).
    (
        model,
        train_loss,
        valid_loss,
        AUCs_train,
        AUCs_val,
        costs_train,
        costs_val,
        name,
    ) = train_model(
        Data=Data,
        model=model,
        sampler=train_sampler,
        criterion=criterion,
        optimiser=optimiser,
        patience=patience,
        n_epochs=n_epochs,
        n_splits=n_splits,
        batch_size=batch_size,
        stop_batch=stop_batch,
        print_batch=print_batch,
        training_time=training_time,
        w=w,
        FNcond=FNcond,
        AUC=AUC,
        job=job_id,
        path=modelpath,
    )

    # Produce graphs tracking loss / AUC / cost over training.
    visualize(
        train=train_loss,
        valid=valid_loss,
        name="BCEloss",
        modelname=name,
        best="min",
        path=picspath,
    )
    visualize(
        train=AUCs_train,
        valid=AUCs_val,
        name="AUC",
        modelname=name,
        best="max",
        path=picspath,
    )
    visualize(
        train=costs_train,
        valid=costs_val,
        name="Cost",
        modelname=name,
        best="min",
        path=picspath,
    )

    # Evaluate on the held-out test split.
    test_loss, test_AUC, test_cost = test_model(
        model=model,
        Data=Data,
        criterion=criterion,
        w=w,
        perc=perc,
        test_sampler=test_sampler,
        batch_size=batch_size,
        stop_batch=stop_batch,
        name=name,
        path=picspath,
    )

    # Append this job's metrics to the shared grid-search summary file.
    write_report(
        name=name,
        job_id=job_id,
        train_loss=train_loss,
        valid_loss=valid_loss,
        test_loss=test_loss,
        AUCs_train=AUCs_train,
        AUCs_val=AUCs_val,
        test_AUC=test_AUC,
        costs_train=costs_train,
        costs_val=costs_val,
        test_cost=test_cost,
        file=file,
        FNcond=FNcond,
        AUC=AUC,
    )
    print("\n\nEND!Total time (in h):", (time.time() - start) / 3600)
| [
"numpy.load",
"numpy.save",
"os.makedirs",
"random.randint",
"random.uniform",
"os.path.dirname",
"os.path.exists",
"random.choice",
"time.time",
"os.path.isfile",
"torch.nn.DataParallel",
"torch.tensor"
] | [((579, 590), 'time.time', 'time.time', ([], {}), '()\n', (588, 590), False, 'import time\n'), ((940, 964), 'os.path.exists', 'os.path.exists', (['savepath'], {}), '(savepath)\n', (954, 964), False, 'import os, sys\n'), ((970, 991), 'os.makedirs', 'os.makedirs', (['savepath'], {}), '(savepath)\n', (981, 991), False, 'import os, sys\n'), ((1099, 1124), 'os.path.exists', 'os.path.exists', (['modelpath'], {}), '(modelpath)\n', (1113, 1124), False, 'import os, sys\n'), ((1130, 1152), 'os.makedirs', 'os.makedirs', (['modelpath'], {}), '(modelpath)\n', (1141, 1152), False, 'import os, sys\n'), ((1285, 1309), 'os.path.exists', 'os.path.exists', (['picspath'], {}), '(picspath)\n', (1299, 1309), False, 'import os, sys\n'), ((1315, 1336), 'os.makedirs', 'os.makedirs', (['picspath'], {}), '(picspath)\n', (1326, 1336), False, 'import os, sys\n'), ((5970, 5998), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (5991, 5998), False, 'import torch\n'), ((1572, 1593), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (1587, 1593), False, 'import os, sys\n'), ((1612, 1633), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (1627, 1633), False, 'import os, sys\n'), ((3747, 3758), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (3753, 3758), False, 'from random import randint, uniform, choice\n'), ((3877, 3888), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (3883, 3888), False, 'from random import randint, uniform, choice\n'), ((4467, 4478), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (4473, 4478), False, 'from random import randint, uniform, choice\n'), ((4597, 4608), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (4603, 4608), False, 'from random import randint, uniform, choice\n'), ((4727, 4738), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (4733, 4738), False, 'from random import randint, uniform, choice\n'), ((4857, 4868), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', 
(4863, 4868), False, 'from random import randint, uniform, choice\n'), ((4987, 4998), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (4993, 4998), False, 'from random import randint, uniform, choice\n'), ((5117, 5128), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (5123, 5128), False, 'from random import randint, uniform, choice\n'), ((5247, 5258), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (5253, 5258), False, 'from random import randint, uniform, choice\n'), ((5378, 5389), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (5384, 5389), False, 'from random import randint, uniform, choice\n'), ((5509, 5520), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (5515, 5520), False, 'from random import randint, uniform, choice\n'), ((5640, 5651), 'random.choice', 'choice', (['lrs'], {}), '(lrs)\n', (5646, 5651), False, 'from random import randint, uniform, choice\n'), ((6978, 7044), 'numpy.save', 'np.save', (["(wherepath + '/' + 'Train_idx%d.npy' % end_year)", 'train_idx'], {}), "(wherepath + '/' + 'Train_idx%d.npy' % end_year, train_idx)\n", (6985, 7044), True, 'import numpy as np\n'), ((7055, 7119), 'numpy.save', 'np.save', (["(wherepath + '/' + 'Test_idx%d.npy' % end_year)", 'test_idx'], {}), "(wherepath + '/' + 'Test_idx%d.npy' % end_year, test_idx)\n", (7062, 7119), True, 'import numpy as np\n'), ((7230, 7285), 'numpy.load', 'np.load', (["(wherepath + '/' + 'Train_idx%d.npy' % end_year)"], {}), "(wherepath + '/' + 'Train_idx%d.npy' % end_year)\n", (7237, 7285), True, 'import numpy as np\n'), ((7307, 7361), 'numpy.load', 'np.load', (["(wherepath + '/' + 'Test_idx%d.npy' % end_year)"], {}), "(wherepath + '/' + 'Test_idx%d.npy' % end_year)\n", (7314, 7361), True, 'import numpy as np\n'), ((3824, 3841), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (3831, 3841), False, 'from random import randint, uniform, choice\n'), ((3954, 3971), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (3961, 3971), 
False, 'from random import randint, uniform, choice\n'), ((4544, 4561), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (4551, 4561), False, 'from random import randint, uniform, choice\n'), ((4674, 4691), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (4681, 4691), False, 'from random import randint, uniform, choice\n'), ((4804, 4821), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (4811, 4821), False, 'from random import randint, uniform, choice\n'), ((4934, 4951), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (4941, 4951), False, 'from random import randint, uniform, choice\n'), ((5064, 5081), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (5071, 5081), False, 'from random import randint, uniform, choice\n'), ((5194, 5211), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (5201, 5211), False, 'from random import randint, uniform, choice\n'), ((5324, 5341), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (5331, 5341), False, 'from random import randint, uniform, choice\n'), ((5455, 5472), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (5462, 5472), False, 'from random import randint, uniform, choice\n'), ((5586, 5603), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (5593, 5603), False, 'from random import randint, uniform, choice\n'), ((5717, 5734), 'random.uniform', 'uniform', (['(0.1)', '(0.8)'], {}), '(0.1, 0.8)\n', (5724, 5734), False, 'from random import randint, uniform, choice\n'), ((6208, 6232), 'torch.tensor', 'torch.tensor', (['pos_weight'], {}), '(pos_weight)\n', (6220, 6232), False, 'import torch\n'), ((6569, 6631), 'os.path.isfile', 'os.path.isfile', (["(wherepath + '/' + 'Train_idx%d.npy' % end_year)"], {}), "(wherepath + '/' + 'Train_idx%d.npy' % end_year)\n", (6583, 6631), False, 'import os, sys\n'), ((6644, 6705), 'os.path.isfile', 
'os.path.isfile', (["(wherepath + '/' + 'Test_idx%d.npy' % end_year)"], {}), "(wherepath + '/' + 'Test_idx%d.npy' % end_year)\n", (6658, 6705), False, 'import os, sys\n'), ((3779, 3794), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (3786, 3794), False, 'from random import randint, uniform, choice\n'), ((3909, 3924), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (3916, 3924), False, 'from random import randint, uniform, choice\n'), ((4499, 4514), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (4506, 4514), False, 'from random import randint, uniform, choice\n'), ((4629, 4644), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (4636, 4644), False, 'from random import randint, uniform, choice\n'), ((4759, 4774), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (4766, 4774), False, 'from random import randint, uniform, choice\n'), ((4889, 4904), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (4896, 4904), False, 'from random import randint, uniform, choice\n'), ((5019, 5034), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (5026, 5034), False, 'from random import randint, uniform, choice\n'), ((5149, 5164), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (5156, 5164), False, 'from random import randint, uniform, choice\n'), ((5279, 5294), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (5286, 5294), False, 'from random import randint, uniform, choice\n'), ((5410, 5425), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (5417, 5425), False, 'from random import randint, uniform, choice\n'), ((5541, 5556), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (5548, 5556), False, 'from random import randint, uniform, choice\n'), ((5672, 5687), 'random.randint', 'randint', (['(20)', '(40)'], {}), '(20, 40)\n', (5679, 5687), False, 'from random import randint, uniform, 
choice\n'), ((11053, 11064), 'time.time', 'time.time', ([], {}), '()\n', (11062, 11064), False, 'import time\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from collections import Iterable
def line(x, m, b):
    """Evaluate the straight line y = m*x + b."""
    return b + m * x
def scale(x, y, xsc=1.0, ysc=1.0, **kwargs):
    """Multiply x by xsc and y by ysc. For use with Transformer."""
    return xsc * x, ysc * y
def translate(x, y, xtrans=1.0, ytrans=1.0, **kwargs):
    """Shift x by xtrans and y by ytrans. For use with Transformer."""
    return xtrans + x, ytrans + y
def invertx(x, y, **kwargs):
    """Flip the sign of the x data."""
    return x * -1, y
def inverty(x, y, **kwargs):
    """Flip the sign of the y data."""
    return x, y * -1
def medfilt(x, y, ks=3, axis='y', **kwargs):
    """Median-filter one coordinate with scipy.signal.medfilt.

    Args:
        ks: odd filter window width (see scipy.signal.medfilt).
        axis: 'x' or 'y'; selects which coordinate is filtered.
    """
    from scipy.signal import medfilt as _medfilt
    # Inlined axis check (same contract as _verify_axis).
    if axis not in ('x', 'y'):
        raise ValueError('Arg "axis" must be "x" or "y", not {}'.format(axis))
    if axis == 'y':
        y = _medfilt(y, ks)
    else:
        x = _medfilt(x, ks)
    return x, y
def wrapped_medfilt(x, y, ks=3, axis='y', **kwargs):
    """Median-filter x or y while treating the data as a closed loop.

    A plain medfilt introduces artifacts within (ks-1)/2 points of the data
    edges. For data forming a closed loop this is avoided by padding each
    end with ks samples from the opposite end, filtering, then stripping
    the padding again.

    Args:
        ks: odd filter window width (see scipy.signal.medfilt).
        axis: 'x' or 'y'; selects which coordinate is filtered.
    """
    from scipy.signal import medfilt as _medfilt
    # Inlined axis check (same contract as _verify_axis).
    if axis not in ('x', 'y'):
        raise ValueError('Arg "axis" must be "x" or "y", not {}'.format(axis))
    xp = np.concatenate((x[-ks:], x, x[:ks]))
    yp = np.concatenate((y[-ks:], y, y[:ks]))
    if axis == 'x':
        xp = _medfilt(xp, ks)
    else:
        yp = _medfilt(yp, ks)
    return xp[ks:-ks], yp[ks:-ks]
def remove_offset(x, y, axis='y', **kwargs):
    """Subtract the mean from one coordinate (defaults to y).

    Note: ndarrays are modified in place via the augmented assignment.

    Args:
        axis: 'x' or 'y'; which coordinate to center.
    """
    # Inlined axis check (same contract as _verify_axis).
    if axis not in ('x', 'y'):
        raise ValueError('Arg "axis" must be "x" or "y", not {}'.format(axis))
    if axis == 'y':
        y -= y.mean()
    else:
        x -= x.mean()
    return x, y
def center(x, y, axis='y', **kwargs):
    """Shift one coordinate so its max and min straddle zero symmetrically.

    Subtracts (max+min)/2, mutating ndarrays in place.

    Args:
        axis: 'x' or 'y'; which coordinate to center.
    """
    # Inlined axis check (same contract as _verify_axis).
    if axis not in ('x', 'y'):
        raise ValueError('Arg "axis" must be "x" or "y", not {}'.format(axis))
    if axis == 'y':
        y -= 0.5 * (y.max() + y.min())
    else:
        x -= 0.5 * (x.max() + x.min())
    return x, y
def unroll(x, y, axis='y', **kwargs):
"""Replace the x (y) data with np.arange(N) where N is the number of data
points.
Args:
axis: either 'x' or 'y'. Indicates which axis should be unrolled. So if
axis is 'y', the x data will be thrown out and replaced with
arange(len(y)).
"""
_verify_axis(axis)
if axis == 'y':
x = np.arange(len(x))
elif axis == 'x':
y = np.arange(len(y))
return x, y
def spline(x, y, axis='y', s=3.0, **kwargs):
"""Replace y (x) data with a spline fit.
See scipy.interpolate.UnivariateSpline for spline details.
Args:
axis: Either 'x' or 'y'. Indicates the axis to be fit.
"""
from scipy.interpolate import UnivariateSpline as Spline
_verify_axis(axis)
if axis == 'y':
xlin = np.arange(0, len(x))
spl = Spline(xlin, y)
spl.set_smoothing_factor(s)
return x, spl(xlin)
if axis == 'x':
ylin = np.arange(0, len(y))
spl = Spline(ylin, x)
spl.set_smoothing_factor(s)
return spl(ylin), y
def flatten_saturation(x, y, threshold=200, polarity='+', **kwargs):
"""Subtract a linear term from your data based on a fit to the saturation
region.
"""
from scipy.optimize import curve_fit
if polarity == '+':
mask = x > threshold
elif polarity == '-':
mask = x < threshold
popt, pcov = curve_fit(line, x[mask], y[mask])
return x, y - line(x, *popt)
def _verify_axis(axis):
if axis not in ('x', 'y'):
raise ValueError('Arg "axis" must be "x" or "y", not {}'.format(axis))
def second_half(x, y, **kwargs):
N = len(x)
half = int((N-1)/2)
return x[half:], y[half:]
def first_half(x, y, **kwargs):
N = len(x)
half = int((N-1)/2)
return x[:half], y[:half]
def middle(x, y, **kwargs):
N = len(x)
half = int((N-1)/2)
Nseg = 100
return x[half-Nseg:N-1-Nseg], y[half-Nseg:N-1-Nseg]
def ith_cycle(x, y, i, ncyc, delta=0, **kwargs):
N = len(x)
cycN = N/ncyc
start, end = i*cycN+delta, (i+1)*cycN+delta
return x[start:end], y[start:end]
def vertical_offset(x, y, dy=0.1, **kwargs):
if not hasattr(vertical_offset, 'offset'):
vertical_offset.offset = 0.0
vertical_offset.offset += dy
return x, y + vertical_offset.offset
def normalize(x, y, xlim=None, ylim=None, n_avg=1, **kwargs):
"""Move the data to fit in the box defined by xlim and ylim.
Args:
xlim: float or (float, float). The x data will be scaled and
translated to fit in the specified x window. If a float x0 is
passed, the window will by (-x0, x0). If left as None, there
will be no change to this axis.
ylim: Same as xlim, but for y data.
n_avg: Number of points to average over when looking for max/min of
data. For example, if n_avg=10 instead of using x.max() the average
of the 10 greatest points would be used.
"""
res = []
for u, lim in zip((x, y), (xlim, ylim)):
if lim is None:
res.append(u)
continue
if not isinstance(lim, Iterable):
lim = (-lim, lim)
center = (lim[0] + lim[1]) / 2.0
width = lim[1] - lim[0]
u -= u.mean()
uwidth = 2 * (_max_n_points(np.abs(u), n_avg).mean())
res.append(u * (width / uwidth) + center)
return res[0], res[1]
def simple_normalize(x, y, n_avg=1, axis='y', **kwargs):
_verify_axis(axis)
if axis == 'y':
return x, y/_max_n_points(np.abs(y), n_avg).mean()
else:
return x/_max_n_points(np.abs(x), n_avg).mean(), y
def saturation_normalize(x, y, thresh=1.0, axis='y', **kwargs):
return x, y / _saturation_level(x, y, thresh)
# return x[np.abs(x) > thresh], y[np.abs(x) > thresh]
def _max_n_points(arr, n=1):
return _n_nearest_points(arr, n, arr.max())
def _min_n_points(arr, n=1):
return _n_nearest_points(arr, n, arr.min())
def _n_nearest_points(arr, n=1, x0=0.0, other_arr=None):
"""Return the n nearest points to x0."""
asind = np.argsort(np.abs(arr - x0))
if other_arr is None:
return arr[asind][:n]
else:
return (arr[asind][:n], other_arr[asind][:n])
def _saturation_level(x, y, thresh):
return np.abs(y)[np.abs(x) > thresh].mean()
def threshold_crop(x, y, thresh=np.float('inf'), axis='x', **kwargs):
"""Clip of all points that are above thresh.
Args:
thresh: all points greater than thresh (in data coords, not an index)
will be cut out of the data, for both axes.
axis: Either 'x' or 'y', indicating which axis will be compared to
thresh
"""
ind = np.abs(x) < thresh
return x[ind], y[ind]
def amr_normalize(x, y, thresh=300.0, amr_mag=1.0, angle=0.0, **kwargs):
y -= _amr_tail_mean(x, y, thresh)
y/= amr_mag
y += np.cos(angle)**2
return x, y
def _amr_tail_mean(x, y, thresh):
return y[np.abs(x) > np.abs(thresh)].mean()
| [
"numpy.abs",
"scipy.interpolate.UnivariateSpline",
"numpy.float",
"scipy.signal.medfilt",
"scipy.optimize.curve_fit",
"numpy.cos",
"numpy.concatenate"
] | [((1888, 1924), 'numpy.concatenate', 'np.concatenate', (['(x[-ks:], x, x[:ks])'], {}), '((x[-ks:], x, x[:ks]))\n', (1902, 1924), True, 'import numpy as np\n'), ((1933, 1969), 'numpy.concatenate', 'np.concatenate', (['(y[-ks:], y, y[:ks])'], {}), '((y[-ks:], y, y[:ks]))\n', (1947, 1969), True, 'import numpy as np\n'), ((4368, 4401), 'scipy.optimize.curve_fit', 'curve_fit', (['line', 'x[mask]', 'y[mask]'], {}), '(line, x[mask], y[mask])\n', (4377, 4401), False, 'from scipy.optimize import curve_fit\n'), ((7351, 7366), 'numpy.float', 'np.float', (['"""inf"""'], {}), "('inf')\n", (7359, 7366), True, 'import numpy as np\n'), ((965, 979), 'scipy.signal.medfilt', 'medfilt', (['x', 'ks'], {}), '(x, ks)\n', (972, 979), False, 'from scipy.signal import medfilt\n'), ((2002, 2016), 'scipy.signal.medfilt', 'medfilt', (['x', 'ks'], {}), '(x, ks)\n', (2009, 2016), False, 'from scipy.signal import medfilt\n'), ((3803, 3818), 'scipy.interpolate.UnivariateSpline', 'Spline', (['xlin', 'y'], {}), '(xlin, y)\n', (3809, 3818), True, 'from scipy.interpolate import UnivariateSpline as Spline\n'), ((3953, 3968), 'scipy.interpolate.UnivariateSpline', 'Spline', (['ylin', 'x'], {}), '(ylin, x)\n', (3959, 3968), True, 'from scipy.interpolate import UnivariateSpline as Spline\n'), ((7092, 7108), 'numpy.abs', 'np.abs', (['(arr - x0)'], {}), '(arr - x0)\n', (7098, 7108), True, 'import numpy as np\n'), ((7696, 7705), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (7702, 7705), True, 'import numpy as np\n'), ((7882, 7895), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (7888, 7895), True, 'import numpy as np\n'), ((1015, 1029), 'scipy.signal.medfilt', 'medfilt', (['y', 'ks'], {}), '(y, ks)\n', (1022, 1029), False, 'from scipy.signal import medfilt\n'), ((2052, 2066), 'scipy.signal.medfilt', 'medfilt', (['y', 'ks'], {}), '(y, ks)\n', (2059, 2066), False, 'from scipy.signal import medfilt\n'), ((7280, 7289), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (7286, 7289), True, 'import numpy as 
np\n'), ((7290, 7299), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (7296, 7299), True, 'import numpy as np\n'), ((7963, 7972), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (7969, 7972), True, 'import numpy as np\n'), ((7975, 7989), 'numpy.abs', 'np.abs', (['thresh'], {}), '(thresh)\n', (7981, 7989), True, 'import numpy as np\n'), ((6301, 6310), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (6307, 6310), True, 'import numpy as np\n'), ((6539, 6548), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (6545, 6548), True, 'import numpy as np\n'), ((6605, 6614), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (6611, 6614), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""D2 of the Rössler oscillator.
The estimates here match the "accepted" value of 1.991 quite closely.
"""
import numpy as np
import matplotlib.pyplot as plt
from nolitsa import d2, data, utils
x0 = [-3.2916983, -1.42162302, 0.02197593]
x = utils.rescale(data.roessler(length=5000, x0=x0)[1][:, 0])
dim = np.arange(1, 10 + 1)
tau = 14
plt.title(u'Local $D_2$ vs $r$ for Rössler oscillator')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')
for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=50,
r=utils.gprange(0.001, 1.0, 100)):
plt.semilogx(r[3:-3], d2.d2(r, c), color='#4682B4')
plt.semilogx(utils.gprange(0.001, 1.0, 100), 1.991 * np.ones(100),
color='#000000')
plt.show()
| [
"matplotlib.pyplot.title",
"nolitsa.d2.d2",
"matplotlib.pyplot.show",
"nolitsa.utils.gprange",
"numpy.ones",
"numpy.arange",
"nolitsa.data.roessler",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((356, 376), 'numpy.arange', 'np.arange', (['(1)', '(10 + 1)'], {}), '(1, 10 + 1)\n', (365, 376), True, 'import numpy as np\n'), ((387, 442), 'matplotlib.pyplot.title', 'plt.title', (['u"""Local $D_2$ vs $r$ for Rössler oscillator"""'], {}), "(u'Local $D_2$ vs $r$ for Rössler oscillator')\n", (396, 442), True, 'import matplotlib.pyplot as plt\n'), ((443, 469), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distance $r$"""'], {}), "('Distance $r$')\n", (453, 469), True, 'import matplotlib.pyplot as plt\n'), ((471, 496), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Local $D_2$"""'], {}), "('Local $D_2$')\n", (481, 496), True, 'import matplotlib.pyplot as plt\n'), ((768, 778), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (776, 778), True, 'import matplotlib.pyplot as plt\n'), ((684, 714), 'nolitsa.utils.gprange', 'utils.gprange', (['(0.001)', '(1.0)', '(100)'], {}), '(0.001, 1.0, 100)\n', (697, 714), False, 'from nolitsa import d2, data, utils\n'), ((581, 611), 'nolitsa.utils.gprange', 'utils.gprange', (['(0.001)', '(1.0)', '(100)'], {}), '(0.001, 1.0, 100)\n', (594, 611), False, 'from nolitsa import d2, data, utils\n'), ((640, 651), 'nolitsa.d2.d2', 'd2.d2', (['r', 'c'], {}), '(r, c)\n', (645, 651), False, 'from nolitsa import d2, data, utils\n'), ((724, 736), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (731, 736), True, 'import numpy as np\n'), ((305, 338), 'nolitsa.data.roessler', 'data.roessler', ([], {'length': '(5000)', 'x0': 'x0'}), '(length=5000, x0=x0)\n', (318, 338), False, 'from nolitsa import d2, data, utils\n')] |
import os
import time
import sys
import queue
import pandas as pd
import numpy as np
from multiprocessing import Process, Queue
from collections import defaultdict
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score, accuracy_score
from .data import categoric_to_numeric
class Dataset:
""" Dataset representation. """
def __init__(self, name, data, prepocess=None, isbinary=True):
self._name = name
self._data = data if prepocess is None else categoric_to_numeric(data)
if isbinary:
y = self._data['class']
yun = sorted(y.unique())
if -1 != yun[0]:
self._data.loc[y == yun[0], 'class'] = -1
if 1 != yun[1]:
self._data.loc[y == yun[1], 'class'] = 1
@property
def name(self):
return self._name
@property
def data(self):
return self._data
@property
def x(self):
x = self._data.filter(regex='^((?!class).)*$') # Remove `class` column.
x = (x - x.min()) / (x.max() - x.min() + 1e-20) # min-max normalization.
return x.values
@property
def y(self):
return self._data['class'].values
@property
def dimension(self):
try:
return self._data.shape
except:
return 0
class Benchmarking:
""" Class for benchmarking task. """
def __init__(self, preproces=('tonumeric', )):
self._datasets = {}
self._datapaths = {}
self._methods = {}
self._process = {}
self.prepocess = preproces
@property
def datasets(self):
return self._datasets
@property
def datapaths(self):
return list(self._datapaths.keys())
def add_dataset(self, name, data, preprocess):
""" Add a single dataset. """
if not issubclass(data.__class__, pd.DataFrame):
raise ValueError('`data` must be a pandas.DataFrame.')
self._datasets[name] = Dataset(name, data, preprocess)
def add_datapath(self, dpath, filetypes=('dat', 'data', 'csv'), sep=','):
""" Add a directory with only datasets to be used in the benchmarkig. """
if os.path.exists(dpath):
self._datapaths[dpath] = (filetypes, sep)
else:
raise FileNotFoundError('Directory not found.')
def add_method(self, name, method, params_init={}, params_fit={}):
""" Add a method for the bechmarking. """
if name in self._methods:
raise Exception('')
self._methods[name] = method, params_init, params_fit
def _load(self):
if len(self._datapaths) > 0:
for dpath, (filetypes, sep) in self._datapaths.items():
allfiles = os.listdir(dpath)
datasets = [file for file in allfiles
if os.path.isfile(os.path.join(dpath, file)) and
file.split('.')[-1] in filetypes]
for dnamef in datasets:
data = pd.read_csv(os.path.join(dpath, dnamef), sep=sep)
self.add_dataset(dnamef.split('.')[0], data, self.prepocess)
def run(self, dname=None, wait=5, maxprocess=None, folds=10, pathres=None):
""" Start the benchmarking. """
self._load()
if len(self._datasets) == 0:
raise Exception('No one dataset was added.')
maxprocess = os.cpu_count() if maxprocess is None else maxprocess
cprocess = 0
tprocess = 0
running = {}
ended = {}
methodsnames = list(self._methods.keys())
datasetsnames = list(self._datasets.keys())
endedall = False
cursorm = 0
cursord = 0
summary = pd.DataFrame()
kfold = KFold(n_splits=folds)
kfolds_index = {}
while not endedall:
while cursorm < len(methodsnames) and cprocess < maxprocess:
mname = methodsnames[cursorm]
method, params_init, params_fit = self._methods[mname]
while cprocess < maxprocess:
dname = datasetsnames[cursord]
dataset = self._datasets[dname]
exec_name = (mname + '|' + dname).upper()
x = dataset.x
y = dataset.y
if dname not in kfolds_index:
kfolds_index[dname] = list(kfold.split(x))
q = Queue()
process = Process(target=self._run, name=exec_name,
args=(method, x, y, params_init, params_fit,
kfolds_index[dname], q))
running[exec_name] = process, q
cprocess += 1
tprocess += 1
cursord += 1
print(f'Starting process: #{cprocess}/{tprocess}-{process.name}')
process.start()
if cursord == len(datasetsnames): # Has started for all datasets to the current method.
cursord = 0
cursorm += 1
break
time.sleep(wait) # Wait `wait` to eval if the each process has endend.
for m in list(running.keys()):
(p, q) = running[m]
if not p.is_alive():
meth, dat = m.split('|')
res = q.get()
ended[m] = (running.pop(m), res)
summary.loc[dat, meth] = res['result']
cprocess -= 1
# Save results in a csv file at current directory or, if given, `pathres`.
path = './'
if pathres is not None:
path = pathres
summary.to_csv(path + 'resultados.csv', index=True, index_label='Datasets')
print('Running: ', len(running), ' | Ended: ', len(ended))
if len(ended) == (len(self._methods) * len(self._datasets)):
endedall = True
print(summary)
def _run(self, method, X, Y, params_init, params_fit, kfolds_index, q):
""" Auxiliar method for multiprocessing. """
res = []
for (train_index, test_index) in kfolds_index:
xtrain, ytrain = X[train_index, :], Y[train_index]
xtest, ytest = X[test_index, :], Y[test_index]
method_instance = method(**params_init)
method_instance.fit(xtrain, ytrain, **params_fit)
ypred_test = method_instance.predict(xtest)
res.append(f1_score(ytest, ypred_test))
res = np.mean(res)
try:
q.put({'result': res})
except Exception as e:
print(e) | [
"pandas.DataFrame",
"os.path.exists",
"sklearn.model_selection.KFold",
"time.sleep",
"os.cpu_count",
"sklearn.metrics.f1_score",
"numpy.mean",
"multiprocessing.Queue",
"multiprocessing.Process",
"os.path.join",
"os.listdir"
] | [((2201, 2222), 'os.path.exists', 'os.path.exists', (['dpath'], {}), '(dpath)\n', (2215, 2222), False, 'import os\n'), ((3759, 3773), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3771, 3773), True, 'import pandas as pd\n'), ((3790, 3811), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'folds'}), '(n_splits=folds)\n', (3795, 3811), False, 'from sklearn.model_selection import KFold\n'), ((6709, 6721), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (6716, 6721), True, 'import numpy as np\n'), ((3435, 3449), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (3447, 3449), False, 'import os\n'), ((5219, 5235), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (5229, 5235), False, 'import time\n'), ((2758, 2775), 'os.listdir', 'os.listdir', (['dpath'], {}), '(dpath)\n', (2768, 2775), False, 'import os\n'), ((6665, 6692), 'sklearn.metrics.f1_score', 'f1_score', (['ytest', 'ypred_test'], {}), '(ytest, ypred_test)\n', (6673, 6692), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((4484, 4491), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (4489, 4491), False, 'from multiprocessing import Process, Queue\n'), ((4522, 4637), 'multiprocessing.Process', 'Process', ([], {'target': 'self._run', 'name': 'exec_name', 'args': '(method, x, y, params_init, params_fit, kfolds_index[dname], q)'}), '(target=self._run, name=exec_name, args=(method, x, y, params_init,\n params_fit, kfolds_index[dname], q))\n', (4529, 4637), False, 'from multiprocessing import Process, Queue\n'), ((3057, 3084), 'os.path.join', 'os.path.join', (['dpath', 'dnamef'], {}), '(dpath, dnamef)\n', (3069, 3084), False, 'import os\n'), ((2880, 2905), 'os.path.join', 'os.path.join', (['dpath', 'file'], {}), '(dpath, file)\n', (2892, 2905), False, 'import os\n')] |
import unittest
import pandas as pd
import category_encoders as ce
import numpy as np
__author__ = 'willmcginnis'
class TestDist(unittest.TestCase):
"""
"""
def test_dist(self):
data = np.array([
['apple', None],
['peach', 'lemon']
])
encoder = ce.OrdinalEncoder(impute_missing=True)
encoder.fit(data)
a = encoder.transform(data)
print(a)
self.assertEqual(a.values[0, 1], -1)
self.assertEqual(a.values[1, 1], 0)
encoder = ce.OrdinalEncoder(impute_missing=False)
encoder.fit(data)
a = encoder.transform(data)
self.assertTrue(np.isnan(a.values[0, 1]))
self.assertEqual(a.values[1, 1], 0) | [
"numpy.isnan",
"numpy.array",
"category_encoders.OrdinalEncoder"
] | [((209, 256), 'numpy.array', 'np.array', (["[['apple', None], ['peach', 'lemon']]"], {}), "([['apple', None], ['peach', 'lemon']])\n", (217, 256), True, 'import numpy as np\n'), ((309, 347), 'category_encoders.OrdinalEncoder', 'ce.OrdinalEncoder', ([], {'impute_missing': '(True)'}), '(impute_missing=True)\n', (326, 347), True, 'import category_encoders as ce\n'), ((535, 574), 'category_encoders.OrdinalEncoder', 'ce.OrdinalEncoder', ([], {'impute_missing': '(False)'}), '(impute_missing=False)\n', (552, 574), True, 'import category_encoders as ce\n'), ((661, 685), 'numpy.isnan', 'np.isnan', (['a.values[0, 1]'], {}), '(a.values[0, 1])\n', (669, 685), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import argparse
import numpy as np
from matplotlib.widgets import CheckButtons
import file_reader
from angle_calc import *
def get_body_part_name(i):
bodyparts = ["nose", ""]
return bodyparts[i]
if __name__ == '__main__':
# 导入包
# 创建解析器
parser = argparse.ArgumentParser()
# 添加位置参数(positional arguments)
parser.add_argument('--file', help='input a filename')
args = parser.parse_args()
filename = args.file
print(filename)
t, xys = file_reader.read_x_y_score_list(args.file)
## python list -> np.array
xy1s = np.array(xys[1])
xy2s = np.array(xys[2])
xy3s = np.array(xys[3])
xy4s = np.array(xys[4])
xy8s = np.array(xys[8])
xy9s = np.array(xys[9])
xy10s = np.array(xys[10])
xy11s = np.array(xys[11])
xy22s = np.array(xys[22])
# 髋关节角度
hip_angle = calc_angle(xy1s, xy10s, xy9s)
# 大臂和躯干的角度
shoulder_angle = calc_angle(xy3s, xy9s, xy2s)
# 大臂小臂角度
elbow_angle = calc_angle(xy2s, xy4s, xy3s)
# 膝盖角度
knee_angle = calc_angle(xy9s, xy11s, xy10s)
# 脚踝角度
ankle_angle = calc_angle(xy10s, xy22s, xy11s)
# 肩膀高度
shoulder_height = np.subtract(720, xy2s[..., 1])
# 手腕高度
wrist_height = np.subtract(720, xy4s[..., 1])
# 手腕x
wrist_x = xy4s[..., 0]
# 膝盖高度
knee_height = np.subtract(720, xy10s[..., 1])
# 手腕高度
elbow_height = np.subtract(720, xy3s[..., 1])
# 髋高
hip_height = np.subtract(720, xy9s[..., 1])
# 脚踝高度
ankle_height = np.subtract(720, xy11s[..., 1])
# 作图
fig, ax = plt.subplots()
plt.title(filename)
l0, = ax.plot(t, shoulder_height, label='shoulder height')
l1, = ax.plot(t, wrist_height, label='wrist height')
l7, = ax.plot(t, hip_height, label='hip height')
l6, = ax.plot(t, knee_height, label='knee height')
l9, = ax.plot(t, elbow_height, label='elbow height')
l11, = ax.plot(t, ankle_height, label='ankle height')
l2, = ax.plot(t, hip_angle, label='hip angle', linestyle='dashed')
l3, = ax.plot(t, shoulder_angle, label='shoulder angle', linestyle='dashed')
l4, = ax.plot(t, elbow_angle, label='elbow angle', linestyle='dashed')
l5, = ax.plot(t, knee_angle, label='knee angle', linestyle='dashed')
l8, = ax.plot(t, wrist_x, label='wrist x', linestyle='dashdot')
l10, = ax.plot(t, ankle_angle, label='ankle angle', linestyle='dashdot')
l12, = ax.plot(t, knee_angle + hip_angle, label='knee + hip angle', linestyle='dashdot')
lines = [l0, l1, l6, l7, l9, l11, l2, l3, l4, l5, l8, l10]
ax.set_yticks(np.arange(0, 720, 50))
plt.legend()
plt.subplots_adjust(left=0.2)
plt.grid(True)
# Make checkbuttons with all plotted lines with correct visibility
rax = plt.axes([0.05, 0.4, 0.1, 0.15])
labels = [str(line.get_label()) for line in lines]
visibility = [line.get_visible() for line in lines]
check = CheckButtons(rax, labels, visibility)
def func(label):
index = labels.index(label)
lines[index].set_visible(not lines[index].get_visible())
plt.draw()
check.on_clicked(func)
plt.gcf().canvas.set_window_title(filename)
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.widgets.CheckButtons",
"numpy.subtract",
"argparse.ArgumentParser",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.legend",
"file_reader.read_x_y_score_list",
"matplotlib.pyplot.draw",
"numpy.array",
"numpy.arange",
"matplotlib.pyp... | [((301, 326), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (324, 326), False, 'import argparse\n'), ((511, 553), 'file_reader.read_x_y_score_list', 'file_reader.read_x_y_score_list', (['args.file'], {}), '(args.file)\n', (542, 553), False, 'import file_reader\n'), ((596, 612), 'numpy.array', 'np.array', (['xys[1]'], {}), '(xys[1])\n', (604, 612), True, 'import numpy as np\n'), ((624, 640), 'numpy.array', 'np.array', (['xys[2]'], {}), '(xys[2])\n', (632, 640), True, 'import numpy as np\n'), ((652, 668), 'numpy.array', 'np.array', (['xys[3]'], {}), '(xys[3])\n', (660, 668), True, 'import numpy as np\n'), ((680, 696), 'numpy.array', 'np.array', (['xys[4]'], {}), '(xys[4])\n', (688, 696), True, 'import numpy as np\n'), ((708, 724), 'numpy.array', 'np.array', (['xys[8]'], {}), '(xys[8])\n', (716, 724), True, 'import numpy as np\n'), ((736, 752), 'numpy.array', 'np.array', (['xys[9]'], {}), '(xys[9])\n', (744, 752), True, 'import numpy as np\n'), ((765, 782), 'numpy.array', 'np.array', (['xys[10]'], {}), '(xys[10])\n', (773, 782), True, 'import numpy as np\n'), ((795, 812), 'numpy.array', 'np.array', (['xys[11]'], {}), '(xys[11])\n', (803, 812), True, 'import numpy as np\n'), ((825, 842), 'numpy.array', 'np.array', (['xys[22]'], {}), '(xys[22])\n', (833, 842), True, 'import numpy as np\n'), ((1179, 1209), 'numpy.subtract', 'np.subtract', (['(720)', 'xy2s[..., 1]'], {}), '(720, xy2s[..., 1])\n', (1190, 1209), True, 'import numpy as np\n'), ((1240, 1270), 'numpy.subtract', 'np.subtract', (['(720)', 'xy4s[..., 1]'], {}), '(720, xy4s[..., 1])\n', (1251, 1270), True, 'import numpy as np\n'), ((1337, 1368), 'numpy.subtract', 'np.subtract', (['(720)', 'xy10s[..., 1]'], {}), '(720, xy10s[..., 1])\n', (1348, 1368), True, 'import numpy as np\n'), ((1399, 1429), 'numpy.subtract', 'np.subtract', (['(720)', 'xy3s[..., 1]'], {}), '(720, xy3s[..., 1])\n', (1410, 1429), True, 'import numpy as np\n'), ((1456, 1486), 'numpy.subtract', 
'np.subtract', (['(720)', 'xy9s[..., 1]'], {}), '(720, xy9s[..., 1])\n', (1467, 1486), True, 'import numpy as np\n'), ((1517, 1548), 'numpy.subtract', 'np.subtract', (['(720)', 'xy11s[..., 1]'], {}), '(720, xy11s[..., 1])\n', (1528, 1548), True, 'import numpy as np\n'), ((1572, 1586), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1584, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1591, 1610), 'matplotlib.pyplot.title', 'plt.title', (['filename'], {}), '(filename)\n', (1600, 1610), True, 'import matplotlib.pyplot as plt\n'), ((2600, 2612), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2610, 2612), True, 'import matplotlib.pyplot as plt\n'), ((2617, 2646), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.2)'}), '(left=0.2)\n', (2636, 2646), True, 'import matplotlib.pyplot as plt\n'), ((2651, 2665), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2659, 2665), True, 'import matplotlib.pyplot as plt\n'), ((2748, 2780), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.05, 0.4, 0.1, 0.15]'], {}), '([0.05, 0.4, 0.1, 0.15])\n', (2756, 2780), True, 'import matplotlib.pyplot as plt\n'), ((2904, 2941), 'matplotlib.widgets.CheckButtons', 'CheckButtons', (['rax', 'labels', 'visibility'], {}), '(rax, labels, visibility)\n', (2916, 2941), False, 'from matplotlib.widgets import CheckButtons\n'), ((3163, 3173), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3171, 3173), True, 'import matplotlib.pyplot as plt\n'), ((2573, 2594), 'numpy.arange', 'np.arange', (['(0)', '(720)', '(50)'], {}), '(0, 720, 50)\n', (2582, 2594), True, 'import numpy as np\n'), ((3072, 3082), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3080, 3082), True, 'import matplotlib.pyplot as plt\n'), ((3115, 3124), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3122, 3124), True, 'import matplotlib.pyplot as plt\n')] |
import os
import pandas as pd ; import numpy as np
import datetime as dt
###########################################################################################################
### USED FUNCTIONS
###########################################################################################################
###########################################################################################################
### LOADING DATA & format manipulations
###########################################################################################################
#os.chdir("/home/hubert/Downloads/Data Cleaned/best")
os.chdir("/home/hubert/data/Data Cleaned/best")
BCH_ob_best = pd.read_csv("BCH_ob_best", sep=',')
#os.chdir("/home/hubert/Downloads/Data Cleaned/raw")
os.chdir("/home/hubert/data/Data Cleaned/raw")
BCH_trade = pd.read_csv("BCH_trade.csv", sep=',')
# transfo colonne SELL en colonne DIR {-1 / +1}
BCH_trade.loc[BCH_trade.sell==True,"dir"] = -1
BCH_trade.loc[BCH_trade.sell==False,"dir"] = 1
# elimine les colonnes inutiles dans TRADES
BCH_trade.drop(labels = ["id","exchange","symbol","day","sell","hour"], axis = 1, inplace = True)
# change datetime columns into actual datetime-type
BCH_ob_best.datetime = pd.to_datetime(BCH_ob_best.datetime, format='%d/%m/%Y %H:%M:%S')
BCH_trade.datetime = pd.to_datetime(BCH_trade.datetime, format='%d/%m/%Y %H:%M:%S')
###########################################################################################################
### SORT both dataframes BY DATETIME
###########################################################################################################
BCH_ob_best.sort_values(by=['datetime'], inplace=True, ascending=True)
BCH_ob_best.reset_index(inplace=True)
BCH_ob_best.drop(labels = ["index"], axis = 1, inplace=True)
BCH_trade.sort_values(by=['datetime'], inplace=True, ascending=True)
BCH_trade.reset_index(inplace=True)
BCH_trade.drop(labels = ["index"], axis = 1, inplace=True)
###########################################################################################################
### Add column in TRADES : corresponding OB datettime to Trade datetime
###########################################################################################################
tt=np.array(BCH_trade.datetime)
ot=np.array(BCH_ob_best.datetime)
nt = np.zeros(tt.shape, dtype='datetime64[ns]')
cursor=0
for i, t in enumerate(tt):
if i%1000000==0:
print(i)
for j, o in enumerate(ot[cursor:]):
if t < o:
nt[i]=ot[cursor+j-1]
cursor+=j
break
elif t == o:
nt[i]=ot[cursor+j]
cursor+=j
break
nt=pd.Series(nt)
BCH_trade["corresp_OB_datetime"]=nt
# retirer les qqes derniers trades qui n'ont pas ob.datetime correspondant (25 derniers trades n'ont pas d'equivalent en ob)
BCH_trade = BCH_trade.loc[BCH_trade.corresp_OB_datetime!=np.datetime64('1970-01-01 00:00:00') , :]
###################################################################################################################
### DURATION of quotes
##################################################################################################################
# 1) we start by computing the delta_times between each consecutive pair of quotes
BCH_ob_best["delta_time"] = (BCH_ob_best.datetime.diff().fillna(0)).apply(pd.Timedelta.total_seconds)
# la premiere obs devrait poser probleme (pcq on fait une diff du current - precedent)
# donc nous retirerons les rows correspondant à la premiere date de OB dans les deux dataframes
# enregistrer la date qui pose probleme d abord
date_cassepied = BCH_ob_best.iloc[0].datetime
# retirer la premiere observation de TRADES car delta_time non-définie pour le premier element
BCH_ob_best = BCH_ob_best.iloc[1:]
BCH_ob_best.reset_index(drop=True, inplace=True)
# du coup retirer aussi les equivalents dans TRADES, en utilisant la date sauvegardée 'date_cassepied'
BCH_trade = BCH_trade.loc[BCH_trade.corresp_OB_datetime!=np.datetime64(date_cassepied) , :]
BCH_trade.reset_index(drop=True, inplace=True)
# retirer tous les trades qui n'ont pas un corresp ob time inferieur ou egal à la minute
L = list((BCH_ob_best.loc[BCH_ob_best.delta_time>60,"datetime"]))
idx = BCH_trade.loc[BCH_trade.corresp_OB_datetime.isin(L)].index
BCH_trade.drop(idx, inplace=True)
BCH_trade.reset_index(drop=True, inplace=True)
# 2) We check whether the quote has changed through time and we compute the DURATION
# for this we use our previously computed delta times & use numpy for faster execution
A = np.array(BCH_ob_best.PA01)
B = np.array(BCH_ob_best.PB01)
T = np.array(BCH_ob_best.datetime)
D = np.array(BCH_ob_best.delta_time)
TW = np.zeros(T.shape)
MAX_DELTA_TIME = 60 # max 60 seconds between consecutive order book times
for index, (a, b, t, d) in enumerate(zip(A, B, T, D)):
if index == 0:
prev_PB = b
prev_PA = a
tw = 0
block_count = 0
else:
if (a != prev_PA or b != prev_PB
or index == len(T) - 1 or d > MAX_DELTA_TIME):
if block_count > 1:
x = TW[index -1]
TW[index - block_count:index] = x
tw = 0
block_count = 0
prev_PB = b
prev_PA = a
if d <= MAX_DELTA_TIME:
tw += d
else: # d > MAX_DELTA_TIME
tw = MAX_DELTA_TIME
block_count += 1
TW[index] = tw
BCH_ob_best.insert(len(BCH_ob_best.columns), 'tw', TW)
BCH_ob_best.drop(labels = ["delta_time"], axis = 1, inplace=True)
#################################################################################################################
### Calcul des proxys de liquidité EX ANTE
##################################################################################################################
# D'abord calculer le midpoint quote price
BCH_ob_best["midpoint"] = (BCH_ob_best["PA01"] + BCH_ob_best["PB01"]) / 2
# depth
BCH_ob_best["DEPTH"] = (BCH_ob_best["QA01"] + BCH_ob_best["QB01"]) / 2
# PQS" Proportional (percent) Quoted Spread "
BCH_ob_best["PQS"] = (BCH_ob_best["PA01"] - BCH_ob_best["PB01"]) / BCH_ob_best["midpoint"]
####################################################################################################################
### MERGE TRADE & OB
####################################################################################################################
#remove duplicates on datetimes, by taking the median of every feature distrib
BCH_ob_group = BCH_ob_best.groupby(["datetime"], as_index=False).median()
# fusion des tables
BCH_merged = pd.merge(left = BCH_trade, right = BCH_ob_group, how = "left",left_on = "corresp_OB_datetime", right_on = "datetime" )
BCH_merged.drop(labels = ["datetime_y"], axis = 1, inplace = True)
BCH_merged = BCH_merged.rename(columns={"datetime_x":"datetime"})
####################################################################################################################
### Calcul des proxys de liquidité EX POST
####################################################################################################################
# Proportional (percent) Effective Spread
BCH_merged["PES"] = (2 * BCH_merged.dir * (BCH_merged.price - BCH_merged.midpoint)) / BCH_merged.midpoint
# Proportional (percent) Trade Spread
BCH_merged["PTS"] = BCH_merged.PES / 2
#########################################################################################################
### BUNDLE data into 5 min intervals
##########################################################################################################
# donc si nous prenons des intervalles de 5 minutes nous aurons
# au minimumm des intervalles avec 5 trades
def FiveMinClassifier(instant):
discard = dt.timedelta( minutes = instant.minute % 5, seconds = instant.second)
instant -= discard
if discard <= dt.timedelta(minutes=5):
instant += dt.timedelta(minutes=5)
return instant
# proceed to bundle by 5min
# example: 00:05:00 interval will hold contain all trades from 00:00:00 up to 00:05:00
# remove interval with less than 3 trades
BCH_merged["interval"] = BCH_merged.corresp_OB_datetime.apply(FiveMinClassifier)
a = pd.DataFrame(BCH_merged.groupby(["interval"]).count()["price"])
a.reset_index(level=0, inplace=True)
S = list(a[a.price<3].interval)
BCH_merged = BCH_merged.loc[~BCH_merged.interval.isin(S)]
# os.chdir("/home/hubert/Downloads/Data Cleaned/proxys")
os.chdir("/home/hubert/data/Data Cleaned/proxys")
BCH_medianized_proxys = BCH_merged.groupby(["interval"]).median()
BCH_medianized_proxys.to_csv("BCH_med_prox", index=True)
| [
"pandas.read_csv",
"numpy.datetime64",
"pandas.merge",
"numpy.zeros",
"pandas.to_datetime",
"numpy.array",
"pandas.Series",
"datetime.timedelta",
"os.chdir"
] | [((620, 667), 'os.chdir', 'os.chdir', (['"""/home/hubert/data/Data Cleaned/best"""'], {}), "('/home/hubert/data/Data Cleaned/best')\n", (628, 667), False, 'import os\n'), ((682, 717), 'pandas.read_csv', 'pd.read_csv', (['"""BCH_ob_best"""'], {'sep': '""","""'}), "('BCH_ob_best', sep=',')\n", (693, 717), True, 'import pandas as pd\n'), ((772, 818), 'os.chdir', 'os.chdir', (['"""/home/hubert/data/Data Cleaned/raw"""'], {}), "('/home/hubert/data/Data Cleaned/raw')\n", (780, 818), False, 'import os\n'), ((831, 868), 'pandas.read_csv', 'pd.read_csv', (['"""BCH_trade.csv"""'], {'sep': '""","""'}), "('BCH_trade.csv', sep=',')\n", (842, 868), True, 'import pandas as pd\n'), ((1234, 1298), 'pandas.to_datetime', 'pd.to_datetime', (['BCH_ob_best.datetime'], {'format': '"""%d/%m/%Y %H:%M:%S"""'}), "(BCH_ob_best.datetime, format='%d/%m/%Y %H:%M:%S')\n", (1248, 1298), True, 'import pandas as pd\n'), ((1321, 1383), 'pandas.to_datetime', 'pd.to_datetime', (['BCH_trade.datetime'], {'format': '"""%d/%m/%Y %H:%M:%S"""'}), "(BCH_trade.datetime, format='%d/%m/%Y %H:%M:%S')\n", (1335, 1383), True, 'import pandas as pd\n'), ((2270, 2298), 'numpy.array', 'np.array', (['BCH_trade.datetime'], {}), '(BCH_trade.datetime)\n', (2278, 2298), True, 'import numpy as np\n'), ((2302, 2332), 'numpy.array', 'np.array', (['BCH_ob_best.datetime'], {}), '(BCH_ob_best.datetime)\n', (2310, 2332), True, 'import numpy as np\n'), ((2338, 2380), 'numpy.zeros', 'np.zeros', (['tt.shape'], {'dtype': '"""datetime64[ns]"""'}), "(tt.shape, dtype='datetime64[ns]')\n", (2346, 2380), True, 'import numpy as np\n'), ((2682, 2695), 'pandas.Series', 'pd.Series', (['nt'], {}), '(nt)\n', (2691, 2695), True, 'import pandas as pd\n'), ((4582, 4608), 'numpy.array', 'np.array', (['BCH_ob_best.PA01'], {}), '(BCH_ob_best.PA01)\n', (4590, 4608), True, 'import numpy as np\n'), ((4613, 4639), 'numpy.array', 'np.array', (['BCH_ob_best.PB01'], {}), '(BCH_ob_best.PB01)\n', (4621, 4639), True, 'import numpy as np\n'), ((4644, 4674), 
'numpy.array', 'np.array', (['BCH_ob_best.datetime'], {}), '(BCH_ob_best.datetime)\n', (4652, 4674), True, 'import numpy as np\n'), ((4679, 4711), 'numpy.array', 'np.array', (['BCH_ob_best.delta_time'], {}), '(BCH_ob_best.delta_time)\n', (4687, 4711), True, 'import numpy as np\n'), ((4717, 4734), 'numpy.zeros', 'np.zeros', (['T.shape'], {}), '(T.shape)\n', (4725, 4734), True, 'import numpy as np\n'), ((6609, 6722), 'pandas.merge', 'pd.merge', ([], {'left': 'BCH_trade', 'right': 'BCH_ob_group', 'how': '"""left"""', 'left_on': '"""corresp_OB_datetime"""', 'right_on': '"""datetime"""'}), "(left=BCH_trade, right=BCH_ob_group, how='left', left_on=\n 'corresp_OB_datetime', right_on='datetime')\n", (6617, 6722), True, 'import pandas as pd\n'), ((8471, 8520), 'os.chdir', 'os.chdir', (['"""/home/hubert/data/Data Cleaned/proxys"""'], {}), "('/home/hubert/data/Data Cleaned/proxys')\n", (8479, 8520), False, 'import os\n'), ((7777, 7841), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(instant.minute % 5)', 'seconds': 'instant.second'}), '(minutes=instant.minute % 5, seconds=instant.second)\n', (7789, 7841), True, 'import datetime as dt\n'), ((7888, 7911), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (7900, 7911), True, 'import datetime as dt\n'), ((7932, 7955), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (7944, 7955), True, 'import datetime as dt\n'), ((2915, 2951), 'numpy.datetime64', 'np.datetime64', (['"""1970-01-01 00:00:00"""'], {}), "('1970-01-01 00:00:00')\n", (2928, 2951), True, 'import numpy as np\n'), ((4020, 4049), 'numpy.datetime64', 'np.datetime64', (['date_cassepied'], {}), '(date_cassepied)\n', (4033, 4049), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import argparse
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import h5py
import qusp
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="more verbose output")
parser.add_argument("--plate", type=int, default=None,
help="plate number")
parser.add_argument("--mjd", type=int, default=None,
help="mjd")
parser.add_argument("--fiber", type=int, default=None,
help="fiber id")
parser.add_argument("--target", type=str, default=None,
help="target string")
parser.add_argument("--wave-min", type=float, default=3600,
help="wavelength min")
parser.add_argument("--wave-max", type=float, default=10500,
help="wavelength max")
parser.add_argument("--outdir", type=str, default=None,
help="output filename")
parser.add_argument("--tpcorr", type=str, default=None,
help="throughput correction filename")
parser.add_argument("--target-index", type=int, default=None)
qusp.paths.Paths.add_args(parser)
qusp.target.add_args(parser)
args = parser.parse_args()
# setup boss data directory path
paths = qusp.Paths(**qusp.Paths.from_args(args))
# wavelength range limits
wave_min = qusp.wavelength.Wavelength(args.wave_min)
wave_max = qusp.wavelength.Wavelength(args.wave_max)
# read target
if args.targets and args.target_index is not None:
target_list = qusp.target.load_target_list_from_args(args)
target = target_list[args.target_index]
elif args.target:
target = qusp.Target.from_string(args.target)
elif args.plate and args.mjd and args.fiber:
target = qusp.Target.from_plate_mjd_fiber(args.plate, args.mjd, args.fiber)
else:
raise RuntimeError('Invalid target specification.')
# load target's spectrum
combined = qusp.target.get_combined_spectrum(target, paths)
fig = plt.figure(figsize=(14, 6))
badpixels = np.where(combined.ivar.values == 0)
plt.plot(combined.wavelength, combined.flux.values, color='black', lw=.5)#, marker='+', markersize=3, lw=0)
if args.tpcorr:
import h5py
import scipy.interpolate
tpcorr = h5py.File(args.tpcorr)
tpcorr_wave = tpcorr['wave'].value
tpcorr_value = tpcorr['/'.join(target.to_string().split('-'))].value
correction = scipy.interpolate.interp1d(tpcorr_wave, tpcorr_value, kind='linear', copy=False)
corrected = combined.create_corrected(correction)
plt.plot(corrected.wavelength, corrected.flux.values, color='blue', lw=.5)#, marker='+', markersize=3, lw=0)
#y_err = 1/np.sqrt(combined.ivar.values)
#y_err_lower = combined.flux.values - y_err
#y_err_upper = combined.flux.values + y_err
#plt.fill_between(combined.wavelength, y_err_lower, y_err_upper, facecolor='gray', alpha=.5, lw=0)
#plt.errorbar(combined.wavelength, combined.flux.values, y_err, color='blue', marker='+', ls='None', lw=.2, mew=0)
plt.xlim([wave_min, wave_max])
ymin = 0
ymax = 60
plt.ylim([ymin, ymax])
# quasar_lines = qusp.wavelength.load_wavelengths('quasar')
# redshifted_quasar_lines = []
# for line in quasar_lines:
# redshifted_quasar_lines.append(qusp.wavelength.LabeledWavelength(line*(1+target['z']), line.label))
# qusp.wavelength.draw_lines(redshifted_quasar_lines, 0.895, -0.05, ls='--', color='black')
# qusp.wavelength.draw_lines(qusp.wavelength.load_wavelengths('sky', ignore_labels=True),
# 0.01, 0.1, color='magenta', alpha=.3)
plt.title(target.to_string())
plt.ylabel(r'Flux $(10^{-17} erg/cm^2/s/\AA)$')
plt.xlabel(r'Observed Wavlength $(\AA)$')
plt.grid()
filename = target.to_string()+'.png'
if args.outdir:
filename = os.path.join(args.output, filename)
fig.savefig(filename, bbox_inches='tight')
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"qusp.wavelength.Wavelength",
"matplotlib.pyplot.figure",
"qusp.Paths.from_args",
"qusp.target.add_args",
"qusp.Target.from_plate_mjd_fiber",
"h5py.File",
"matplotlib.pyplot.ylim",
"qusp.paths.Paths.add_args",
"qusp.target.get_combined_spectrum",
"qusp.target.load_targ... | [((84, 98), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (91, 98), True, 'import matplotlib as mpl\n'), ((219, 298), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (242, 298), False, 'import argparse\n'), ((1201, 1234), 'qusp.paths.Paths.add_args', 'qusp.paths.Paths.add_args', (['parser'], {}), '(parser)\n', (1226, 1234), False, 'import qusp\n'), ((1239, 1267), 'qusp.target.add_args', 'qusp.target.add_args', (['parser'], {}), '(parser)\n', (1259, 1267), False, 'import qusp\n'), ((1436, 1477), 'qusp.wavelength.Wavelength', 'qusp.wavelength.Wavelength', (['args.wave_min'], {}), '(args.wave_min)\n', (1462, 1477), False, 'import qusp\n'), ((1493, 1534), 'qusp.wavelength.Wavelength', 'qusp.wavelength.Wavelength', (['args.wave_max'], {}), '(args.wave_max)\n', (1519, 1534), False, 'import qusp\n'), ((2048, 2096), 'qusp.target.get_combined_spectrum', 'qusp.target.get_combined_spectrum', (['target', 'paths'], {}), '(target, paths)\n', (2081, 2096), False, 'import qusp\n'), ((2108, 2135), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 6)'}), '(figsize=(14, 6))\n', (2118, 2135), True, 'import matplotlib.pyplot as plt\n'), ((2153, 2188), 'numpy.where', 'np.where', (['(combined.ivar.values == 0)'], {}), '(combined.ivar.values == 0)\n', (2161, 2188), True, 'import numpy as np\n'), ((2194, 2268), 'matplotlib.pyplot.plot', 'plt.plot', (['combined.wavelength', 'combined.flux.values'], {'color': '"""black"""', 'lw': '(0.5)'}), "(combined.wavelength, combined.flux.values, color='black', lw=0.5)\n", (2202, 2268), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3212), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[wave_min, wave_max]'], {}), '([wave_min, wave_max])\n', (3190, 3212), True, 'import matplotlib.pyplot as plt\n'), ((3244, 3266), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[ymin, 
ymax]'], {}), '([ymin, ymax])\n', (3252, 3266), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux $(10^{-17} erg/cm^2/s/\\\\AA)$"""'], {}), "('Flux $(10^{-17} erg/cm^2/s/\\\\AA)$')\n", (3797, 3834), True, 'import matplotlib.pyplot as plt\n'), ((3839, 3880), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Observed Wavlength $(\\\\AA)$"""'], {}), "('Observed Wavlength $(\\\\AA)$')\n", (3849, 3880), True, 'import matplotlib.pyplot as plt\n'), ((3886, 3896), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3894, 3896), True, 'import matplotlib.pyplot as plt\n'), ((1631, 1675), 'qusp.target.load_target_list_from_args', 'qusp.target.load_target_list_from_args', (['args'], {}), '(args)\n', (1669, 1675), False, 'import qusp\n'), ((2393, 2415), 'h5py.File', 'h5py.File', (['args.tpcorr'], {}), '(args.tpcorr)\n', (2402, 2415), False, 'import h5py\n'), ((2704, 2779), 'matplotlib.pyplot.plot', 'plt.plot', (['corrected.wavelength', 'corrected.flux.values'], {'color': '"""blue"""', 'lw': '(0.5)'}), "(corrected.wavelength, corrected.flux.values, color='blue', lw=0.5)\n", (2712, 2779), True, 'import matplotlib.pyplot as plt\n'), ((1362, 1388), 'qusp.Paths.from_args', 'qusp.Paths.from_args', (['args'], {}), '(args)\n', (1382, 1388), False, 'import qusp\n'), ((1763, 1799), 'qusp.Target.from_string', 'qusp.Target.from_string', (['args.target'], {}), '(args.target)\n', (1786, 1799), False, 'import qusp\n'), ((1866, 1932), 'qusp.Target.from_plate_mjd_fiber', 'qusp.Target.from_plate_mjd_fiber', (['args.plate', 'args.mjd', 'args.fiber'], {}), '(args.plate, args.mjd, args.fiber)\n', (1898, 1932), False, 'import qusp\n')] |
#!/usr/bin/env python
import pytest
from pytest import approx
import xarray
import numpy as np
from pathlib import Path
from datetime import datetime
import georinex as gr
#
R = Path(__file__).parent / 'data'
def test_blank(tmp_path):
fn = R/'blank3.10o'
obs = gr.load(fn)
assert obs is None
outdir = tmp_path
gr.load(fn, outdir)
times = gr.gettime(fn)
assert times is None
def test_minimal(tmp_path):
fn = R/'minimal3.10o'
obs = gr.load(fn)
assert isinstance(obs, xarray.Dataset), f'{type(obs)} should be xarray.Dataset'
outdir = tmp_path
gr.load(fn, outdir)
outfn = (outdir / (fn.name + '.nc'))
assert outfn.is_file()
assert obs.equals(gr.load(outfn)), f'{outfn} {fn}'
times = gr.gettime(fn)
assert np.isnan(times.interval)
def test_meas():
"""
test specifying specific measurements (usually only a few of the thirty or so are needed)
"""
fn = R/'demo3.10o'
obs = gr.load(fn)
for v in ['L1C', 'L2P', 'C1P', 'C2P', 'C1C', 'S1C', 'S1P', 'S2P']:
assert v in obs
assert len(obs.data_vars) == 8
# %% one measurement
obs = gr.load(fn, meas='C1C')
assert 'L1C' not in obs
C1C = obs['C1C']
assert C1C.shape == (2, 14) # two times, 14 SVs overall for all systems in this file
assert (C1C.sel(sv='G07') == approx([22227666.76, 25342359.37])).all()
# %% two NON-SEQUENTIAL measurements
obs = gr.load(fn, meas=['L1C', 'S1C'])
assert 'L2P' not in obs
L1C = obs['L1C']
assert L1C.shape == (2, 14)
assert (L1C.sel(sv='G07') == approx([118767195.32608, 133174968.81808])).all()
S1C = obs['S1C']
assert S1C.shape == (2, 14)
assert (S1C.sel(sv='R23') == approx([39., 79.])).all()
assert not C1C.equals(L1C)
# %% measurement not in some systems
obs = gr.load(fn, meas=['S2P'])
assert 'L2P' not in obs
S2P = obs['S2P']
assert S2P.shape == (2, 14)
assert (S2P.sel(sv='G13') == approx([40., 80.])).all()
# satellites that don't have a measurement are NaN
# either because they weren't visible at that time
# or simply do not make that kind of measurement at all
R23 = S2P.sel(sv='R23')
assert np.isnan(R23).all()
# %% measurement not in any system
obs = gr.load(fn, meas='nonsense')
assert 'nonsense' not in obs
assert len(obs.data_vars) == 0
# %% wildcard
obs = gr.load(fn, meas='C')
assert 'L1C' not in obs
assert 'C1P' in obs and 'C2P' in obs and 'C1C' in obs
assert len(obs.data_vars) == 3
def test_zip():
fn = R/'ABMF00GLP_R_20181330000_01D_30S_MO.zip'
obs = gr.load(fn)
assert (obs.sv.values == ['E04', 'E09', 'E12', 'E24', 'G02', 'G05', 'G06', 'G07', 'G09', 'G12', 'G13',
'G17', 'G19', 'G25', 'G30', 'R01', 'R02', 'R08', 'R22', 'R23', 'R24', 'S20',
'S31', 'S35', 'S38']).all()
times = gr.gettime(fn).values.astype('datetime64[us]').astype(datetime)
assert (times == [datetime(2018, 5, 13, 1, 30), datetime(2018, 5, 13, 1, 30, 30), datetime(2018, 5, 13, 1, 31)]).all()
hdr = gr.rinexheader(fn)
assert hdr['t0'] <= times[0]
def test_tlim():
fn = R/'CEDA00USA_R_20182100000_23H_15S_MO.rnx.gz'
obs = gr.load(fn, tlim=('2018-07-29T01:17', '2018-07-29T01:18'))
times = obs.time.values.astype('datetime64[us]').astype(datetime)
assert (times == [datetime(2018, 7, 29, 1, 17), datetime(2018, 7, 29, 1, 17, 15),
datetime(2018, 7, 29, 1, 17, 45), datetime(2018, 7, 29, 1, 18)]).all()
def test_bad_system():
with pytest.raises(KeyError):
gr.load(R/'demo3.10o', use='Z')
with pytest.raises(KeyError):
gr.load(R/'demo3.10o', use=['Z', 'Y'])
def test_one_system():
"""
./ReadRinex.py -q tests/demo3.10o -u G -o r3G.nc
"""
pytest.importorskip('netCDF4')
truth = xarray.open_dataset(R/'r3G.nc', group='OBS', autoclose=True)
for u in ('G', ['G']):
obs = gr.load(R/'demo3.10o', use=u)
assert obs.equals(truth)
assert obs.position == approx([4789028.4701, 176610.0133, 4195017.031])
try:
assert obs.position_geodetic == approx([41.38871005, 2.11199932, 166.25085213])
except AttributeError: # no pymap3d
pass
def test_multi_system():
"""
./ReadRinex.py -q tests/demo3.10o -u G R -o r3GR.nc
"""
pytest.importorskip('netCDF4')
use = ('G', 'R')
obs = gr.load(R/'demo3.10o', use=use)
truth = xarray.open_dataset(R/'r3GR.nc', group='OBS', autoclose=True)
assert obs.equals(truth)
def test_all_system():
"""
./ReadRinex.py -q tests/demo3.10o -o r3all.nc
"""
pytest.importorskip('netCDF4')
obs = gr.load(R/'demo3.10o')
truth = gr.rinexobs(R/'r3all.nc', group='OBS')
assert obs.equals(truth)
def tests_all_indicators():
"""
./ReadRinex.py -q tests/demo3.10o -useindicators -o r3all_indicators.nc
"""
pytest.importorskip('netCDF4')
obs = gr.load(R/'demo3.10o', useindicators=True)
truth = gr.rinexobs(R/'r3all_indicators.nc', group='OBS')
assert obs.equals(truth)
def test_time_system_determination():
obs = gr.load(R/"demo3.10o")
assert obs.attrs['time_system'] == 'GPS'
obs = gr.load(R/'default_time_system3.10o')
assert obs.attrs['time_system'] == 'GAL'
if __name__ == '__main__':
pytest.main(['-x', __file__])
| [
"pytest.importorskip",
"georinex.rinexobs",
"xarray.open_dataset",
"numpy.isnan",
"pytest.main",
"georinex.load",
"georinex.gettime",
"pathlib.Path",
"pytest.raises",
"datetime.datetime",
"georinex.rinexheader",
"pytest.approx"
] | [((271, 282), 'georinex.load', 'gr.load', (['fn'], {}), '(fn)\n', (278, 282), True, 'import georinex as gr\n'), ((333, 352), 'georinex.load', 'gr.load', (['fn', 'outdir'], {}), '(fn, outdir)\n', (340, 352), True, 'import georinex as gr\n'), ((366, 380), 'georinex.gettime', 'gr.gettime', (['fn'], {}), '(fn)\n', (376, 380), True, 'import georinex as gr\n'), ((472, 483), 'georinex.load', 'gr.load', (['fn'], {}), '(fn)\n', (479, 483), True, 'import georinex as gr\n'), ((595, 614), 'georinex.load', 'gr.load', (['fn', 'outdir'], {}), '(fn, outdir)\n', (602, 614), True, 'import georinex as gr\n'), ((753, 767), 'georinex.gettime', 'gr.gettime', (['fn'], {}), '(fn)\n', (763, 767), True, 'import georinex as gr\n'), ((779, 803), 'numpy.isnan', 'np.isnan', (['times.interval'], {}), '(times.interval)\n', (787, 803), True, 'import numpy as np\n'), ((966, 977), 'georinex.load', 'gr.load', (['fn'], {}), '(fn)\n', (973, 977), True, 'import georinex as gr\n'), ((1139, 1162), 'georinex.load', 'gr.load', (['fn'], {'meas': '"""C1C"""'}), "(fn, meas='C1C')\n", (1146, 1162), True, 'import georinex as gr\n'), ((1426, 1458), 'georinex.load', 'gr.load', (['fn'], {'meas': "['L1C', 'S1C']"}), "(fn, meas=['L1C', 'S1C'])\n", (1433, 1458), True, 'import georinex as gr\n'), ((1817, 1842), 'georinex.load', 'gr.load', (['fn'], {'meas': "['S2P']"}), "(fn, meas=['S2P'])\n", (1824, 1842), True, 'import georinex as gr\n'), ((2258, 2286), 'georinex.load', 'gr.load', (['fn'], {'meas': '"""nonsense"""'}), "(fn, meas='nonsense')\n", (2265, 2286), True, 'import georinex as gr\n'), ((2380, 2401), 'georinex.load', 'gr.load', (['fn'], {'meas': '"""C"""'}), "(fn, meas='C')\n", (2387, 2401), True, 'import georinex as gr\n'), ((2603, 2614), 'georinex.load', 'gr.load', (['fn'], {}), '(fn)\n', (2610, 2614), True, 'import georinex as gr\n'), ((3101, 3119), 'georinex.rinexheader', 'gr.rinexheader', (['fn'], {}), '(fn)\n', (3115, 3119), True, 'import georinex as gr\n'), ((3237, 3295), 'georinex.load', 'gr.load', 
(['fn'], {'tlim': "('2018-07-29T01:17', '2018-07-29T01:18')"}), "(fn, tlim=('2018-07-29T01:17', '2018-07-29T01:18'))\n", (3244, 3295), True, 'import georinex as gr\n'), ((3827, 3857), 'pytest.importorskip', 'pytest.importorskip', (['"""netCDF4"""'], {}), "('netCDF4')\n", (3846, 3857), False, 'import pytest\n'), ((3871, 3933), 'xarray.open_dataset', 'xarray.open_dataset', (["(R / 'r3G.nc')"], {'group': '"""OBS"""', 'autoclose': '(True)'}), "(R / 'r3G.nc', group='OBS', autoclose=True)\n", (3890, 3933), False, 'import xarray\n'), ((4369, 4399), 'pytest.importorskip', 'pytest.importorskip', (['"""netCDF4"""'], {}), "('netCDF4')\n", (4388, 4399), False, 'import pytest\n'), ((4433, 4466), 'georinex.load', 'gr.load', (["(R / 'demo3.10o')"], {'use': 'use'}), "(R / 'demo3.10o', use=use)\n", (4440, 4466), True, 'import georinex as gr\n'), ((4477, 4540), 'xarray.open_dataset', 'xarray.open_dataset', (["(R / 'r3GR.nc')"], {'group': '"""OBS"""', 'autoclose': '(True)'}), "(R / 'r3GR.nc', group='OBS', autoclose=True)\n", (4496, 4540), False, 'import xarray\n'), ((4664, 4694), 'pytest.importorskip', 'pytest.importorskip', (['"""netCDF4"""'], {}), "('netCDF4')\n", (4683, 4694), False, 'import pytest\n'), ((4706, 4730), 'georinex.load', 'gr.load', (["(R / 'demo3.10o')"], {}), "(R / 'demo3.10o')\n", (4713, 4730), True, 'import georinex as gr\n'), ((4741, 4781), 'georinex.rinexobs', 'gr.rinexobs', (["(R / 'r3all.nc')"], {'group': '"""OBS"""'}), "(R / 'r3all.nc', group='OBS')\n", (4752, 4781), True, 'import georinex as gr\n'), ((4936, 4966), 'pytest.importorskip', 'pytest.importorskip', (['"""netCDF4"""'], {}), "('netCDF4')\n", (4955, 4966), False, 'import pytest\n'), ((4978, 5022), 'georinex.load', 'gr.load', (["(R / 'demo3.10o')"], {'useindicators': '(True)'}), "(R / 'demo3.10o', useindicators=True)\n", (4985, 5022), True, 'import georinex as gr\n'), ((5033, 5084), 'georinex.rinexobs', 'gr.rinexobs', (["(R / 'r3all_indicators.nc')"], {'group': '"""OBS"""'}), "(R / 
'r3all_indicators.nc', group='OBS')\n", (5044, 5084), True, 'import georinex as gr\n'), ((5163, 5187), 'georinex.load', 'gr.load', (["(R / 'demo3.10o')"], {}), "(R / 'demo3.10o')\n", (5170, 5187), True, 'import georinex as gr\n'), ((5242, 5281), 'georinex.load', 'gr.load', (["(R / 'default_time_system3.10o')"], {}), "(R / 'default_time_system3.10o')\n", (5249, 5281), True, 'import georinex as gr\n'), ((5358, 5387), 'pytest.main', 'pytest.main', (["['-x', __file__]"], {}), "(['-x', __file__])\n", (5369, 5387), False, 'import pytest\n'), ((178, 192), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (182, 192), False, 'from pathlib import Path\n'), ((706, 720), 'georinex.load', 'gr.load', (['outfn'], {}), '(outfn)\n', (713, 720), True, 'import georinex as gr\n'), ((3581, 3604), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (3594, 3604), False, 'import pytest\n'), ((3614, 3647), 'georinex.load', 'gr.load', (["(R / 'demo3.10o')"], {'use': '"""Z"""'}), "(R / 'demo3.10o', use='Z')\n", (3621, 3647), True, 'import georinex as gr\n'), ((3656, 3679), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (3669, 3679), False, 'import pytest\n'), ((3689, 3729), 'georinex.load', 'gr.load', (["(R / 'demo3.10o')"], {'use': "['Z', 'Y']"}), "(R / 'demo3.10o', use=['Z', 'Y'])\n", (3696, 3729), True, 'import georinex as gr\n'), ((3974, 4005), 'georinex.load', 'gr.load', (["(R / 'demo3.10o')"], {'use': 'u'}), "(R / 'demo3.10o', use=u)\n", (3981, 4005), True, 'import georinex as gr\n'), ((4065, 4113), 'pytest.approx', 'approx', (['[4789028.4701, 176610.0133, 4195017.031]'], {}), '([4789028.4701, 176610.0133, 4195017.031])\n', (4071, 4113), False, 'from pytest import approx\n'), ((2193, 2206), 'numpy.isnan', 'np.isnan', (['R23'], {}), '(R23)\n', (2201, 2206), True, 'import numpy as np\n'), ((4163, 4210), 'pytest.approx', 'approx', (['[41.38871005, 2.11199932, 166.25085213]'], {}), '([41.38871005, 2.11199932, 166.25085213])\n', (4169, 
4210), False, 'from pytest import approx\n'), ((1337, 1371), 'pytest.approx', 'approx', (['[22227666.76, 25342359.37]'], {}), '([22227666.76, 25342359.37])\n', (1343, 1371), False, 'from pytest import approx\n'), ((1574, 1616), 'pytest.approx', 'approx', (['[118767195.32608, 133174968.81808]'], {}), '([118767195.32608, 133174968.81808])\n', (1580, 1616), False, 'from pytest import approx\n'), ((1712, 1732), 'pytest.approx', 'approx', (['[39.0, 79.0]'], {}), '([39.0, 79.0])\n', (1718, 1732), False, 'from pytest import approx\n'), ((1958, 1978), 'pytest.approx', 'approx', (['[40.0, 80.0]'], {}), '([40.0, 80.0])\n', (1964, 1978), False, 'from pytest import approx\n'), ((2988, 3016), 'datetime.datetime', 'datetime', (['(2018)', '(5)', '(13)', '(1)', '(30)'], {}), '(2018, 5, 13, 1, 30)\n', (2996, 3016), False, 'from datetime import datetime\n'), ((3018, 3050), 'datetime.datetime', 'datetime', (['(2018)', '(5)', '(13)', '(1)', '(30)', '(30)'], {}), '(2018, 5, 13, 1, 30, 30)\n', (3026, 3050), False, 'from datetime import datetime\n'), ((3053, 3081), 'datetime.datetime', 'datetime', (['(2018)', '(5)', '(13)', '(1)', '(31)'], {}), '(2018, 5, 13, 1, 31)\n', (3061, 3081), False, 'from datetime import datetime\n'), ((3390, 3418), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(29)', '(1)', '(17)'], {}), '(2018, 7, 29, 1, 17)\n', (3398, 3418), False, 'from datetime import datetime\n'), ((3420, 3452), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(29)', '(1)', '(17)', '(15)'], {}), '(2018, 7, 29, 1, 17, 15)\n', (3428, 3452), False, 'from datetime import datetime\n'), ((3476, 3508), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(29)', '(1)', '(17)', '(45)'], {}), '(2018, 7, 29, 1, 17, 45)\n', (3484, 3508), False, 'from datetime import datetime\n'), ((3510, 3538), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(29)', '(1)', '(18)'], {}), '(2018, 7, 29, 1, 18)\n', (3518, 3538), False, 'from datetime import datetime\n'), ((2901, 2915), 'georinex.gettime', 
'gr.gettime', (['fn'], {}), '(fn)\n', (2911, 2915), True, 'import georinex as gr\n')] |
import scipy.signal as sig
import pywt
import numpy as np
from scipy.io import loadmat, savemat
import os
def ecg_preprocessing(data, wfun='db6', levels=9, type=2):
# data is the row data
# 第一步:去除基线漂移---此噪声一般小于0.5HZ, 去除一般噪声,经验值50HZ(注意这两个值可以调整)
filter_sig = bandpass_filter(
data, low_cutoff=0.5, high_cutoff=50, sampling_frequency=500, filter_order=1)
# 第二步:小波降噪
levels = min(levels, pywt.dwt_max_level(data.shape[0], pywt.Wavelet(wfun)))
if type == 1:
# 论文:ECG beat classification using PCA, LDA, ICA and discrete wavelet transform
# 用db6小波对信号进行9级小波分解,去除D1,D2,A9分量,使用剩下的分量进行重构,得到滤波后的信号。
# 这种降噪方式理论上就是带通滤波器,所以测试精度可以使用type=1或者type=0加上带通进行测试
coeffs = pywt.wavedec(filter_sig, wfun, level=levels)
coeffs[-1] = np.zeros(len(coeffs[-1]))
#coeffs[-2] = np.zeros(len(coeffs[-2]))
coeffs[0] = np.zeros(len(coeffs[0]))
processed_sig = pywt.waverec(coeffs, wfun)
elif type == 2:
processed_sig = filter_sig
else:
# 论文:Application of deep convolutional neural network for automated detection of myocardial infarction using ECG signals
# 估计噪声,利用Donoho的估计公式
coef = pywt.wavedec(filter_sig, 'sym8', level=2)
Sigma = np.median(np.abs(coef[-1] - np.median(coef[-1]))) / 0.6745
# 小波降噪
coeffs = pywt.wavedec(filter_sig, wfun, level=levels)
thresh = Sigma * np.sqrt(2 * np.log(filter_sig.shape[0]))
for i in range(len(coeffs)):
coeffs[i] = pywt.threshold(
coeffs[i], thresh, 'soft') # 还可以用'hard',
processed_sig = pywt.waverec(coeffs, wfun)
return processed_sig
def bandpass_filter(data, low_cutoff, high_cutoff, sampling_frequency, filter_order):
nyquist_frequency = sampling_frequency / 2
low = low_cutoff / nyquist_frequency
high = high_cutoff / nyquist_frequency
b, a = sig.butter(filter_order, [low, high], btype="band")
filter_sig = sig.lfilter(b, a, data)
return filter_sig
data_path = '../TRAIN/'
save_path = './Output/'
if not os.path.exists(save_path):
os.makedirs(save_path)
dirs = os.listdir(data_path)
for filename in dirs:
print('Processing: ' + filename)
m = loadmat(os.path.join(data_path, filename))
data = m['data']
for i in range(data.shape[0]):
data[i, :] = ecg_preprocessing(
data[i, :], wfun='db6', levels=9, type=1)
# (name, extension) = os.path.splitext(filename)
savemat(os.path.join(save_path, filename), {'data': data})
| [
"pywt.Wavelet",
"pywt.threshold",
"os.makedirs",
"numpy.log",
"scipy.signal.lfilter",
"pywt.wavedec",
"numpy.median",
"os.path.exists",
"pywt.waverec",
"os.path.join",
"os.listdir",
"scipy.signal.butter"
] | [((2190, 2211), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (2200, 2211), False, 'import os\n'), ((1946, 1997), 'scipy.signal.butter', 'sig.butter', (['filter_order', '[low, high]'], {'btype': '"""band"""'}), "(filter_order, [low, high], btype='band')\n", (1956, 1997), True, 'import scipy.signal as sig\n'), ((2016, 2039), 'scipy.signal.lfilter', 'sig.lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (2027, 2039), True, 'import scipy.signal as sig\n'), ((2125, 2150), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (2139, 2150), False, 'import os\n'), ((2157, 2179), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (2168, 2179), False, 'import os\n'), ((741, 785), 'pywt.wavedec', 'pywt.wavedec', (['filter_sig', 'wfun'], {'level': 'levels'}), '(filter_sig, wfun, level=levels)\n', (753, 785), False, 'import pywt\n'), ((954, 980), 'pywt.waverec', 'pywt.waverec', (['coeffs', 'wfun'], {}), '(coeffs, wfun)\n', (966, 980), False, 'import pywt\n'), ((2290, 2323), 'os.path.join', 'os.path.join', (['data_path', 'filename'], {}), '(data_path, filename)\n', (2302, 2323), False, 'import os\n'), ((2546, 2579), 'os.path.join', 'os.path.join', (['save_path', 'filename'], {}), '(save_path, filename)\n', (2558, 2579), False, 'import os\n'), ((469, 487), 'pywt.Wavelet', 'pywt.Wavelet', (['wfun'], {}), '(wfun)\n', (481, 487), False, 'import pywt\n'), ((1225, 1266), 'pywt.wavedec', 'pywt.wavedec', (['filter_sig', '"""sym8"""'], {'level': '(2)'}), "(filter_sig, 'sym8', level=2)\n", (1237, 1266), False, 'import pywt\n'), ((1377, 1421), 'pywt.wavedec', 'pywt.wavedec', (['filter_sig', 'wfun'], {'level': 'levels'}), '(filter_sig, wfun, level=levels)\n', (1389, 1421), False, 'import pywt\n'), ((1652, 1678), 'pywt.waverec', 'pywt.waverec', (['coeffs', 'wfun'], {}), '(coeffs, wfun)\n', (1664, 1678), False, 'import pywt\n'), ((1552, 1593), 'pywt.threshold', 'pywt.threshold', (['coeffs[i]', 'thresh', '"""soft"""'], {}), 
"(coeffs[i], thresh, 'soft')\n", (1566, 1593), False, 'import pywt\n'), ((1460, 1487), 'numpy.log', 'np.log', (['filter_sig.shape[0]'], {}), '(filter_sig.shape[0])\n', (1466, 1487), True, 'import numpy as np\n'), ((1312, 1331), 'numpy.median', 'np.median', (['coef[-1]'], {}), '(coef[-1])\n', (1321, 1331), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
from subs import create_object,ask_for_steps
from AnalysisFunctions import fcorr
rc=create_object()
rc.vars2load(['bx','by'])
ofile=open('intscale.'+rc.dirname+'.dat','w')
ie=1/np.e
bs,fs,step=ask_for_steps(rc.numslices)
for i in range(bs,fs,step):
print(i)
rc.loadslice(i)
rx,bxcor=fcorr(rc.bx,rc.bx,ax=0,dx=rc.dx)
# lxc=rx[abs(bxcor-ie).argmin()]
lxint=np.sum(bxcor)*rc.dx
ry,bycor=fcorr(rc.by,rc.by,ax=1,dx=rc.dy)
# lyc=ry[abs(bycor-ie).argmin()]
lyint=np.sum(bycor)*rc.dy
print(rc.time,lxint,lyint,0.5*(lxint+lyint), file=ofile)
ofile.close()
| [
"subs.create_object",
"numpy.sum",
"subs.ask_for_steps",
"AnalysisFunctions.fcorr"
] | [((126, 141), 'subs.create_object', 'create_object', ([], {}), '()\n', (139, 141), False, 'from subs import create_object, ask_for_steps\n'), ((235, 262), 'subs.ask_for_steps', 'ask_for_steps', (['rc.numslices'], {}), '(rc.numslices)\n', (248, 262), False, 'from subs import create_object, ask_for_steps\n'), ((334, 369), 'AnalysisFunctions.fcorr', 'fcorr', (['rc.bx', 'rc.bx'], {'ax': '(0)', 'dx': 'rc.dx'}), '(rc.bx, rc.bx, ax=0, dx=rc.dx)\n', (339, 369), False, 'from AnalysisFunctions import fcorr\n'), ((442, 477), 'AnalysisFunctions.fcorr', 'fcorr', (['rc.by', 'rc.by'], {'ax': '(1)', 'dx': 'rc.dy'}), '(rc.by, rc.by, ax=1, dx=rc.dy)\n', (447, 477), False, 'from AnalysisFunctions import fcorr\n'), ((410, 423), 'numpy.sum', 'np.sum', (['bxcor'], {}), '(bxcor)\n', (416, 423), True, 'import numpy as np\n'), ((518, 531), 'numpy.sum', 'np.sum', (['bycor'], {}), '(bycor)\n', (524, 531), True, 'import numpy as np\n')] |
'''
data/read.py
2018. 11. 22
데이터셋 이름을 입력받아 데이터를 불러오는 기능
'''
import os
import sys
import csv
import importlib
import numpy as np
from scipy.io import arff
import pickle_loader as pkl_loader
from ds import Pair, Data
from sampling import smote_dataset
# util - absolute directory of current file
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(dir_path, ".."))
# Primitive function that read data file
def _read_csv(filename, basedir="."):
filedir = os.path.join(basedir, filename)
with open(filedir, 'r') as csv_f:
data = list(csv.reader(csv_f))
# handles the data that have null column
for i, row in enumerate(data):
for j, item in enumerate(row):
data[i][j] = '0.0' if item == '' else item
return data
def _read_arff(filename, basedir="."):
filedir = os.path.join(basedir, filename)
data, meta = arff.loadarff(filedir)
return data
# wrapping function that handles specific dataset
def _read_secom():
# unsplitted data
# 1568 -> 1199 + 368
working_dir = os.path.join(dir_path, "uci-secom")
uci_secom_data = []
for filename in os.listdir(working_dir):
uci_secom_data.extend(_read_csv(filename, working_dir))
uci_secom_data = np.asarray(uci_secom_data)
# first line has label name, so it will be removed.
# secom data has time column(at first), so we will erase it
train_data = uci_secom_data[1:1200, 1:-1].astype(float)
test_data = uci_secom_data[1200:, 1:-1].astype(float)
train_labels = (uci_secom_data[1:1200, -1].astype(int) + 1) / 2
test_labels = (uci_secom_data[1200:, -1].astype(int) + 1) / 2
return Data(train_data, train_labels, test_data, test_labels)
def _read_secom_preprocessed(process_type: str = None):
working_dir = os.path.join(dir_path, "uci-secom-preprocessed")
data = Data(None, None, None, None)
for filename in os.listdir(working_dir):
# common : labels
if filename == "train.labels.csv":
data.train.labels = np.asarray(_read_csv(filename, working_dir)).astype(int).flatten()
if filename == "test.labels.csv":
data.test.labels = np.asarray(_read_csv(filename, working_dir)).astype(int).flatten()
if process_type == None:
if filename == "train_knnImpute.csv":
data.train.data = np.asarray(_read_csv(filename, working_dir)).astype(float)
if filename == "test_knnImpute.csv":
data.test.data = np.asarray(_read_csv(filename, working_dir)).astype(float)
if process_type == "pca":
if filename == "train_pca.csv":
data.train.data = np.asarray(_read_csv(filename, working_dir)).astype(float)
if filename == "test_pca.csv":
data.test.data = np.asarray(_read_csv(filename, working_dir)).astype(float)
if process_type == "ica":
if filename == "train_ica.csv":
data.train.data = np.asarray(_read_csv(filename, working_dir)).astype(float)
if filename == "test_ica.csv":
data.test.data = np.asarray(_read_csv(filename, working_dir)).astype(float)
if process_type == "chisq":
if filename == "train_chisq.csv":
data.train.data = np.asarray(_read_csv(filename, working_dir)).astype(float)
if filename == "test_chisq.csv":
data.test.data = np.asarray(_read_csv(filename, working_dir)).astype(float)
return data
def _read_wafer():
# train data 6164 * 152
# test data 1000 * 152
# train labels 6164
# test labels 1000
working_dir = os.path.join(dir_path, "wafer")
wafer_data = {}
for filename in os.listdir(working_dir):
if ".arff" in filename:
# there are some other formatted data(e.g. txt or md), so filter
if "TEST" in filename:
# add test data
wafer_data["test"] = _read_arff(filename, working_dir)
if "TRAIN" in filename:
# add train data
wafer_data["train"] = _read_arff(filename, working_dir)
train_data = np.asarray(list(map(lambda x: list(x)[:-1], wafer_data["train"])))
test_data = np.asarray(list(map(lambda x: list(x)[:-1], wafer_data["test"])))
train_labels = (1 - np.asarray(list(map(lambda x: int(x[-1]), wafer_data["train"])))) / 2
test_labels = (1 - np.asarray(list(map(lambda x: int(x[-1]), wafer_data["test"])))) / 2
return Data(train_data, train_labels, test_data, test_labels)
def _read_earthquakes():
# train_data 322 * 512
# test_data 139 * 512
# train_labels 322
# test_labels 139
working_dir = os.path.join(dir_path, "earthquakes")
wafer_data = {}
for filename in os.listdir(working_dir):
if ".arff" in filename:
# there are some other formatted data(e.g. txt or md), so filter
if "TEST" in filename:
# add test data
wafer_data["test"] = _read_arff(filename, working_dir)
if "TRAIN" in filename:
# add train data
wafer_data["train"] = _read_arff(filename, working_dir)
train_data = np.asarray(list(map(lambda x: list(x)[:-1], wafer_data["train"])))
test_data = np.asarray(list(map(lambda x: list(x)[:-1], wafer_data["test"])))
train_labels = np.asarray(list(map(lambda x: int(x[-1]), wafer_data["train"])))
test_labels = np.asarray(list(map(lambda x: int(x[-1]), wafer_data["test"])))
return Data(train_data, train_labels, test_data, test_labels)
'''
def _read_cmu_wafer():
normal_dir = os.path.join(dir_path, "cmu-wafer", "normal")
abnormal_dir = os.path.join(dir_path, "cmu-wafer", "abnormal")
def parse_filename(filename: str):
return filename.split(".")
def dtoi(desc: str):
return ('6', '7', '8', '11', '12', '15').index(desc)
def read_file(filedir):
with open(filedir, 'r') as f:
r = csv.reader(f, delimiter='\t')
try:
return [line[1] for line in r]
except:
print(filedir)
return
def read_abnormal():
data = {}
labels = {}
for filename in os.listdir(abnormal_dir):
filedir = os.path.join(abnormal_dir, filename)
run_wafer, desc = parse_filename(filename)
if desc not in ('6', '7', '8', '11', '12', '15'):
continue
if not run_wafer in data.keys():
data[run_wafer] = [None] * 6
data[run_wafer][dtoi(desc)] = read_file(filedir)
labels[run_wafer] = 1
return data, labels
def read_normal():
data = {}
labels = {}
for filename in os.listdir(normal_dir):
filedir = os.path.join(normal_dir, filename)
run_wafer, desc = parse_filename(filename)
if desc not in ('6', '7', '8', '11', '12', '15'):
continue
if not run_wafer in data.keys():
data[run_wafer] = [None] * 6
data[run_wafer][dtoi(desc)] = read_file(filedir)
labels[run_wafer] = 0
return data, labels
ab_data, ab_labels = read_abnormal()
no_data, no_labels = read_normal()
# merge normal & abnormal data
data_dict = {**ab_data, **no_data}
labels_dict = {**ab_labels, **no_labels}
# integrity check
assert data_dict.keys() == labels_dict.keys()
data = []
labels = []
for key in sorted(data_dict.keys()):
# truncate first 100 elements from each series
data.append(np.asarray(data_dict[key])[:, :100])
labels.append(np.asarray(labels_dict[key]))
data = np.array(data)
labels = np.reshape(labels, -1)
np.random.seed(0)
x = np.arange(len(data))
np.random.shuffle(x)
data = data[x]
labels = labels[x]
train_data = data[:800]
test_data = data[800:]
train_labels = labels[:800]
test_labels = labels[800:]
return Data(train_data, train_labels, test_data, test_labels)
'''
def _read_cmu_wafer():
# load faster using pickle
data = Data(None, None, None, None)
data.train.data = pkl_loader.pkl2np("train_data.pkl").astype(int)
data.train.labels = pkl_loader.pkl2np("train_labels.pkl").astype(int)
data.test.data = pkl_loader.pkl2np("test_data.pkl").astype(int)
data.test.labels = pkl_loader.pkl2np("test_labels.pkl").astype(int)
return data
# main function
def main(dataset_name: str, smote: bool = False):
if dataset_name == "uci-secom":
dataset = _read_secom()
if dataset_name == "wafer":
dataset = _read_wafer()
if dataset_name == "earthquake":
dataset = _read_earthquakes()
if dataset_name == "cmu-wafer":
dataset = _read_cmu_wafer()
if dataset_name == "etc":
# add something in here
return None
if dataset_name == "uci-secom-prep":
dataset = _read_secom_preprocessed()
if dataset_name == "uci-secom-pca":
dataset = _read_secom_preprocessed("pca")
if dataset_name == "uci-secom-ica":
dataset = _read_secom_preprocessed("ica")
if dataset_name == "uci-secom-chisq":
dataset = _read_secom_preprocessed("chisq")
# data manipulation
if smote:
dataset = smote_dataset(dataset)
return dataset
# for unit test
if __name__ == "__main__":
data = main(input("dataset name(uci-secom, wafer): "), smote = True)
print(data.train.data.shape)
print(data.train.labels.shape)
print(data.test.data.shape)
print(data.test.labels.shape)
| [
"scipy.io.arff.loadarff",
"csv.reader",
"os.path.realpath",
"numpy.asarray",
"sampling.smote_dataset",
"pickle_loader.pkl2np",
"ds.Data",
"os.path.join",
"os.listdir"
] | [((327, 353), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (343, 353), False, 'import os\n'), ((374, 402), 'os.path.join', 'os.path.join', (['dir_path', '""".."""'], {}), "(dir_path, '..')\n", (386, 402), False, 'import os\n'), ((500, 531), 'os.path.join', 'os.path.join', (['basedir', 'filename'], {}), '(basedir, filename)\n', (512, 531), False, 'import os\n'), ((853, 884), 'os.path.join', 'os.path.join', (['basedir', 'filename'], {}), '(basedir, filename)\n', (865, 884), False, 'import os\n'), ((902, 924), 'scipy.io.arff.loadarff', 'arff.loadarff', (['filedir'], {}), '(filedir)\n', (915, 924), False, 'from scipy.io import arff\n'), ((1079, 1114), 'os.path.join', 'os.path.join', (['dir_path', '"""uci-secom"""'], {}), "(dir_path, 'uci-secom')\n", (1091, 1114), False, 'import os\n'), ((1164, 1187), 'os.listdir', 'os.listdir', (['working_dir'], {}), '(working_dir)\n', (1174, 1187), False, 'import os\n'), ((1274, 1300), 'numpy.asarray', 'np.asarray', (['uci_secom_data'], {}), '(uci_secom_data)\n', (1284, 1300), True, 'import numpy as np\n'), ((1690, 1744), 'ds.Data', 'Data', (['train_data', 'train_labels', 'test_data', 'test_labels'], {}), '(train_data, train_labels, test_data, test_labels)\n', (1694, 1744), False, 'from ds import Pair, Data\n'), ((1820, 1868), 'os.path.join', 'os.path.join', (['dir_path', '"""uci-secom-preprocessed"""'], {}), "(dir_path, 'uci-secom-preprocessed')\n", (1832, 1868), False, 'import os\n'), ((1885, 1913), 'ds.Data', 'Data', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (1889, 1913), False, 'from ds import Pair, Data\n'), ((1939, 1962), 'os.listdir', 'os.listdir', (['working_dir'], {}), '(working_dir)\n', (1949, 1962), False, 'import os\n'), ((3679, 3710), 'os.path.join', 'os.path.join', (['dir_path', '"""wafer"""'], {}), "(dir_path, 'wafer')\n", (3691, 3710), False, 'import os\n'), ((3751, 3774), 'os.listdir', 'os.listdir', (['working_dir'], {}), '(working_dir)\n', (3761, 3774), 
False, 'import os\n'), ((4529, 4583), 'ds.Data', 'Data', (['train_data', 'train_labels', 'test_data', 'test_labels'], {}), '(train_data, train_labels, test_data, test_labels)\n', (4533, 4583), False, 'from ds import Pair, Data\n'), ((4727, 4764), 'os.path.join', 'os.path.join', (['dir_path', '"""earthquakes"""'], {}), "(dir_path, 'earthquakes')\n", (4739, 4764), False, 'import os\n'), ((4805, 4828), 'os.listdir', 'os.listdir', (['working_dir'], {}), '(working_dir)\n', (4815, 4828), False, 'import os\n'), ((5565, 5619), 'ds.Data', 'Data', (['train_data', 'train_labels', 'test_data', 'test_labels'], {}), '(train_data, train_labels, test_data, test_labels)\n', (5569, 5619), False, 'from ds import Pair, Data\n'), ((8217, 8245), 'ds.Data', 'Data', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (8221, 8245), False, 'from ds import Pair, Data\n'), ((9443, 9465), 'sampling.smote_dataset', 'smote_dataset', (['dataset'], {}), '(dataset)\n', (9456, 9465), False, 'from sampling import smote_dataset\n'), ((590, 607), 'csv.reader', 'csv.reader', (['csv_f'], {}), '(csv_f)\n', (600, 607), False, 'import csv\n'), ((8268, 8303), 'pickle_loader.pkl2np', 'pkl_loader.pkl2np', (['"""train_data.pkl"""'], {}), "('train_data.pkl')\n", (8285, 8303), True, 'import pickle_loader as pkl_loader\n'), ((8340, 8377), 'pickle_loader.pkl2np', 'pkl_loader.pkl2np', (['"""train_labels.pkl"""'], {}), "('train_labels.pkl')\n", (8357, 8377), True, 'import pickle_loader as pkl_loader\n'), ((8411, 8445), 'pickle_loader.pkl2np', 'pkl_loader.pkl2np', (['"""test_data.pkl"""'], {}), "('test_data.pkl')\n", (8428, 8445), True, 'import pickle_loader as pkl_loader\n'), ((8481, 8517), 'pickle_loader.pkl2np', 'pkl_loader.pkl2np', (['"""test_labels.pkl"""'], {}), "('test_labels.pkl')\n", (8498, 8517), True, 'import pickle_loader as pkl_loader\n')] |
import time
import collections
import functools
import random
import numpy as np
import pygame
from cycle_growth import HamCycle
import settings
class Snake:
def __init__(self, R, C, graph, start_length, centered = True, shortcuts = True):
self.R = R
self.C = C
# Once he snake is max_length just chase tail
self.chase_tail = False
# a directed graph for a hamiltonian cycle of the map (the path the snake will follow)
self.graph = graph
# spawn a snake head at a random location
head = (R // 2, C // 2) if centered else self.spawn_snake(R, C)
self.body = collections.deque([head])
# Snake starts at snake_length
for _ in range(start_length - 1):
self.body.appendleft(self.graph[self.body[0]])
self.food = self.spawn_food(R, C)
self.on_food = False # True when the snake's head is on the food
# If there is a shorter (and safe) path to food, break from Hamiltonian Cycle
self.shortcuts = shortcuts
self.cost = self.calc_cost(self.food) if shortcuts else collections.defaultdict(int)
def spawn_snake(self, R, C):
return spawn(R, C)
def spawn_food(self, R, C):
snake_body = set(self.body)
return spawn(R, C, choices=[(m, n) for m in range(R) for n in range(C) if (m, n) not in snake_body])
def is_safe(self, new_head, food_found = 5):
"""
Looks ahead snake.length + food_found steps:
if snake never bites it's tail when following the ham path returns True
if snake bites its tail then the path is not safe returns False
"""
if self.chase_tail:
return False
temp_body = self.body.copy()
temp_body.appendleft(new_head)
temp_body_set = set(temp_body)
for _ in range(len(temp_body)):
temp_body.appendleft(self.graph[temp_body[0]])
if food_found > 0:
temp_body_set.remove(temp_body.pop())
food_found -= 1
if temp_body[0] in temp_body_set:
return False
temp_body_set.add(temp_body[0])
return True
def step(self):
"""Move the snake forward one step."""
if not self.shortcuts or self.chase_tail:
self.body.appendleft(self.graph[self.body[0]])
else:
i, j = self.body[0]
new_head = min((pos for pos in ((i+1,j),(i-1,j),(i,j+1),(i,j-1)) if pos not in self.body),
key = lambda p: self.cost[p])
if new_head == self.graph[self.body[0]] or self.is_safe(new_head):
# Make sure short cut doesn't lead to potential death
self.body.appendleft(new_head)
else:
self.body.appendleft(self.graph[self.body[0]])
if not self.on_food:
self.body.pop()
# If snake found food, update food
self.on_food = self.food == self.body[0]
if self.on_food:
self.food = self.spawn_food(self.R, self.C)
if self.food == (-1, -1):
self.chase_tail = True
if self.shortcuts:
self.cost = self.calc_cost(self.food)
@functools.lru_cache(None)
def calc_cost(self, food):
"""Returns a map of (i, j) -> steps to reach food if following the ham cycle"""
if self.chase_tail:
return collections.defaultdict(lambda: self.R * self.C)
pos = self.graph[food]
cost = collections.defaultdict(lambda: self.R * self.C)
cost[food] = 0
N = self.R * self.C
steps = 1
while steps <= N:
cost[pos] = N - steps
pos = self.graph[pos]
steps += 1
return cost
class Game():
def __init__(self, **kwargs):
pygame.init()
for key in kwargs:
self.__dict__[key] = kwargs[key]
# set display width and height
if self.WIDTH is None:
self.WIDTH, self.HEIGHT = self.C*self.BOX_WIDTH, self.R*self.BOX_WIDTH
# Create
self.SURFACE = pygame.display.set_mode((self.HEIGHT, self.WIDTH))
self.COL_WIDTH = self.WIDTH // self.C
self.ROW_HEIGHT = self.HEIGHT // self.R
# Hamiltonian cycle is HamCycle.graph
self.HAM = HamCycle(self.R, self.C, max_size=self.MAX_SIZE, shuffle=self.SHUFFLE, display=False)
# Draw grid and hamiltonian cycle
self.GRID_SURFACE = self.get_grid_surface()
self.HAM_SURFACE_GRID, self.HAM_SURFACE = self.get_ham_surface(self.HAM.graph)
# Snake
self.SNAKE = Snake(self.R, self.C, self.HAM.graph, self.SNAKE_LENGTH,
centered=self.CENTER_SNAKE, shortcuts=self.SHORTCUTS)
self.SNAKE_WIDTH = int(round(self.SNAKE_WIDTH * min(self.COL_WIDTH, self.ROW_HEIGHT), 0))
# True when the game is running
self.active = True
# Inputs are locked until time > input_lock
self.input_lock = -1
# Blank Screen
self.BLANK = pygame.surfarray.make_surface(np.array([[(0,0,0) for _ in range(self.WIDTH)] for _ in range(self.HEIGHT)]))
def temporary_lock(self):
"""Temporarily locks out keys to prevent accidental double key presses."""
self.input_lock = time.time() + self.LOCK_TIME
def get_grid_surface(self):
"""
Returns a pygame surface with a grid that marks the rows and columns that the snake can move on.
"""
arr = [[(0,0,0) for _ in range(self.WIDTH)] for _ in range(self.HEIGHT)]
for i in range(0, self.HEIGHT, self.ROW_HEIGHT):
for j in range(self.WIDTH):
arr[i][j] = self.GRID_COLOR
for j in range(0, self.WIDTH, self.COL_WIDTH):
for i in range(self.HEIGHT):
arr[i][j] = self.GRID_COLOR
return pygame.surfarray.make_surface(np.array(arr))
def get_ham_surface(self, graph, start = (0, 0)):
"""
Creates a pygame surface showing the hamiltonian path.
Returns ham_surface_with_grid, ham_surface_without_grid
"""
ham_surface_grid = self.GRID_SURFACE.copy()
ham_surface = pygame.surfarray.make_surface(np.array([[(0,0,0) for _ in range(self.WIDTH)] for _ in range(self.HEIGHT)]))
path = [start, graph[start]]
while path[-1] != start:
path.append(graph[path[-1]])
path = [self.map_to_grid(*p) for p in path]
pygame.draw.lines(ham_surface, self.HAM_COLOR, True, path, self.HAM_WIDTH)
pygame.draw.lines(ham_surface_grid, self.HAM_COLOR, True, path, self.HAM_WIDTH)
return ham_surface_grid, ham_surface
def map_to_grid(self, i, j):
"""
Maps the grid point (row = i, col = j) to the center of the block representing (i, j)
i.e. if the window is 100 by 100 pixels and there are 10 rows and 10 columns:
(0, 0) -> (5, 5)
(8, 5) -> (85, 55)
"""
y = (self.ROW_HEIGHT // 2) + i * self.ROW_HEIGHT
x = (self.COL_WIDTH // 2) + j * self.COL_WIDTH
return (y, x)
def run(self):
"""
Main game loop.
Handles input key presses, updating snake position, and drawing the background and snake.
"""
# Set display icon and window name
logo = pygame.image.load("./graphics/simple-logo.png")
pygame.display.set_icon(logo)
pygame.display.set_caption('Hamiltonian Snake')
while self.active:
time.sleep(self.SLEEP_TIME)
self.get_events()
keys = pygame.key.get_pressed()
t = time.time()
if t >= self.input_lock:
if keys[pygame.K_UP]:
self.temporary_lock()
self.SLEEP_TIME -= 0.01
self.SLEEP_TIME = max(0, self.SLEEP_TIME)
elif keys[pygame.K_DOWN]:
self.temporary_lock()
self.SLEEP_TIME += 0.01
self.SLEEP_TIME = min(0.3, self.SLEEP_TIME)
elif keys[pygame.K_ESCAPE]:
self.temporary_lock()
self.active = False
elif keys[pygame.K_g]:
self.UPDATE_BACKGROUND = True
self.temporary_lock()
self.SHOW_GRID = not self.SHOW_GRID
print("Show Grid",self.SHOW_GRID)
elif keys[pygame.K_h]:
self.UPDATE_BACKGROUND = True
self.temporary_lock()
self.SHOW_PATH = not self.SHOW_PATH
print('Show Path', self.SHOW_PATH)
elif keys[pygame.K_s]:
self.temporary_lock()
self.SHORTCUTS = not self.SHORTCUTS
self.SNAKE.shortcuts = self.SHORTCUTS
self.SNAKE.cost = self.SNAKE.calc_cost(self.SNAKE.food)
print('Shortcuts', self.SHORTCUTS)
# Move snake forward one step
self.SNAKE.step()
# Draw the snake and board
self.draw()
pygame.quit()
def get_events(self):
"""Gets key and mouse inputs. Deactivates game if input action was quit."""
self.events = pygame.event.poll()
if self.events.type == pygame.QUIT:
self.active = False
self.keys_press = pygame.key.get_pressed()
def get_food_rect(self):
"""Returns [top, left, width, height] for the food object in pixels."""
i, j = self.map_to_grid(*self.SNAKE.food)
x0, y0 = j - self.SNAKE_WIDTH // 2, i - self.SNAKE_WIDTH // 2
return [y0, x0, self.SNAKE_WIDTH, self.SNAKE_WIDTH]
def draw(self):
"""Updates pygame display with background, snake and food."""
if self.UPDATE_BACKGROUND:
self.SURFACE.blit(self.BLANK, (0, 0)) # blank screen
self.UPDATE_BACKGROUND = False
# Draw grid and / or hamiltonian cycle
if self.SHOW_PATH and self.SHOW_GRID:
self.SURFACE.blit(self.HAM_SURFACE_GRID, (0, 0))
elif self.SHOW_PATH:
self.SURFACE.blit(self.HAM_SURFACE, (0, 0))
elif self.SHOW_GRID:
self.SURFACE.blit(self.GRID_SURFACE, (0, 0))
else:
self.SURFACE.blit(self.BLANK, (0, 0))
# Draw snake
pygame.draw.lines(self.SURFACE, self.SNAKE_COLOR, False,
[self.map_to_grid(*segment) for segment in self.SNAKE.body],
self.SNAKE_WIDTH
)
# Draw apple
self.SNAKE.food
pygame.draw.rect(self.SURFACE, self.FOOD_COLOR, self.get_food_rect())
# Update display
pygame.display.flip()
def spawn(R, C, choices = None):
"""
Returns a random location on a grid of dimensions (R, C).
If Choices is given, randomly selects a position from choices (a list of positions (y, x)).
"""
if choices is None:
return random.randint(0, R-1), random.randint(0, C-1)
elif len(choices) == 1:
return (-1, -1)
return random.choice(choices)
if __name__ == "__main__":
g = Game(**settings.settings)
g.run() | [
"collections.defaultdict",
"pygame.draw.lines",
"collections.deque",
"random.randint",
"pygame.display.set_mode",
"pygame.display.set_caption",
"pygame.event.poll",
"pygame.quit",
"pygame.init",
"time.sleep",
"pygame.image.load",
"pygame.display.set_icon",
"random.choice",
"pygame.display.... | [((3345, 3370), 'functools.lru_cache', 'functools.lru_cache', (['None'], {}), '(None)\n', (3364, 3370), False, 'import functools\n'), ((11524, 11546), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (11537, 11546), False, 'import random\n'), ((655, 680), 'collections.deque', 'collections.deque', (['[head]'], {}), '([head])\n', (672, 680), False, 'import collections\n'), ((3641, 3690), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : self.R * self.C)'], {}), '(lambda : self.R * self.C)\n', (3664, 3690), False, 'import collections\n'), ((3970, 3983), 'pygame.init', 'pygame.init', ([], {}), '()\n', (3981, 3983), False, 'import pygame\n'), ((4277, 4327), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.HEIGHT, self.WIDTH)'], {}), '((self.HEIGHT, self.WIDTH))\n', (4300, 4327), False, 'import pygame\n'), ((4496, 4585), 'cycle_growth.HamCycle', 'HamCycle', (['self.R', 'self.C'], {'max_size': 'self.MAX_SIZE', 'shuffle': 'self.SHUFFLE', 'display': '(False)'}), '(self.R, self.C, max_size=self.MAX_SIZE, shuffle=self.SHUFFLE,\n display=False)\n', (4504, 4585), False, 'from cycle_growth import HamCycle\n'), ((6725, 6799), 'pygame.draw.lines', 'pygame.draw.lines', (['ham_surface', 'self.HAM_COLOR', '(True)', 'path', 'self.HAM_WIDTH'], {}), '(ham_surface, self.HAM_COLOR, True, path, self.HAM_WIDTH)\n', (6742, 6799), False, 'import pygame\n'), ((6808, 6887), 'pygame.draw.lines', 'pygame.draw.lines', (['ham_surface_grid', 'self.HAM_COLOR', '(True)', 'path', 'self.HAM_WIDTH'], {}), '(ham_surface_grid, self.HAM_COLOR, True, path, self.HAM_WIDTH)\n', (6825, 6887), False, 'import pygame\n'), ((7610, 7657), 'pygame.image.load', 'pygame.image.load', (['"""./graphics/simple-logo.png"""'], {}), "('./graphics/simple-logo.png')\n", (7627, 7657), False, 'import pygame\n'), ((7666, 7695), 'pygame.display.set_icon', 'pygame.display.set_icon', (['logo'], {}), '(logo)\n', (7689, 7695), False, 'import pygame\n'), ((7704, 
7751), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Hamiltonian Snake"""'], {}), "('Hamiltonian Snake')\n", (7730, 7751), False, 'import pygame\n'), ((9483, 9496), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (9494, 9496), False, 'import pygame\n'), ((9639, 9658), 'pygame.event.poll', 'pygame.event.poll', ([], {}), '()\n', (9656, 9658), False, 'import pygame\n'), ((9761, 9785), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (9783, 9785), False, 'import pygame\n'), ((11145, 11166), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (11164, 11166), False, 'import pygame\n'), ((1148, 1176), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (1171, 1176), False, 'import collections\n'), ((3537, 3586), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : self.R * self.C)'], {}), '(lambda : self.R * self.C)\n', (3560, 3586), False, 'import collections\n'), ((5541, 5552), 'time.time', 'time.time', ([], {}), '()\n', (5550, 5552), False, 'import time\n'), ((6147, 6160), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (6155, 6160), True, 'import numpy as np\n'), ((7800, 7827), 'time.sleep', 'time.sleep', (['self.SLEEP_TIME'], {}), '(self.SLEEP_TIME)\n', (7810, 7827), False, 'import time\n'), ((7877, 7901), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (7899, 7901), False, 'import pygame\n'), ((7918, 7929), 'time.time', 'time.time', ([], {}), '()\n', (7927, 7929), False, 'import time\n'), ((11414, 11438), 'random.randint', 'random.randint', (['(0)', '(R - 1)'], {}), '(0, R - 1)\n', (11428, 11438), False, 'import random\n'), ((11438, 11462), 'random.randint', 'random.randint', (['(0)', '(C - 1)'], {}), '(0, C - 1)\n', (11452, 11462), False, 'import random\n')] |
import numpy as np
def SCH(x):
f1 = x[0] ** 2
f2 = (x[0] - 2) ** 2
return np.array([f1, f2]) | [
"numpy.array"
] | [((87, 105), 'numpy.array', 'np.array', (['[f1, f2]'], {}), '([f1, f2])\n', (95, 105), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
class Strategy():
"""
Parent strategy class
"""
def __init__(self,
name='Strategy',
params = {}
):
self.name = name
self.indicators = ['MA20']
self.params = params
def get_indicators(self):
return self.indicators
def check(self, data, indicators, curr_shares):
"""
Takes in the ochl data, indicators, current amount of shares
Returns:
* amount of shares to buy, 0 if no move, negative if sell
* 0 if at market, else limit price
* [0..1] of how certain a trade is
"""
shares_change = None
limit = None
quality = 1.
return shares_change, limit, quality
class MA20Strat(Strategy):
"""
MovingAverage strategy: buys wheb curr price is lower than MA, sell vice versa
"""
def __init__(self,
name='MA20Strat',
params = {}
):
self.name = name
self.indicators = ['MA20']
self.params = params
def check(self, data, indicators, curr_shares, cash_avail, ticker, buy_price=None):
"""
Takes in the ochl data, indicators, current amount of shares
Returns:
* amount of shares to buy, 0 if no move, negative if sell
* 0 if at market, else limit price
* [0..1] of how certain a trade is
"""
shares_change = None
limit = None
quality = 1.
curr_close = data.Close.iloc[-1]
curr_ma = indicators.MA20.iloc[-1]
try:
last_close = data.Close.iloc[-2]
except:
last_close = None
if curr_shares==0: # check if buy
if curr_close < 0.95 * curr_ma and last_close<=curr_close:
# BUY
shares_change = np.floor(cash_avail/curr_close)
else: # check if sell
if curr_close > 1.05 * curr_ma and last_close>curr_close:
# SELL
shares_change = -curr_shares
return shares_change, limit, quality
class RSIStrat(Strategy):
"""
RSI strategy: buys on low RSI and sells on high RSI
"""
def __init__(self,
name='RSIStrat',
params = {}
):
self.name = name
self.indicators = ['RSI']
self.params = params
def check(self, data, indicators, curr_shares, cash_avail, ticker, buy_price=None):
"""
Buys if RSI falls below 30, sells if RSI is above 70
"""
shares_change = None
limit = None
quality = 1.
curr_close = data.Close.iloc[-1]
curr_RSI = indicators.RSI.iloc[-1]
if curr_RSI is None:
return shares_change, limit, quality
# print(f"curr RSI for {ticker} is {curr_RSI}")
if curr_RSI<40 and curr_shares==0: # check if buy
# BUY
shares_change = np.floor(cash_avail/curr_close)
elif curr_RSI>60 and curr_shares>0: # check if sell
# SELL
shares_change = -curr_shares
return shares_change, limit, quality
class SLBStrat(Strategy):
"""
Based solely on the SLB indicator
https://atas.net/atas-possibilities/squeeze-momentum-indicator/
"""
def __init__(self,
name='SLBStrat',
params = {}
):
self.name = name
self.indicators = ['SLBval','SLBtrend','STDEV20']
self.params = params
def check(self, data, indicators, curr_shares, cash_avail, ticker, buy_price=None):
"""
Buys if squeeze off and val>0 and val_diff > 0
Sells if squeeze on or val_diff < 0
"""
shares_change = None
limit = None
quality = 1.
curr_close = data.Close.iloc[-1]
curr_val = indicators.SLBval.iloc[-1]
val_diff = indicators.SLBval.diff()
curr_val_diff = val_diff[-1]
squeeze_off = indicators.SLBtrend.iloc[-1]
squeeze_on = not squeeze_off
try:
last_val = indicators.SLBval.iloc[-2]
except:
last_val = None
if curr_val is None or last_val is None or curr_val==np.nan or last_val==np.nan:
return shares_change, limit, quality
if curr_shares==0: # no shares yet, could buy
if squeeze_off and curr_val<0 and curr_val_diff>0:
# BUY
print(f'buying: squeeze_off {squeeze_off} val {curr_val} diff {curr_val_diff}')
shares_change = np.floor(cash_avail/curr_close)
else: # already has shares, could sell
if squeeze_on or curr_val_diff<0:
print(f'selling: squeeze on {squeeze_on} val {curr_val} diff {curr_val_diff}')
# SELL
shares_change = -curr_shares
return shares_change, limit, quality | [
"numpy.floor"
] | [((3236, 3269), 'numpy.floor', 'np.floor', (['(cash_avail / curr_close)'], {}), '(cash_avail / curr_close)\n', (3244, 3269), True, 'import numpy as np\n'), ((2049, 2082), 'numpy.floor', 'np.floor', (['(cash_avail / curr_close)'], {}), '(cash_avail / curr_close)\n', (2057, 2082), True, 'import numpy as np\n'), ((4954, 4987), 'numpy.floor', 'np.floor', (['(cash_avail / curr_close)'], {}), '(cash_avail / curr_close)\n', (4962, 4987), True, 'import numpy as np\n')] |
# Copyright 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import numpy as np
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
class SVMTrainer(object):
def __init__(self, examples, example_ids, labels, classes,
dataset_name='dataset', force_gpu=False,
model_path=None):
# Define feature names including the original CSV column name
self.feature_columns = [
tf.contrib.layers.real_valued_column(x) for x in labels]
self.example_ids = np.array(example_ids)
self.examples = examples
self.classes = classes
self.force_gpu = force_gpu
if not model_path:
model_data_folder = os.sep.join([
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'data',
dataset_name, 'model'])
else:
model_data_folder = os.sep.join([model_path, 'data', dataset_name,
'model'])
os.makedirs(model_data_folder, exist_ok=True)
self.estimator = tf.contrib.learn.SVM(
feature_columns=self.feature_columns,
example_id_column='example_id',
model_dir=model_data_folder,
feature_engineering_fn=self.feature_engineering_fn)
# Separate traing set and evaluation set by building randomised lists
# of indexes that can be used for examples, example_ids and classes
self.all_indexes = range(len(self.example_ids))
self.training_idx = pd.Series(self.all_indexes).sample(
len(self.example_ids) // 2).values
self.evaluate_idx = list(
set(self.all_indexes) - set(self.training_idx))
def input_fn(self, idx_filter, return_classes=True):
num_features = len(self.feature_columns)
# Dict comprehension to build a dict of features
# I suppose numpy might be able to do this more efficiently
_features = {
self.feature_columns[n].column_name:
tf.constant(self.examples[idx_filter, n])
for n in range(num_features)}
_features['example_id'] = tf.constant(self.example_ids[idx_filter])
print("Done preparing input data")
if return_classes:
return _features, tf.constant(self.classes[idx_filter])
else:
return _features
def training_input_fn(self):
return self.input_fn(self.training_idx)
def evaluate_input_fn(self):
return self.input_fn(self.evaluate_idx)
def feature_engineering_fn(self, features, labels):
# Further data normalization may happen here
print("Built engineered data")
return features, labels
def train(self, steps=30):
if self.force_gpu:
with tf.device('/device:GPU:0'):
self.estimator.fit(input_fn=self.training_input_fn,
steps=steps)
train_loss = self.estimator.evaluate(
input_fn=self.evaluate_input_fn, steps=1)
else:
self.estimator.fit(input_fn=self.training_input_fn, steps=steps)
train_loss = self.estimator.evaluate(
input_fn=self.evaluate_input_fn, steps=1)
print('Training loss %r' % train_loss)
def predict_fn(self):
return self.input_fn(range(len(self.example_ids)),
return_classes=False)
def predict(self):
prediction = list(self.estimator.predict(input_fn=self.predict_fn))
return prediction
| [
"os.makedirs",
"os.path.realpath",
"tensorflow.device",
"tensorflow.contrib.learn.SVM",
"tensorflow.constant",
"tensorflow.logging.set_verbosity",
"numpy.array",
"os.sep.join",
"pandas.Series",
"tensorflow.contrib.layers.real_valued_column"
] | [((650, 691), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (674, 691), True, 'import tensorflow as tf\n'), ((1075, 1096), 'numpy.array', 'np.array', (['example_ids'], {}), '(example_ids)\n', (1083, 1096), True, 'import numpy as np\n'), ((1545, 1590), 'os.makedirs', 'os.makedirs', (['model_data_folder'], {'exist_ok': '(True)'}), '(model_data_folder, exist_ok=True)\n', (1556, 1590), False, 'import os\n'), ((1616, 1795), 'tensorflow.contrib.learn.SVM', 'tf.contrib.learn.SVM', ([], {'feature_columns': 'self.feature_columns', 'example_id_column': '"""example_id"""', 'model_dir': 'model_data_folder', 'feature_engineering_fn': 'self.feature_engineering_fn'}), "(feature_columns=self.feature_columns,\n example_id_column='example_id', model_dir=model_data_folder,\n feature_engineering_fn=self.feature_engineering_fn)\n", (1636, 1795), True, 'import tensorflow as tf\n'), ((2690, 2731), 'tensorflow.constant', 'tf.constant', (['self.example_ids[idx_filter]'], {}), '(self.example_ids[idx_filter])\n', (2701, 2731), True, 'import tensorflow as tf\n'), ((991, 1030), 'tensorflow.contrib.layers.real_valued_column', 'tf.contrib.layers.real_valued_column', (['x'], {}), '(x)\n', (1027, 1030), True, 'import tensorflow as tf\n'), ((1435, 1491), 'os.sep.join', 'os.sep.join', (["[model_path, 'data', dataset_name, 'model']"], {}), "([model_path, 'data', dataset_name, 'model'])\n", (1446, 1491), False, 'import os\n'), ((2572, 2613), 'tensorflow.constant', 'tf.constant', (['self.examples[idx_filter, n]'], {}), '(self.examples[idx_filter, n])\n', (2583, 2613), True, 'import tensorflow as tf\n'), ((2832, 2869), 'tensorflow.constant', 'tf.constant', (['self.classes[idx_filter]'], {}), '(self.classes[idx_filter])\n', (2843, 2869), True, 'import tensorflow as tf\n'), ((3334, 3360), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (3343, 3360), True, 'import tensorflow as tf\n'), ((2076, 
2103), 'pandas.Series', 'pd.Series', (['self.all_indexes'], {}), '(self.all_indexes)\n', (2085, 2103), True, 'import pandas as pd\n'), ((1301, 1327), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1317, 1327), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
GOAL
Fig. 4: Map of ocean heat flux in each grid cell
Ocean heat flux computed via average_ohf.py, which follows compute_oht.py
PROGRAMMER
<NAME>
LAST UPDATE
17/11/2020
'''
# Standard libraries
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Working directories
dir_input = '/nobackup/rossby24/proj/rossby/joint_exp/oseaice/post-proc/'
dir_grid = '/nobackup/rossby24/proj/rossby/joint_exp/oseaice/grid/'
dir_output = '/nobackup/rossby24/proj/rossby/joint_exp/oseaice/OSeaIce_Paper/'

# Options
save_fig = True

# Experiment identifiers: D000 is the control run (CTRL); the others are the
# perturbation runs, plotted as differences against the control.
all_runs = ('D000', 'D012', 'D015', 'D018', 'D021', 'D022', 'D023')


def load_oht(run):
    """Load the mean ocean heat flux magnitude for one run, in kW m-2."""
    filename = dir_input + run + '/OHT_transects/oht_mean_' + run + '.npy'
    oht_u, oht_v = np.load(filename)
    return np.sqrt(oht_u**2. + oht_v**2.) / 1.e3


def load_siconc(run):
    """Load yearly-mean sea-ice concentration for one run, in percent."""
    filename = dir_input + run + '/siconc_' + run + '_ym.nc'
    fh = Dataset(filename, mode='r')
    siconc = fh.variables['siconc'][:] * 100.
    fh.close()
    return siconc


# Load OHF and sea-ice concentration for every run (was seven copy-pasted
# blocks per variable in the original).
oht = {run: load_oht(run) for run in all_runs}
siconc = {run: load_siconc(run) for run in all_runs}
nm, ny, nx = siconc['D000'].shape

# Load latitude and longitude of model
fh = Dataset(dir_grid + 'mesh_zgr.nc', mode='r')
lon = fh.variables['nav_lon'][:]
lat = fh.variables['nav_lat'][:]
fh.close()

# Map projection (North Polar Lambert azimuthal equal-area)
boundlat = 40.
l0 = 0.
bmap = Basemap(projection='nplaea', boundinglat=boundlat, lon_0=l0,
               resolution='c')
x, y = bmap(lon, lat)

# Palettes.  NOTE(review): ``_resample`` is a private matplotlib API
# (replaced by ``resampled`` in recent releases) -- confirm before upgrading.
palette_oht = plt.cm.cubehelix_r._resample(20)
min_oht = 0.
max_oht = 1000.
palette_diff = plt.cm.seismic._resample(40)
min_diff = -400.
max_diff = 400.


def draw_panel(axis, field, vmin, vmax, palette, conc, title, letter):
    """Draw one polar map panel: OHF field plus 15% sea-ice contours.

    The contours are the time slices 2 (blue) and 8 (black) of the yearly
    concentration file -- presumably two reference months; confirm against
    the file's time axis.  Returns the pcolor handle for colorbar attachment.
    """
    cs = bmap.pcolor(x, y, field, vmin=vmin, vmax=vmax, cmap=palette, ax=axis)
    bmap.contour(x, y, conc[2, :, :], range(15, 16, 5), colors='blue',
                 ax=axis, linewidths=3)
    bmap.contour(x, y, conc[8, :, :], range(15, 16, 5), colors='black',
                 ax=axis, linewidths=3)
    bmap.drawparallels(np.arange(-80., 81., 10.), labels=[1, 0, 0, 0],
                       fontsize=24, ax=axis)
    bmap.drawmeridians(np.arange(-180., 181., 20.), labels=[0, 0, 0, 1],
                       fontsize=24, ax=axis)
    bmap.drawcoastlines(ax=axis)
    bmap.fillcontinents(color='grey', lake_color='w', ax=axis)
    axis.set_title(title, fontsize=32)
    axis.set_title(letter, loc='left', fontsize=32, fontweight='bold')
    axis.yaxis.set_label_coords(-0.05, 0.9)
    return cs


# Fig. 4 - OHF maps
fig, ax = plt.subplots(3, 3, figsize=(18, 18))
fig.subplots_adjust(left=0.06, bottom=0.05, right=0.95, top=0.95,
                    wspace=0.2, hspace=0.3)

# Control run: absolute OHF
cs = draw_panel(ax[0, 0], oht['D000'], min_oht, max_oht, palette_oht,
                siconc['D000'], 'CTRL', 'a')

# Add color bar absolute value
cb_ax = fig.add_axes([0.35, 0.7, 0.015, 0.25])
cbar = fig.colorbar(cs, cax=cb_ax, orientation='vertical',
                    ticks=[0, 250, 500, 750, 1000], extend='both')
cbar.ax.tick_params(labelsize=24)
cbar.set_label('Horizontal OHF (kW m$^{-2}$)', fontsize=28)

# The top row holds only the control panel
fig.delaxes(ax[0, 1])
fig.delaxes(ax[0, 2])

# Perturbation runs: OHF difference against the control
pert_panels = (
    ('D012', (1, 0), 'ATL1+3$^{\circ}$C', 'b'),
    ('D015', (1, 1), 'ATL2+3$^{\circ}$C', 'c'),
    ('D018', (1, 2), 'ATL3+3$^{\circ}$C', 'd'),
    ('D021', (2, 0), 'PAC1+3$^{\circ}$C', 'e'),
    ('D022', (2, 1), 'PAC2+3$^{\circ}$C', 'f'),
    ('D023', (2, 2), 'PAC3+3$^{\circ}$C', 'g'),
)
for run, (row, col), title, letter in pert_panels:
    cs = draw_panel(ax[row, col], oht[run] - oht['D000'], min_diff, max_diff,
                    palette_diff, siconc[run], title, letter)

# Add color bar diff
cb_ax = fig.add_axes([0.5, 0.82, 0.4, 0.02])
cbar = fig.colorbar(cs, cax=cb_ax, orientation='horizontal',
                    ticks=[-400, -200, 0, 200, 400], extend='both')
cbar.ax.tick_params(labelsize=24)
cbar.set_label('Horizontal OHF PERT - CTRL (kW m$^{-2}$)', fontsize=28)

# Save figure
if save_fig:
    fig.savefig(dir_output + 'fig4.png')
    fig.savefig(dir_output + 'fig4.eps', dpi=300)
| [
"netCDF4.Dataset",
"numpy.load",
"matplotlib.pyplot.cm.seismic._resample",
"numpy.arange",
"matplotlib.pyplot.cm.cubehelix_r._resample",
"matplotlib.pyplot.subplots",
"mpl_toolkits.basemap.Basemap",
"numpy.sqrt"
] | [((759, 776), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (766, 776), True, 'import numpy as np\n'), ((788, 834), 'numpy.sqrt', 'np.sqrt', (['(oht_u_D000 ** 2.0 + oht_v_D000 ** 2.0)'], {}), '(oht_u_D000 ** 2.0 + oht_v_D000 ** 2.0)\n', (795, 834), True, 'import numpy as np\n'), ((961, 978), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (968, 978), True, 'import numpy as np\n'), ((990, 1036), 'numpy.sqrt', 'np.sqrt', (['(oht_u_D012 ** 2.0 + oht_v_D012 ** 2.0)'], {}), '(oht_u_D012 ** 2.0 + oht_v_D012 ** 2.0)\n', (997, 1036), True, 'import numpy as np\n'), ((1163, 1180), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1170, 1180), True, 'import numpy as np\n'), ((1192, 1238), 'numpy.sqrt', 'np.sqrt', (['(oht_u_D015 ** 2.0 + oht_v_D015 ** 2.0)'], {}), '(oht_u_D015 ** 2.0 + oht_v_D015 ** 2.0)\n', (1199, 1238), True, 'import numpy as np\n'), ((1365, 1382), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1372, 1382), True, 'import numpy as np\n'), ((1394, 1440), 'numpy.sqrt', 'np.sqrt', (['(oht_u_D018 ** 2.0 + oht_v_D018 ** 2.0)'], {}), '(oht_u_D018 ** 2.0 + oht_v_D018 ** 2.0)\n', (1401, 1440), True, 'import numpy as np\n'), ((1567, 1584), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1574, 1584), True, 'import numpy as np\n'), ((1596, 1642), 'numpy.sqrt', 'np.sqrt', (['(oht_u_D021 ** 2.0 + oht_v_D021 ** 2.0)'], {}), '(oht_u_D021 ** 2.0 + oht_v_D021 ** 2.0)\n', (1603, 1642), True, 'import numpy as np\n'), ((1769, 1786), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1776, 1786), True, 'import numpy as np\n'), ((1798, 1844), 'numpy.sqrt', 'np.sqrt', (['(oht_u_D022 ** 2.0 + oht_v_D022 ** 2.0)'], {}), '(oht_u_D022 ** 2.0 + oht_v_D022 ** 2.0)\n', (1805, 1844), True, 'import numpy as np\n'), ((1971, 1988), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1978, 1988), True, 'import numpy as np\n'), ((2000, 2046), 'numpy.sqrt', 'np.sqrt', (['(oht_u_D023 ** 2.0 + oht_v_D023 
** 2.0)'], {}), '(oht_u_D023 ** 2.0 + oht_v_D023 ** 2.0)\n', (2007, 2046), True, 'import numpy as np\n'), ((2141, 2168), 'netCDF4.Dataset', 'Dataset', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (2148, 2168), False, 'from netCDF4 import Dataset\n'), ((2355, 2382), 'netCDF4.Dataset', 'Dataset', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (2362, 2382), False, 'from netCDF4 import Dataset\n'), ((2540, 2567), 'netCDF4.Dataset', 'Dataset', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (2547, 2567), False, 'from netCDF4 import Dataset\n'), ((2725, 2752), 'netCDF4.Dataset', 'Dataset', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (2732, 2752), False, 'from netCDF4 import Dataset\n'), ((2910, 2937), 'netCDF4.Dataset', 'Dataset', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (2917, 2937), False, 'from netCDF4 import Dataset\n'), ((3095, 3122), 'netCDF4.Dataset', 'Dataset', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (3102, 3122), False, 'from netCDF4 import Dataset\n'), ((3280, 3307), 'netCDF4.Dataset', 'Dataset', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (3287, 3307), False, 'from netCDF4 import Dataset\n'), ((3473, 3500), 'netCDF4.Dataset', 'Dataset', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (3480, 3500), False, 'from netCDF4 import Dataset\n'), ((3625, 3701), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""nplaea"""', 'boundinglat': 'boundlat', 'lon_0': 'l0', 'resolution': '"""c"""'}), "(projection='nplaea', boundinglat=boundlat, lon_0=l0, resolution='c')\n", (3632, 3701), False, 'from mpl_toolkits.basemap import Basemap\n'), ((3744, 3776), 'matplotlib.pyplot.cm.cubehelix_r._resample', 'plt.cm.cubehelix_r._resample', (['(20)'], {}), '(20)\n', (3772, 3776), True, 'import matplotlib.pyplot as plt\n'), ((3821, 3849), 'matplotlib.pyplot.cm.seismic._resample', 'plt.cm.seismic._resample', (['(40)'], {}), 
'(40)\n', (3845, 3849), True, 'import matplotlib.pyplot as plt\n'), ((3916, 3952), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(18, 18)'}), '(3, 3, figsize=(18, 18))\n', (3928, 3952), True, 'import matplotlib.pyplot as plt\n'), ((4322, 4350), 'numpy.arange', 'np.arange', (['(-80.0)', '(81.0)', '(10.0)'], {}), '(-80.0, 81.0, 10.0)\n', (4331, 4350), True, 'import numpy as np\n'), ((4405, 4435), 'numpy.arange', 'np.arange', (['(-180.0)', '(181.0)', '(20.0)'], {}), '(-180.0, 181.0, 20.0)\n', (4414, 4435), True, 'import numpy as np\n'), ((5332, 5360), 'numpy.arange', 'np.arange', (['(-80.0)', '(81.0)', '(10.0)'], {}), '(-80.0, 81.0, 10.0)\n', (5341, 5360), True, 'import numpy as np\n'), ((5415, 5445), 'numpy.arange', 'np.arange', (['(-180.0)', '(181.0)', '(20.0)'], {}), '(-180.0, 181.0, 20.0)\n', (5424, 5445), True, 'import numpy as np\n'), ((6028, 6056), 'numpy.arange', 'np.arange', (['(-80.0)', '(81.0)', '(10.0)'], {}), '(-80.0, 81.0, 10.0)\n', (6037, 6056), True, 'import numpy as np\n'), ((6111, 6141), 'numpy.arange', 'np.arange', (['(-180.0)', '(181.0)', '(20.0)'], {}), '(-180.0, 181.0, 20.0)\n', (6120, 6141), True, 'import numpy as np\n'), ((6724, 6752), 'numpy.arange', 'np.arange', (['(-80.0)', '(81.0)', '(10.0)'], {}), '(-80.0, 81.0, 10.0)\n', (6733, 6752), True, 'import numpy as np\n'), ((6807, 6837), 'numpy.arange', 'np.arange', (['(-180.0)', '(181.0)', '(20.0)'], {}), '(-180.0, 181.0, 20.0)\n', (6816, 6837), True, 'import numpy as np\n'), ((7420, 7448), 'numpy.arange', 'np.arange', (['(-80.0)', '(81.0)', '(10.0)'], {}), '(-80.0, 81.0, 10.0)\n', (7429, 7448), True, 'import numpy as np\n'), ((7503, 7533), 'numpy.arange', 'np.arange', (['(-180.0)', '(181.0)', '(20.0)'], {}), '(-180.0, 181.0, 20.0)\n', (7512, 7533), True, 'import numpy as np\n'), ((8116, 8144), 'numpy.arange', 'np.arange', (['(-80.0)', '(81.0)', '(10.0)'], {}), '(-80.0, 81.0, 10.0)\n', (8125, 8144), True, 'import numpy as np\n'), ((8199, 8229), 'numpy.arange', 
'np.arange', (['(-180.0)', '(181.0)', '(20.0)'], {}), '(-180.0, 181.0, 20.0)\n', (8208, 8229), True, 'import numpy as np\n'), ((8812, 8840), 'numpy.arange', 'np.arange', (['(-80.0)', '(81.0)', '(10.0)'], {}), '(-80.0, 81.0, 10.0)\n', (8821, 8840), True, 'import numpy as np\n'), ((8895, 8925), 'numpy.arange', 'np.arange', (['(-180.0)', '(181.0)', '(20.0)'], {}), '(-180.0, 181.0, 20.0)\n', (8904, 8925), True, 'import numpy as np\n')] |
import numpy as np
def load_spontaneous():
    """Load the Stringer spontaneous-activity archive as a plain dict."""
    archive = np.load("Data/stringer_spontaneous.npy", allow_pickle=True)
    return archive.item()
def load_orientations():
    """Load the Stringer orientations archive as a plain dict."""
    archive = np.load("Data/stringer_orientations.npy", allow_pickle=True)
    return archive.item()
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy
import scipy.signal
from scipy.signal import find_peaks
# NOTE(review): this path has no 'Data/' prefix, unlike the loader functions
# above -- verify which location is intended.
data = np.load('stringer_spontaneous.npy', allow_pickle=True).item()
print(data['pupilArea'].shape)

pupil = data['pupilArea']
pupil1d = pupil.squeeze()

# Detect pupil-dilation events; height=0 keeps every positive-valued peak.
# (A second find_peaks call with threshold=1000 used to run here with its
# result discarded; that dead call has been removed.)
peaks, _ = find_peaks(pupil1d, height=0)

plt.plot(pupil1d)
plt.plot(peaks, pupil1d[peaks], label='var')
plt.plot(np.zeros_like(pupil1d), "--", color="gray")
plt.legend()  # render the 'var' label, which was set but never displayed
plt.show()
| [
"numpy.load",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.signal.find_peaks"
] | [((512, 667), 'scipy.signal.find_peaks', 'scipy.signal.find_peaks', (['pupil1d'], {'height': 'None', 'threshold': '(1000)', 'distance': 'None', 'prominence': 'None', 'width': 'None', 'wlen': 'None', 'rel_height': '(0.5)', 'plateau_size': 'None'}), '(pupil1d, height=None, threshold=1000, distance=None,\n prominence=None, width=None, wlen=None, rel_height=0.5, plateau_size=None)\n', (535, 667), False, 'import scipy\n'), ((700, 729), 'scipy.signal.find_peaks', 'find_peaks', (['pupil1d'], {'height': '(0)'}), '(pupil1d, height=0)\n', (710, 729), False, 'from scipy.signal import find_peaks\n'), ((730, 747), 'matplotlib.pyplot.plot', 'plt.plot', (['pupil1d'], {}), '(pupil1d)\n', (738, 747), True, 'import matplotlib.pyplot as plt\n'), ((748, 792), 'matplotlib.pyplot.plot', 'plt.plot', (['peaks', 'pupil1d[peaks]'], {'label': '"""var"""'}), "(peaks, pupil1d[peaks], label='var')\n", (756, 792), True, 'import matplotlib.pyplot as plt\n'), ((846, 856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (854, 856), True, 'import matplotlib.pyplot as plt\n'), ((802, 824), 'numpy.zeros_like', 'np.zeros_like', (['pupil1d'], {}), '(pupil1d)\n', (815, 824), True, 'import numpy as np\n'), ((363, 417), 'numpy.load', 'np.load', (['"""stringer_spontaneous.npy"""'], {'allow_pickle': '(True)'}), "('stringer_spontaneous.npy', allow_pickle=True)\n", (370, 417), True, 'import numpy as np\n'), ((55, 114), 'numpy.load', 'np.load', (['"""Data/stringer_spontaneous.npy"""'], {'allow_pickle': '(True)'}), "('Data/stringer_spontaneous.npy', allow_pickle=True)\n", (62, 114), True, 'import numpy as np\n'), ((159, 219), 'numpy.load', 'np.load', (['"""Data/stringer_orientations.npy"""'], {'allow_pickle': '(True)'}), "('Data/stringer_orientations.npy', allow_pickle=True)\n", (166, 219), True, 'import numpy as np\n')] |
import mmcv
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from scipy import ndimage
from mmdet.core import matrix_nms, multi_apply
from ..builder import HEADS, build_loss
from .base_dense_seg_head import BaseDenseSegHead
INF = 1e8
def center_of_mass(bitmasks):
    """Compute per-mask centroids of a batch of binary masks.

    Args:
        bitmasks (Tensor): masks of shape (N, H, W).

    Returns:
        tuple(Tensor, Tensor): x and y centroid coordinates, each of shape (N,).
    """
    _, height, width = bitmasks.size()
    grid_y = torch.arange(0, height, dtype=torch.float32, device=bitmasks.device)
    grid_x = torch.arange(0, width, dtype=torch.float32, device=bitmasks.device)
    # Zeroth moment (mask area), clamped so empty masks do not divide by zero.
    area = bitmasks.sum(dim=-1).sum(dim=-1).clamp(min=1e-6)
    # First moments along x and y.
    moment_x = (bitmasks * grid_x).sum(dim=-1).sum(dim=-1)
    moment_y = (bitmasks * grid_y[:, None]).sum(dim=-1).sum(dim=-1)
    return moment_x / area, moment_y / area
def points_nms(heat, kernel=2):
    """Keep only local-maximum points on a heatmap, zeroing everything else.

    Args:
        heat (Tensor): heatmap of shape (N, C, H, W).
        kernel (int): pooling window size; must be 2 (the trailing slice
            below assumes the pooled map is exactly one row/column larger).

    Returns:
        Tensor: heatmap with non-peak positions set to zero.
    """
    pooled = nn.functional.max_pool2d(
        heat, (kernel, kernel), stride=1, padding=1)
    # A position survives only when it equals its local neighbourhood max.
    peak_mask = (pooled[:, :, :-1, :-1] == heat).float()
    return heat * peak_mask
def dice_loss(input, target):
    """Per-sample soft Dice loss between predicted and ground-truth masks.

    Args:
        input (Tensor): predicted masks, shape (N, ...), values in [0, 1].
        target (Tensor): ground-truth masks, same leading dimension.

    Returns:
        Tensor: loss per sample, shape (N,); 0 for a perfect match.
    """
    flat_pred = input.contiguous().view(input.size()[0], -1)
    flat_gt = target.contiguous().view(target.size()[0], -1).float()
    intersection = torch.sum(flat_pred * flat_gt, 1)
    # 0.001 smoothing keeps the ratio finite for empty masks.
    pred_norm = torch.sum(flat_pred * flat_pred, 1) + 0.001
    gt_norm = torch.sum(flat_gt * flat_gt, 1) + 0.001
    dice = (2 * intersection) / (pred_norm + gt_norm)
    return 1 - dice
@HEADS.register_module()
# class SOLOv2Head(BaseDenseSegHead):
# """SOLO: Segmenting Objects by Locations
# https://arxiv.org/abs/1912.04488
# """
#
# def __init__(
# self,
# num_classes,
# in_channels,
# seg_feat_channels=256,
# stacked_convs=4,
# strides=(8, 8, 16, 32, 32),
# base_edge_list=(16, 32, 64, 128, 256),
# scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384,
# 2048)),
# sigma=0.2,
# num_grids=None,
# # cate_down_pos=0,
# ins_out_channels=64,
# background_label=None,
# loss_mask=None,
# loss_cls=None,
# conv_cfg=None,
# norm_cfg=None,
# train_cfg=None,
# test_cfg=None,
# use_dcn_in_tower=False,
# type_dcn=None):
# super(SOLOv2Head, self).__init__()
# self.num_classes = num_classes
# self.seg_num_grids = num_grids
# self.cate_out_channels = self.num_classes
# self.ins_out_channels = ins_out_channels
# self.in_channels = in_channels
# self.seg_feat_channels = seg_feat_channels
# self.stacked_convs = stacked_convs
# self.strides = strides
# self.sigma = sigma
# self.stacked_convs = stacked_convs
# self.kernel_out_channels = self.ins_out_channels * 1 * 1
# # self.cate_down_pos = cate_down_pos
# self.base_edge_list = base_edge_list
# self.scale_ranges = scale_ranges
# self.background_label = (
# num_classes if background_label is None else background_label)
# # background_label should be either 0 or num_classes
# assert (self.background_label == 0
# or self.background_label == num_classes)
# self.loss_cls = build_loss(loss_cls)
# self.ins_loss_weight = loss_mask['loss_weight']
# self.conv_cfg = conv_cfg
# self.norm_cfg = norm_cfg
# self.train_cfg = train_cfg
# self.test_cfg = test_cfg
# self.use_dcn_in_tower = use_dcn_in_tower
# self.type_dcn = type_dcn
# self._init_layers()
class SOLOv2Head(nn.Module):
def __init__(self,
num_classes,
in_channels,
seg_feat_channels=256,
stacked_convs=4,
strides=(8, 8, 16, 32, 32),
#strides=(4, 8, 16, 32, 64),
base_edge_list=(16, 32, 64, 128, 256),
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384,2048)),
#scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)),
sigma=0.2,
num_grids=None,
ins_out_channels=64,
background_label=None,
loss_mask=None,
loss_cls=None,
conv_cfg=None,
norm_cfg=None,
train_cfg=None,
test_cfg=None,
est_cfg=None,
use_dcn_in_tower=False,
type_dcn=None):
super(SOLOv2Head, self).__init__()
self.num_classes = num_classes
self.seg_num_grids = num_grids
self.cate_out_channels = self.num_classes
self.ins_out_channels = ins_out_channels
self.in_channels = in_channels
self.seg_feat_channels = seg_feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.sigma = sigma
self.stacked_convs = stacked_convs
self.kernel_out_channels = self.ins_out_channels * 1 * 1
self.base_edge_list = base_edge_list
self.scale_ranges = scale_ranges
self.background_label = (
num_classes if background_label is None else background_label)
# background_label should be either 0 or num_classes
assert (self.background_label == 0
or self.background_label == num_classes)
self.loss_cls = build_loss(loss_cls)
self.ins_loss_weight = loss_mask['loss_weight']
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.use_dcn_in_tower = use_dcn_in_tower
self.type_dcn = type_dcn
self._init_layers()
def _init_layers(self):
# self.ins_convs = nn.ModuleList()
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
self.cate_convs = nn.ModuleList()
self.kernel_convs = nn.ModuleList()
for i in range(self.stacked_convs):
if self.use_dcn_in_tower:
cfg_conv = dict(type=self.type_dcn)
else:
cfg_conv = self.conv_cfg
chn = self.in_channels + 2 if i == 0 else self.seg_feat_channels
self.kernel_convs.append(
ConvModule(
chn,
self.seg_feat_channels,
3,
stride=1,
padding=1,
conv_cfg=cfg_conv,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
chn = self.in_channels if i == 0 else self.seg_feat_channels
self.cate_convs.append(
ConvModule(
chn,
self.seg_feat_channels,
3,
stride=1,
padding=1,
conv_cfg=cfg_conv,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
# self.solo_ins_list = nn.ModuleList()
# for seg_num_grid in self.seg_num_grids:
# self.solo_ins_list.append(
# nn.Conv2d(
# self.seg_feat_channels, seg_num_grid**2, 1))
self.solo_cate = nn.Conv2d(
self.seg_feat_channels, self.cate_out_channels, 3, padding=1)
self.solo_kernel = nn.Conv2d(
self.seg_feat_channels, self.kernel_out_channels, 3, padding=1)
def init_weights(self):
# for m in self.ins_convs:
# normal_init(m.conv, std=0.01)
for m in self.cate_convs:
normal_init(m.conv, std=0.01)
for m in self.kernel_convs:
normal_init(m.conv, std=0.01)
# bias_ins = bias_init_with_prob(0.01)
# for m in self.solo_ins_list:
# normal_init(m, std=0.01, bias=bias_ins)
bias_cate = bias_init_with_prob(0.01)
normal_init(self.solo_cate, std=0.01, bias=bias_cate)
normal_init(self.solo_kernel, std=0.01)
def forward(self, feats, eval=False):
new_feats = self.split_feats(feats)
featmap_sizes = [featmap.size()[-2:] for featmap in new_feats]
upsampled_size = (featmap_sizes[0][0] * 2, featmap_sizes[0][1] * 2)
cate_pred, kernel_pred = multi_apply(
self.forward_single,
new_feats,
list(range(len(self.seg_num_grids))),
eval=eval,
upsampled_size=upsampled_size)
return cate_pred, kernel_pred
# def split_feats(self, feats):
# return (F.interpolate(
# feats[0], scale_factor=0.5, mode='bilinear',
# align_corners=False), feats[1], feats[2], feats[3],
# F.interpolate(
# feats[4],
# size=feats[3].shape[-2:],
# mode='bilinear',
# align_corners=False))
def split_feats(self, feats):
return (F.interpolate(feats[0], scale_factor=2, mode='bilinear', align_corners=False),
feats[1],
feats[2],
feats[3],
F.interpolate(feats[4], size=feats[3].shape[-2:], mode='bilinear', align_corners=False))
    def forward_single(self, x, idx, eval=False, upsampled_size=None):
        """Predict category scores and mask kernels for one FPN level.

        Args:
            x (Tensor): feature map of this level, shape (N, C, H, W).
            idx (int): level index into ``self.seg_num_grids``.
            eval (bool): when True, apply sigmoid + point NMS to the category
                map and return it channel-last.
            upsampled_size: accepted but not used in this method.

        Returns:
            tuple(Tensor, Tensor): category prediction and kernel prediction.
        """
        ins_kernel_feat = x
        # ins branch
        # Append normalized x/y coordinate maps ([-1, 1]) as two extra
        # channels (CoordConv-style).  NOTE: ``x`` and ``y`` below shadow the
        # input argument ``x`` from here on.
        x_range = torch.linspace(
            -1, 1, ins_kernel_feat.shape[-1], device=ins_kernel_feat.device)
        y_range = torch.linspace(
            -1, 1, ins_kernel_feat.shape[-2], device=ins_kernel_feat.device)
        y, x = torch.meshgrid(y_range, x_range)
        y = y.expand([ins_kernel_feat.shape[0], 1, -1, -1])
        x = x.expand([ins_kernel_feat.shape[0], 1, -1, -1])
        coord_feat = torch.cat([x, y], 1)
        ins_kernel_feat = torch.cat([ins_kernel_feat, coord_feat], 1)
        # kernel branch: resample to the S x S grid of this level.
        kernel_feat = ins_kernel_feat
        seg_num_grid = self.seg_num_grids[idx]
        kernel_feat = F.interpolate(
            kernel_feat,
            size=seg_num_grid,
            mode='bilinear',
            align_corners=False)
        # The category branch uses the same grid features, minus the two
        # coordinate channels appended above.
        cate_feat = kernel_feat[:, :-2, :, :]
        kernel_feat = kernel_feat.contiguous()
        for i, kernel_layer in enumerate(self.kernel_convs):
            kernel_feat = kernel_layer(kernel_feat)
        kernel_pred = self.solo_kernel(kernel_feat)
        # cate branch
        cate_feat = cate_feat.contiguous()
        for i, cate_layer in enumerate(self.cate_convs):
            cate_feat = cate_layer(cate_feat)
        cate_pred = self.solo_cate(cate_feat)
        if eval:
            # Inference-time post-processing: keep only local maxima and
            # move channels last for downstream mask assembly.
            cate_pred = points_nms(
                cate_pred.sigmoid(), kernel=2).permute(0, 2, 3, 1)
        return cate_pred, kernel_pred
    def loss(self,
             cate_preds,
             kernel_preds,
             ins_pred,
             gt_bbox_list,
             gt_label_list,
             gt_mask_list,
             img_metas,
             cfg,
             gt_bboxes_ignore=None):
        """Compute the SOLOv2 training losses.

        Args:
            cate_preds (list[Tensor]): per-level category predictions.
            kernel_preds (list[Tensor]): per-level kernel predictions.
            ins_pred (Tensor): shared mask feature map, one per image.
            gt_bbox_list / gt_label_list / gt_mask_list: per-image ground
                truth.
            img_metas, cfg, gt_bboxes_ignore: accepted but not used in this
                method body.

        Returns:
            dict: ``loss_mask`` (weighted dice loss) and ``loss_cls``.
        """
        mask_feat_size = ins_pred.size()[-2:]
        # Build per-image, per-level targets; each *_list is indexed
        # [image][level].
        ins_label_list, cate_label_list, ins_ind_label_list, \
            grid_order_list = multi_apply(self.solov2_target_single,
                                          gt_bbox_list, gt_label_list,
                                          gt_mask_list,
                                          mask_feat_size=mask_feat_size)
        # ins: transpose to [level][image] via zip(*...) and concatenate the
        # mask targets of all images within each level.
        ins_labels = [
            torch.cat([
                ins_labels_level_img
                for ins_labels_level_img in ins_labels_level
            ], 0) for ins_labels_level in zip(*ins_label_list)
        ]
        # Gather, per level and image, only the kernels of grid cells that
        # received a positive target (selected by grid_order).
        kernel_preds = [[
            kernel_preds_level_img.view(kernel_preds_level_img.shape[0],
                                        -1)[:, grid_orders_level_img]
            for kernel_preds_level_img, grid_orders_level_img in zip(
                kernel_preds_level, grid_orders_level)
        ] for kernel_preds_level, grid_orders_level in zip(
            kernel_preds, zip(*grid_order_list))]
        # generate masks: apply each positive cell's predicted kernel as a
        # 1x1 conv over that image's mask features.
        ins_pred = ins_pred
        ins_pred_list = []
        for b_kernel_pred in kernel_preds:
            b_mask_pred = []
            for idx, kernel_pred in enumerate(b_kernel_pred):
                if kernel_pred.size()[-1] == 0:
                    # No positive cells for this image at this level.
                    continue
                cur_ins_pred = ins_pred[idx, ...]
                h, w = cur_ins_pred.shape[-2:]
                n, i = kernel_pred.shape
                cur_ins_pred = cur_ins_pred.unsqueeze(0)
                kernel_pred = kernel_pred.permute(1, 0).view(i, -1, 1, 1)
                cur_ins_pred = F.conv2d(
                    cur_ins_pred, kernel_pred, stride=1).view(-1, h, w)
                b_mask_pred.append(cur_ins_pred)
            if len(b_mask_pred) == 0:
                b_mask_pred = None
            else:
                b_mask_pred = torch.cat(b_mask_pred, 0)
            ins_pred_list.append(b_mask_pred)
        # Total number of positive cells, used to normalise the cls loss.
        ins_ind_labels = [
            torch.cat([
                ins_ind_labels_level_img.flatten()
                for ins_ind_labels_level_img in ins_ind_labels_level
            ]) for ins_ind_labels_level in zip(*ins_ind_label_list)
        ]
        flatten_ins_ind_labels = torch.cat(ins_ind_labels)
        num_ins = flatten_ins_ind_labels.sum()
        # cate: flatten labels and predictions over all levels and images.
        cate_labels = [
            torch.cat([
                cate_labels_level_img.flatten()
                for cate_labels_level_img in cate_labels_level
            ]) for cate_labels_level in zip(*cate_label_list)
        ]
        flatten_cate_labels = torch.cat(cate_labels)
        cate_preds = [
            cate_pred.permute(0, 2, 3, 1).reshape(-1, self.cate_out_channels)
            for cate_pred in cate_preds
        ]
        flatten_cate_preds = torch.cat(cate_preds)
        # +1 guards against division by zero when there are no positives.
        loss_cls = self.loss_cls(
            flatten_cate_preds, flatten_cate_labels, avg_factor=num_ins + 1)
        # dice loss over the assembled masks of each level.
        loss_mask = []
        for input, target in zip(ins_pred_list, ins_labels):
            if input is None:
                continue
            input = torch.sigmoid(input)
            loss_mask.append(dice_loss(input, target))
        loss_mask = torch.cat(loss_mask).mean()
        loss_mask = loss_mask * self.ins_loss_weight
        return dict(loss_mask=loss_mask, loss_cls=loss_cls)
    def solov2_target_single(self, gt_bboxes_raw, gt_labels_raw, gt_masks_raw,
                             mask_feat_size):
        """Build SOLOv2 training targets for one image.

        Ground-truth instances are assigned to FPN levels by their sqrt-area
        (scale), and each assigned instance activates the grid cells around
        its mask center of mass, clipped to a sigma-shrunk box around the
        bbox center.

        Args:
            gt_bboxes_raw: (num_gt, 4) tensor of xyxy ground-truth boxes.
            gt_labels_raw: (num_gt,) tensor of ground-truth class indices.
            gt_masks_raw: per-instance binary masks; indexed with numpy
                indices and passed to mmcv.imrescale below, so these are
                numpy arrays, not tensors (shape presumably (num_gt, H, W)).
            mask_feat_size: (H, W) of the unified mask feature map.

        Returns:
            Four per-level lists:
                ins_label_list: (num_pos, H, W) uint8 instance mask targets.
                cate_label_list: (num_grid, num_grid) int64 category grid,
                    background cells set to self.num_classes.
                ins_ind_label_list: (num_grid**2,) bool positive indicators.
                grid_order_list: flat grid indices, one per ins_label row.
        """
        device = gt_labels_raw[0].device
        # ins
        # Instance "scale" = sqrt(w * h), used for FPN level assignment.
        gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) *
                              (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))
        ins_label_list = []
        cate_label_list = []
        ins_ind_label_list = []
        grid_order_list = []
        # NOTE: `stride` is unbound below; only self.strides is used here.
        for (lower_bound, upper_bound), stride, num_grid \
                in zip(self.scale_ranges, self.strides, self.seg_num_grids):
            # Instances whose scale falls within this level's range.
            hit_indices = ((gt_areas >= lower_bound) &
                           (gt_areas <= upper_bound)).nonzero(
                               as_tuple=False).flatten()
            num_ins = len(hit_indices)
            ins_label = []
            grid_order = []
            # Category grid defaults to background (= self.num_classes).
            cate_label = torch.zeros([num_grid, num_grid],
                                     dtype=torch.int64,
                                     device=device) + self.num_classes
            ins_ind_label = torch.zeros([num_grid**2],
                                        dtype=torch.bool,
                                        device=device)
            if num_ins == 0:
                # Nothing at this level: emit empty targets and move on.
                ins_label = torch.zeros(
                    [0, mask_feat_size[0], mask_feat_size[1]],
                    dtype=torch.uint8,
                    device=device)
                ins_label_list.append(ins_label)
                cate_label_list.append(cate_label)
                ins_ind_label_list.append(ins_ind_label)
                grid_order_list.append([])
                continue
            gt_bboxes = gt_bboxes_raw[hit_indices]
            gt_labels = gt_labels_raw[hit_indices]
            gt_masks = gt_masks_raw[hit_indices.cpu().numpy(), ...]
            # Half extents of the sigma-shrunk center region of each box.
            half_ws = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * self.sigma
            half_hs = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) * self.sigma
            # mass center
            # gt_masks_pt = torch.from_numpy(gt_masks).to(device=device)
            # center_ws, center_hs = center_of_mass(gt_masks_pt)
            # valid_mask_flags = gt_masks_pt.sum(dim=-1).sum(dim=-1) > 0
            output_stride = 4
            for seg_mask, gt_label, half_h, half_w in zip(
                    gt_masks, gt_labels, half_hs, half_ws):
                if seg_mask.sum() < 10:
                    # Skip near-empty masks.
                    continue
                upsampled_size = (mask_feat_size[0] * 4, mask_feat_size[1] * 4)
                # Grid coordinates of the mask's center of mass.
                center_h, center_w = ndimage.measurements.center_of_mass(
                    seg_mask)
                coord_w = int(
                    (center_w / upsampled_size[1]) // (1. / num_grid))
                coord_h = int(
                    (center_h / upsampled_size[0]) // (1. / num_grid))
                # left, top, right, down
                top_box = max(
                    0,
                    int(((center_h - half_h) / upsampled_size[0]) //
                        (1. / num_grid)))
                down_box = min(
                    num_grid - 1,
                    int(((center_h + half_h) / upsampled_size[0]) //
                        (1. / num_grid)))
                left_box = max(
                    0,
                    int(((center_w - half_w) / upsampled_size[1]) //
                        (1. / num_grid)))
                right_box = min(
                    num_grid - 1,
                    int(((center_w + half_w) / upsampled_size[1]) //
                        (1. / num_grid)))
                # Clamp activation to at most a 3x3 cell window around the
                # center cell, further clipped by the shrunk box extents.
                top = max(top_box, coord_h - 1)
                down = min(down_box, coord_h + 1)
                left = max(coord_w - 1, left_box)
                right = min(right_box, coord_w + 1)
                cate_label[top:(down + 1), left:(right + 1)] = gt_label
                # Downsample the mask to the mask-feature resolution.
                seg_mask = mmcv.imrescale(seg_mask, scale=1. / output_stride)
                seg_mask = torch.Tensor(seg_mask)
                for i in range(top, down + 1):
                    for j in range(left, right + 1):
                        # Flattened grid index of this positive cell.
                        label = int(i * num_grid + j)
                        # Paste the rescaled mask into a full-size canvas.
                        cur_ins_label = torch.zeros(
                            [mask_feat_size[0], mask_feat_size[1]],
                            dtype=torch.uint8,
                            device=device)
                        cur_ins_label[:seg_mask.shape[0], :seg_mask.
                                      shape[1]] = seg_mask
                        ins_label.append(cur_ins_label)
                        ins_ind_label[label] = True
                        grid_order.append(label)
            if len(ins_label) == 0:
                ins_label = torch.zeros(
                    [0, mask_feat_size[0], mask_feat_size[1]],
                    dtype=torch.uint8,
                    device=device)
            else:
                ins_label = torch.stack(ins_label, 0)
            ins_label_list.append(ins_label)
            cate_label_list.append(cate_label)
            ins_ind_label_list.append(ins_ind_label)
            grid_order_list.append(grid_order)
        return ins_label_list, cate_label_list, ins_ind_label_list, \
            grid_order_list
    def get_seg(self,
                cate_preds,
                kernel_preds,
                seg_pred,
                img_metas,
                cfg,
                rescale=None):
        """Run SOLOv2 inference per image over a batch.

        For each image: gather the per-level category scores and dynamic
        kernels, run `get_seg_single`, and convert to mmdet-style bbox/segm
        results via `segm2result`.

        Args:
            cate_preds: per-level category score maps.
            kernel_preds: per-level dynamic kernel maps.
            seg_pred: unified mask feature map for the whole batch.
            img_metas: per-image meta dicts with 'img_shape', 'scale_factor'
                and 'ori_shape'.
            cfg: test config (thresholds, NMS settings); may be None.
            rescale: forwarded to get_seg_single.  # presumably bool — confirm

        Returns:
            Tuple (bbox_result_list, segm_result_list), one entry per image.
        """
        num_levels = len(cate_preds)
        featmap_size = seg_pred.size()[-2:]
        result_list = []
        bbox_result_list = []
        segm_result_list = []
        for img_id in range(len(img_metas)):
            # Flatten each level's category map to (cells, num_classes).
            cate_pred_list = [
                cate_preds[i][img_id].view(-1,
                                           self.cate_out_channels).detach()
                for i in range(num_levels)
            ]
            seg_pred_list = seg_pred[img_id, ...].unsqueeze(0)
            # Flatten each level's kernel map to (cells, kernel_channels).
            kernel_pred_list = [
                kernel_preds[i][img_id].permute(1, 2, 0).view(
                    -1, self.kernel_out_channels).detach()
                for i in range(num_levels)
            ]
            img_shape = img_metas[img_id]['img_shape']
            scale_factor = img_metas[img_id]['scale_factor']
            ori_shape = img_metas[img_id]['ori_shape']
            cate_pred_list = torch.cat(cate_pred_list, dim=0)
            kernel_pred_list = torch.cat(kernel_pred_list, dim=0)
            result = self.get_seg_single(cate_pred_list, seg_pred_list,
                                         kernel_pred_list, featmap_size,
                                         img_shape, ori_shape, scale_factor,
                                         cfg, rescale)
            bbox_result, segm_result = self.segm2result(result)
            bbox_result_list.append(bbox_result)
            segm_result_list.append(segm_result)
            result_list.append(result)
        # # Note WES: use this implementation for inference, e.g. wes_python_demo.py
        return bbox_result_list, segm_result_list
        # # Note WES: use this implementation for test_ins_vis.py
        #return result_list #bbox_result_list,
    # Superseded single-branch (no dynamic kernels) implementation, kept for
    # reference by the WES notes above.
    # def get_seg(self, seg_preds, cate_preds, img_metas, cfg, rescale=None):
    #     assert len(seg_preds) == len(cate_preds)
    #     num_levels = len(cate_preds)
    #     featmap_size = seg_preds[0].size()[-2:]
    #
    #     result_list = []
    #     for img_id in range(len(img_metas)):
    #         cate_pred_list = [
    #             cate_preds[i][img_id].view(-1, self.cate_out_channels).
    #             detach() for i in range(num_levels)
    #         ]
    #         seg_pred_list = [
    #             seg_preds[i][img_id].detach() for i in range(num_levels)
    #         ]
    #         img_shape = img_metas[img_id]['img_shape']
    #         scale_factor = img_metas[img_id]['scale_factor']
    #         ori_shape = img_metas[img_id]['ori_shape']
    #
    #         cate_pred_list = torch.cat(cate_pred_list, dim=0)
    #         seg_pred_list = torch.cat(seg_pred_list, dim=0)
    #
    #         result = self.get_seg_single(cate_pred_list, seg_pred_list,
    #                                      featmap_size, img_shape,
    #                                      ori_shape, scale_factor, cfg,
    #                                      rescale)
    #         result_list.append(result)
    #     return result_list
def segm2result(self, result):
if result is None:
bbox_result = [
np.zeros((0, 5), dtype=np.float32)
for i in range(self.num_classes)
]
# BG is not included in num_classes
segm_result = [[] for _ in range(self.num_classes)]
else:
bbox_result = [
np.zeros((0, 5), dtype=np.float32)
for i in range(self.num_classes)
]
segm_result = [[] for _ in range(self.num_classes)]
seg_pred = result[0].cpu().numpy()
cate_label = result[1].cpu().numpy()
cate_score = result[2].cpu().detach().numpy()
# cate_score = result[2].cpu().numpy()
# tensor.detach().numpy()
num_ins = seg_pred.shape[0]
# fake bboxes
bboxes = np.zeros((num_ins, 5), dtype=np.float32)
bboxes[:, -1] = cate_score
bbox_result = [
bboxes[cate_label == i, :] for i in range(self.num_classes)
]
for idx in range(num_ins):
segm_result[cate_label[idx]].append(seg_pred[idx])
return bbox_result, segm_result
    def get_seg_single(self,
                       cate_preds,
                       seg_preds,
                       kernel_preds,
                       featmap_size,
                       img_shape,
                       ori_shape,
                       scale_factor,
                       cfg,
                       rescale=False,
                       debug=False):
        """SOLOv2 post-processing for a single image.

        Pipeline: threshold category scores -> build per-cell strides ->
        convolve mask features with dynamic kernels -> filter tiny masks ->
        mask-quality rescoring -> Matrix NMS -> keep top-k -> upsample masks
        to the original image size.

        Args:
            cate_preds: (total_cells, num_classes) flattened category scores.
            seg_preds: (1, E, H, W) unified mask features for this image.
            kernel_preds: (total_cells, kernel_channels) dynamic kernels.
            featmap_size: (H, W) of the mask feature map.
            img_shape: padded input image shape (h, w, c).
            ori_shape: original image shape; masks are resized to it.
            scale_factor: image resize factor.
                # NOTE(review): scale_factor, rescale and debug are unused
                # in this body — kept for interface compatibility.
            cfg: test config with score_thr, mask_thr, nms_pre, kernel,
                sigma, update_thr, max_per_img; falls back to self.test_cfg.

        Returns:
            None if no detection survives, otherwise a tuple
            (seg_masks, cate_labels, cate_scores).
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cate_preds) == len(kernel_preds)
        # overall info.
        h, w, _ = img_shape
        upsampled_size_out = (featmap_size[0] * 4, featmap_size[1] * 4)
        # Keep only cells whose class score clears the threshold.
        inds = (cate_preds > cfg.score_thr)
        cate_scores = cate_preds[inds]
        if len(cate_scores) == 0:
            return None
        # cate_labels & kernel_preds: column index is the class label,
        # row index selects the cell's dynamic kernel.
        inds = inds.nonzero(as_tuple=False)
        cate_labels = inds[:, 1]
        kernel_preds = kernel_preds[inds[:, 0]]
        # trans vector: cumulative cell counts mark level boundaries, so each
        # flat cell index can be mapped to its level's stride.
        size_trans = cate_labels.new_tensor(
            self.seg_num_grids).pow(2).cumsum(0)
        strides = kernel_preds.new_ones(size_trans[-1])
        n_stage = len(self.seg_num_grids)
        strides[:size_trans[0]] *= self.strides[0]
        for ind_ in range(1, n_stage):
            strides[size_trans[ind_ -
                               1]:size_trans[ind_]] *= self.strides[ind_]
        strides = strides[inds[:, 0]]
        # mask encoding: each kernel becomes a 1x1 conv over the features.
        I, N = kernel_preds.shape
        kernel_preds = kernel_preds.view(I, N, 1, 1)
        seg_preds = F.conv2d(
            seg_preds, kernel_preds, stride=1).squeeze(0).sigmoid()
        # Binarize masks.
        seg_masks = seg_preds > cfg.mask_thr
        sum_masks = seg_masks.sum((1, 2)).float()
        # Filter masks smaller (in pixels) than their level's stride.
        keep = sum_masks > strides
        if keep.sum() == 0:
            return None
        seg_masks = seg_masks[keep, ...]
        seg_preds = seg_preds[keep, ...]
        sum_masks = sum_masks[keep]
        cate_scores = cate_scores[keep]
        cate_labels = cate_labels[keep]
        # mask scoring: rescale class score by mean foreground confidence.
        seg_scores = (seg_preds * seg_masks.float()).sum((1, 2)) / sum_masks
        cate_scores *= seg_scores
        # sort and keep top nms_pre
        sort_inds = torch.argsort(cate_scores, descending=True)
        if len(sort_inds) > cfg.nms_pre:
            sort_inds = sort_inds[:cfg.nms_pre]
        seg_masks = seg_masks[sort_inds, :, :]
        seg_preds = seg_preds[sort_inds, :, :]
        sum_masks = sum_masks[sort_inds]
        cate_scores = cate_scores[sort_inds]
        cate_labels = cate_labels[sort_inds]
        # Matrix NMS: soft-decays scores of overlapping masks.
        cate_scores = matrix_nms(
            seg_masks,
            cate_labels,
            cate_scores,
            kernel=cfg.kernel,
            sigma=cfg.sigma,
            sum_masks=sum_masks)
        # Filter by the post-NMS score threshold.
        keep = cate_scores >= cfg.update_thr
        if keep.sum() == 0:
            return None
        seg_preds = seg_preds[keep, :, :]
        cate_scores = cate_scores[keep]
        cate_labels = cate_labels[keep]
        # sort and keep top_k
        sort_inds = torch.argsort(cate_scores, descending=True)
        if len(sort_inds) > cfg.max_per_img:
            sort_inds = sort_inds[:cfg.max_per_img]
        seg_preds = seg_preds[sort_inds, :, :]
        cate_scores = cate_scores[sort_inds]
        cate_labels = cate_labels[sort_inds]
        # Upsample to padded input size, crop padding, then resize to the
        # original image shape and re-binarize.
        seg_preds = F.interpolate(
            seg_preds.unsqueeze(0),
            size=upsampled_size_out,
            mode='bilinear',
            align_corners=False)[:, :, :h, :w]
        seg_masks = F.interpolate(
            seg_preds,
            size=ori_shape[:2],
            mode='bilinear',
            align_corners=False).squeeze(0)
        seg_masks = seg_masks > cfg.mask_thr
        return seg_masks, cate_labels, cate_scores
| [
"torch.sqrt",
"torch.cat",
"torch.arange",
"mmdet.core.matrix_nms",
"mmcv.cnn.ConvModule",
"mmcv.cnn.normal_init",
"torch.Tensor",
"torch.nn.functional.max_pool2d",
"torch.zeros",
"mmcv.imrescale",
"mmdet.core.multi_apply",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.argsort",
"torc... | [((409, 472), 'torch.arange', 'torch.arange', (['(0)', 'h'], {'dtype': 'torch.float32', 'device': 'bitmasks.device'}), '(0, h, dtype=torch.float32, device=bitmasks.device)\n', (421, 472), False, 'import torch\n'), ((482, 545), 'torch.arange', 'torch.arange', (['(0)', 'w'], {'dtype': 'torch.float32', 'device': 'bitmasks.device'}), '(0, w, dtype=torch.float32, device=bitmasks.device)\n', (494, 545), False, 'import torch\n'), ((862, 931), 'torch.nn.functional.max_pool2d', 'nn.functional.max_pool2d', (['heat', '(kernel, kernel)'], {'stride': '(1)', 'padding': '(1)'}), '(heat, (kernel, kernel), stride=1, padding=1)\n', (886, 931), True, 'import torch.nn as nn\n'), ((1180, 1208), 'torch.sum', 'torch.sum', (['(input * target)', '(1)'], {}), '(input * target, 1)\n', (1189, 1208), False, 'import torch\n'), ((1217, 1244), 'torch.sum', 'torch.sum', (['(input * input)', '(1)'], {}), '(input * input, 1)\n', (1226, 1244), False, 'import torch\n'), ((1261, 1290), 'torch.sum', 'torch.sum', (['(target * target)', '(1)'], {}), '(target * target, 1)\n', (1270, 1290), False, 'import torch\n'), ((5979, 5994), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (5992, 5994), True, 'import torch.nn as nn\n'), ((6023, 6038), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6036, 6038), True, 'import torch.nn as nn\n'), ((7341, 7412), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.seg_feat_channels', 'self.cate_out_channels', '(3)'], {'padding': '(1)'}), '(self.seg_feat_channels, self.cate_out_channels, 3, padding=1)\n', (7350, 7412), True, 'import torch.nn as nn\n'), ((7454, 7527), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.seg_feat_channels', 'self.kernel_out_channels', '(3)'], {'padding': '(1)'}), '(self.seg_feat_channels, self.kernel_out_channels, 3, padding=1)\n', (7463, 7527), True, 'import torch.nn as nn\n'), ((7962, 7987), 'mmcv.cnn.bias_init_with_prob', 'bias_init_with_prob', (['(0.01)'], {}), '(0.01)\n', (7981, 7987), False, 'from mmcv.cnn import ConvModule, 
bias_init_with_prob, normal_init\n'), ((7996, 8049), 'mmcv.cnn.normal_init', 'normal_init', (['self.solo_cate'], {'std': '(0.01)', 'bias': 'bias_cate'}), '(self.solo_cate, std=0.01, bias=bias_cate)\n', (8007, 8049), False, 'from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init\n'), ((8058, 8097), 'mmcv.cnn.normal_init', 'normal_init', (['self.solo_kernel'], {'std': '(0.01)'}), '(self.solo_kernel, std=0.01)\n', (8069, 8097), False, 'from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init\n'), ((9452, 9531), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)', 'ins_kernel_feat.shape[-1]'], {'device': 'ins_kernel_feat.device'}), '(-1, 1, ins_kernel_feat.shape[-1], device=ins_kernel_feat.device)\n', (9466, 9531), False, 'import torch\n'), ((9563, 9642), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)', 'ins_kernel_feat.shape[-2]'], {'device': 'ins_kernel_feat.device'}), '(-1, 1, ins_kernel_feat.shape[-2], device=ins_kernel_feat.device)\n', (9577, 9642), False, 'import torch\n'), ((9671, 9703), 'torch.meshgrid', 'torch.meshgrid', (['y_range', 'x_range'], {}), '(y_range, x_range)\n', (9685, 9703), False, 'import torch\n'), ((9845, 9865), 'torch.cat', 'torch.cat', (['[x, y]', '(1)'], {}), '([x, y], 1)\n', (9854, 9865), False, 'import torch\n'), ((9892, 9935), 'torch.cat', 'torch.cat', (['[ins_kernel_feat, coord_feat]', '(1)'], {}), '([ins_kernel_feat, coord_feat], 1)\n', (9901, 9935), False, 'import torch\n'), ((10068, 10155), 'torch.nn.functional.interpolate', 'F.interpolate', (['kernel_feat'], {'size': 'seg_num_grid', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(kernel_feat, size=seg_num_grid, mode='bilinear',\n align_corners=False)\n", (10081, 10155), True, 'import torch.nn.functional as F\n'), ((11230, 11346), 'mmdet.core.multi_apply', 'multi_apply', (['self.solov2_target_single', 'gt_bbox_list', 'gt_label_list', 'gt_mask_list'], {'mask_feat_size': 'mask_feat_size'}), '(self.solov2_target_single, gt_bbox_list, gt_label_list,\n 
gt_mask_list, mask_feat_size=mask_feat_size)\n', (11241, 11346), False, 'from mmdet.core import matrix_nms, multi_apply\n'), ((13306, 13331), 'torch.cat', 'torch.cat', (['ins_ind_labels'], {}), '(ins_ind_labels)\n', (13315, 13331), False, 'import torch\n'), ((14020, 14042), 'torch.cat', 'torch.cat', (['cate_labels'], {}), '(cate_labels)\n', (14029, 14042), False, 'import torch\n'), ((14224, 14245), 'torch.cat', 'torch.cat', (['cate_preds'], {}), '(cate_preds)\n', (14233, 14245), False, 'import torch\n'), ((14977, 15083), 'torch.sqrt', 'torch.sqrt', (['((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * (gt_bboxes_raw[:, 3] -\n gt_bboxes_raw[:, 1]))'], {}), '((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * (gt_bboxes_raw[:, \n 3] - gt_bboxes_raw[:, 1]))\n', (14987, 15083), False, 'import torch\n'), ((26536, 26579), 'torch.argsort', 'torch.argsort', (['cate_scores'], {'descending': '(True)'}), '(cate_scores, descending=True)\n', (26549, 26579), False, 'import torch\n'), ((26938, 27047), 'mmdet.core.matrix_nms', 'matrix_nms', (['seg_masks', 'cate_labels', 'cate_scores'], {'kernel': 'cfg.kernel', 'sigma': 'cfg.sigma', 'sum_masks': 'sum_masks'}), '(seg_masks, cate_labels, cate_scores, kernel=cfg.kernel, sigma=\n cfg.sigma, sum_masks=sum_masks)\n', (26948, 27047), False, 'from mmdet.core import matrix_nms, multi_apply\n'), ((27405, 27448), 'torch.argsort', 'torch.argsort', (['cate_scores'], {'descending': '(True)'}), '(cate_scores, descending=True)\n', (27418, 27448), False, 'import torch\n'), ((7694, 7723), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (7705, 7723), False, 'from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init\n'), ((7772, 7801), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (7783, 7801), False, 'from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init\n'), ((9029, 9106), 'torch.nn.functional.interpolate', 'F.interpolate', (['feats[0]'], 
{'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(feats[0], scale_factor=2, mode='bilinear', align_corners=False)\n", (9042, 9106), True, 'import torch.nn.functional as F\n'), ((9202, 9293), 'torch.nn.functional.interpolate', 'F.interpolate', (['feats[4]'], {'size': 'feats[3].shape[-2:]', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(feats[4], size=feats[3].shape[-2:], mode='bilinear',\n align_corners=False)\n", (9215, 9293), True, 'import torch.nn.functional as F\n'), ((11519, 11604), 'torch.cat', 'torch.cat', (['[ins_labels_level_img for ins_labels_level_img in ins_labels_level]', '(0)'], {}), '([ins_labels_level_img for ins_labels_level_img in\n ins_labels_level], 0)\n', (11528, 11604), False, 'import torch\n'), ((14538, 14558), 'torch.sigmoid', 'torch.sigmoid', (['input'], {}), '(input)\n', (14551, 14558), False, 'import torch\n'), ((15849, 15910), 'torch.zeros', 'torch.zeros', (['[num_grid ** 2]'], {'dtype': 'torch.bool', 'device': 'device'}), '([num_grid ** 2], dtype=torch.bool, device=device)\n', (15860, 15910), False, 'import torch\n'), ((21034, 21066), 'torch.cat', 'torch.cat', (['cate_pred_list'], {'dim': '(0)'}), '(cate_pred_list, dim=0)\n', (21043, 21066), False, 'import torch\n'), ((21098, 21132), 'torch.cat', 'torch.cat', (['kernel_pred_list'], {'dim': '(0)'}), '(kernel_pred_list, dim=0)\n', (21107, 21132), False, 'import torch\n'), ((23979, 24019), 'numpy.zeros', 'np.zeros', (['(num_ins, 5)'], {'dtype': 'np.float32'}), '((num_ins, 5), dtype=np.float32)\n', (23987, 24019), True, 'import numpy as np\n'), ((6363, 6502), 'mmcv.cnn.ConvModule', 'ConvModule', (['chn', 'self.seg_feat_channels', '(3)'], {'stride': '(1)', 'padding': '(1)', 'conv_cfg': 'cfg_conv', 'norm_cfg': 'self.norm_cfg', 'bias': '(self.norm_cfg is None)'}), '(chn, self.seg_feat_channels, 3, stride=1, padding=1, conv_cfg=\n cfg_conv, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)\n', (6373, 6502), False, 'from mmcv.cnn import ConvModule, 
bias_init_with_prob, normal_init\n'), ((6786, 6925), 'mmcv.cnn.ConvModule', 'ConvModule', (['chn', 'self.seg_feat_channels', '(3)'], {'stride': '(1)', 'padding': '(1)', 'conv_cfg': 'cfg_conv', 'norm_cfg': 'self.norm_cfg', 'bias': '(self.norm_cfg is None)'}), '(chn, self.seg_feat_channels, 3, stride=1, padding=1, conv_cfg=\n cfg_conv, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)\n', (6796, 6925), False, 'from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init\n'), ((12951, 12976), 'torch.cat', 'torch.cat', (['b_mask_pred', '(0)'], {}), '(b_mask_pred, 0)\n', (12960, 12976), False, 'import torch\n'), ((14634, 14654), 'torch.cat', 'torch.cat', (['loss_mask'], {}), '(loss_mask)\n', (14643, 14654), False, 'import torch\n'), ((15660, 15727), 'torch.zeros', 'torch.zeros', (['[num_grid, num_grid]'], {'dtype': 'torch.int64', 'device': 'device'}), '([num_grid, num_grid], dtype=torch.int64, device=device)\n', (15671, 15727), False, 'import torch\n'), ((16047, 16139), 'torch.zeros', 'torch.zeros', (['[0, mask_feat_size[0], mask_feat_size[1]]'], {'dtype': 'torch.uint8', 'device': 'device'}), '([0, mask_feat_size[0], mask_feat_size[1]], dtype=torch.uint8,\n device=device)\n', (16058, 16139), False, 'import torch\n'), ((17321, 17366), 'scipy.ndimage.measurements.center_of_mass', 'ndimage.measurements.center_of_mass', (['seg_mask'], {}), '(seg_mask)\n', (17356, 17366), False, 'from scipy import ndimage\n'), ((18621, 18672), 'mmcv.imrescale', 'mmcv.imrescale', (['seg_mask'], {'scale': '(1.0 / output_stride)'}), '(seg_mask, scale=1.0 / output_stride)\n', (18635, 18672), False, 'import mmcv\n'), ((18699, 18721), 'torch.Tensor', 'torch.Tensor', (['seg_mask'], {}), '(seg_mask)\n', (18711, 18721), False, 'import torch\n'), ((19437, 19529), 'torch.zeros', 'torch.zeros', (['[0, mask_feat_size[0], mask_feat_size[1]]'], {'dtype': 'torch.uint8', 'device': 'device'}), '([0, mask_feat_size[0], mask_feat_size[1]], dtype=torch.uint8,\n device=device)\n', (19448, 19529), False, 
'import torch\n'), ((19633, 19658), 'torch.stack', 'torch.stack', (['ins_label', '(0)'], {}), '(ins_label, 0)\n', (19644, 19658), False, 'import torch\n'), ((23219, 23253), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {'dtype': 'np.float32'}), '((0, 5), dtype=np.float32)\n', (23227, 23253), True, 'import numpy as np\n'), ((23487, 23521), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {'dtype': 'np.float32'}), '((0, 5), dtype=np.float32)\n', (23495, 23521), True, 'import numpy as np\n'), ((27888, 27975), 'torch.nn.functional.interpolate', 'F.interpolate', (['seg_preds'], {'size': 'ori_shape[:2]', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(seg_preds, size=ori_shape[:2], mode='bilinear', align_corners\n =False)\n", (27901, 27975), True, 'import torch.nn.functional as F\n'), ((12699, 12744), 'torch.nn.functional.conv2d', 'F.conv2d', (['cur_ins_pred', 'kernel_pred'], {'stride': '(1)'}), '(cur_ins_pred, kernel_pred, stride=1)\n', (12707, 12744), True, 'import torch.nn.functional as F\n'), ((18917, 19006), 'torch.zeros', 'torch.zeros', (['[mask_feat_size[0], mask_feat_size[1]]'], {'dtype': 'torch.uint8', 'device': 'device'}), '([mask_feat_size[0], mask_feat_size[1]], dtype=torch.uint8,\n device=device)\n', (18928, 19006), False, 'import torch\n'), ((25849, 25892), 'torch.nn.functional.conv2d', 'F.conv2d', (['seg_preds', 'kernel_preds'], {'stride': '(1)'}), '(seg_preds, kernel_preds, stride=1)\n', (25857, 25892), True, 'import torch.nn.functional as F\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of bag-related robotics tasks.
Bag type and radii at the start, with no perturbations on the spheres.
- bag 1, radius at very start: 0.1000 (as expected)
- bag 2, radius at very start: 0.0981
- bag 3, radius at very start: 0.0924
- bag 4, radius at very start: 0.0831
- bag 5, radius at very start: 0.0707
The gripping threshold, `self._def_threshold`, is slightly lower compared to
cloth tasks, because with bags that combine beads and vertices, it's normally
better (for physics purposes) to grip beads instead of vertices.
Reminder: add any soft body IDs to `self.def_ids`.
"""
import os
import time
import cv2
import numpy as np
import pybullet as p
from ravens import utils as U
from ravens.tasks import Task
# Maps bag type to its mesh file; larger z-thresholds cut the sphere lower,
# giving more vertices (numV) and, per the module docstring, a smaller
# starting top-ring radius.
BAGS_TO_FILES = {
    1: 'assets/bags/bl_sphere_bag_rad_1.0_zthresh_0.1_numV_257.obj',
    2: 'assets/bags/bl_sphere_bag_rad_1.0_zthresh_0.3_numV_289.obj',
    3: 'assets/bags/bl_sphere_bag_rad_1.0_zthresh_0.4_numV_321.obj',
    4: 'assets/bags/bl_sphere_bag_rad_1.0_zthresh_0.6_numV_353.obj',
    5: 'assets/bags/bl_sphere_bag_rad_1.0_zthresh_0.8_numV_385.obj',
}
# An identity pose we can use to gracefully handle failure cases.
IDENTITY = {
    'pose0': ((0.3, 0, 0.3), (0, 0, 0, 1)),
    'pose1': ((0.3, 0, 0.3), (0, 0, 0, 1))
}  # TODO(daniel) remove
# Bead-fraction threshold.  # NOTE(review): usage not visible in this chunk.
BEAD_THRESH = 0.33  # TODO(daniel) make cleaner
class BagEnv(Task):
"""Superclass for tasks that use bags."""
def __init__(self):
super().__init__()
self.ee = 'suction'
self.primitive = 'pick_place'
self.max_steps = 11
self._settle_secs = 0
# Gripping parameters. Empirically, 0.020 works well.
self._def_threshold = 0.020
self._def_nb_anchors = 1
# Scale the bag / zone. The zone.obj ranges from (-20,20).
self._zone_scale = 0.0130
self._bag_scale = 0.10
self._zone_length = (20. * self._zone_scale)
self.zone_size = (20. * self._zone_scale, 20. * self._zone_scale, 0.0)
self._bag_size = (1. * self._bag_scale, 1. * self._bag_scale, 0.01)
# Bag type (or resolution?) and parameters.
self._bag = 4
self._mass = 1.0
self._scale = 0.25
self._collision_margin = 0.003
self._base_orn = [np.pi / 2.0, 0.0, 0.0]
self._f_bag = BAGS_TO_FILES[self._bag]
self._drop_height = 0.15
def add_zone(self, env):
"""Adds green square target zone, size based on zone_scale.
Similar to add_zone for the cloth tasks, except it's not necessary to
pre-assign zone poses (no goal-conditioning) or to find the zone mask.
Args:
env: A ravens environment.
"""
zone_template = 'assets/zone/zone-template.urdf'
replace = {'LENGTH': (self._zone_scale, self._zone_scale)}
zone_urdf = self.fill_template(zone_template, replace)
self.zone_pose = self.random_pose(env, self.zone_size)
# Add objects in a consistent manner.
zone_id = env.add_object(zone_urdf, self.zone_pose, fixed=True)
os.remove(zone_urdf)
# As in cloth tasks, assign zone to reference it later, for removal.
self.zone_id = zone_id
def add_cube(self, env, pose, global_scaling=1.0):
"""Andy's ravens/block's default size should be (0.04, 0.04, 0.04)."""
cube_id = p.loadURDF(
fileName='assets/block/block_for_anchors.urdf',
basePosition=pose[0],
baseOrientation=pose[1],
globalScaling=global_scaling,
useMaximalCoordinates=True)
# Add objects in a consistent manner.
self.object_points[cube_id] = np.float32((0, 0, 0)).reshape(3, 1)
env.objects.append(cube_id)
return cube_id
def add_random_box(self, env, max_total_dims):
"""Generate randomly shaped box, adapted from the aligning task.
Make rand_x and rand_y add up to the max_total. The aligning env uses
a box with mass 0.1, but this one can be lighter. Heavier boxes mean
the robot will not fully lift the bag off the ground.
Args:
env: Environment, used for the add_object convenience method.
max_total_dims: To control dimensions of boxes. Recommended to keep
this value at a level making these boxes comparable to the cubes,
if not smaller, used in bag-items-easy.
Returns:
Tuple with The PyBullet integer ID for the box, and the box size,
which is randomly drawn (in case we use it later).
"""
min_val = 0.015
assert min_val * 2 <= max_total_dims, min_val
rand_x = np.random.uniform(min_val, max_total_dims - min_val)
rand_y = max_total_dims - rand_x
box_size = (rand_x, rand_y, 0.03)
# Add box. See tasks/aligning.py.
box_template = 'assets/box/box-template.urdf'
box_urdf = self.fill_template(box_template, {'DIM': box_size})
box_pose = self.random_pose(env, box_size)
box_id = env.add_object(box_urdf, box_pose)
os.remove(box_urdf)
self.color_random_brown(box_id)
self.object_points[box_id] = np.float32(
(0, 0, 0)).reshape(3, 1) # TODO(daniel) remove?
return (box_id, box_size)
def add_cable_ring(self, env):
"""Make the cable beads coincide with the vertices of the top ring.
Should lead to better physics and will make it easy for an algorithm
to see the bag's top ring. Notable differences between this and the
cables: (1) we don't need to discretize rotations and manually
compute bead positions, because the previously created bag does it
for us, (2) Beads have anchors with vertices, in ADDITION to
constraints with adjacent beads.
Args:
env: A ravens environment.
"""
num_parts = len(self._top_ring_idxs)
radius = 0.005
color = U.COLORS['blue'] + [1]
beads = []
bead_positions_l = []
part_shape = p.createCollisionShape(p.GEOM_BOX, halfExtents=[radius] * 3)
part_visual = p.createVisualShape(p.GEOM_SPHERE, radius=radius * 1.5)
# Fortunately `verts_l` coincides with `self._top_ring_idxs`.
_, verts_l = p.getMeshData(self.bag_id)
# Iterate through parts and create constraints as needed.
for i in range(num_parts):
bag_vidx = self._top_ring_idxs[i]
bead_position = np.float32(verts_l[bag_vidx])
part_id = p.createMultiBody(
0.01, part_shape, part_visual, basePosition=bead_position)
p.changeVisualShape(part_id, -1, rgbaColor=color)
if i > 0:
parent_frame = bead_position - bead_positions_l[-1]
constraint_id = p.createConstraint(
parentBodyUniqueId=beads[-1],
parentLinkIndex=-1,
childBodyUniqueId=part_id,
childLinkIndex=-1,
jointType=p.JOINT_POINT2POINT,
jointAxis=(0, 0, 0),
parentFramePosition=parent_frame,
childFramePosition=(0, 0, 0))
p.changeConstraint(constraint_id, maxForce=100)
# Make a constraint with i=0. Careful with `parent_frame`!
if i == num_parts - 1:
parent_frame = bead_positions_l[0] - bead_position
constraint_id = p.createConstraint(
parentBodyUniqueId=part_id,
parentLinkIndex=-1,
childBodyUniqueId=beads[0],
childLinkIndex=-1,
jointType=p.JOINT_POINT2POINT,
jointAxis=(0, 0, 0),
parentFramePosition=parent_frame,
childFramePosition=(0, 0, 0))
p.changeConstraint(constraint_id, maxForce=100)
# Create constraint between a bead and certain bag vertices.
_ = p.createSoftBodyAnchor(
softBodyBodyUniqueId=self.bag_id,
nodeIndex=bag_vidx,
bodyUniqueId=part_id,
linkIndex=-1)
# Track beads.
beads.append(part_id)
bead_positions_l.append(bead_position)
# Add objects in a consistent manner.
self.cable_bead_ids.append(part_id)
env.objects.append(part_id)
self.object_points[part_id] = np.float32((0, 0, 0)).reshape(3, 1)
def add_bag(self, env, base_pos, base_orn): # pylint: disable=g-doc-args
"""Adding a bag from an .obj file.
Since this is a soft body, need to add this ID to `self.def_ids.
Returns:
bullet object id for the bag.
"""
bag_id = p.loadSoftBody(
fileName=self._f_bag,
basePosition=base_pos,
baseOrientation=base_orn,
collisionMargin=self._collision_margin,
scale=self._bag_scale,
mass=self._mass,
useNeoHookean=0,
useBendingSprings=1,
useMassSpring=1,
springElasticStiffness=40,
springDampingStiffness=0.1,
springDampingAllDirections=0,
useSelfCollision=1,
frictionCoeff=1.0,
useFaceContact=1)
# Add objects in a consistent manner.
self.object_points[bag_id] = np.float32((0, 0, 0)).reshape(3, 1)
env.objects.append(bag_id)
self.def_ids.append(bag_id)
return bag_id
def _sample_bag_orientation(self):
"""Sample the bag (and let it drop) to get interesting starting states."""
orn = [
self._base_orn[0] + np.random.normal(loc=0.0, scale=self._scale),
self._base_orn[1] + np.random.normal(loc=0.0, scale=self._scale),
self._base_orn[2] + np.random.normal(loc=0.0, scale=self._scale),
]
return p.getQuaternionFromEuler(orn)
@property
def circle_area(self):
return self._circle_area
@property
def area_thresh(self):
"""Testing with bag-alone-open, similar to cable-ring, slightly lower?"""
return 0.70
@property
def circle_target_positions(self):
return self._target_positions
@property
def circle_target_center(self):
return self._circle_center
@property
def top_ring_idxs(self):
return self._top_ring_idxs
@property
def def_threshold(self):
return self._def_threshold
@property
def def_nb_anchors(self):
return self._def_nb_anchors
def understand_bag_top_ring(self, env, base_pos): # pylint: disable=g-doc-args
"""By our circular bag design, there exists a top ring file.
Reading it gives us several important pieces of information. We assign
to:
_top_ring_idxs: indices of the vertices (out of entire bag).
_top_ring_posi: their starting xyz positions (BEFORE simulation
or applying pose transformations). This way we can get the
area of the circle. We can't take the rotated bag and map
vertices to the xy plane, because any rotation will make the
area artificially smaller.
The .txt file saves in (x,y,z) order but the .obj files put z second.
Make sure vertex indices are MONOTONICALLY INCREASING since I use
that assumption to 'assign' vertex indices in order to targets.
Input: base_pos, the center of the bag's sphere.
"""
self._top_ring_f = (self._f_bag).replace('.obj', '_top_ring.txt')
self._top_ring_f = os.path.join('ravens', self._top_ring_f)
self._top_ring_idxs = [] # is this the same as p.getMeshData?
self._top_ring_posi = [] # for raw, non-scaled bag
with open(self._top_ring_f, 'r') as fh:
for line in fh:
ls = (line.rstrip()).split()
vidx = int(ls[0])
vx, vy, vz = float(ls[1]), float(ls[2]), float(ls[3])
if len(self._top_ring_idxs) >= 1:
assert vidx > self._top_ring_idxs[-1], \
f'Wrong: {vidx} vs {self._top_ring_idxs}'
self._top_ring_idxs.append(vidx)
self._top_ring_posi.append((vx, vy, vz))
# Next, define a target zone. This makes a bunch of plus signs in a
# circular fashion from the xy projection of the ring.
self._target_positions = []
for item in self._top_ring_posi:
sx, sy, _ = item
sx = sx * self._bag_scale + base_pos[0]
sy = sy * self._bag_scale + base_pos[1]
self._target_positions.append((sx, sy, 0))
if self._targets_visible:
square_pose = ((sx, sy, 0.001), (0, 0, 0, 1))
square_template = 'assets/square/square-template-allsides-green.urdf'
replace = {'DIM': (0.004,), 'HALF': (0.004 / 2,)}
urdf = self.fill_template(square_template, replace)
env.add_object(urdf, square_pose, fixed=True)
os.remove(urdf)
# Fit a circle and print some statistics, can be used by demonstrator.
# We should be careful to consider nonplanar cases, etc.
xc, yc, rad, _ = U.fit_circle(
self._top_ring_posi, self._bag_scale, debug=False)
self._circle_area = np.pi * (rad**2)
self._circle_center = (xc * self._bag_scale + base_pos[0],
yc * self._bag_scale + base_pos[1])
def _apply_small_force(self, num_iters, fx=10, fy=10, fz=8, debug=False):
    """Perturb the freshly spawned bag by pushing one random bead.

    A bead is drawn uniformly from `self.cable_bead_ids`, the x and y
    force components are resampled uniformly from [-fx, fx] and
    [-fy, fy], and the resulting constant force is applied for
    `num_iters` simulation steps in the bead's link frame.
    """
    chosen = self.cable_bead_ids[np.random.randint(len(self.cable_bead_ids))]
    # Resample the horizontal components; keep fz as given.
    fx = np.random.randint(low=-fx, high=fx + 1)
    fy = np.random.randint(low=-fy, high=fy + 1)
    force = [fx, fy, fz]
    for _ in range(num_iters):
        p.applyExternalForce(
            chosen,
            linkIndex=-1,
            forceObj=force,
            posObj=[0, 0, 0],
            flags=p.LINK_FRAME)
    if debug:
        print(f'Perturbing {chosen}: [{fx:0.2f}, {fy:0.2f}, {fz:0.2f}]')
class BagAloneOpen(BagEnv):
    """Open a single perturbed bag.

    The bead-opening objective mirrors the cable-ring task. A zone is
    spawned and immediately removed during reset, so the bag ends up with
    the same color as in bag-items-{easy,hard}, at least for
    PyBullet 2.8.4.
    """

    def __init__(self):
        super().__init__()
        self.task_stage = 1
        self.max_steps = 11
        self.metric = 'bag-alone-open'
        self._name = 'bag-alone-open'
        self._settle_secs = 5
        self._targets_visible = False
        # Keep the zone tiny: it only exists to recolor the bag.
        self._zone_scale = 0.004
        # More iterations -> stronger perturbation forces on the bag.
        self.num_force_iters = 12
        # Pick-place primitive parameters. prepick_z stays below 0.3
        # because checking grips against deformables is slow.
        self.primitive_params = {
            1: {
                'speed': 0.003,
                'delta_z': -0.001,
                'prepick_z': 0.10,
                'postpick_z': 0.05,
                'preplace_z': 0.05,
                'pause_place': 0.5,
            },
        }

    def reset(self, env):
        """Respawn the bag mid-air, let it settle, then perturb it."""
        self.t = 0
        self.task_stage = 1
        self.total_rewards = 0
        self.object_points = {}
        self.cable_bead_ids = []
        self.def_ids = []
        # The zone's only purpose is to give the bag a certain color.
        self.add_zone(env)
        # Sample the bag pose in the air so it can drop naturally.
        bpos, _ = self.random_pose(env, self._bag_size)
        self.base_pos = [bpos[0], bpos[1], self._drop_height]
        self.base_orn = self._sample_bag_orientation()
        # Spawn the bag, read its top-ring metadata, attach the bead cable.
        self.bag_id = self.add_bag(env, self.base_pos, self.base_orn)
        self.understand_bag_top_ring(env, self.base_pos)
        self.add_cable_ring(env)
        # Forces can only be applied once the env is running.
        env.start()
        self._apply_small_force(num_iters=self.num_force_iters)
        # Drop the zone body -- it was only there for the bag color.
        p.removeBody(self.zone_id)
        time.sleep(self._settle_secs)
        env.pause()
class BagItemsEasy(BagEnv):
    """Like BagAlone except we add other stuff.

    Right now I'm trying to make the demonstrator follow one of three stages,
    where the stages are bag opening, item insertion, and bag moving. For a
    consistent API among other 'bag-items' environments, please put all items
    to be inserted in `self.item_ids[]` and use `self.items_in_bag_ids` to
    track those IDs which are already inserted (or at least, which the
    demonstrator thinks is inserted).
    """

    def __init__(self):
        super().__init__()
        self.max_steps = 11
        self.metric = 'bag-items'
        self._name = 'bag-items-easy'
        self._settle_secs = 5
        self._targets_visible = False
        # Can make this smaller compared to bag-items-alone.
        self.num_force_iters = 8
        # Extra items, in addition to the bag.
        self._nb_items = 1
        # Env reference so we can call Task.get_object_masks(env)
        self.env = None
        # Parameters for pick_place primitive, which is task dependent.
        # stage 1: bag opening. [Copying params from bag-alone-open]
        # stage 2: item insertion.
        # stage 3: bag pulling.
        self.primitive_params = {
            1: {
                'speed': 0.003,
                'delta_z': -0.001,
                'prepick_z': 0.10,
                'postpick_z': 0.05,
                'preplace_z': 0.05,
                'pause_place': 0.5,
            },
            2: {
                'speed': 0.010,
                'delta_z': -0.001,
                'prepick_z': 0.10,  # hopefully makes it faster
                'postpick_z': 0.30,
                'preplace_z': 0.30,
                'pause_place': 0.0,
            },
            3: {
                'speed': 0.002,  # Will this slow bag movement?
                'delta_z': -0.001,
                'prepick_z': 0.08,  # hopefully makes it faster
                'postpick_z': 0.40,
                'preplace_z': 0.40,
                'pause_place': 2.0,
            },
        }
        self.task_stage = 1

    def reset(self, env):
        """Reset episode state, spawn the bag plus item(s), and perturb the bag."""
        self.total_rewards = 0
        self.object_points = {}
        self.t = 0
        self.task_stage = 1
        self.cable_bead_ids = []
        self.def_ids = []
        self.env = env
        # New stuff versus bag-alone-open, to better track stats.
        self.item_ids = []
        self.items_in_bag_ids = []
        self.item_sizes = []
        # Add square target zone.
        self.add_zone(env)
        # Pose of the bag, sample mid-air to let it drop naturally.
        bpos, _ = self.random_pose(env, self._bag_size)
        self.base_pos = [bpos[0], bpos[1], self._drop_height]
        self.base_orn = self._sample_bag_orientation()
        # Add the bag, load info about top ring, and make a cable.
        self.bag_id = self.add_bag(env, self.base_pos, self.base_orn)
        self.understand_bag_top_ring(env, self.base_pos)
        self.add_cable_ring(env)
        # Add cube(s). The size is straight from the urdf.
        item_size = (0.04, 0.04, 0.04)
        for _ in range(self._nb_items):
            item_pose = self.random_pose(env, item_size)
            item_id = self.add_cube(env, pose=item_pose, global_scaling=1.0)
            self.item_ids.append(item_id)
            self.item_sizes.append(item_size)
        # Env must begin before we can apply forces to perturb the bag.
        env.start()
        self._apply_small_force(num_iters=self.num_force_iters)
        time.sleep(self._settle_secs)
        env.pause()

    # TODO(daniel) clean up method?
    def determine_task_stage(self,
                             colormap=None,
                             heightmap=None,
                             object_mask=None,
                             visible_beads=None):  # pylint: disable=g-doc-args
        """Get the task stage in a consistent manner among different policies.

        When training an oracle policy, we can determine the training stage,
        which is critical because of this task's particular quirks in
        requiring different action parameters (particularly height of the
        pull) for each stage. One option is to use this method to determine
        the hard-coded task stage for each task. This does depend on the
        learned policy inferring when to switch among task stages?

        Returns:
          Tuple, first item is False if the task is almost certainly going
          to fail, and the second provides valid placing pixels (locations)
          if it's relevant to the task stage.
        """
        # Advance to bag-pulling (stage 3) once all items are in the bag;
        # stages >= 2 need no placing-pixel computation.
        if self.task_stage == 2 and (len(self.items_in_bag_ids) == len(
                self.item_ids)):
            self.task_stage = 3
            return (True, None)
        elif self.task_stage == 3:
            return (True, None)
        # Hand-tuned, seems reasonable to use.
        buf = 0.025
        # Check object_mask for all IDs that correspond to the cable ring.
        cable_ids = np.array(self.cable_bead_ids)
        bead_mask = np.isin(object_mask, test_elements=cable_ids)
        # Threshold image to get 0s and 255s (255s=bead pixels) and find its
        # contours.
        bead_mask = np.uint8(bead_mask * 255)
        contours, _ = cv2.findContours(bead_mask, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
        # If only a few beads are visible (or no contours detected) exit early.
        frac_visible = len(visible_beads) / len(self.cable_bead_ids)
        if not contours or frac_visible <= BEAD_THRESH:
            return (False, None)
        # Combine contours via concatenation (shape=(N,1,2)) and get the convex
        # hull.
        allc = np.concatenate(list(contours))
        contours_list = [allc]
        hull_list = [cv2.convexHull(c) for c in contours_list]
        # Make an RGB image, then draw the filled-in area of all items in
        # `hull_list`.
        hull = np.zeros((bead_mask.shape[0], bead_mask.shape[1], 3), dtype=np.uint8)
        cv2.drawContours(hull, hull_list, -1, (255, 255, 255), thickness=-1)
        hull = cv2.cvtColor(hull, cv2.COLOR_BGR2GRAY)
        # Following task.random_pose, use object_size to find placing points.
        # Assumes object sizes are same. We add a buffer since convex hulls inflate
        # area.
        object_size = self.item_sizes[0]
        object_size = (object_size[0] + buf, object_size[1] + buf, object_size[2])
        max_size = np.sqrt(object_size[0]**2 + object_size[1]**2)
        erode_size = int(np.round(max_size / self.pixel_size))
        # Use cv2.erode to find place pixels on the hull, converted to grayscale.
        # Eroding by the item's diagonal footprint keeps placements that fit
        # entirely inside the hull.
        place_pixels = np.uint8(hull == 255)
        kernel = np.ones((erode_size, erode_size), np.uint8)
        place_pixels_eroded = cv2.erode(place_pixels, kernel)
        # If on stage 1, and there exists any possible placing point, go to stage 2.
        if self.task_stage == 1:
            if np.sum(place_pixels_eroded) > 0:
                self.task_stage = 2
        return (True, place_pixels_eroded)
class BagItemsHard(BagEnv):
    """The harder version of BagItemsEasy, where items are randomized.

    Two randomly-shaped boxes must be inserted into the bag, so the
    placing-pixel computation additionally has to exclude pixels already
    occupied by items that are inside the bag.
    """

    def __init__(self):
        super().__init__()
        self.max_steps = 11
        self.metric = 'bag-items'
        self._name = 'bag-items-hard'
        self._settle_secs = 5
        self._targets_visible = False
        # Could make this smaller compared to bag-alone-open?
        self.num_force_iters = 8
        # Extra items, in addition to the bag.
        self._nb_items = 2
        self._max_total_dims = 0.08
        # Env reference so we can call Task.get_object_masks(env)
        self.env = None
        # Pick-place primitive parameters, per task stage.
        # Exactly the same as BagItemsEasy.
        self.primitive_params = {
            1: {
                'speed': 0.003,
                'delta_z': -0.001,
                'prepick_z': 0.10,
                'postpick_z': 0.05,
                'preplace_z': 0.05,
                'pause_place': 0.5,
            },
            2: {
                'speed': 0.010,
                'delta_z': -0.001,
                'prepick_z': 0.10,  # hopefully makes it faster
                'postpick_z': 0.30,
                'preplace_z': 0.30,
                'pause_place': 0.0,
            },
            3: {
                'speed': 0.002,  # Will this slow bag movement?
                'delta_z': -0.001,
                'prepick_z': 0.08,  # hopefully makes it faster
                'postpick_z': 0.40,
                'preplace_z': 0.40,
                'pause_place': 2.0,
            },
        }
        self.task_stage = 1

    def reset(self, env):
        """Reset episode state, spawn the bag and random boxes, perturb the bag."""
        self.total_rewards = 0
        self.object_points = {}
        self.t = 0
        self.task_stage = 1
        self.cable_bead_ids = []
        self.def_ids = []
        self.env = env
        # New stuff versus bag-alone-open, to better track stats.
        self.item_ids = []
        self.items_in_bag_ids = []
        self.item_sizes = []
        # Add square target zone.
        self.add_zone(env)
        # Pose of the bag, sample mid-air to let it drop naturally.
        bpos, _ = self.random_pose(env, self._bag_size)
        self.base_pos = [bpos[0], bpos[1], self._drop_height]
        self.base_orn = self._sample_bag_orientation()
        # Add the bag, load info about top ring, and make a cable.
        self.bag_id = self.add_bag(env, self.base_pos, self.base_orn)
        self.understand_bag_top_ring(env, self.base_pos)
        self.add_cable_ring(env)
        # Add randomly-shaped boxes.
        for _ in range(self._nb_items):
            box_id, box_size = self.add_random_box(env, self._max_total_dims)
            self.item_ids.append(box_id)
            self.item_sizes.append(box_size)
        # Env must begin before we can apply forces to perturb the bag.
        env.start()
        self._apply_small_force(num_iters=self.num_force_iters)
        time.sleep(self._settle_secs)
        env.pause()

    # TODO(daniel) clean up method?
    def determine_task_stage(self,
                             colormap=None,
                             heightmap=None,
                             object_mask=None,
                             visible_beads=None):  # pylint: disable=g-doc-args
        """Get the task stage in a consistent manner among different policies.

        Similar (but not quite the same) as in bag-items-easy.

        Returns:
          Tuple, first item is False if the task is almost certainly going
          to fail, and the second provides valid placing pixels (locations)
          if it's relevant to the task stage.
        """
        # Advance to bag-pulling (stage 3) once all items are in the bag;
        # stages >= 2 need no placing-pixel computation.
        if self.task_stage == 2 and (len(self.items_in_bag_ids) == len(
                self.item_ids)):
            self.task_stage = 3
            return (True, None)
        elif self.task_stage == 3:
            return (True, None)
        # Hand-tuned, if too small the agent won't open the bag enough ...
        buf = 0.025
        # But we can decrease it if we're on task stage 2 and have to put in more
        # items.
        if self.task_stage == 2:
            buf = 0.015
        # Check object_mask for all IDs that correspond to the cable ring.
        cable_ids = np.array(self.cable_bead_ids)
        bead_mask = np.isin(object_mask, test_elements=cable_ids)
        # Threshold image to get 0s and 255s (255s=bead pixels) and find its
        # contours.
        bead_mask = np.uint8(bead_mask * 255)
        contours, _ = cv2.findContours(bead_mask, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
        # If only a few beads are visible (or no contours detected) exit early.
        frac_visible = len(visible_beads) / len(self.cable_bead_ids)
        if not contours or frac_visible <= BEAD_THRESH:
            return (False, None)
        # Combine contours via concatenation (shape=(N,1,2)) and get the convex
        # hull.
        allc = np.concatenate(list(contours))
        contours_list = [allc]
        hull_list = [cv2.convexHull(c) for c in contours_list]
        # Make an RGB image, then draw the filled-in area of all items in
        # `hull_list`.
        hull = np.zeros((bead_mask.shape[0], bead_mask.shape[1], 3), dtype=np.uint8)
        cv2.drawContours(hull, hull_list, -1, (255, 255, 255), thickness=-1)
        hull = cv2.cvtColor(hull, cv2.COLOR_BGR2GRAY)
        # Following task.random_pose, use object_size to find placing points.
        # Assumes object sizes are same. We add a buffer since convex hulls inflate
        # area.
        object_size = self.item_sizes[0]
        object_size = (object_size[0] + buf, object_size[1] + buf, object_size[2])
        max_size = np.sqrt(object_size[0]**2 + object_size[1]**2)
        erode_size = int(np.round(max_size / self.pixel_size))
        if self.task_stage == 2 and self.items_in_bag_ids:
            # For the hard bag-items version, get array of 0s = items in bag
            # (hence, invalid placing points) and 1s = all other points (could
            # be valid). The occupied region is the UNION (logical OR) of the
            # per-item masks; accumulating with AND from an all-ones mask would
            # intersect the masks instead, so with two or more inserted items
            # almost no pixel would be excluded.
            pixels_bag_items = np.zeros((hull.shape[0], hull.shape[1]), dtype=np.uint8)
            for item_id in self.items_in_bag_ids:
                item_pix = np.uint8(item_id == object_mask)
                pixels_bag_items = pixels_bag_items | item_pix  # Logical OR
            pixels_no_bag_items = np.uint8(1 - pixels_bag_items)
        else:
            # Make it all 1s so it's safe to apply logical AND with hull pixels.
            pixels_no_bag_items = np.ones((hull.shape[0], hull.shape[1]),
                                          dtype=np.uint8)
        # Combine the hull and pixel conditions.
        place_pixels_hull = np.uint8(hull == 255)
        place_pixels = place_pixels_hull & pixels_no_bag_items
        # Use cv2.erode to find valid place pixels.
        kernel = np.ones((erode_size, erode_size), np.uint8)
        place_pixels_eroded = cv2.erode(place_pixels, kernel)
        # If we're in task stage 2 and there's nothing, let's revert back to
        # original.
        if self.task_stage == 2 and np.sum(place_pixels_eroded) == 0:
            place_pixels_eroded = cv2.erode(place_pixels_hull, kernel)
        # Keep this debugging code to make it easier to inspect.
        if False:  # pylint: disable=using-constant-test
            heightmap = heightmap / np.max(heightmap) * 255
            place_rgb = cv2.cvtColor(hull.copy(), cv2.COLOR_GRAY2BGR)
            place_rgb[place_pixels_eroded > 0] = 127  # gray
            print(f'max_size: {max_size:0.3f}, erode_size: {erode_size}')
            print(f'number of pixels for placing: {np.sum(place_pixels)}')
            print(
                f'number of pixels for placing (after eroding): {np.sum(place_pixels_eroded)}'
            )
            nb = len([x for x in os.listdir('tmp/') if 'color' in x and '.png' in x])
            cv2.imwrite(f'tmp/img_{nb}_colormap.png',
                        cv2.cvtColor(colormap, cv2.COLOR_RGB2BGR).astype(np.uint8))
            cv2.imwrite(f'tmp/img_{nb}_heightmap.png', heightmap.astype(np.uint8))
            cv2.imwrite(f'tmp/img_{nb}_bead_mask.png', bead_mask)
            cv2.imwrite(f'tmp/img_{nb}_place_rgb.png',
                        cv2.cvtColor(place_rgb, cv2.COLOR_RGB2BGR))
            cv2.imwrite(f'tmp/img_{nb}_place_pixels_eroded.png',
                        np.uint8(place_pixels_eroded * 255))
            if self.task_stage == 2 and self.items_in_bag_ids:
                pixels_no_bag_items *= 255
                cv2.imwrite(f'tmp/img_{nb}_pixels_no_bag_items.png',
                            np.uint8(pixels_no_bag_items))
        # If on stage 1, and there exists any possible placing point, go to stage 2.
        if self.task_stage == 1:
            if np.sum(place_pixels_eroded) > 0:
                self.task_stage = 2
        return (True, place_pixels_eroded)
| [
"pybullet.loadSoftBody",
"numpy.isin",
"os.remove",
"pybullet.getMeshData",
"numpy.sum",
"pybullet.createVisualShape",
"pybullet.applyExternalForce",
"numpy.ones",
"numpy.random.randint",
"numpy.random.normal",
"cv2.erode",
"os.path.join",
"pybullet.createCollisionShape",
"numpy.round",
... | [((3505, 3525), 'os.remove', 'os.remove', (['zone_urdf'], {}), '(zone_urdf)\n', (3514, 3525), False, 'import os\n'), ((3770, 3942), 'pybullet.loadURDF', 'p.loadURDF', ([], {'fileName': '"""assets/block/block_for_anchors.urdf"""', 'basePosition': 'pose[0]', 'baseOrientation': 'pose[1]', 'globalScaling': 'global_scaling', 'useMaximalCoordinates': '(True)'}), "(fileName='assets/block/block_for_anchors.urdf', basePosition=\n pose[0], baseOrientation=pose[1], globalScaling=global_scaling,\n useMaximalCoordinates=True)\n", (3780, 3942), True, 'import pybullet as p\n'), ((4975, 5027), 'numpy.random.uniform', 'np.random.uniform', (['min_val', '(max_total_dims - min_val)'], {}), '(min_val, max_total_dims - min_val)\n', (4992, 5027), True, 'import numpy as np\n'), ((5358, 5377), 'os.remove', 'os.remove', (['box_urdf'], {}), '(box_urdf)\n', (5367, 5377), False, 'import os\n'), ((6243, 6303), 'pybullet.createCollisionShape', 'p.createCollisionShape', (['p.GEOM_BOX'], {'halfExtents': '([radius] * 3)'}), '(p.GEOM_BOX, halfExtents=[radius] * 3)\n', (6265, 6303), True, 'import pybullet as p\n'), ((6322, 6377), 'pybullet.createVisualShape', 'p.createVisualShape', (['p.GEOM_SPHERE'], {'radius': '(radius * 1.5)'}), '(p.GEOM_SPHERE, radius=radius * 1.5)\n', (6341, 6377), True, 'import pybullet as p\n'), ((6462, 6488), 'pybullet.getMeshData', 'p.getMeshData', (['self.bag_id'], {}), '(self.bag_id)\n', (6475, 6488), True, 'import pybullet as p\n'), ((8658, 9042), 'pybullet.loadSoftBody', 'p.loadSoftBody', ([], {'fileName': 'self._f_bag', 'basePosition': 'base_pos', 'baseOrientation': 'base_orn', 'collisionMargin': 'self._collision_margin', 'scale': 'self._bag_scale', 'mass': 'self._mass', 'useNeoHookean': '(0)', 'useBendingSprings': '(1)', 'useMassSpring': '(1)', 'springElasticStiffness': '(40)', 'springDampingStiffness': '(0.1)', 'springDampingAllDirections': '(0)', 'useSelfCollision': '(1)', 'frictionCoeff': '(1.0)', 'useFaceContact': '(1)'}), '(fileName=self._f_bag, 
basePosition=base_pos, baseOrientation\n =base_orn, collisionMargin=self._collision_margin, scale=self.\n _bag_scale, mass=self._mass, useNeoHookean=0, useBendingSprings=1,\n useMassSpring=1, springElasticStiffness=40, springDampingStiffness=0.1,\n springDampingAllDirections=0, useSelfCollision=1, frictionCoeff=1.0,\n useFaceContact=1)\n', (8672, 9042), True, 'import pybullet as p\n'), ((9703, 9732), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['orn'], {}), '(orn)\n', (9727, 9732), True, 'import pybullet as p\n'), ((11304, 11344), 'os.path.join', 'os.path.join', (['"""ravens"""', 'self._top_ring_f'], {}), "('ravens', self._top_ring_f)\n", (11316, 11344), False, 'import os\n'), ((12785, 12848), 'ravens.utils.fit_circle', 'U.fit_circle', (['self._top_ring_posi', 'self._bag_scale'], {'debug': '(False)'}), '(self._top_ring_posi, self._bag_scale, debug=False)\n', (12797, 12848), True, 'from ravens import utils as U\n'), ((13267, 13306), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-fx)', 'high': '(fx + 1)'}), '(low=-fx, high=fx + 1)\n', (13284, 13306), True, 'import numpy as np\n'), ((13316, 13355), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-fy)', 'high': '(fy + 1)'}), '(low=-fy, high=fy + 1)\n', (13333, 13355), True, 'import numpy as np\n'), ((15648, 15674), 'pybullet.removeBody', 'p.removeBody', (['self.zone_id'], {}), '(self.zone_id)\n', (15660, 15674), True, 'import pybullet as p\n'), ((15680, 15709), 'time.sleep', 'time.sleep', (['self._settle_secs'], {}), '(self._settle_secs)\n', (15690, 15709), False, 'import time\n'), ((18914, 18943), 'time.sleep', 'time.sleep', (['self._settle_secs'], {}), '(self._settle_secs)\n', (18924, 18943), False, 'import time\n'), ((20288, 20317), 'numpy.array', 'np.array', (['self.cable_bead_ids'], {}), '(self.cable_bead_ids)\n', (20296, 20317), True, 'import numpy as np\n'), ((20334, 20379), 'numpy.isin', 'np.isin', (['object_mask'], {'test_elements': 'cable_ids'}), '(object_mask, 
test_elements=cable_ids)\n', (20341, 20379), True, 'import numpy as np\n'), ((20486, 20511), 'numpy.uint8', 'np.uint8', (['(bead_mask * 255)'], {}), '(bead_mask * 255)\n', (20494, 20511), True, 'import numpy as np\n'), ((20530, 20597), 'cv2.findContours', 'cv2.findContours', (['bead_mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(bead_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (20546, 20597), False, 'import cv2\n'), ((21172, 21241), 'numpy.zeros', 'np.zeros', (['(bead_mask.shape[0], bead_mask.shape[1], 3)'], {'dtype': 'np.uint8'}), '((bead_mask.shape[0], bead_mask.shape[1], 3), dtype=np.uint8)\n', (21180, 21241), True, 'import numpy as np\n'), ((21246, 21314), 'cv2.drawContours', 'cv2.drawContours', (['hull', 'hull_list', '(-1)', '(255, 255, 255)'], {'thickness': '(-1)'}), '(hull, hull_list, -1, (255, 255, 255), thickness=-1)\n', (21262, 21314), False, 'import cv2\n'), ((21326, 21364), 'cv2.cvtColor', 'cv2.cvtColor', (['hull', 'cv2.COLOR_BGR2GRAY'], {}), '(hull, cv2.COLOR_BGR2GRAY)\n', (21338, 21364), False, 'import cv2\n'), ((21663, 21713), 'numpy.sqrt', 'np.sqrt', (['(object_size[0] ** 2 + object_size[1] ** 2)'], {}), '(object_size[0] ** 2 + object_size[1] ** 2)\n', (21670, 21713), True, 'import numpy as np\n'), ((21867, 21888), 'numpy.uint8', 'np.uint8', (['(hull == 255)'], {}), '(hull == 255)\n', (21875, 21888), True, 'import numpy as np\n'), ((21902, 21945), 'numpy.ones', 'np.ones', (['(erode_size, erode_size)', 'np.uint8'], {}), '((erode_size, erode_size), np.uint8)\n', (21909, 21945), True, 'import numpy as np\n'), ((21972, 22003), 'cv2.erode', 'cv2.erode', (['place_pixels', 'kernel'], {}), '(place_pixels, kernel)\n', (21981, 22003), False, 'import cv2\n'), ((24799, 24828), 'time.sleep', 'time.sleep', (['self._settle_secs'], {}), '(self._settle_secs)\n', (24809, 24828), False, 'import time\n'), ((25984, 26013), 'numpy.array', 'np.array', (['self.cable_bead_ids'], {}), '(self.cable_bead_ids)\n', (25992, 26013), True, 'import numpy as np\n'), 
((26030, 26075), 'numpy.isin', 'np.isin', (['object_mask'], {'test_elements': 'cable_ids'}), '(object_mask, test_elements=cable_ids)\n', (26037, 26075), True, 'import numpy as np\n'), ((26182, 26207), 'numpy.uint8', 'np.uint8', (['(bead_mask * 255)'], {}), '(bead_mask * 255)\n', (26190, 26207), True, 'import numpy as np\n'), ((26226, 26293), 'cv2.findContours', 'cv2.findContours', (['bead_mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(bead_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (26242, 26293), False, 'import cv2\n'), ((26868, 26937), 'numpy.zeros', 'np.zeros', (['(bead_mask.shape[0], bead_mask.shape[1], 3)'], {'dtype': 'np.uint8'}), '((bead_mask.shape[0], bead_mask.shape[1], 3), dtype=np.uint8)\n', (26876, 26937), True, 'import numpy as np\n'), ((26942, 27010), 'cv2.drawContours', 'cv2.drawContours', (['hull', 'hull_list', '(-1)', '(255, 255, 255)'], {'thickness': '(-1)'}), '(hull, hull_list, -1, (255, 255, 255), thickness=-1)\n', (26958, 27010), False, 'import cv2\n'), ((27022, 27060), 'cv2.cvtColor', 'cv2.cvtColor', (['hull', 'cv2.COLOR_BGR2GRAY'], {}), '(hull, cv2.COLOR_BGR2GRAY)\n', (27034, 27060), False, 'import cv2\n'), ((27359, 27409), 'numpy.sqrt', 'np.sqrt', (['(object_size[0] ** 2 + object_size[1] ** 2)'], {}), '(object_size[0] ** 2 + object_size[1] ** 2)\n', (27366, 27409), True, 'import numpy as np\n'), ((28257, 28278), 'numpy.uint8', 'np.uint8', (['(hull == 255)'], {}), '(hull == 255)\n', (28265, 28278), True, 'import numpy as np\n'), ((28400, 28443), 'numpy.ones', 'np.ones', (['(erode_size, erode_size)', 'np.uint8'], {}), '((erode_size, erode_size), np.uint8)\n', (28407, 28443), True, 'import numpy as np\n'), ((28470, 28501), 'cv2.erode', 'cv2.erode', (['place_pixels', 'kernel'], {}), '(place_pixels, kernel)\n', (28479, 28501), False, 'import cv2\n'), ((6645, 6674), 'numpy.float32', 'np.float32', (['verts_l[bag_vidx]'], {}), '(verts_l[bag_vidx])\n', (6655, 6674), True, 'import numpy as np\n'), ((6691, 6767), 
'pybullet.createMultiBody', 'p.createMultiBody', (['(0.01)', 'part_shape', 'part_visual'], {'basePosition': 'bead_position'}), '(0.01, part_shape, part_visual, basePosition=bead_position)\n', (6708, 6767), True, 'import pybullet as p\n'), ((6785, 6834), 'pybullet.changeVisualShape', 'p.changeVisualShape', (['part_id', '(-1)'], {'rgbaColor': 'color'}), '(part_id, -1, rgbaColor=color)\n', (6804, 6834), True, 'import pybullet as p\n'), ((7959, 8075), 'pybullet.createSoftBodyAnchor', 'p.createSoftBodyAnchor', ([], {'softBodyBodyUniqueId': 'self.bag_id', 'nodeIndex': 'bag_vidx', 'bodyUniqueId': 'part_id', 'linkIndex': '(-1)'}), '(softBodyBodyUniqueId=self.bag_id, nodeIndex=bag_vidx,\n bodyUniqueId=part_id, linkIndex=-1)\n', (7981, 8075), True, 'import pybullet as p\n'), ((13393, 13502), 'pybullet.applyExternalForce', 'p.applyExternalForce', (['bead_id'], {'linkIndex': '(-1)', 'forceObj': '[fx, fy, fz]', 'posObj': '[0, 0, 0]', 'flags': 'p.LINK_FRAME'}), '(bead_id, linkIndex=-1, forceObj=[fx, fy, fz], posObj=[\n 0, 0, 0], flags=p.LINK_FRAME)\n', (13413, 13502), True, 'import pybullet as p\n'), ((21029, 21046), 'cv2.convexHull', 'cv2.convexHull', (['c'], {}), '(c)\n', (21043, 21046), False, 'import cv2\n'), ((21731, 21767), 'numpy.round', 'np.round', (['(max_size / self.pixel_size)'], {}), '(max_size / self.pixel_size)\n', (21739, 21767), True, 'import numpy as np\n'), ((26725, 26742), 'cv2.convexHull', 'cv2.convexHull', (['c'], {}), '(c)\n', (26739, 26742), False, 'import cv2\n'), ((27427, 27463), 'numpy.round', 'np.round', (['(max_size / self.pixel_size)'], {}), '(max_size / self.pixel_size)\n', (27435, 27463), True, 'import numpy as np\n'), ((27701, 27756), 'numpy.ones', 'np.ones', (['(hull.shape[0], hull.shape[1])'], {'dtype': 'np.uint8'}), '((hull.shape[0], hull.shape[1]), dtype=np.uint8)\n', (27708, 27756), True, 'import numpy as np\n'), ((27951, 27981), 'numpy.uint8', 'np.uint8', (['(1 - pixels_bag_items)'], {}), '(1 - pixels_bag_items)\n', (27959, 27981), True, 
'import numpy as np\n'), ((28095, 28150), 'numpy.ones', 'np.ones', (['(hull.shape[0], hull.shape[1])'], {'dtype': 'np.uint8'}), '((hull.shape[0], hull.shape[1]), dtype=np.uint8)\n', (28102, 28150), True, 'import numpy as np\n'), ((28686, 28722), 'cv2.erode', 'cv2.erode', (['place_pixels_hull', 'kernel'], {}), '(place_pixels_hull, kernel)\n', (28695, 28722), False, 'import cv2\n'), ((29547, 29600), 'cv2.imwrite', 'cv2.imwrite', (['f"""tmp/img_{nb}_bead_mask.png"""', 'bead_mask'], {}), "(f'tmp/img_{nb}_bead_mask.png', bead_mask)\n", (29558, 29600), False, 'import cv2\n'), ((4052, 4073), 'numpy.float32', 'np.float32', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (4062, 4073), True, 'import numpy as np\n'), ((5447, 5468), 'numpy.float32', 'np.float32', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (5457, 5468), True, 'import numpy as np\n'), ((6936, 7180), 'pybullet.createConstraint', 'p.createConstraint', ([], {'parentBodyUniqueId': 'beads[-1]', 'parentLinkIndex': '(-1)', 'childBodyUniqueId': 'part_id', 'childLinkIndex': '(-1)', 'jointType': 'p.JOINT_POINT2POINT', 'jointAxis': '(0, 0, 0)', 'parentFramePosition': 'parent_frame', 'childFramePosition': '(0, 0, 0)'}), '(parentBodyUniqueId=beads[-1], parentLinkIndex=-1,\n childBodyUniqueId=part_id, childLinkIndex=-1, jointType=p.\n JOINT_POINT2POINT, jointAxis=(0, 0, 0), parentFramePosition=\n parent_frame, childFramePosition=(0, 0, 0))\n', (6954, 7180), True, 'import pybullet as p\n'), ((7272, 7319), 'pybullet.changeConstraint', 'p.changeConstraint', (['constraint_id'], {'maxForce': '(100)'}), '(constraint_id, maxForce=100)\n', (7290, 7319), True, 'import pybullet as p\n'), ((7498, 7741), 'pybullet.createConstraint', 'p.createConstraint', ([], {'parentBodyUniqueId': 'part_id', 'parentLinkIndex': '(-1)', 'childBodyUniqueId': 'beads[0]', 'childLinkIndex': '(-1)', 'jointType': 'p.JOINT_POINT2POINT', 'jointAxis': '(0, 0, 0)', 'parentFramePosition': 'parent_frame', 'childFramePosition': '(0, 0, 0)'}), '(parentBodyUniqueId=part_id, 
parentLinkIndex=-1,\n childBodyUniqueId=beads[0], childLinkIndex=-1, jointType=p.\n JOINT_POINT2POINT, jointAxis=(0, 0, 0), parentFramePosition=\n parent_frame, childFramePosition=(0, 0, 0))\n', (7516, 7741), True, 'import pybullet as p\n'), ((7833, 7880), 'pybullet.changeConstraint', 'p.changeConstraint', (['constraint_id'], {'maxForce': '(100)'}), '(constraint_id, maxForce=100)\n', (7851, 7880), True, 'import pybullet as p\n'), ((9218, 9239), 'numpy.float32', 'np.float32', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (9228, 9239), True, 'import numpy as np\n'), ((9492, 9536), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'self._scale'}), '(loc=0.0, scale=self._scale)\n', (9508, 9536), True, 'import numpy as np\n'), ((9566, 9610), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'self._scale'}), '(loc=0.0, scale=self._scale)\n', (9582, 9610), True, 'import numpy as np\n'), ((9640, 9684), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'self._scale'}), '(loc=0.0, scale=self._scale)\n', (9656, 9684), True, 'import numpy as np\n'), ((12611, 12626), 'os.remove', 'os.remove', (['urdf'], {}), '(urdf)\n', (12620, 12626), False, 'import os\n'), ((22124, 22151), 'numpy.sum', 'np.sum', (['place_pixels_eroded'], {}), '(place_pixels_eroded)\n', (22130, 22151), True, 'import numpy as np\n'), ((27820, 27852), 'numpy.uint8', 'np.uint8', (['(item_id == object_mask)'], {}), '(item_id == object_mask)\n', (27828, 27852), True, 'import numpy as np\n'), ((28624, 28651), 'numpy.sum', 'np.sum', (['place_pixels_eroded'], {}), '(place_pixels_eroded)\n', (28630, 28651), True, 'import numpy as np\n'), ((29668, 29710), 'cv2.cvtColor', 'cv2.cvtColor', (['place_rgb', 'cv2.COLOR_RGB2BGR'], {}), '(place_rgb, cv2.COLOR_RGB2BGR)\n', (29680, 29710), False, 'import cv2\n'), ((29789, 29824), 'numpy.uint8', 'np.uint8', (['(place_pixels_eroded * 255)'], {}), '(place_pixels_eroded * 255)\n', (29797, 29824), True, 'import numpy as 
np\n'), ((30150, 30177), 'numpy.sum', 'np.sum', (['place_pixels_eroded'], {}), '(place_pixels_eroded)\n', (30156, 30177), True, 'import numpy as np\n'), ((8365, 8386), 'numpy.float32', 'np.float32', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (8375, 8386), True, 'import numpy as np\n'), ((28868, 28885), 'numpy.max', 'np.max', (['heightmap'], {}), '(heightmap)\n', (28874, 28885), True, 'import numpy as np\n'), ((29999, 30028), 'numpy.uint8', 'np.uint8', (['pixels_no_bag_items'], {}), '(pixels_no_bag_items)\n', (30007, 30028), True, 'import numpy as np\n'), ((29124, 29144), 'numpy.sum', 'np.sum', (['place_pixels'], {}), '(place_pixels)\n', (29130, 29144), True, 'import numpy as np\n'), ((29220, 29247), 'numpy.sum', 'np.sum', (['place_pixels_eroded'], {}), '(place_pixels_eroded)\n', (29226, 29247), True, 'import numpy as np\n'), ((29285, 29303), 'os.listdir', 'os.listdir', (['"""tmp/"""'], {}), "('tmp/')\n", (29295, 29303), False, 'import os\n'), ((29404, 29445), 'cv2.cvtColor', 'cv2.cvtColor', (['colormap', 'cv2.COLOR_RGB2BGR'], {}), '(colormap, cv2.COLOR_RGB2BGR)\n', (29416, 29445), False, 'import cv2\n')] |
"""
Created on Wed Mar 27 16:48:26 2019
@author: bhargav
"""
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import cv2
import os
import sys
import numpy as np
class Classifier:
    """Three-way document classifier (AADHAR / PAN / OTHERS).

    Wraps a Keras model plus the pre-processing needed to reject blank or
    near-uniform ("faulty") scans before classification.
    """

    def __init__(self):
        self.model = None                # Keras model, set by init_model().
        self.img = None                  # Last image loaded.
        self.x = 0                       # Pre-processed network input.
        self.class_probabilities = 0     # Raw model output probabilities.
        self.pred_class = ""             # Last predicted class name.
        self.MODEL_LOADED = False        # True once init_model() succeeds.
        self.FAULTY_IMAGE = True         # True while no valid image was seen.

    def init_model(self, weights_file='your-model-file.hdf5'):
        """Load the Keras model from `weights_file`.

        Returns 0 on success; exits the process if loading fails.
        """
        try:
            self.model = load_model(weights_file)
        except Exception:
            # Same exit behavior as before, but without a bare `except:`
            # (which would also swallow KeyboardInterrupt/SystemExit).
            sys.exit("Unable to find weights file.")
        print("Model loaded.")
        self.MODEL_LOADED = True
        return 0

    def check_faulty_image(self, image):
        """Load `image` and flag it as faulty if it is blank or uniform.

        Sets `self.FAULTY_IMAGE`; exits the process if the file cannot be
        read at the given path.
        """
        try:
            # Probe the path first so a missing file raises here instead of
            # cv2 silently returning None. `with` closes the handle (the
            # original leaked it).
            with open(image, 'r'):
                pass
            self.img = cv2.imread(image, 0)
            if cv2.countNonZero(self.img) == 0:
                # Completely black image.
                self.FAULTY_IMAGE = True
            else:
                self.img = cv2.resize(self.img, (150, 150))
                # A near-zero standard deviation means the image is almost
                # uniform (e.g. an empty scan). Compute it once on the whole
                # image -- the original recomputed it per pixel in a
                # redundant double loop.
                sdev = np.std(self.img)
                self.FAULTY_IMAGE = bool(sdev < 15.0)
        except Exception:
            error = "File: \'" + image + "\' not found at specified path."
            sys.exit(error)

    def get_prediction(self, image, confidence=5e-01):
        """Classify `image` and return 'AADHAR', 'PAN' or 'OTHERS'.

        Faulty images are binned into 'OTHERS'. Exits the process if the
        model has not been loaded via init_model().
        """
        if not self.MODEL_LOADED:
            sys.exit("Weights have not been loaded.")
        self.check_faulty_image(image)
        if self.FAULTY_IMAGE:
            # Faulty images go to the catch-all class.
            self.pred_class = 'others'
            return self.pred_class.upper()
        self.img = load_img(image)
        self.x = img_to_array(self.img)
        self.x = self.x / 255
        self.x = cv2.resize(self.x, (150, 150))
        self.x = self.x.reshape((1,) + self.x.shape)
        self.class_probabilities = self.model.predict_proba(self.x)
        # Branch structure implies output index 1 = 'others', and between
        # indices 0 and 2 the larger decides 'aadhar' vs 'pan'.
        if self.class_probabilities[0][1] > confidence:
            self.pred_class = 'others'
        elif self.class_probabilities[0][0] > self.class_probabilities[0][2]:
            self.pred_class = 'aadhar'
        else:
            self.pred_class = 'pan'
        return self.pred_class.upper()
| [
"keras.models.load_model",
"cv2.countNonZero",
"numpy.std",
"cv2.imread",
"keras.preprocessing.image.load_img",
"keras.preprocessing.image.img_to_array",
"sys.exit",
"cv2.resize"
] | [((624, 648), 'keras.models.load_model', 'load_model', (['weights_file'], {}), '(weights_file)\n', (634, 648), False, 'from keras.models import load_model\n'), ((1041, 1061), 'cv2.imread', 'cv2.imread', (['image', '(0)'], {}), '(image, 0)\n', (1051, 1061), False, 'import cv2\n'), ((2023, 2064), 'sys.exit', 'sys.exit', (['"""Weights have not been loaded."""'], {}), "('Weights have not been loaded.')\n", (2031, 2064), False, 'import sys\n'), ((768, 808), 'sys.exit', 'sys.exit', (['"""Unable to find weights file."""'], {}), "('Unable to find weights file.')\n", (776, 808), False, 'import sys\n'), ((1077, 1103), 'cv2.countNonZero', 'cv2.countNonZero', (['self.img'], {}), '(self.img)\n', (1093, 1103), False, 'import cv2\n'), ((1192, 1224), 'cv2.resize', 'cv2.resize', (['self.img', '(150, 150)'], {}), '(self.img, (150, 150))\n', (1202, 1224), False, 'import cv2\n'), ((1409, 1425), 'numpy.std', 'np.std', (['self.img'], {}), '(self.img)\n', (1415, 1425), True, 'import numpy as np\n'), ((1825, 1840), 'sys.exit', 'sys.exit', (['error'], {}), '(error)\n', (1833, 1840), False, 'import sys\n'), ((2392, 2407), 'keras.preprocessing.image.load_img', 'load_img', (['image'], {}), '(image)\n', (2400, 2407), False, 'from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n'), ((2431, 2453), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['self.img'], {}), '(self.img)\n', (2443, 2453), False, 'from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n'), ((2511, 2541), 'cv2.resize', 'cv2.resize', (['self.x', '(150, 150)'], {}), '(self.x, (150, 150))\n', (2521, 2541), False, 'import cv2\n')] |
__author__ = "<NAME>"
"""
Contains fuzzy values and functions to calculate fuzzy variables
"""
import numpy as np
size = {"1U": 0.,
"1.5U": 0.167,
"2U": 0.33,
"3U": 0.5,
"4U": 0.667,
"5U": 0.834,
"6U": 1.}
mass_imp = {"Very Unimportant": 0.,
"Unimportant": 0.167,
"Less Than Average": 0.33,
"Average": 0.5,
"More Than Average": 0.667,
"Important": 0.834,
"Very Important": 1.}
size_imp = {"Very Unimportant": 0.,
"Unimportant": 0.167,
"Less Than Average": 0.33,
"Average": 0.5,
"More Than Average": 0.667,
"Important": 0.834,
"Very Important": 1.}
down_sp = {"Extremely Slow": 0.,
"Very Slow": 0.167,
"Slow": 0.33,
"Average": 0.5,
"Fast": 0.667,
"Very Fast": 0.834,
"Extremely Fast": 1.0}
up_sp = {"Extremely Slow": 0.,
"Very Slow": 0.1,
"Slow": 0.3,
"Average": 0.5,
"Fast": 0.7,
"Very Fast": 0.9,
"Extremely Fast": 1.0}
att_ctrl = {"Extremely Lenient": 0.,
"Very Lenient": 0.167,
"Lenient": 0.33,
"Average": 0.5,
"Precise": 0.667,
"Very Precise": 0.834,
"Extremely Precise": 1.}
alt_req = {"LEO": 0.,
"Sun-Sync": 0.333,
"Semi-Sync": 0.667,
"Geo-Sync": 1.}
remote = {"No": 0.,
"If Possible": 0.5,
"Yes": 1.}
rs_wave = {"Ions": 0.,
"Electrons": 0.167,
"Ultraviolet": 0.33,
"Visual": 0.5,
# "Visual + Near IR": 0.4, # Removed due to lack of need
# "Near Infrared": 0.5, # Removed due to lack of need
"Infrared": 0.667,
# "Far Infrared": 0.7, # Removed due to lack of need
"Thermal Infrared": 0.9,
# "Radar": 0.9, # Removed because unable to find OTS components to do it
"Radio": 1.}
rs_accuracy = {"No Detail": 0,
"Vague": 0.167,
"Not Detailed": 0.333,
"Average": 0.5,
"Detailed": 0.667,
"Very Detailed": 0.834,
"Extremely Detailed": 1.}
generic_vals = {"VL": 0., "L": 0.167, "ML": 0.333, "M": 0.5, "MH": 0.667, "H": 0.834, "VH": 1.}
def create_value_array(size_lang, size_imp_lang, mass_imp_lang, down_lang, up_lang, alt_lang, att_lang, remote_lang,
rs_wave_lang, rs_acc_lang):
"""
This function takes the natural language values and converts them into the appropriate fuzzy logic value
:param size_lang: Takes a value natural language input for the size dict.
:param size_imp_lang: Natural language input for the size importance dict.
:param mass_imp_lang: Natural language input for the mass importance dict.
:param down_lang: Natural language input for the down bandwidth dict.
:param up_lang: Natural language input for the uplink bandwidth dict.
:param alt_lang: Natural language input for the altitude requirement dict.
:param att_lang: Natural language input for the attitude control performance dict.
:param remote_lang: Natural language input for the remote sensing requirement dict.
:param rs_wave_lang: Natural language input for the remote sensing wavelength dict.
:param rs_acc_lang: Natural language input for the remote sensing accuracy dict.
:return: Single column 2D numpy array with numerical fuzzy logic values.
"""
size_val = size[size_lang]
size_imp_val = size_imp[size_imp_lang]
mass_imp_val = mass_imp[mass_imp_lang]
down_val = down_sp[down_lang]
up_val = up_sp[up_lang]
alt_val = alt_req[alt_lang]
att_val = att_ctrl[att_lang]
remote_val = remote[remote_lang]
rs_wave_val = rs_wave[rs_wave_lang]
rs_acc_val = rs_accuracy[rs_acc_lang]
return np.array([[size_val, size_imp_val, mass_imp_val, down_val, up_val, alt_val, att_val, remote_val,
rs_wave_val, rs_acc_val]]).T | [
"numpy.array"
] | [((3939, 4066), 'numpy.array', 'np.array', (['[[size_val, size_imp_val, mass_imp_val, down_val, up_val, alt_val, att_val,\n remote_val, rs_wave_val, rs_acc_val]]'], {}), '([[size_val, size_imp_val, mass_imp_val, down_val, up_val, alt_val,\n att_val, remote_val, rs_wave_val, rs_acc_val]])\n', (3947, 4066), True, 'import numpy as np\n')] |
import json
import numpy as np
import os
import cv2
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate overlap between two set of bboxes.
If ``is_aligned`` is ``False``, then calculate the ious between each bbox
of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (m, 4) in <x1, y1, x2, y2> format.
bboxes2 (Tensor): shape (n, 4) in <x1, y1, x2, y2> format.
If is_aligned is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union) or iof (intersection over
foreground).
Returns:
ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1)
"""
assert mode in ['iou', 'iof']
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
if is_aligned:
assert rows == cols
if rows * cols == 0:
return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)
if is_aligned:
lt = np.maximum(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2]
rb = np.minimum(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2]
wh = (rb - lt + 1).clip(min=0) # [rows, 2]
overlap = wh[:, 0] * wh[:, 1]
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
bboxes1[:, 3] - bboxes1[:, 1] + 1)
if mode == 'iou':
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
bboxes2[:, 3] - bboxes2[:, 1] + 1)
ious = overlap / (area1 + area2 - overlap)
else:
ious = overlap / area1
else:
lt = np.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2]
rb = np.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2]
wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2]
overlap = wh[:, :, 0] * wh[:, :, 1]
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
bboxes1[:, 3] - bboxes1[:, 1] + 1)
if mode == 'iou':
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
bboxes2[:, 3] - bboxes2[:, 1] + 1)
ious = overlap / (area1[:, None] + area2 - overlap)
else:
ious = overlap / (area1[:, None])
return ious
def match_eval(video_list,
match_result,
video_ann_dir,
img_ann_dir,
img_dir):
with open(match_result, 'r') as f:
match_result = json.load(f)
TP = np.zeros(3)
FP = np.zeros(3)
FN = np.zeros(3)
for video in video_list:
video_name = video.split('.')[0]
pred_result = match_result.get(video_name, {})
with open(os.path.join(video_ann_dir, '{}.json'.format(video_name)), 'r') as f:
gt = json.load(f)
if pred_result == {}:
# no prediction for this video
frames = gt['frames']
gt_frame = 0
gt_matches = 0
for frame in frames:
match_count = 0
annotations = frame['annotations']
frame_index = frame['frame_index']
for ann in annotations:
if ann['instance_id'] != 0:
match_count += 1
if match_count >= gt_matches:
gt_matches = match_count
gt_frame = frame_index
if gt_matches > 0:
FN += np.full(3, 1)
else:
FN += np.full(3, 0)
else:
item_id = pred_result['item_id']
frame_index = pred_result['frame_index']
results = pred_result['result']
# check if item id is in the video gt annotations
flag_s1 = 0
for frame in gt['frames']:
for ann in frame['annotations']:
video_item_id = str(ann['instance_id'])[:6]
if video_item_id != '0':
video_item_id = '0' + video_item_id[1:]
if item_id == video_item_id:
flag_s1 = 1
break
if flag_s1:
TP += np.array([1, 0, 0])
else:
FP += np.array([1, 0, 0])
FN += np.array([1, 0, 0])
# check if item id is in the video frame
flag_s2 = 0
for frame in gt['frames']:
if frame['frame_index'] == frame_index:
for ann in frame['annotations']:
video_item_id = str(ann['instance_id'])[:6]
if video_item_id != '0':
video_item_id = '0' + video_item_id[1:]
if item_id == video_item_id:
flag_s2 = 1
break
if flag_s2:
TP += np.array([0, 1, 0])
else:
FP += np.array([0, 1, 0])
FN += np.array([0, 1, 0])
# check if bbox is matched
flag_s3 = 0
if not flag_s2:
flag_s3 = 0
else:
for result in results:
img_name = result['img_name']
item_box = result['item_box']
img_ann_path = os.path.join(img_ann_dir, item_id, img_name + '.json')
img_path = os.path.join(img_dir, item_id, img_name + '.jpg')
if os.path.exists(img_ann_path):
with open(img_ann_path, 'r') as f:
img_annotations = json.load(f)
else:
raise ValueError
if img_annotations['annotations'] == []:
# if there are no annotations, set image bbox to the whole image
img = cv2.imread(img_path)
h, w, c = img.shape
box = np.array([0, 0, w, h])
if bbox_overlaps(np.array([box]), np.array([item_box]), is_aligned=True) > 0.5:
flag_s3 = 1
for ann_ in img_annotations['annotations']:
box = ann_['box']
if bbox_overlaps(np.array([box]), np.array([item_box]), is_aligned=True) > 0.5:
flag_s3 = 1
if flag_s3:
TP += np.array([0, 0, 1])
else:
FP += np.array([0, 0, 1])
FN += np.array([0, 0, 1])
P = TP / (TP + FP)
R = TP / (FN + TP)
S = 2 * P * R / (P + R)
score_weigths = [0.2, 0.6, 0.2]
final_score = sum(S * score_weigths)
return S, final_score
if __name__ == '__main__':
video_list = os.listdir(r'/data/sdv2/taobao/data/val_demo/video')
match_result = r'/data/sdv2/taobao/mmdet_taobao/default_tools/test/result.json'
video_ann_dir = r'/data/sdv2/taobao/data/val_demo/video_annotation'
img_ann_dir = r'/data/sdv2/taobao/data/val_demo/image_annotation'
img = r'/data/sdv2/taobao/data/val_demo/image'
print(match_eval(video_list, match_result, video_ann_dir, img_ann_dir, img))
| [
"numpy.full",
"numpy.minimum",
"numpy.maximum",
"json.load",
"numpy.zeros",
"os.path.exists",
"cv2.imread",
"numpy.max",
"numpy.min",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((2496, 2507), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2504, 2507), True, 'import numpy as np\n'), ((2517, 2528), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2525, 2528), True, 'import numpy as np\n'), ((2538, 2549), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2546, 2549), True, 'import numpy as np\n'), ((6773, 6824), 'os.listdir', 'os.listdir', (['"""/data/sdv2/taobao/data/val_demo/video"""'], {}), "('/data/sdv2/taobao/data/val_demo/video')\n", (6783, 6824), False, 'import os\n'), ((1043, 1085), 'numpy.maximum', 'np.maximum', (['bboxes1[:, :2]', 'bboxes2[:, :2]'], {}), '(bboxes1[:, :2], bboxes2[:, :2])\n', (1053, 1085), True, 'import numpy as np\n'), ((1112, 1154), 'numpy.minimum', 'np.minimum', (['bboxes1[:, 2:]', 'bboxes2[:, 2:]'], {}), '(bboxes1[:, 2:], bboxes2[:, 2:])\n', (1122, 1154), True, 'import numpy as np\n'), ((1635, 1679), 'numpy.max', 'np.max', (['bboxes1[:, None, :2]', 'bboxes2[:, :2]'], {}), '(bboxes1[:, None, :2], bboxes2[:, :2])\n', (1641, 1679), True, 'import numpy as np\n'), ((1712, 1756), 'numpy.min', 'np.min', (['bboxes1[:, None, 2:]', 'bboxes2[:, 2:]'], {}), '(bboxes1[:, None, 2:], bboxes2[:, 2:])\n', (1718, 1756), True, 'import numpy as np\n'), ((2473, 2485), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2482, 2485), False, 'import json\n'), ((2780, 2792), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2789, 2792), False, 'import json\n'), ((3437, 3450), 'numpy.full', 'np.full', (['(3)', '(1)'], {}), '(3, 1)\n', (3444, 3450), True, 'import numpy as np\n'), ((3491, 3504), 'numpy.full', 'np.full', (['(3)', '(0)'], {}), '(3, 0)\n', (3498, 3504), True, 'import numpy as np\n'), ((4170, 4189), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (4178, 4189), True, 'import numpy as np\n'), ((4230, 4249), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (4238, 4249), True, 'import numpy as np\n'), ((4272, 4291), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (4280, 
4291), True, 'import numpy as np\n'), ((4876, 4895), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (4884, 4895), True, 'import numpy as np\n'), ((4936, 4955), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (4944, 4955), True, 'import numpy as np\n'), ((4978, 4997), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (4986, 4997), True, 'import numpy as np\n'), ((6425, 6444), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6433, 6444), True, 'import numpy as np\n'), ((6485, 6504), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6493, 6504), True, 'import numpy as np\n'), ((6527, 6546), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6535, 6546), True, 'import numpy as np\n'), ((5310, 5364), 'os.path.join', 'os.path.join', (['img_ann_dir', 'item_id', "(img_name + '.json')"], {}), "(img_ann_dir, item_id, img_name + '.json')\n", (5322, 5364), False, 'import os\n'), ((5396, 5445), 'os.path.join', 'os.path.join', (['img_dir', 'item_id', "(img_name + '.jpg')"], {}), "(img_dir, item_id, img_name + '.jpg')\n", (5408, 5445), False, 'import os\n'), ((5469, 5497), 'os.path.exists', 'os.path.exists', (['img_ann_path'], {}), '(img_ann_path)\n', (5483, 5497), False, 'import os\n'), ((5865, 5885), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (5875, 5885), False, 'import cv2\n'), ((5960, 5982), 'numpy.array', 'np.array', (['[0, 0, w, h]'], {}), '([0, 0, w, h])\n', (5968, 5982), True, 'import numpy as np\n'), ((5604, 5616), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5613, 5616), False, 'import json\n'), ((6024, 6039), 'numpy.array', 'np.array', (['[box]'], {}), '([box])\n', (6032, 6039), True, 'import numpy as np\n'), ((6041, 6061), 'numpy.array', 'np.array', (['[item_box]'], {}), '([item_box])\n', (6049, 6061), True, 'import numpy as np\n'), ((6275, 6290), 'numpy.array', 'np.array', (['[box]'], {}), '([box])\n', (6283, 6290), True, 'import numpy as 
np\n'), ((6292, 6312), 'numpy.array', 'np.array', (['[item_box]'], {}), '([item_box])\n', (6300, 6312), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Refer to the work of OpenAI and DeepMind.
Algorithm:
OpenAI's Proximal Policy Optimization (PPO). [https://arxiv.org/abs/1707.06347]
Emergence of Locomotion Behaviours in Rich Environments (Google Deepmind): [https://arxiv.org/abs/1707.02286]
Dependencies:
tensorflow
gym
gym_OptClang
Thanks to MorvanZhou's implementation: https://morvanzhou.github.io/tutorials
The basic structure is derived from him.
However, the internal structure is tuned for gym_OptClang.
"""
import tensorflow as tf
import numpy as np
import matplotlib
# do not use x-server
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import gym, gym_OptClang
import random, threading, queue, operator, os, sys, re
from operator import itemgetter
from random import shuffle
import random
from colorama import Fore, Style
from datetime import datetime
from sklearn.preprocessing import StandardScaler
import time
import io
from time import gmtime, strftime
import argparse
import pytz
import Helpers as hp
class PPO(object):
def __init__(self, env, ckptLocBase, ckptName, isTraining, EP_MAX, GAMMA, A_LR, C_LR, ClippingEpsilon, UpdateDepth, L1Neurons, L2Neurons, LR_DECAY=1, LR_DECAY_FREQ=1000,SharedStorage=None):
tf.reset_default_graph()
# if SharedStorage is None, it must be in inference mode without "update()"
self.SharedStorage = SharedStorage
self.EP_MAX = EP_MAX
self.GAMMA = GAMMA
self.A_LR = A_LR
self.C_LR = C_LR
self.LR_DECAY = LR_DECAY
self.LR_DECAY_FREQ = LR_DECAY_FREQ
self.ClippingEpsilon = ClippingEpsilon
self.UpdateDepth = UpdateDepth
self.L1Neurons = L1Neurons
self.L2Neurons = L2Neurons
self.S_DIM = len(env.observation_space.low)
self.A_DIM = env.action_space.n
self.A_SPACE = 1
self.sess = tf.Session(graph=tf.get_default_graph())
self.tfs = tf.placeholder(tf.float32, [None, self.S_DIM], 'state')
self.ckptLocBase = ckptLocBase
self.UpdateStepFile = self.ckptLocBase + '/UpdateStep'
self.ActorLrFile = self.ckptLocBase + '/ActorLrFile'
self.CriticLrFile = self.ckptLocBase + '/CrticLrFile'
hp.ColorPrint(Fore.LIGHTCYAN_EX, "Log dir={}".format(self.ckptLocBase))
self.ckptLoc = ckptLocBase + '/' + ckptName
self.UpdateStep = 0
if not os.path.exists(self.ckptLocBase):
os.makedirs(self.ckptLocBase)
if os.path.exists(self.UpdateStepFile):
with open(self.UpdateStepFile, 'r') as f:
self.UpdateStep = int(f.read())
hp.ColorPrint(Fore.GREEN, "Restored episode step={}".format(self.UpdateStep))
if os.path.exists(self.ActorLrFile):
with open(self.ActorLrFile, 'r') as f:
self.A_LR = float(f.read())
hp.ColorPrint(Fore.GREEN, "Restored A_LR={}".format(self.A_LR))
else:
with open(self.ActorLrFile, 'w') as f:
f.write(str(self.A_LR))
if os.path.exists(self.CriticLrFile):
with open(self.CriticLrFile, 'r') as f:
self.C_LR = float(f.read())
hp.ColorPrint(Fore.GREEN, "Restored C_LR={}".format(self.C_LR))
else:
with open(self.CriticLrFile, 'w') as f:
f.write(str(self.C_LR))
if isTraining == 'N':
self.isTraining = False
hp.ColorPrint(Fore.LIGHTCYAN_EX, "This is inference procedure")
else:
self.isTraining = True
hp.ColorPrint(Fore.LIGHTCYAN_EX, "This is training procedure with UpdateStep={}".format(self.UpdateStep))
# critic
with tf.variable_scope('Critic'):
with tf.variable_scope('Fully_Connected'):
l1 = self.add_layer(self.tfs, self.L1Neurons, activation_function=tf.nn.relu, norm=True)
if self.L2Neurons != 0:
l2 = self.add_layer(l1, self.L2Neurons, activation_function=tf.nn.relu, norm=True)
with tf.variable_scope('Value'):
if self.L2Neurons != 0:
self.v = tf.layers.dense(l2, 1)
else:
self.v = tf.layers.dense(l1, 1)
with tf.variable_scope('Loss'):
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.CriticLossSummary = tf.summary.scalar('CriticLoss', self.closs)
with tf.variable_scope('CriticTrain'):
self.ctrain_op = tf.train.AdamOptimizer(self.C_LR).minimize(self.closs)
# pi: act_probs
pi, pi_params = self._build_anet('Actor', trainable=True)
oldpi, oldpi_params = self._build_anet('oldActor', trainable=False)
# operation of choosing action
with tf.variable_scope('ActionsExp.'):
self.acts_expect = tf.squeeze(pi, axis=0)
with tf.variable_scope('Update'):
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
with tf.variable_scope('Actor/PPO-Loss'):
self.tfa = tf.placeholder(tf.int32, [None, 1], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
# probabilities of actions which agent took with policy
# depth=pi.shape[0] <-- each column is viewed as a vector
# depth=pi.shape[1] <-- each row is viewed as a vector <-- we use this
act_probs = pi * tf.one_hot(indices=self.tfa, depth=pi.shape[1])
act_probs = tf.reduce_sum(act_probs, axis=1)
# probabilities of actions which old agent took with policy
act_probs_old = oldpi * tf.one_hot(indices=self.tfa, depth=oldpi.shape[1])
act_probs_old = tf.reduce_sum(act_probs_old, axis=1)
# add a small number to avoid NaN
#ratio = tf.divide(act_probs + 1e-10, act_probs_old + 1e-10)
ratio = tf.exp(tf.log(act_probs + 1e-10) - tf.log(act_probs_old + 1e-10))
surr = tf.multiply(ratio, self.tfadv)
clip = tf.clip_by_value(ratio, 1.-self.ClippingEpsilon, 1.+self.ClippingEpsilon)*self.tfadv
# clipped surrogate objective
self.aloss = -tf.reduce_mean(tf.minimum(surr, clip))
# visualizing
self.ppoRatioSummary = tf.summary.tensor_summary('ppoRatio', ratio)
self.ActorLossSummary = tf.summary.scalar('ActorLoss', self.aloss)
with tf.variable_scope('ActorTrain'):
self.atrain_op = tf.train.AdamOptimizer(self.A_LR).minimize(self.aloss)
with tf.variable_scope('Summary'):
self.OverallSpeedup = tf.placeholder(tf.float32, name='OverallSpeedup')
self.EpisodeReward = tf.placeholder(tf.float32, name='EpisodeReward')
self.one = tf.constant(1.0, dtype=tf.float32)
self.RecordSpeedup_op = tf.multiply(self.OverallSpeedup, self.one)
self.SpeedupSummary = tf.summary.scalar('OverallSpeedup', self.RecordSpeedup_op)
self.RecordEpiReward_op = tf.multiply(self.EpisodeReward, self.one)
self.EpiRewardSummary = tf.summary.scalar('EpisodeReward', self.RecordEpiReward_op)
self.writer = tf.summary.FileWriter(self.ckptLocBase, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
'''
If the ckpt exist, restore it.
'''
if tf.train.checkpoint_exists(self.ckptLoc):
#self.saver.restore(self.sess, self.ckptLoc)
self.saver.restore(self.sess, tf.train.latest_checkpoint(self.ckptLocBase))
hp.ColorPrint(Fore.LIGHTGREEN_EX, 'Restore the previous model.')
elif self.isTraining == False:
hp.ColorPrint(Fore.LIGHTRED_EX, "Missing trained model to inference, exit.")
sys.exit(1)
def save(self):
"""
Save model
"""
self.saver.save(self.sess, self.ckptLoc)
def update(self):
while not self.SharedStorage['Coordinator'].should_stop():
if self.SharedStorage['Counters']['ep'] < self.EP_MAX:
# blocking wait until get batch of data
self.SharedStorage['Events']['update'].wait()
# save the model
if self.UpdateStep % 50 == 0:
self.save()
hp.ColorPrint(Fore.LIGHTRED_EX, "Save for every 50 updates.")
else:
hp.ColorPrint(Fore.LIGHTBLUE_EX,
"This update does not need to be saved: {}".format(self.UpdateStep))
# learning rate decay
if self.UpdateStep % self.LR_DECAY_FREQ == (self.LR_DECAY_FREQ-1):
# decay
self.A_LR = self.A_LR * self.LR_DECAY
self.C_LR = self.C_LR * self.LR_DECAY
# save
with open(self.ActorLrFile, 'w') as f:
f.write(str(self.A_LR))
with open(self.CriticLrFile, 'w') as f:
f.write(str(self.C_LR))
hp.ColorPrint(Fore.LIGHTRED_EX,
"Decay LR: A_LR={}, C_LR={}".format(self.A_LR, self.C_LR))
# copy pi to old pi
self.sess.run(self.update_oldpi_op)
# collect data from all workers
data = [self.SharedStorage['DataQueue'].get() for _ in range(self.SharedStorage['DataQueue'].qsize())]
data = np.vstack(data)
s, a, r = data[:, :self.S_DIM], data[:, self.S_DIM: self.S_DIM + self.A_SPACE], data[:, -1:]
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
# update actor and critic in a update loop
for _ in range(self.UpdateDepth):
self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv})
self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r})
'''
write summary
'''
# actor and critic loss
result = self.sess.run(
tf.summary.merge([self.ActorLossSummary, self.CriticLossSummary,
self.ppoRatioSummary]),
feed_dict={self.tfs: s, self.tfa: a, self.tfadv: adv, self.tfdc_r: r})
self.writer.add_summary(result, self.UpdateStep)
self.UpdateStep += 1
# re-train will not overlap the summaries
with open(self.UpdateStepFile, 'w') as f:
f.write(str(self.UpdateStep))
# updating finished
self.SharedStorage['Events']['update'].clear()
self.SharedStorage['Locks']['counter'].acquire()
# reset counter
self.SharedStorage['Counters']['update_counter'] = 0
self.SharedStorage['Locks']['counter'].release()
# set collecting available
self.SharedStorage['Events']['collect'].set()
hp.ColorPrint(Fore.YELLOW, 'Updator stopped')
def add_layer(self, inputs, out_size, trainable=True,activation_function=None, norm=False):
in_size = inputs.get_shape().as_list()[1]
Weights = tf.Variable(tf.random_normal([in_size, out_size], mean=1.0, stddev=1.0), trainable=trainable)
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, trainable=trainable)
# fully connected product
Wx_plus_b = tf.matmul(inputs, Weights) + biases
# normalize fully connected product
if norm:
# Batch Normalize
Wx_plus_b = tf.contrib.layers.batch_norm(
Wx_plus_b, updates_collections=None, is_training=self.isTraining)
# activation
if activation_function is None:
outputs = Wx_plus_b
else:
with tf.variable_scope('ActivationFunction'):
outputs = activation_function(Wx_plus_b)
return outputs
def _build_anet(self, name, trainable):
with tf.variable_scope(name):
with tf.variable_scope('Fully_Connected'):
l1 = self.add_layer(self.tfs, self.L1Neurons, trainable,activation_function=tf.nn.relu, norm=True)
if self.L2Neurons != 0:
l2 = self.add_layer(l1, self.L2Neurons, trainable,activation_function=tf.nn.relu, norm=True)
with tf.variable_scope('Action_Expectation'):
# softmax may lead to NaN
if self.L2Neurons != 0:
expectation = \
self.add_layer(l2, self.A_DIM, activation_function=tf.nn.softmax, norm=True)
else:
expectation = \
self.add_layer(l1, self.A_DIM, activation_function=tf.nn.softmax, norm=True)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return expectation, params
def choose_action(self, s, PassHistory):
"""
return a int from 0 to 33
Input "s" must be numpy array.
In the world of reinforcement learning, the action space is from 0 to 33.
However, in the world of modified-clang, the accepted passes are from 1 to 34.
Therefore, "gym-OptClang" already done this effort for us.
We don't have to be bothered by this.
However, if you use the model withou gym-OptClang, you have to convert by yourself.
e.g. Inference example in our examples.
"""
s = s[np.newaxis, :]
a_expect = self.sess.run(self.acts_expect, {self.tfs: s})
print(a_expect)
'''
choose the one that was not applied yet
'''
# split the probabilities into list of [index ,probablities]
aList = a_expect.tolist()
probList = []
idx = 0
for prob in aList:
probList.append([idx, prob])
idx += 1
# some probs may be the same.
# Try to avoid that every time choose the same action
if self.isTraining == True:
shuffle(probList)
# sort with probs in descending order
probList.sort(key=itemgetter(1), reverse=True)
# find the one that is not applied yet
idx = 0
while True:
'''
During training, we need some chance to get unexpected action to let
the agent face different conditions as much as possible.
'''
# Use different strategies for different situations
if self.isTraining == True:
prob = random.uniform(0, 1)
if prob < 0.8:
# the most possible action
PassIdx = probList[idx][0]
idx += 1
else:
# random action
PassIdx = np.random.choice(np.arange(self.A_DIM))
else:
PassIdx = probList[idx][0]
idx += 1
#print('PassIdx={} with {} prob'.format(PassIdx, actionProb[1]))
if PassIdx not in PassHistory:
PassHistory[PassIdx] = 'Used'
return PassIdx
# the code should never come to here
return 'Error'
def get_v(self, s):
if s.ndim < 2: s = s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
def DrawToTf(self, speedup, overall_reward, step):
"""
This is not thread-safe
"""
try:
result = self.sess.run(
tf.summary.merge([self.SpeedupSummary, self.EpiRewardSummary]),
feed_dict={self.OverallSpeedup: speedup,
self.EpisodeReward: overall_reward})
self.writer.add_summary(result, step)
with open(self.ckptLocBase + '/EpiStepFile', 'w') as f:
f.write(str(step))
self.writer.flush()
except Exception as e:
ColorPrint(Fore.LIGHTRED_EX, "SpeedupSummary or EpiRewardSummary failed: {}".fomat(e))
| [
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"random.shuffle",
"tensorflow.train.AdamOptimizer",
"tensorflow.multiply",
"tensorflow.matmul",
"tensorflow.train.latest_checkpoint",
"numpy.arange",
"tensorflow.get_default_graph"... | [((580, 601), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (594, 601), False, 'import matplotlib\n'), ((1226, 1250), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1248, 1250), True, 'import tensorflow as tf\n'), ((1913, 1968), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.S_DIM]', '"""state"""'], {}), "(tf.float32, [None, self.S_DIM], 'state')\n", (1927, 1968), True, 'import tensorflow as tf\n'), ((2456, 2491), 'os.path.exists', 'os.path.exists', (['self.UpdateStepFile'], {}), '(self.UpdateStepFile)\n', (2470, 2491), False, 'import random, threading, queue, operator, os, sys, re\n'), ((2696, 2728), 'os.path.exists', 'os.path.exists', (['self.ActorLrFile'], {}), '(self.ActorLrFile)\n', (2710, 2728), False, 'import random, threading, queue, operator, os, sys, re\n'), ((3017, 3050), 'os.path.exists', 'os.path.exists', (['self.CriticLrFile'], {}), '(self.CriticLrFile)\n', (3031, 3050), False, 'import random, threading, queue, operator, os, sys, re\n'), ((7332, 7388), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.ckptLocBase', 'self.sess.graph'], {}), '(self.ckptLocBase, self.sess.graph)\n', (7353, 7388), True, 'import tensorflow as tf\n'), ((7467, 7483), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7481, 7483), True, 'import tensorflow as tf\n'), ((7558, 7598), 'tensorflow.train.checkpoint_exists', 'tf.train.checkpoint_exists', (['self.ckptLoc'], {}), '(self.ckptLoc)\n', (7584, 7598), True, 'import tensorflow as tf\n'), ((11255, 11300), 'Helpers.ColorPrint', 'hp.ColorPrint', (['Fore.YELLOW', '"""Updator stopped"""'], {}), "(Fore.YELLOW, 'Updator stopped')\n", (11268, 11300), True, 'import Helpers as hp\n'), ((13078, 13138), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)\n', (13095, 13138), True, 'import 
tensorflow as tf\n'), ((2369, 2401), 'os.path.exists', 'os.path.exists', (['self.ckptLocBase'], {}), '(self.ckptLocBase)\n', (2383, 2401), False, 'import random, threading, queue, operator, os, sys, re\n'), ((2415, 2444), 'os.makedirs', 'os.makedirs', (['self.ckptLocBase'], {}), '(self.ckptLocBase)\n', (2426, 2444), False, 'import random, threading, queue, operator, os, sys, re\n'), ((3409, 3472), 'Helpers.ColorPrint', 'hp.ColorPrint', (['Fore.LIGHTCYAN_EX', '"""This is inference procedure"""'], {}), "(Fore.LIGHTCYAN_EX, 'This is inference procedure')\n", (3422, 3472), True, 'import Helpers as hp\n'), ((3671, 3698), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Critic"""'], {}), "('Critic')\n", (3688, 3698), True, 'import tensorflow as tf\n'), ((4910, 4942), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ActionsExp."""'], {}), "('ActionsExp.')\n", (4927, 4942), True, 'import tensorflow as tf\n'), ((4975, 4997), 'tensorflow.squeeze', 'tf.squeeze', (['pi'], {'axis': '(0)'}), '(pi, axis=0)\n', (4985, 4997), True, 'import tensorflow as tf\n'), ((5011, 5038), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Update"""'], {}), "('Update')\n", (5028, 5038), True, 'import tensorflow as tf\n'), ((5150, 5185), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Actor/PPO-Loss"""'], {}), "('Actor/PPO-Loss')\n", (5167, 5185), True, 'import tensorflow as tf\n'), ((5210, 5255), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, 1]', '"""action"""'], {}), "(tf.int32, [None, 1], 'action')\n", (5224, 5255), True, 'import tensorflow as tf\n'), ((5281, 5331), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""advantage"""'], {}), "(tf.float32, [None, 1], 'advantage')\n", (5295, 5331), True, 'import tensorflow as tf\n'), ((5654, 5686), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['act_probs'], {'axis': '(1)'}), '(act_probs, axis=1)\n', (5667, 5686), True, 'import tensorflow as tf\n'), ((5874, 5910), 
'tensorflow.reduce_sum', 'tf.reduce_sum', (['act_probs_old'], {'axis': '(1)'}), '(act_probs_old, axis=1)\n', (5887, 5910), True, 'import tensorflow as tf\n'), ((6135, 6165), 'tensorflow.multiply', 'tf.multiply', (['ratio', 'self.tfadv'], {}), '(ratio, self.tfadv)\n', (6146, 6165), True, 'import tensorflow as tf\n'), ((6438, 6482), 'tensorflow.summary.tensor_summary', 'tf.summary.tensor_summary', (['"""ppoRatio"""', 'ratio'], {}), "('ppoRatio', ratio)\n", (6463, 6482), True, 'import tensorflow as tf\n'), ((6519, 6561), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""ActorLoss"""', 'self.aloss'], {}), "('ActorLoss', self.aloss)\n", (6536, 6561), True, 'import tensorflow as tf\n'), ((6576, 6607), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ActorTrain"""'], {}), "('ActorTrain')\n", (6593, 6607), True, 'import tensorflow as tf\n'), ((6707, 6735), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Summary"""'], {}), "('Summary')\n", (6724, 6735), True, 'import tensorflow as tf\n'), ((6771, 6820), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""OverallSpeedup"""'}), "(tf.float32, name='OverallSpeedup')\n", (6785, 6820), True, 'import tensorflow as tf\n'), ((6854, 6902), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""EpisodeReward"""'}), "(tf.float32, name='EpisodeReward')\n", (6868, 6902), True, 'import tensorflow as tf\n'), ((6926, 6960), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'dtype': 'tf.float32'}), '(1.0, dtype=tf.float32)\n', (6937, 6960), True, 'import tensorflow as tf\n'), ((6997, 7039), 'tensorflow.multiply', 'tf.multiply', (['self.OverallSpeedup', 'self.one'], {}), '(self.OverallSpeedup, self.one)\n', (7008, 7039), True, 'import tensorflow as tf\n'), ((7074, 7132), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""OverallSpeedup"""', 'self.RecordSpeedup_op'], {}), "('OverallSpeedup', self.RecordSpeedup_op)\n", (7091, 7132), True, 'import tensorflow as tf\n'), ((7171, 
7212), 'tensorflow.multiply', 'tf.multiply', (['self.EpisodeReward', 'self.one'], {}), '(self.EpisodeReward, self.one)\n', (7182, 7212), True, 'import tensorflow as tf\n'), ((7249, 7308), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""EpisodeReward"""', 'self.RecordEpiReward_op'], {}), "('EpisodeReward', self.RecordEpiReward_op)\n", (7266, 7308), True, 'import tensorflow as tf\n'), ((7411, 7444), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7442, 7444), True, 'import tensorflow as tf\n'), ((7757, 7821), 'Helpers.ColorPrint', 'hp.ColorPrint', (['Fore.LIGHTGREEN_EX', '"""Restore the previous model."""'], {}), "(Fore.LIGHTGREEN_EX, 'Restore the previous model.')\n", (7770, 7821), True, 'import Helpers as hp\n'), ((11478, 11537), 'tensorflow.random_normal', 'tf.random_normal', (['[in_size, out_size]'], {'mean': '(1.0)', 'stddev': '(1.0)'}), '([in_size, out_size], mean=1.0, stddev=1.0)\n', (11494, 11537), True, 'import tensorflow as tf\n'), ((11696, 11722), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'Weights'], {}), '(inputs, Weights)\n', (11705, 11722), True, 'import tensorflow as tf\n'), ((11848, 11946), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['Wx_plus_b'], {'updates_collections': 'None', 'is_training': 'self.isTraining'}), '(Wx_plus_b, updates_collections=None,\n is_training=self.isTraining)\n', (11876, 11946), True, 'import tensorflow as tf\n'), ((12269, 12292), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (12286, 12292), True, 'import tensorflow as tf\n'), ((14308, 14325), 'random.shuffle', 'shuffle', (['probList'], {}), '(probList)\n', (14315, 14325), False, 'from random import shuffle\n'), ((1870, 1892), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1890, 1892), True, 'import tensorflow as tf\n'), ((3717, 3753), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Fully_Connected"""'], {}), 
"('Fully_Connected')\n", (3734, 3753), True, 'import tensorflow as tf\n'), ((4020, 4046), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Value"""'], {}), "('Value')\n", (4037, 4046), True, 'import tensorflow as tf\n'), ((4231, 4256), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Loss"""'], {}), "('Loss')\n", (4248, 4256), True, 'import tensorflow as tf\n'), ((4288, 4341), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""discounted_r"""'], {}), "(tf.float32, [None, 1], 'discounted_r')\n", (4302, 4341), True, 'import tensorflow as tf\n'), ((4508, 4551), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""CriticLoss"""', 'self.closs'], {}), "('CriticLoss', self.closs)\n", (4525, 4551), True, 'import tensorflow as tf\n'), ((4569, 4601), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""CriticTrain"""'], {}), "('CriticTrain')\n", (4586, 4601), True, 'import tensorflow as tf\n'), ((5582, 5629), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'self.tfa', 'depth': 'pi.shape[1]'}), '(indices=self.tfa, depth=pi.shape[1])\n', (5592, 5629), True, 'import tensorflow as tf\n'), ((5795, 5845), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'self.tfa', 'depth': 'oldpi.shape[1]'}), '(indices=self.tfa, depth=oldpi.shape[1])\n', (5805, 5845), True, 'import tensorflow as tf\n'), ((6185, 6264), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', '(1.0 - self.ClippingEpsilon)', '(1.0 + self.ClippingEpsilon)'], {}), '(ratio, 1.0 - self.ClippingEpsilon, 1.0 + self.ClippingEpsilon)\n', (6201, 6264), True, 'import tensorflow as tf\n'), ((7699, 7743), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.ckptLocBase'], {}), '(self.ckptLocBase)\n', (7725, 7743), True, 'import tensorflow as tf\n'), ((7873, 7949), 'Helpers.ColorPrint', 'hp.ColorPrint', (['Fore.LIGHTRED_EX', '"""Missing trained model to inference, exit."""'], {}), "(Fore.LIGHTRED_EX, 'Missing trained model to inference, exit.')\n", 
(7886, 7949), True, 'import Helpers as hp\n'), ((7962, 7973), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7970, 7973), False, 'import random, threading, queue, operator, os, sys, re\n'), ((9651, 9666), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (9660, 9666), True, 'import numpy as np\n'), ((11589, 11612), 'tensorflow.zeros', 'tf.zeros', (['[1, out_size]'], {}), '([1, out_size])\n', (11597, 11612), True, 'import tensorflow as tf\n'), ((12089, 12128), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ActivationFunction"""'], {}), "('ActivationFunction')\n", (12106, 12128), True, 'import tensorflow as tf\n'), ((12311, 12347), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Fully_Connected"""'], {}), "('Fully_Connected')\n", (12328, 12347), True, 'import tensorflow as tf\n'), ((12634, 12673), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Action_Expectation"""'], {}), "('Action_Expectation')\n", (12651, 12673), True, 'import tensorflow as tf\n'), ((14398, 14411), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (14408, 14411), False, 'from operator import itemgetter\n'), ((14819, 14839), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (14833, 14839), False, 'import random\n'), ((15785, 15847), 'tensorflow.summary.merge', 'tf.summary.merge', (['[self.SpeedupSummary, self.EpiRewardSummary]'], {}), '([self.SpeedupSummary, self.EpiRewardSummary])\n', (15801, 15847), True, 'import tensorflow as tf\n'), ((4117, 4139), 'tensorflow.layers.dense', 'tf.layers.dense', (['l2', '(1)'], {}), '(l2, 1)\n', (4132, 4139), True, 'import tensorflow as tf\n'), ((4191, 4213), 'tensorflow.layers.dense', 'tf.layers.dense', (['l1', '(1)'], {}), '(l1, 1)\n', (4206, 4213), True, 'import tensorflow as tf\n'), ((4440, 4465), 'tensorflow.square', 'tf.square', (['self.advantage'], {}), '(self.advantage)\n', (4449, 4465), True, 'import tensorflow as tf\n'), ((6057, 6082), 'tensorflow.log', 'tf.log', (['(act_probs + 1e-10)'], 
{}), '(act_probs + 1e-10)\n', (6063, 6082), True, 'import tensorflow as tf\n'), ((6085, 6114), 'tensorflow.log', 'tf.log', (['(act_probs_old + 1e-10)'], {}), '(act_probs_old + 1e-10)\n', (6091, 6114), True, 'import tensorflow as tf\n'), ((6353, 6375), 'tensorflow.minimum', 'tf.minimum', (['surr', 'clip'], {}), '(surr, clip)\n', (6363, 6375), True, 'import tensorflow as tf\n'), ((6638, 6671), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.A_LR'], {}), '(self.A_LR)\n', (6660, 6671), True, 'import tensorflow as tf\n'), ((8493, 8554), 'Helpers.ColorPrint', 'hp.ColorPrint', (['Fore.LIGHTRED_EX', '"""Save for every 50 updates."""'], {}), "(Fore.LIGHTRED_EX, 'Save for every 50 updates.')\n", (8506, 8554), True, 'import Helpers as hp\n'), ((10322, 10414), 'tensorflow.summary.merge', 'tf.summary.merge', (['[self.ActorLossSummary, self.CriticLossSummary, self.ppoRatioSummary]'], {}), '([self.ActorLossSummary, self.CriticLossSummary, self.\n ppoRatioSummary])\n', (10338, 10414), True, 'import tensorflow as tf\n'), ((4636, 4669), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.C_LR'], {}), '(self.C_LR)\n', (4658, 4669), True, 'import tensorflow as tf\n'), ((15099, 15120), 'numpy.arange', 'np.arange', (['self.A_DIM'], {}), '(self.A_DIM)\n', (15108, 15120), True, 'import numpy as np\n')] |
from tkinter import *
from tkinter import messagebox
import numpy as np
import pandas as pd
# Symptom vocabulary: column names of the one-hot feature vector fed to the
# classifier.  Order must match the Training/Testing CSV columns exactly
# (including the odd entries such as 'spotting_ urination' and
# 'foul_smell_of urine' — they mirror the CSV headers).
l1=['itching','skin_rash','nodal_skin_eruptions','continuous_sneezing','shivering','chills','joint_pain',
'stomach_pain','acidity','ulcers_on_tongue','muscle_wasting','vomiting','burning_micturition','spotting_ urination','fatigue',
'weight_gain','anxiety','cold_hands_and_feets','mood_swings','weight_loss','restlessness','lethargy','patches_in_throat',
'irregular_sugar_level','cough','high_fever','sunken_eyes','breathlessness','sweating','dehydration','indigestion',
'headache','yellowish_skin','dark_urine','nausea','loss_of_appetite','pain_behind_the_eyes','back_pain','constipation',
'abdominal_pain','diarrhoea','mild_fever','yellow_urine','yellowing_of_eyes','acute_liver_failure','fluid_overload',
'swelling_of_stomach','swelled_lymph_nodes','malaise','blurred_and_distorted_vision','phlegm','throat_irritation',
'redness_of_eyes','sinus_pressure','runny_nose','congestion','chest_pain','weakness_in_limbs','fast_heart_rate',
'pain_during_bowel_movements','pain_in_anal_region','bloody_stool','irritation_in_anus','neck_pain','dizziness','cramps',
'bruising','obesity','swollen_legs','swollen_blood_vessels','puffy_face_and_eyes','enlarged_thyroid','brittle_nails',
'swollen_extremeties','excessive_hunger','extra_marital_contacts','drying_and_tingling_lips','slurred_speech','knee_pain','hip_joint_pain',
'muscle_weakness','stiff_neck','swelling_joints','movement_stiffness','spinning_movements','loss_of_balance','unsteadiness','weakness_of_one_body_side',
'loss_of_smell','bladder_discomfort','foul_smell_of urine','continuous_feel_of_urine','passage_of_gases','internal_itching','toxic_look_(typhos)',
'depression','irritability','muscle_pain','altered_sensorium','red_spots_over_body','belly_pain','abnormal_menstruation','dischromic _patches',
'watering_from_eyes','increased_appetite','polyuria','family_history','mucoid_sputum','rusty_sputum','lack_of_concentration','visual_disturbances',
'receiving_blood_transfusion','receiving_unsterile_injections','coma','stomach_bleeding','distention_of_abdomen','history_of_alcohol_consumption',
'fluid_overload','blood_in_sputum','prominent_veins_on_calf','palpitations','painful_walking','pus_filled_pimples','blackheads','scurring','skin_peeling',
'silver_like_dusting','small_dents_in_nails','inflammatory_nails','blister','red_sore_around_nose','yellow_crust_ooze']
# Disease class names, indexed by the integer label the classifier predicts
# (0..40, matching the 'prognosis' mapping applied to the CSVs below).
disease=['Fungal infection','Allergy','GERD','Chronic cholestasis','Drug Reaction',
'Peptic ulcer diseae','AIDS','Diabetes','Gastroenteritis','Bronchial Asthma','Hypertension',
' Migraine','Cervical spondylosis',
'Paralysis (brain hemorrhage)','Jaundice','Malaria','Chicken pox','Dengue','Typhoid','hepatitis A',
'Hepatitis B','Hepatitis C','Hepatitis D','Hepatitis E','Alcoholic hepatitis','Tuberculosis',
'Common Cold','Pneumonia','Dimorphic hemmorhoids(piles)',
'Heartattack','Varicoseveins','Hypothyroidism','Hyperthyroidism','Hypoglycemia','Osteoarthristis',
'Arthritis','(vertigo) Paroymsal Positional Vertigo','Acne','Urinary tract infection','Psoriasis',
'Impetigo']
# Feature-vector template: one 0 per symptom.  NOTE(review): NaiveBayes()
# mutates this module-level list in place and never resets it, so selections
# persist across predictions.
l2=[]
for x in range(0,len(l1)):
    l2.append(0)
# TESTING DATA
tr=pd.read_csv("Testing.csv")
# Map each 'prognosis' string to its integer class label (0..40); keys with
# trailing spaces ('Diabetes ', 'Hypertension ') mirror the CSV contents.
tr.replace({'prognosis':{'Fungal infection':0,'Allergy':1,'GERD':2,'Chronic cholestasis':3,'Drug Reaction':4,
'Peptic ulcer diseae':5,'AIDS':6,'Diabetes ':7,'Gastroenteritis':8,'Bronchial Asthma':9,'Hypertension ':10,
'Migraine':11,'Cervical spondylosis':12,
'Paralysis (brain hemorrhage)':13,'Jaundice':14,'Malaria':15,'Chicken pox':16,'Dengue':17,'Typhoid':18,'hepatitis A':19,
'Hepatitis B':20,'Hepatitis C':21,'Hepatitis D':22,'Hepatitis E':23,'Alcoholic hepatitis':24,'Tuberculosis':25,
'Common Cold':26,'Pneumonia':27,'Dimorphic hemmorhoids(piles)':28,'Heart attack':29,'Varicose veins':30,'Hypothyroidism':31,
'Hyperthyroidism':32,'Hypoglycemia':33,'Osteoarthristis':34,'Arthritis':35,
'(vertigo) Paroymsal Positional Vertigo':36,'Acne':37,'Urinary tract infection':38,'Psoriasis':39,
'Impetigo':40}},inplace=True)
# Held-out features/labels used only for the accuracy printout.
X_test= tr[l1]
y_test = tr[["prognosis"]]
# NOTE(review): the result is discarded — this top-level np.ravel is a no-op.
np.ravel(y_test)
# TRAINING DATA
df=pd.read_csv("Training.csv")
# Same label mapping as for the testing data above.
df.replace({'prognosis':{'Fungal infection':0,'Allergy':1,'GERD':2,'Chronic cholestasis':3,'Drug Reaction':4,
'Peptic ulcer diseae':5,'AIDS':6,'Diabetes ':7,'Gastroenteritis':8,'Bronchial Asthma':9,'Hypertension ':10,
'Migraine':11,'Cervical spondylosis':12,
'Paralysis (brain hemorrhage)':13,'Jaundice':14,'Malaria':15,'Chicken pox':16,'Dengue':17,'Typhoid':18,'hepatitis A':19,
'Hepatitis B':20,'Hepatitis C':21,'Hepatitis D':22,'Hepatitis E':23,'Alcoholic hepatitis':24,'Tuberculosis':25,
'Common Cold':26,'Pneumonia':27,'Dimorphic hemmorhoids(piles)':28,'Heart attack':29,'Varicose veins':30,'Hypothyroidism':31,
'Hyperthyroidism':32,'Hypoglycemia':33,'Osteoarthristis':34,'Arthritis':35,
'(vertigo) Paroymsal Positional Vertigo':36,'Acne':37,'Urinary tract infection':38,'Psoriasis':39,
'Impetigo':40}},inplace=True)
# Training features/labels used by NaiveBayes() below.
X= df[l1]
y = df[["prognosis"]]
# NOTE(review): result discarded — no-op; NaiveBayes() calls np.ravel(y) itself.
np.ravel(y)
def message():
    """Validate the dropdowns and run the prediction.

    Shows an error dialog when none of the five symptom dropdowns has been
    changed from its "None" placeholder; otherwise delegates to NaiveBayes().
    """
    symptom_vars = (Symptom1, Symptom2, Symptom3, Symptom4, Symptom5)
    if all(var.get() == "None" for var in symptom_vars):
        # Fix: the dialog title previously misspelled "OOPS" as "OPPS".
        messagebox.showinfo("OOPS!!", "ENTER SYMPTOMS PLEASE")
    else:
        NaiveBayes()
def NaiveBayes():
    """Train a Multinomial Naive Bayes model and show the predicted disease.

    Fits on the module-level training data (X, y), prints test accuracy,
    encodes the five selected symptoms as a one-hot vector over l1, and
    writes the predicted disease name into the t3 text widget.
    """
    from sklearn.naive_bayes import MultinomialNB
    gnb = MultinomialNB()
    gnb = gnb.fit(X, np.ravel(y))
    from sklearn.metrics import accuracy_score
    y_pred = gnb.predict(X_test)
    print(accuracy_score(y_test, y_pred))
    print(accuracy_score(y_test, y_pred, normalize=False))
    psymptoms = [Symptom1.get(), Symptom2.get(), Symptom3.get(), Symptom4.get(), Symptom5.get()]
    # Fix: build the feature vector locally instead of mutating the
    # module-level list l2.  The original never reset l2, so symptoms chosen
    # in earlier predictions leaked into every later prediction.
    inputtest = [[1 if symptom in psymptoms else 0 for symptom in l1]]
    predicted = gnb.predict(inputtest)[0]
    t3.delete("1.0", END)
    if 0 <= predicted < len(disease):
        t3.insert(END, disease[predicted])
    else:
        # Defensive fallback; labels are trained as 0..len(disease)-1.
        t3.insert(END, "No Disease")
# ---- Tkinter GUI: title, five symptom dropdowns, predict button, output box.
root = Tk()
root.title(" Disease Prediction From Symptoms")
root.configure()
# One StringVar per dropdown; the "None" placeholder means "not selected".
Symptom1 = StringVar()
Symptom1.set(None)
Symptom2 = StringVar()
Symptom2.set(None)
Symptom3 = StringVar()
Symptom3.set(None)
Symptom4 = StringVar()
Symptom4.set(None)
Symptom5 = StringVar()
Symptom5.set(None)
# Page heading.
w2 = Label(root, justify=LEFT, text=" Disease Prediction From Symptoms ")
w2.config(font=("Elephant", 30))
w2.grid(row=1, column=0, columnspan=2, padx=100)
# Empty label used purely as vertical spacing.
NameLb1 = Label(root, text="")
NameLb1.config(font=("Elephant", 20))
NameLb1.grid(row=5, column=1, pady=10, sticky=W)
# Row labels for the five symptom dropdowns.
S1Lb = Label(root, text="Symptom 1")
S1Lb.config(font=("Elephant", 15))
S1Lb.grid(row=7, column=1, pady=10 , sticky=W)
S2Lb = Label(root, text="Symptom 2")
S2Lb.config(font=("Elephant", 15))
S2Lb.grid(row=8, column=1, pady=10, sticky=W)
S3Lb = Label(root, text="Symptom 3")
S3Lb.config(font=("Elephant", 15))
S3Lb.grid(row=9, column=1, pady=10, sticky=W)
S4Lb = Label(root, text="Symptom 4")
S4Lb.config(font=("Elephant", 15))
S4Lb.grid(row=10, column=1, pady=10, sticky=W)
S5Lb = Label(root, text="Symptom 5")
S5Lb.config(font=("Elephant", 15))
S5Lb.grid(row=11, column=1, pady=10, sticky=W)
# The predict button validates input via message() before running the model.
lr = Button(root, text="Predict",height=2, width=20, command=message)
lr.config(font=("Elephant", 15))
lr.grid(row=15, column=1,pady=20)
# Dropdown choices: the symptom vocabulary, alphabetically sorted.
OPTIONS = sorted(l1)
S1En = OptionMenu(root, Symptom1,*OPTIONS)
S1En.grid(row=7, column=2)
S2En = OptionMenu(root, Symptom2,*OPTIONS)
S2En.grid(row=8, column=2)
S3En = OptionMenu(root, Symptom3,*OPTIONS)
S3En.grid(row=9, column=2)
S4En = OptionMenu(root, Symptom4,*OPTIONS)
S4En.grid(row=10, column=2)
S5En = OptionMenu(root, Symptom5,*OPTIONS)
S5En.grid(row=11, column=2)
# More spacer labels.
NameLb = Label(root, text="")
NameLb.config(font=("Elephant", 20))
NameLb.grid(row=13, column=1, pady=10, sticky=W)
NameLb = Label(root, text="")
NameLb.config(font=("Elephant", 15))
NameLb.grid(row=18, column=1, pady=10, sticky=W)
# Output box where NaiveBayes() writes the predicted disease name.
t3 = Text(root, height=2, width=30)
t3.config(font=("Elephant", 20))
t3.grid(row=20, column=1 , padx=10)
root.mainloop()
| [
"sklearn.naive_bayes.MultinomialNB",
"numpy.ravel",
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"tkinter.messagebox.showinfo"
] | [((3262, 3288), 'pandas.read_csv', 'pd.read_csv', (['"""Testing.csv"""'], {}), "('Testing.csv')\n", (3273, 3288), True, 'import pandas as pd\n'), ((4155, 4171), 'numpy.ravel', 'np.ravel', (['y_test'], {}), '(y_test)\n', (4163, 4171), True, 'import numpy as np\n'), ((4192, 4219), 'pandas.read_csv', 'pd.read_csv', (['"""Training.csv"""'], {}), "('Training.csv')\n", (4203, 4219), True, 'import pandas as pd\n'), ((5078, 5089), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (5086, 5089), True, 'import numpy as np\n'), ((5433, 5448), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (5446, 5448), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((5266, 5321), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""OPPS!!"""', '"""ENTER SYMPTOMS PLEASE"""'], {}), "('OPPS!!', 'ENTER SYMPTOMS PLEASE')\n", (5285, 5321), False, 'from tkinter import messagebox\n'), ((5467, 5478), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (5475, 5478), True, 'import numpy as np\n'), ((5570, 5600), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5584, 5600), False, 'from sklearn.metrics import accuracy_score\n'), ((5612, 5659), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {'normalize': '(False)'}), '(y_test, y_pred, normalize=False)\n', (5626, 5659), False, 'from sklearn.metrics import accuracy_score\n')] |
import numpy as np
def doubleQ(initial_Q1,initial_Q2,initial_state,transition,
            num_episodes,gamma, alpha, epsilon=0.1):
    """Tabular double Q-learning with an epsilon-greedy behaviour policy.

    Parameters
    ----------
    initial_Q1, initial_Q2 : ndarray, shape (num_states, num_actions)
        Starting action-value tables; they are copied, not mutated.
    initial_state : int
        State every episode starts from.
    transition : callable
        ``transition(state, action) -> (next_state, reward, terminal)``.
    num_episodes : int
        Number of episodes to run.
    gamma : float
        Discount factor.
    alpha : float
        Learning rate.
    epsilon : float, optional
        Exploration probability of the epsilon-greedy policy (default 0.1).

    Returns
    -------
    (Q1, Q2, Q) : tuple of ndarray
        The two learned tables and their sum ``Q = Q1 + Q2``.
    """
    Q1 = np.copy(initial_Q1)
    Q2 = np.copy(initial_Q2)
    num_states, num_actions = Q1.shape
    Q = Q1 + Q2
    for ep in range(num_episodes):
        crnState = initial_state
        while True:
            # Epsilon-greedy probabilities w.r.t. the combined table Q:
            # every action gets epsilon/|A|, the greedy one gets the rest.
            uniformScl = epsilon / num_actions
            greedyScl = (1 - epsilon) + epsilon / num_actions
            actionPrb = uniformScl * np.ones(num_actions,dtype= float)
            actionPrb[np.argmax(Q[crnState,:])] = greedyScl
            crnAction = np.random.choice(num_actions,p = actionPrb)
            nxtState,imdReward, terminal = transition(crnState, crnAction)
            if terminal:
                # NOTE(review): the reward of the terminal transition is
                # discarded, matching the original implementation.
                break
            # Flip a fair coin: update Q1 using Q2's estimate of the next
            # state's value (or vice versa) — the core of double Q-learning.
            if np.random.random() > 0.5:
                Q1[crnState,crnAction] += alpha * (imdReward + gamma *(Q2[nxtState,np.argmax(Q1[nxtState,:])]) - Q1[crnState,crnAction] )
            else:
                Q2[crnState,crnAction] += alpha * (imdReward + gamma *(Q1[nxtState,np.argmax(Q2[nxtState,:])]) - Q2[crnState,crnAction] )
            Q = Q1 + Q2
            crnState = nxtState
    return Q1, Q2, Q
| [
"numpy.copy",
"numpy.argmax",
"numpy.ones",
"numpy.random.random",
"numpy.random.choice"
] | [((229, 248), 'numpy.copy', 'np.copy', (['initial_Q1'], {}), '(initial_Q1)\n', (236, 248), True, 'import numpy as np\n'), ((258, 277), 'numpy.copy', 'np.copy', (['initial_Q2'], {}), '(initial_Q2)\n', (265, 277), True, 'import numpy as np\n'), ((751, 793), 'numpy.random.choice', 'np.random.choice', (['num_actions'], {'p': 'actionPrb'}), '(num_actions, p=actionPrb)\n', (767, 793), True, 'import numpy as np\n'), ((633, 666), 'numpy.ones', 'np.ones', (['num_actions'], {'dtype': 'float'}), '(num_actions, dtype=float)\n', (640, 666), True, 'import numpy as np\n'), ((689, 714), 'numpy.argmax', 'np.argmax', (['Q[crnState, :]'], {}), '(Q[crnState, :])\n', (698, 714), True, 'import numpy as np\n'), ((960, 978), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (976, 978), True, 'import numpy as np\n'), ((1105, 1131), 'numpy.argmax', 'np.argmax', (['Q1[nxtState, :]'], {}), '(Q1[nxtState, :])\n', (1114, 1131), True, 'import numpy as np\n'), ((1274, 1300), 'numpy.argmax', 'np.argmax', (['Q2[nxtState, :]'], {}), '(Q2[nxtState, :])\n', (1283, 1300), True, 'import numpy as np\n')] |
import numpy as np
import os
#from learning_to_adapt.utils.serializable import Serializable
#from gym.envs.mujoco.mujoco_env import MujocoEnv
from ad_envs.online_adaptation_suite.mj_env import MujocoEnv
# class MujocoEnv(gym.Env):
#
# def __init__(self, model_path, frame_skip):
# if model_path.startswith("/"):
# fullpath = model_path
# else:
# fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
# if not path.exists(fullpath):
# raise IOError("File %s does not exist" % fullpath)
# self.frame_skip = frame_skip
# self.model = mujoco_py.load_model_from_path(fullpath)
# self.sim = mujoco_py.MjSim(self.model)
# self.data = self.sim.data
# # MjSim = MjModel + MjData``
#
class HalfCheetahHFieldEnv(MujocoEnv):
    """Half-cheetah locomotion on a configurable height-field terrain.

    The ``task`` argument selects the terrain profile rebuilt by
    ``reset_task``: 'hfield' (randomized walls), 'basin', 'hill',
    'gentle', 'steep', or None for flat ground.
    """

    def __init__(self, task='hfield', max_episode_steps=200, reset_every_episode=False, reward=True, *args, **kwargs):
        self.cripple_mask = None
        self.reset_every_episode = reset_every_episode
        self.first = True
        MujocoEnv.__init__(self,
                           os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                        "assets",
                                        "half_cheetah_hfield.xml"))
        task = None if task == 'None' else task
        # Snapshots of the initial geom properties (rgba, contact type,
        # size, local position) so task resets can restore them.
        self._init_geom_rgba = self.model.geom_rgba.copy()
        self._init_geom_contype = self.model.geom_contype.copy()
        self._init_geom_size = self.model.geom_size.copy()
        self.init_geom_pos = self.model.geom_pos.copy()
        assert task in [None, 'hfield', 'hill', 'basin', 'steep', 'gentle']
        self.task = task
        # Default wall layout; deterministic tasks overwrite these in reset_task.
        self.x_walls = np.array([250, 260, 261, 270, 280, 285])
        self.height_walls = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2])
        self.height = 0.8
        self.width = 15
        self._max_episode_steps = max_episode_steps

    def get_current_obs(self):
        """Observation: joint positions (root x excluded), velocities, torso COM."""
        return np.concatenate([
            self.data.qpos.flatten()[1:],
            self.data.qvel.flat,
            self.get_body_com("torso").flat,
        ])

    def get_body_xmat(self, body_name):
        """Return the 3x3 rotation matrix of *body_name*."""
        idx = self.model.body_names.index(body_name)
        return self.data.xmat[idx].reshape((3, 3))

    def get_body_com(self, body_name):
        """Return the center of mass of the subtree rooted at *body_name*."""
        idx = self.model.body_names.index(body_name)
        return self.data.subtree_com[idx]

    def get_task(self):
        # Dummy numeric task id (the task name itself lives in self.task).
        return 1

    def step(self, action):
        """Advance one step; reward = forward torso velocity minus control cost."""
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        ctrl_cost = 1e-1 * 0.5 * np.sum(np.square(action))
        forward_reward = self.get_body_comvel("torso")[0]
        reward = forward_reward - ctrl_cost
        done = False
        info = dict(reward_forward=forward_reward,
                    reward_ctrl=-ctrl_cost,
                    task=self.get_task(),
                    done_mdp=done)
        return next_obs, reward, done, info

    def reward(self, obs, action, next_obs):
        """Vectorized reward for batches of (obs, action, next_obs) rows.

        obs[:, -3] is the torso x position appended by get_current_obs,
        so the finite difference over dt gives the forward velocity.
        """
        assert obs.ndim == 2
        assert obs.shape == next_obs.shape
        assert obs.shape[0] == action.shape[0]
        ctrl_cost = 1e-1 * 0.5 * np.sum(np.square(action), axis=1)
        forward_reward = (next_obs[:, -3] - obs[:, -3]) / self.dt
        reward = forward_reward - ctrl_cost
        return reward

    def reset_mujoco(self, init_state=None):
        """Reset the simulator, regenerating the terrain if configured to."""
        super(HalfCheetahHFieldEnv, self).reset_mujoco(init_state=init_state)
        if self.reset_every_episode and not self.first:
            self.reset_task()
        if self.first:
            self.first = False

    def reset_task(self, value=None):
        """Rebuild the terrain height field for the current task.

        The five terrain tasks previously duplicated the identical
        row-construction code; that now lives in _build_hfield.
        """
        if self.task == 'hfield':
            # Randomized terrain: six walls of random position, sign and height.
            height = np.random.uniform(0.2, 1)
            width = 10
            n_walls = 6
            x_walls = np.random.choice(np.arange(255, 310, width), replace=False, size=n_walls)
            x_walls.sort()
            sign = np.random.choice([1, -1], size=n_walls)
            sign[:2] = 1  # the first two walls always rise
            height_walls = np.random.uniform(0.2, 0.6, n_walls) * sign
            self._build_hfield(height, width, x_walls, height_walls)
        elif self.task in ('basin', 'hill', 'gentle', 'steep'):
            # Deterministic profiles: (per-wall step signs, hfield height scale).
            wall_profiles = {
                'basin': (np.array([-1, 1, 0., 0., 0., 0.]), 0.55),
                'hill': (np.array([1, -1, 0, 0., 0, 0]), 0.6),
                'gentle': (np.array([1, 1, 1, 1, 1, 1]), 1),
                'steep': (np.array([1, 1, 1, 1, 1, 1]), 4),
            }
            self.height_walls, self.height = wall_profiles[self.task]
            self.x_walls = np.array([255, 270, 285, 300, 315, 330]) - 5
            self._build_hfield(self.height, self.width, self.x_walls, self.height_walls)
        elif self.task is None:
            pass
        else:
            raise NotImplementedError
        self.sim.forward()

    def _build_hfield(self, height, width, x_walls, height_walls):
        """Write a normalized 500x528 height field into the MuJoCo model.

        Each wall is a linear ramp of `width` cells starting at its x
        position; terrain after a wall stays at the reached level.  The row
        is min-max normalized, then tiled across the field's second axis.
        """
        self.model.hfield_size[:] = np.array([50, 5, height, 0.1])
        row = np.zeros((500,))
        for i, x in enumerate(x_walls):
            terrain = np.cumsum([height_walls[i]] * width)
            row[x:x + width] += terrain
            row[x + width:] = row[x + width - 1]
        row = (row - np.min(row)) / (np.max(row) - np.min(row))
        hfield = np.tile(row.reshape(-1, 1), (1, 528)).T.reshape(-1, 1)
        self.model.hfield_data[:] = hfield.reshape(-1)
if __name__ == '__main__':
    # Smoke test: endlessly roll out random actions on randomized terrain,
    # rendering each step (requires a MuJoCo install and a display).
    env = HalfCheetahHFieldEnv(task='hfield')
    while True:
        env.reset()
        env.reset_task()
        for _ in range(1000):
            env.render()
            env.step(env.action_space.sample())
    #env.stop_viewer()
# @property
# def action_bounds(self):
# return self.action_space.bounds
#
# def inject_action_noise(self, action): # l2a mujoco_env
# # generate action noise
# noise = self.action_noise * np.random.normal(size=action.shape)
# # rescale the noise to make it proportional to the action bounds
# lb, ub = self.action_bounds
# noise = 0.5 * (ub - lb) * noise
# return action + noise
#
# def forward_dynamics(self, action): # l2a mujoco env
# ctrl = self.inject_action_noise(action)
# self.do_simulation(ctrl, self.frame_skip)
# self.model.forward() #TODO why this part is needed?
# # subtree_com : center of mass of each subtree (nbody x 3)
# # Therefore, new_com is torco's com
# new_com = self.model.data.com_subtree[0]
# self.dcom = new_com - self.current_com
# self.current_com = new_com
#
# def _compute_subtree(self): # class MjModel
# body_vels = np.zeros((self.model.nbody, 6))
# #
# mass = self.body_mass.flatten()
# for i in range(self.model.nbody):
# # body velocity
# mujoco_py.cymj._mj_objectVelocity()
#
#
#
# @property
# def body_comvels(self):
# if self._body_comvels is None:
# self._body_comvels = self._compute_subtree()
# return self._body_comvels
#
# def get_body_comvel(self, body_name): # l2a mujoco env
# idx = self.model.body_names.index(body_name)
# return self.model.data.body_comvels[idx]
#
# def step(self, action):
# self.forward_dynamics(action)
# next_obs = self.get_current_obs()
# ctrl_cost = 1e-1 * 0.5 * np.sum(np.square(action)) # 1/20 * sum of square
# # comVel // Compute cvel, cdof_dot
# # cvel // com-based velocity [3D rot; 3D tran] (nbody x 6)
# # cdof_dot // time-derivative of cdof (com-based motion axis of each dof)
# forward_reward = self.get_body_comvel("torso")[0] # x axis c
| [
"numpy.random.uniform",
"os.path.dirname",
"numpy.square",
"numpy.zeros",
"numpy.cumsum",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.arange",
"numpy.random.choice"
] | [((2085, 2125), 'numpy.array', 'np.array', (['[250, 260, 261, 270, 280, 285]'], {}), '([250, 260, 261, 270, 280, 285])\n', (2093, 2125), True, 'import numpy as np\n'), ((2154, 2194), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.2, 0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2, 0.2, 0.2, 0.2])\n', (2162, 2194), True, 'import numpy as np\n'), ((4376, 4401), 'numpy.random.uniform', 'np.random.uniform', (['(0.2)', '(1)'], {}), '(0.2, 1)\n', (4393, 4401), True, 'import numpy as np\n'), ((4489, 4519), 'numpy.array', 'np.array', (['[50, 5, height, 0.1]'], {}), '([50, 5, height, 0.1])\n', (4497, 4519), True, 'import numpy as np\n'), ((4662, 4701), 'numpy.random.choice', 'np.random.choice', (['[1, -1]'], {'size': 'n_walls'}), '([1, -1], size=n_walls)\n', (4678, 4701), True, 'import numpy as np\n'), ((4816, 4832), 'numpy.zeros', 'np.zeros', (['(500,)'], {}), '((500,))\n', (4824, 4832), True, 'import numpy as np\n'), ((3295, 3312), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (3304, 3312), True, 'import numpy as np\n'), ((3858, 3875), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (3867, 3875), True, 'import numpy as np\n'), ((4559, 4585), 'numpy.arange', 'np.arange', (['(255)', '(310)', 'width'], {}), '(255, 310, width)\n', (4568, 4585), True, 'import numpy as np\n'), ((4754, 4790), 'numpy.random.uniform', 'np.random.uniform', (['(0.2)', '(0.6)', 'n_walls'], {}), '(0.2, 0.6, n_walls)\n', (4771, 4790), True, 'import numpy as np\n'), ((4903, 4939), 'numpy.cumsum', 'np.cumsum', (['([height_walls[i]] * width)'], {}), '([height_walls[i]] * width)\n', (4912, 4939), True, 'import numpy as np\n'), ((5308, 5345), 'numpy.array', 'np.array', (['[-1, 1, 0.0, 0.0, 0.0, 0.0]'], {}), '([-1, 1, 0.0, 0.0, 0.0, 0.0])\n', (5316, 5345), True, 'import numpy as np\n'), ((5549, 5579), 'numpy.array', 'np.array', (['[50, 5, height, 0.1]'], {}), '([50, 5, height, 0.1])\n', (5557, 5579), True, 'import numpy as np\n'), ((5598, 5614), 'numpy.zeros', 'np.zeros', (['(500,)'], 
{}), '((500,))\n', (5606, 5614), True, 'import numpy as np\n'), ((1162, 1187), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1177, 1187), False, 'import os\n'), ((5062, 5073), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (5068, 5073), True, 'import numpy as np\n'), ((5078, 5089), 'numpy.max', 'np.max', (['row'], {}), '(row)\n', (5084, 5089), True, 'import numpy as np\n'), ((5092, 5103), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (5098, 5103), True, 'import numpy as np\n'), ((5464, 5504), 'numpy.array', 'np.array', (['[255, 270, 285, 300, 315, 330]'], {}), '([255, 270, 285, 300, 315, 330])\n', (5472, 5504), True, 'import numpy as np\n'), ((5690, 5731), 'numpy.cumsum', 'np.cumsum', (['([self.height_walls[i]] * width)'], {}), '([self.height_walls[i]] * width)\n', (5699, 5731), True, 'import numpy as np\n'), ((6099, 6130), 'numpy.array', 'np.array', (['[1, -1, 0, 0.0, 0, 0]'], {}), '([1, -1, 0, 0.0, 0, 0])\n', (6107, 6130), True, 'import numpy as np\n'), ((6336, 6366), 'numpy.array', 'np.array', (['[50, 5, height, 0.1]'], {}), '([50, 5, height, 0.1])\n', (6344, 6366), True, 'import numpy as np\n'), ((6385, 6401), 'numpy.zeros', 'np.zeros', (['(500,)'], {}), '((500,))\n', (6393, 6401), True, 'import numpy as np\n'), ((5854, 5865), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (5860, 5865), True, 'import numpy as np\n'), ((5870, 5881), 'numpy.max', 'np.max', (['row'], {}), '(row)\n', (5876, 5881), True, 'import numpy as np\n'), ((5884, 5895), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (5890, 5895), True, 'import numpy as np\n'), ((6251, 6291), 'numpy.array', 'np.array', (['[255, 270, 285, 300, 315, 330]'], {}), '([255, 270, 285, 300, 315, 330])\n', (6259, 6291), True, 'import numpy as np\n'), ((6477, 6518), 'numpy.cumsum', 'np.cumsum', (['([self.height_walls[i]] * width)'], {}), '([self.height_walls[i]] * width)\n', (6486, 6518), True, 'import numpy as np\n'), ((6888, 6916), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 
1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (6896, 6916), True, 'import numpy as np\n'), ((7121, 7151), 'numpy.array', 'np.array', (['[50, 5, height, 0.1]'], {}), '([50, 5, height, 0.1])\n', (7129, 7151), True, 'import numpy as np\n'), ((7170, 7186), 'numpy.zeros', 'np.zeros', (['(500,)'], {}), '((500,))\n', (7178, 7186), True, 'import numpy as np\n'), ((6641, 6652), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (6647, 6652), True, 'import numpy as np\n'), ((6657, 6668), 'numpy.max', 'np.max', (['row'], {}), '(row)\n', (6663, 6668), True, 'import numpy as np\n'), ((6671, 6682), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (6677, 6682), True, 'import numpy as np\n'), ((7036, 7076), 'numpy.array', 'np.array', (['[255, 270, 285, 300, 315, 330]'], {}), '([255, 270, 285, 300, 315, 330])\n', (7044, 7076), True, 'import numpy as np\n'), ((7262, 7303), 'numpy.cumsum', 'np.cumsum', (['([self.height_walls[i]] * width)'], {}), '([self.height_walls[i]] * width)\n', (7271, 7303), True, 'import numpy as np\n'), ((7672, 7700), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (7680, 7700), True, 'import numpy as np\n'), ((7905, 7935), 'numpy.array', 'np.array', (['[50, 5, height, 0.1]'], {}), '([50, 5, height, 0.1])\n', (7913, 7935), True, 'import numpy as np\n'), ((7954, 7970), 'numpy.zeros', 'np.zeros', (['(500,)'], {}), '((500,))\n', (7962, 7970), True, 'import numpy as np\n'), ((7426, 7437), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (7432, 7437), True, 'import numpy as np\n'), ((7442, 7453), 'numpy.max', 'np.max', (['row'], {}), '(row)\n', (7448, 7453), True, 'import numpy as np\n'), ((7456, 7467), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (7462, 7467), True, 'import numpy as np\n'), ((7820, 7860), 'numpy.array', 'np.array', (['[255, 270, 285, 300, 315, 330]'], {}), '([255, 270, 285, 300, 315, 330])\n', (7828, 7860), True, 'import numpy as np\n'), ((8046, 8087), 'numpy.cumsum', 'np.cumsum', (['([self.height_walls[i]] * width)'], 
{}), '([self.height_walls[i]] * width)\n', (8055, 8087), True, 'import numpy as np\n'), ((8210, 8221), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (8216, 8221), True, 'import numpy as np\n'), ((8226, 8237), 'numpy.max', 'np.max', (['row'], {}), '(row)\n', (8232, 8237), True, 'import numpy as np\n'), ((8240, 8251), 'numpy.min', 'np.min', (['row'], {}), '(row)\n', (8246, 8251), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Train Retinaface_resnet50."""
from __future__ import print_function
import random
import math
import numpy as np
import mindspore.nn as nn
import mindspore.dataset as de
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.train import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.config import cfg_res50
from src.network import RetinaFace, RetinaFaceWithLossCell, TrainingWrapper, resnet50
from src.loss import MultiBoxLoss
from src.dataset import create_dataset
def setup_seed(seed):
    """Seed every RNG source used in training (Python, NumPy, MindSpore dataset).

    Parameters
    ----------
    seed : int
        Seed value applied identically to all three generators.
    """
    # Apply the same seed to each framework's generator, in the same order
    # as before: stdlib random, NumPy, then the MindSpore dataset pipeline.
    for seed_fn in (random.seed, np.random.seed, de.config.set_seed):
        seed_fn(seed)
def adjust_learning_rate(initial_lr, gamma, stepvalues, steps_per_epoch, total_epochs, warmup_epoch=5):
    """Build a per-step learning-rate schedule with linear warmup and staged decay.

    For the first ``warmup_epoch`` epochs the rate ramps linearly from 1e-6
    toward ``initial_lr``. Afterwards it is ``initial_lr`` until the first
    decay epoch, ``initial_lr * gamma`` between the two decay epochs
    (inclusive), and ``initial_lr * gamma**2`` beyond the second.

    Parameters
    ----------
    initial_lr : float
        Peak learning rate reached at the end of warmup.
    gamma : float
        Multiplicative decay factor.
    stepvalues : tuple of int
        (first_decay_epoch, second_decay_epoch).
    steps_per_epoch : int
        Number of optimizer steps per epoch.
    total_epochs : int
        Total number of training epochs.
    warmup_epoch : int, optional
        Number of warmup epochs (default 5).

    Returns
    -------
    list of float
        One learning-rate value per training step
        (``total_epochs * steps_per_epoch`` entries).
    """
    warmup_floor = 1e-6
    first_decay, second_decay = stepvalues[0], stepvalues[1]
    schedule = []
    for epoch in range(1, total_epochs + 1):
        for step in range(steps_per_epoch):
            if epoch <= warmup_epoch:
                # Linear ramp from warmup_floor to initial_lr across all warmup steps.
                progress = (epoch - 1) * steps_per_epoch + step
                rate = warmup_floor + (initial_lr - warmup_floor) * progress / \
                    (steps_per_epoch * warmup_epoch)
            elif epoch > second_decay:
                rate = initial_lr * (gamma ** 2)
            elif epoch >= first_decay:
                rate = initial_lr * (gamma ** 1)
            else:
                rate = initial_lr
            schedule.append(rate)
    return schedule
def train(cfg):
    """Train RetinaFace with a ResNet-50 backbone on multiple GPUs.

    Runs graph-mode data-parallel training over NCCL; each rank saves its
    checkpoints under ``cfg['ckpt_path'] + 'ckpt_<rank>/'``.

    Parameters
    ----------
    cfg : dict
        Training configuration (see ``src.config.cfg_res50``): dataset path,
        batch size, LR schedule, checkpointing, optimizer choice, etc.
        Must have ``cfg['ngpu'] > 1``.

    Raises
    ------
    ValueError
        If ``cfg['ngpu'] <= 1`` (single-device training is not supported
        here) or ``cfg['optim']`` is neither 'momentum' nor 'sgd'.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU', save_graphs=False)
    if cfg['ngpu'] > 1:
        # Data-parallel setup over NCCL with gradient averaging across ranks.
        init("nccl")
        context.set_auto_parallel_context(device_num=get_group_size(), parallel_mode=ParallelMode.DATA_PARALLEL,
                                          gradients_mean=True)
        # Give each rank its own checkpoint subdirectory to avoid clobbering.
        cfg['ckpt_path'] = cfg['ckpt_path'] + "ckpt_" + str(get_rank()) + "/"
    else:
        raise ValueError('cfg_num_gpu <= 1')
    batch_size = cfg['batch_size']
    max_epoch = cfg['epoch']
    momentum = cfg['momentum']
    weight_decay = cfg['weight_decay']
    initial_lr = cfg['initial_lr']
    gamma = cfg['gamma']
    training_dataset = cfg['training_dataset']
    num_classes = 2          # face vs background
    negative_ratio = 7       # hard-negative:positive ratio for MultiBoxLoss
    stepvalues = (cfg['decay1'], cfg['decay2'])
    ds_train = create_dataset(training_dataset, cfg, batch_size, multiprocessing=True, num_worker=cfg['num_workers'])
    print('dataset size is : \n', ds_train.get_dataset_size())
    steps_per_epoch = math.ceil(ds_train.get_dataset_size())
    multibox_loss = MultiBoxLoss(num_classes, cfg['num_anchor'], negative_ratio, cfg['batch_size'])
    # 1001 = ImageNet class count of the pretrained backbone checkpoint.
    backbone = resnet50(1001)
    backbone.set_train(True)
    # Load ImageNet-pretrained backbone weights only when starting fresh.
    if cfg['pretrain'] and cfg['resume_net'] is None:
        pretrained_res50 = cfg['pretrain_path']
        param_dict_res50 = load_checkpoint(pretrained_res50)
        load_param_into_net(backbone, param_dict_res50)
        print('Load resnet50 from [{}] done.'.format(pretrained_res50))
    net = RetinaFace(phase='train', backbone=backbone)
    net.set_train(True)
    # Resuming a previous run overrides the whole detector's weights.
    if cfg['resume_net'] is not None:
        pretrain_model_path = cfg['resume_net']
        param_dict_retinaface = load_checkpoint(pretrain_model_path)
        load_param_into_net(net, param_dict_retinaface)
        print('Resume Model from [{}] Done.'.format(cfg['resume_net']))
    net = RetinaFaceWithLossCell(net, multibox_loss, cfg)
    # Per-step LR schedule: linear warmup then staged gamma decay.
    lr = adjust_learning_rate(initial_lr, gamma, stepvalues, steps_per_epoch, max_epoch,
                              warmup_epoch=cfg['warmup_epoch'])
    if cfg['optim'] == 'momentum':
        opt = nn.Momentum(net.trainable_params(), lr, momentum)
    elif cfg['optim'] == 'sgd':
        opt = nn.SGD(params=net.trainable_params(), learning_rate=lr, momentum=momentum,
                     weight_decay=weight_decay, loss_scale=1)
    else:
        raise ValueError('optim is not define.')
    net = TrainingWrapper(net, opt)
    model = Model(net)
    config_ck = CheckpointConfig(save_checkpoint_steps=cfg['save_checkpoint_steps'],
                                  keep_checkpoint_max=cfg['keep_checkpoint_max'])
    ckpoint_cb = ModelCheckpoint(prefix="RetinaFace", directory=cfg['ckpt_path'], config=config_ck)
    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
    callback_list = [LossMonitor(), time_cb, ckpoint_cb]
    print("============== Starting Training ==============")
    # dataset_sink_mode=False: feed batches through Python rather than device queue.
    model.train(max_epoch, ds_train, callbacks=callback_list,
                dataset_sink_mode=False)
if __name__ == '__main__':
    # Seed all RNGs first so dataset shuffling and init are reproducible.
    setup_seed(1)
    config = cfg_res50
    print('train config:\n', config)
    train(cfg=config)
| [
"numpy.random.seed",
"mindspore.train.callback.ModelCheckpoint",
"mindspore.train.serialization.load_checkpoint",
"mindspore.communication.management.get_group_size",
"src.dataset.create_dataset",
"mindspore.train.serialization.load_param_into_net",
"src.loss.MultiBoxLoss",
"mindspore.context.set_cont... | [((1455, 1472), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1466, 1472), False, 'import random\n'), ((1478, 1498), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1492, 1498), True, 'import numpy as np\n'), ((1504, 1528), 'mindspore.dataset.config.set_seed', 'de.config.set_seed', (['seed'], {}), '(seed)\n', (1522, 1528), True, 'import mindspore.dataset as de\n'), ((2317, 2405), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""GPU"""', 'save_graphs': '(False)'}), "(mode=context.GRAPH_MODE, device_target='GPU',\n save_graphs=False)\n", (2336, 2405), False, 'from mindspore import context\n'), ((3127, 3233), 'src.dataset.create_dataset', 'create_dataset', (['training_dataset', 'cfg', 'batch_size'], {'multiprocessing': '(True)', 'num_worker': "cfg['num_workers']"}), "(training_dataset, cfg, batch_size, multiprocessing=True,\n num_worker=cfg['num_workers'])\n", (3141, 3233), False, 'from src.dataset import create_dataset\n'), ((3381, 3460), 'src.loss.MultiBoxLoss', 'MultiBoxLoss', (['num_classes', "cfg['num_anchor']", 'negative_ratio', "cfg['batch_size']"], {}), "(num_classes, cfg['num_anchor'], negative_ratio, cfg['batch_size'])\n", (3393, 3460), False, 'from src.loss import MultiBoxLoss\n'), ((3477, 3491), 'src.network.resnet50', 'resnet50', (['(1001)'], {}), '(1001)\n', (3485, 3491), False, 'from src.network import RetinaFace, RetinaFaceWithLossCell, TrainingWrapper, resnet50\n'), ((3833, 3877), 'src.network.RetinaFace', 'RetinaFace', ([], {'phase': '"""train"""', 'backbone': 'backbone'}), "(phase='train', backbone=backbone)\n", (3843, 3877), False, 'from src.network import RetinaFace, RetinaFaceWithLossCell, TrainingWrapper, resnet50\n'), ((4206, 4253), 'src.network.RetinaFaceWithLossCell', 'RetinaFaceWithLossCell', (['net', 'multibox_loss', 'cfg'], {}), '(net, multibox_loss, cfg)\n', (4228, 4253), False, 'from src.network import 
RetinaFace, RetinaFaceWithLossCell, TrainingWrapper, resnet50\n'), ((4774, 4799), 'src.network.TrainingWrapper', 'TrainingWrapper', (['net', 'opt'], {}), '(net, opt)\n', (4789, 4799), False, 'from src.network import RetinaFace, RetinaFaceWithLossCell, TrainingWrapper, resnet50\n'), ((4815, 4825), 'mindspore.train.Model', 'Model', (['net'], {}), '(net)\n', (4820, 4825), False, 'from mindspore.train import Model\n'), ((4845, 4965), 'mindspore.train.callback.CheckpointConfig', 'CheckpointConfig', ([], {'save_checkpoint_steps': "cfg['save_checkpoint_steps']", 'keep_checkpoint_max': "cfg['keep_checkpoint_max']"}), "(save_checkpoint_steps=cfg['save_checkpoint_steps'],\n keep_checkpoint_max=cfg['keep_checkpoint_max'])\n", (4861, 4965), False, 'from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor\n'), ((5014, 5101), 'mindspore.train.callback.ModelCheckpoint', 'ModelCheckpoint', ([], {'prefix': '"""RetinaFace"""', 'directory': "cfg['ckpt_path']", 'config': 'config_ck'}), "(prefix='RetinaFace', directory=cfg['ckpt_path'], config=\n config_ck)\n", (5029, 5101), False, 'from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor\n'), ((2436, 2448), 'mindspore.communication.management.init', 'init', (['"""nccl"""'], {}), "('nccl')\n", (2440, 2448), False, 'from mindspore.communication.management import init, get_rank, get_group_size\n'), ((3656, 3689), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['pretrained_res50'], {}), '(pretrained_res50)\n', (3671, 3689), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((3699, 3746), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['backbone', 'param_dict_res50'], {}), '(backbone, param_dict_res50)\n', (3718, 3746), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((4026, 4062), 
'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['pretrain_model_path'], {}), '(pretrain_model_path)\n', (4041, 4062), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((4072, 4119), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['net', 'param_dict_retinaface'], {}), '(net, param_dict_retinaface)\n', (4091, 4119), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((5187, 5200), 'mindspore.train.callback.LossMonitor', 'LossMonitor', ([], {}), '()\n', (5198, 5200), False, 'from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor\n'), ((2503, 2519), 'mindspore.communication.management.get_group_size', 'get_group_size', ([], {}), '()\n', (2517, 2519), False, 'from mindspore.communication.management import init, get_rank, get_group_size\n'), ((2688, 2698), 'mindspore.communication.management.get_rank', 'get_rank', ([], {}), '()\n', (2696, 2698), False, 'from mindspore.communication.management import init, get_rank, get_group_size\n')] |
from __future__ import print_function
"""
Test systems for perses automated design.
Examples
--------
Alanine dipeptide in various environments (vacuum, implicit, explicit):
>>> from perses.tests.testsystems import AlanineDipeptideTestSystem
>>> testsystem = AlanineDipeptideTestSystem()
>>> system_generator = testsystem.system_generator['explicit']
>>> sams_sampler = testsystem.sams_sampler['explicit']
TODO
----
* Have all PersesTestSystem subclasses automatically subjected to a battery of tests.
* Add short descriptions to each class through a class property.
"""
# TODO: Use inexpensive charging methods for small molecules in tests
__author__ = '<NAME>'
################################################################################
# IMPORTS
################################################################################
from simtk import openmm, unit
from simtk.openmm import app
import os
import os.path
import numpy as np
from functools import partial
from openeye import oechem
from openmmtools import states
from openmmtools.mcmc import MCMCSampler, LangevinSplittingDynamicsMove
from perses.utils.smallmolecules import sanitizeSMILES, canonicalize_SMILES
from perses.storage import NetCDFStorage, NetCDFStorageView
from perses.rjmc.topology_proposal import OESMILES_OPTIONS
from perses.rjmc.geometry import FFAllAngleGeometryEngine
import tempfile
import copy
from perses.dispersed.utils import minimize
from openmmtools.states import ThermodynamicState, SamplerState
from openmmforcefields.generators import SystemGenerator
from openforcefield.topology import Molecule
from perses.utils.openeye import smiles_to_oemol
#global variables
forcefield_files = ['amber14/protein.ff14SB.xml', 'amber14/tip3p.xml']
small_molecule_forcefield = 'gaff-2.11'
# TODO: Use dummy system generator to work around SystemGenerator issues
#from perses.rjmc.topology_proposal import DummySystemGenerator
#SystemGenerator = DummySystemGenerator
################################################################################
# TEST SYSTEMS
################################################################################
# True when executing under GitHub Actions CI (the runner exports GITHUB_ACTIONS='true').
running_on_github_actions = os.environ.get('GITHUB_ACTIONS', None) == 'true'
class PersesTestSystem(object):
    """
    Create a consistent set of samplers useful for testing.
    Properties
    ----------
    environments : list of str
        Available environments
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of ThermodynamicState
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler
    """
    def __init__(self, storage_filename=None, mode='w', ncmc_nsteps=5, mcmc_nsteps=100):
        """Create a testsystem.
        Parameters
        ----------
        storage_filename : str, optional, default=None
            If specified, bind to this storage file.
        mode : str, optional, default='w'
            File open mode, 'w' for (over)write, 'a' for append.
        ncmc_nsteps : int, optional, default=5
            Number of NCMC switching steps (stored for subclasses to use).
        mcmc_nsteps : int, optional, default=100
            Number of MCMC steps (stored for subclasses to use).
        """
        self.storage = None
        if storage_filename is not None:
            # Bugfix: the `mode` argument was previously ignored (hard-coded
            # 'w'), so requesting append mode ('a') silently truncated the file.
            self.storage = NetCDFStorage(storage_filename, mode=mode)
        self.environments = list()
        self.topologies = dict()
        self.positions = dict()
        self.system_generators = dict()
        self.proposal_engines = dict()
        self.thermodynamic_states = dict()
        self.mcmc_samplers = dict()
        self.exen_samplers = dict()
        self.sams_samplers = dict()
        self.designer = None
        # Geometry engine shared by subclasses for proposing new atom positions.
        self.geometry_engine = FFAllAngleGeometryEngine(metadata={})
        # Langevin splitting/timestep used to build the default MCMC move.
        self._splitting = "V R O R V"
        self._timestep = 1.0*unit.femtosecond
        self._ncmc_nsteps = ncmc_nsteps
        self._mcmc_nsteps = mcmc_nsteps
        self._move = LangevinSplittingDynamicsMove(timestep=self._timestep, splitting=self._splitting, n_restart_attempts=10)
        self._move.n_restart_attempts = 10
class AlanineDipeptideTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for testing PointMutationEngine on alanine dipeptide in various solvents.
    This is useful for testing a variety of components.
    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of ThermodynamicState
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler
    Examples
    --------
    >>> from perses.tests.testsystems import AlanineDipeptideTestSystem
    >>> testsystem = AlanineDipeptideTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum'].create_system(testsystem.topologies['vacuum'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['vacuum']
    """
    def __init__(self, constraints=app.HBonds, **kwargs):
        """Create the alanine dipeptide test system.
        Parameters
        ----------
        constraints : optional, default=app.HBonds
            Constraint scheme passed to the SystemGenerators.
        """
        super(AlanineDipeptideTestSystem, self).__init__(**kwargs)
        # 'implicit' is disabled: implicit solvent is not supported by this SystemGenerator.
        environments = ['explicit', 'vacuum']
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Use sterics in proposals
        self.geometry_engine.use_sterics = True
        # Write atom-by-atom geometry output.
        self.geometry_engine.write_proposal_pdb = True
        self.geometry_engine.pdb_filename_prefix = 'geometry'
        # Create a system generator for our desired forcefields.
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        system_generators['explicit'] = SystemGenerator(forcefields = forcefield_files, barostat = barostat,
            forcefield_kwargs = {'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : constraints }, periodic_forcefield_kwargs={'nonbondedMethod' : app.CutoffPeriodic})
        system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
            forcefield_kwargs = {'implicitSolvent' : None, 'constraints' : constraints }, nonperiodic_forcefield_kwargs={'nonbondedMethod' : app.NoCutoff})
        # Create peptide in solvent.
        from openmmtools.testsystems import AlanineDipeptideExplicit, AlanineDipeptideImplicit, AlanineDipeptideVacuum
        from pkg_resources import resource_filename
        pdb_filename = resource_filename('openmmtools', 'data/alanine-dipeptide-gbsa/alanine-dipeptide.pdb')
        from simtk.openmm.app import PDBFile
        topologies = dict()
        positions = dict()
        pdbfile = PDBFile(pdb_filename)
        topologies['vacuum'] = pdbfile.getTopology()
        positions['vacuum'] = pdbfile.getPositions(asNumpy=True)
        # Solvate the vacuum structure to build the explicit-solvent system.
        modeller = app.Modeller(topologies['vacuum'], positions['vacuum'])
        modeller.addSolvent(system_generators['explicit'].forcefield, model='tip3p', padding=9.0*unit.angstrom)
        topologies['explicit'] = modeller.getTopology()
        positions['explicit'] = modeller.getPositions()
        # Set up the proposal engines.
        from perses.rjmc.topology_proposal import PointMutationEngine
        proposal_metadata = {
            'ffxmls' : ['amber99sbildn.xml'], # take sidechain definitions from this ffxml file
            'always_change' : True # don't propose self-transitions
        }
        proposal_engines = dict()
        chain_id = ' '
        allowed_mutations = [('2','VAL'),('2','LEU'),('2','ILE')]
        for environment in environments:
            proposal_engines[environment] = PointMutationEngine(topologies[environment],system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations)
        # Generate systems
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].create_system(topologies[environment])
        # Define thermodynamic state of interest.
        thermodynamic_states = dict()
        thermodynamic_states['explicit'] = states.ThermodynamicState(system=systems['explicit'], temperature=temperature, pressure=pressure)
        thermodynamic_states['vacuum'] = states.ThermodynamicState(system=systems['vacuum'], temperature=temperature)
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            if environment == 'explicit':
                # Periodic systems need box vectors in the sampler state.
                sampler_state = states.SamplerState(positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing
            mcmc_samplers[environment].timestep = 1.0 * unit.femtoseconds
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps': 0}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler.
        from perses.samplers.samplers import MultiTargetDesign
        # Bugfix: the original dict literal repeated sams_samplers['vacuum'] as a
        # key with weights +1.0 and -1.0; Python silently kept only the -1.0 entry.
        # With the implicit environment disabled, use a single +1.0 vacuum target,
        # matching AlanineDipeptideValenceTestSystem.
        target_samplers = { sams_samplers['vacuum'] : 1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.systems = systems
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
class AlanineDipeptideValenceTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for testing PointMutationEngine on alanine dipeptide in various solvents.
    Only valence terms are included---no sterics.
    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of ThermodynamicState
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler
    Examples
    --------
    >>> from perses.tests.testsystems import AlanineDipeptideValenceTestSystem
    >>> testsystem = AlanineDipeptideValenceTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum'].create_system(testsystem.topologies['vacuum'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['vacuum']
    """
    def __init__(self, constraints=app.HBonds, **kwargs):
        """Create the valence-only alanine dipeptide test system.
        Parameters
        ----------
        constraints : optional, default=app.HBonds
            Constraint scheme passed to the SystemGenerator.
        """
        super(AlanineDipeptideValenceTestSystem, self).__init__(**kwargs)
        environments = ['vacuum']
        # Write atom-by-atom geometry output.
        self.geometry_engine.write_proposal_pdb = False
        # Create a system generator for our desired forcefields.
        system_generators = dict()
        from pkg_resources import resource_filename
        # NOTE(review): this valence-only ffxml is resolved but not passed to the
        # SystemGenerator below -- confirm whether it should replace forcefield_files.
        valence_xml_filename = resource_filename('perses', 'data/amber99sbildn-valence-only.xml')
        # Bugfix: `constraints` was previously referenced here without being defined
        # anywhere in this scope (NameError at construction time); it is now an
        # __init__ keyword argument, matching AlanineDipeptideTestSystem.
        system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
            forcefield_kwargs = { 'implicitSolvent' : None, 'constraints' : constraints }, nonperiodic_forcefield_kwargs={'nonbondedMethod' : app.NoCutoff})
        # Create peptide in solvent.
        from openmmtools.testsystems import AlanineDipeptideExplicit, AlanineDipeptideImplicit, AlanineDipeptideVacuum
        pdb_filename = resource_filename('openmmtools', 'data/alanine-dipeptide-gbsa/alanine-dipeptide.pdb')
        from simtk.openmm.app import PDBFile
        topologies = dict()
        positions = dict()
        pdbfile = PDBFile(pdb_filename)
        topologies['vacuum'] = pdbfile.getTopology()
        positions['vacuum'] = pdbfile.getPositions(asNumpy=True)
        # Set up the proposal engines. Sidechain definitions come from amber99sbildn.xml.
        from perses.rjmc.topology_proposal import PointMutationEngine
        proposal_engines = dict()
        chain_id = ' '
        allowed_mutations = [('2','PHE')]
        proposal_metadata = {"always_change":True}
        for environment in environments:
            proposal_engines[environment] = PointMutationEngine(topologies[environment],system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations, always_change=True)
        # Generate systems
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].create_system(topologies[environment])
        # Define thermodynamic state of interest.
        thermodynamic_states = dict()
        temperature = 300*unit.kelvin
        thermodynamic_states['vacuum'] = states.ThermodynamicState(system=systems['vacuum'], temperature=temperature)
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':50}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler.
        from perses.samplers.samplers import MultiTargetDesign
        target_samplers = { sams_samplers['vacuum'] : 1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.systems = systems
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
def load_via_pdbfixer(filename=None, pdbid=None):
    """
    Load a PDB file via PDBFixer, keeping all heterogens and building in protons for any crystallographic waters.

    Parameters
    ----------
    filename : str, optional
        Path to a local PDB file to load.
    pdbid : str, optional
        PDB accession code to fetch instead of a local file.

    Returns
    -------
    list
        [topology, positions] of the repaired structure.
    """
    from pdbfixer import PDBFixer
    structure = PDBFixer(filename=filename, pdbid=pdbid)
    # Run the standard repair pipeline in order: locate/repair residues,
    # standardize nonstandard residues, then rebuild missing atoms.
    repair_steps = (
        'findMissingResidues',
        'findNonstandardResidues',
        'replaceNonstandardResidues',
        'findMissingAtoms',
        'addMissingAtoms',
    )
    for step_name in repair_steps:
        getattr(structure, step_name)()
    # Add hydrogens appropriate for pH 7.0.
    structure.addMissingHydrogens(7.0)
    return [structure.topology, structure.positions]
class T4LysozymeMutationTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for testing PointMutationEngine on T4 lysozyme in various solvents.
    Wild Type is T4 L99A
    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit', 'implicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    themodynamic_states : dict of thermodynamic_states
        Themodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for implicit solvent hydration free energies
    Examples
    --------
    >>> from perses.tests.testsystems import T4LysozymeTestSystem
    >>> testsystem = T4LysozymeTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum'].create_system(testsystem.topologies['vacuum'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['implicit']
    """
    def __init__(self, **kwargs):
        """
        Build point-mutation samplers for T4 lysozyme L99A, for both the apo
        'receptor' and the benzene-bound 'complex', in explicit solvent and vacuum.
        Accepts the same keyword arguments as PersesTestSystem.
        """
        super(T4LysozymeMutationTestSystem, self).__init__(**kwargs)
        # environments = ['explicit-complex', 'explicit-receptor', 'implicit-complex', 'implicit-receptor', 'vacuum-complex', 'vacuum-receptor']
        environments = ['explicit-complex', 'explicit-receptor', 'vacuum-complex', 'vacuum-receptor']
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Create a system generator for our desired forcefields.
        from pkg_resources import resource_filename
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        system_generators['explicit'] = SystemGenerator(forcefields = forcefield_files, barostat = barostat,
            forcefield_kwargs = {'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None}, periodic_forcefield_kwargs={'nonbondedMethod' : app.CutoffPeriodic})
        # NOTE implicit solvent not supported by this SystemGenerator
        # system_generators['implicit'] = SystemGenerator(forcefields = forcefield_files,
        #     forcefield_kwargs = { 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2})
        system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
            forcefield_kwargs = {'implicitSolvent' : None}, nonperiodic_forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff})
        # Per-environment generators are aliases of the per-solvent generators.
        system_generators['explicit-complex'] = system_generators['explicit']
        system_generators['explicit-receptor'] = system_generators['explicit']
        #system_generators['implicit-complex'] = system_generators['implicit']
        #system_generators['implicit-receptor'] = system_generators['implicit']
        system_generators['vacuum-complex'] = system_generators['vacuum']
        system_generators['vacuum-receptor'] = system_generators['vacuum']
        # Create receptor in solvent.
        from pkg_resources import resource_filename
        pdb_filename = resource_filename('perses', 'data/181L.pdb')
        import pdbfixer
        from simtk.openmm.app import PDBFile, Modeller
        topologies = dict()
        positions = dict()
        [fixer_topology, fixer_positions] = load_via_pdbfixer(pdb_filename)
        modeller = Modeller(fixer_topology, fixer_positions)
        # Strip crystallographic additives (HED, chloride, water) before splitting
        # the structure into receptor and ligand models.
        residues_to_delete = [ residue for residue in modeller.getTopology().residues() if residue.name in ['HED','CL','HOH'] ]
        modeller.delete(residues_to_delete)
        receptor_modeller = copy.deepcopy(modeller)
        ligand_modeller = copy.deepcopy(modeller)
        # Exhausting this loop leaves `chain` bound to the LAST chain; deleting it
        # keeps everything except that chain in the receptor model.
        for chain in receptor_modeller.getTopology().chains():
            pass
        chains_to_delete = [chain]
        receptor_modeller.delete(chains_to_delete)
        topologies['receptor'] = receptor_modeller.getTopology()
        positions['receptor'] = receptor_modeller.getPositions()
        # Breaking immediately leaves `chain` bound to the FIRST chain; deleting it
        # keeps everything except that chain in the ligand model.
        for chain in ligand_modeller.getTopology().chains():
            break
        chains_to_delete = [chain]
        ligand_modeller.delete(chains_to_delete)
        # Locate the benzene (BNZ) residue among the remaining residues.
        for residue in ligand_modeller.getTopology().residues():
            if residue.name == 'BNZ':
                break
        from perses.utils.openeye import extractPositionsFromOEMol, giveOpenmmPositionsToOEMol
        import perses.rjmc.geometry as geometry
        from perses.rjmc.topology_proposal import TopologyProposal
        # create OEMol version of benzene
        mol = smiles_to_oemol('c1ccccc1')
        new_residue = forcefield_generators.generateTopologyFromOEMol(mol)
        for res in new_residue.residues():
            res.name = 'BNZ'
        bnz_new_sys = system_generators['vacuum'].create_system(new_residue)
        kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
        temperature = 300.0 * unit.kelvin
        kT = kB * temperature
        beta = 1.0/kT
        # Self-proposal: old and new system are both the vacuum benzene system, with
        # atoms 0-5 mapped 1:1, so the geometry engine only builds the unmapped atoms.
        # NOTE(review): presumably the unmapped atoms are benzene's hydrogens,
        # missing from the PDB-derived ligand topology -- confirm.
        adding_hydrogen_proposal = TopologyProposal(new_topology=new_residue, new_system =bnz_new_sys, old_topology=ligand_modeller.topology, old_system =bnz_new_sys, logp_proposal = 0.0, new_to_old_atom_map = {0:0,1:1,2:2,3:3,4:4,5:5}, old_chemical_state_key='',new_chemical_state_key='')
        # Local geometry engine used only for this one-off proposal; distinct from
        # self.geometry_engine used by the samplers below.
        geometry_engine = geometry.FFAllAngleGeometryEngine()
        new_positions, logp = geometry_engine.propose(adding_hydrogen_proposal, ligand_modeller.positions, beta)
        # Assemble the complex: receptor plus the freshly built benzene residue.
        modeller = copy.deepcopy(receptor_modeller)
        modeller.add(new_residue, new_positions)
        topologies['complex'] = modeller.getTopology()
        positions['complex'] = modeller.getPositions()
        # Create all environments.
        # NOTE(review): 'implicit-*' entries are populated here but absent from
        # `environments`, so no systems or samplers are built for them.
        for environment in ['implicit', 'vacuum']:
            for component in ['receptor', 'complex']:
                topologies[environment + '-' + component] = topologies[component]
                positions[environment + '-' + component] = positions[component]
        # Set up in explicit solvent.
        for component in ['receptor', 'complex']:
            modeller = app.Modeller(topologies[component], positions[component])
            modeller.addSolvent(system_generators['explicit'].forcefield, model='tip3p', padding=9.0*unit.angstrom)
            atoms = list(modeller.topology.atoms())
            print('Solvated %s has %s atoms' % (component, len(atoms)))
            topologies['explicit' + '-' + component] = modeller.getTopology()
            positions['explicit' + '-' + component] = modeller.getPositions()
        # Set up the proposal engines.
        # Allowed point mutations as (resid, three-letter residue name) pairs in chain A.
        # NOTE(review): ('99','GLY') and ('108','VAL') appear twice in this list.
        allowed_mutations = [
            ('99','GLY'),
            ('102','GLN'),
            ('102','HIS'),
            ('102','GLU'),
            ('102','LEU'),
            ('153','ALA'),
            ('108','VAL'),
            ('99','GLY'),
            ('108','VAL')]
        from perses.rjmc.topology_proposal import PointMutationEngine
        proposal_metadata = { 'ffxmls' : ['amber99sbildn.xml'] }
        proposal_engines = dict()
        chain_id = 'A'
        for environment in environments:
            proposal_engines[environment] = PointMutationEngine(topologies[environment], system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations)
        # Generate systems
        systems = dict()
        for environment in environments:
            print(environment)
            systems[environment] = system_generators[environment].create_system(topologies[environment])
        # Define thermodynamic state of interest.
        # Explicit solvent runs at constant pressure; vacuum at constant volume.
        thermodynamic_states = dict()
        for component in ['receptor', 'complex']:
            thermodynamic_states['explicit' + '-' + component] = states.ThermodynamicState(system=systems['explicit' + '-' + component], temperature=temperature, pressure=pressure)
            #thermodynamic_states['implicit' + '-' + component] = ThermodynamicState(system=systems['implicit' + '-' + component], temperature=temperature)
            thermodynamic_states['vacuum' + '-' + component] = states.ThermodynamicState(system=systems['vacuum' + '-' + component], temperature=temperature)
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            if environment[0:8] == 'explicit':
                # Periodic systems carry box vectors in the sampler state.
                sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler.
        from perses.samplers.samplers import MultiTargetDesign
        # Weight +1 on the complex and -1 on the receptor: the design target is
        # the binding free energy difference in explicit solvent.
        target_samplers = { sams_samplers['explicit-complex'] : 1.0, sams_samplers['explicit-receptor'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.systems = systems
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
class MybTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for testing PointMutationEngine on Myb:peptide interaction in various solvents.
    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    themodynamic_states : dict of thermodynamic_states
        Themodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for implicit solvent hydration free energies
    Examples
    --------
    >>> from perses.tests.testsystems import MybTestSystem
    >>> testsystem = MybTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum-peptide'].create_system(testsystem.topologies['vacuum-peptide'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['vacuum-peptide']
    """
    def __init__(self, **kwargs):
        """
        Build point-mutation samplers for the Myb:peptide system, mutating chain B
        residues in both the isolated 'peptide' and the 'complex', in explicit
        solvent and vacuum. Accepts the same keyword arguments as PersesTestSystem.
        """
        super(MybTestSystem, self).__init__(**kwargs)
        # FIX: 'implicit-*' environments removed. The implicit SystemGenerator below
        # is disabled (commented out), so with implicit environments in this list the
        # proposal-engine loop raised KeyError on system_generators['implicit-...'].
        # This mirrors the same fix already applied in T4LysozymeMutationTestSystem.
        environments = ['explicit-complex', 'explicit-peptide', 'vacuum-complex', 'vacuum-peptide']
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Use sterics in proposals
        self.geometry_engine.use_sterics = True
        # Write atom-by-atom geometry output.
        self.geometry_engine.write_proposal_pdb = True
        self.geometry_engine.pdb_filename_prefix = 'geometry'
        # Create a system generator for our desired forcefields.
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        system_generators['explicit'] = SystemGenerator(forcefields = forcefield_files, barostat = barostat,
            forcefield_kwargs = {'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None}, periodic_forcefield_kwargs={'nonbondedMethod' : app.CutoffPeriodic})
        # NOTE implicit solvent not supported by this SystemGenerator
        # system_generators['implicit'] = SystemGenerator(forcefields = forcefield_files,
        #     forcefield_kwargs = { 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2})
        system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
            forcefield_kwargs = {'implicitSolvent' : None}, nonperiodic_forcefield_kwargs={'nonbondedMethod' : app.NoCutoff})
        # Per-environment generators are aliases of the per-solvent generators.
        system_generators['explicit-complex'] = system_generators['explicit']
        system_generators['explicit-peptide'] = system_generators['explicit']
        # system_generators['implicit-complex'] = system_generators['implicit']
        # system_generators['implicit-peptide'] = system_generators['implicit']
        system_generators['vacuum-complex'] = system_generators['vacuum']
        system_generators['vacuum-peptide'] = system_generators['vacuum']
        # Create peptide in solvent.
        from pkg_resources import resource_filename
        pdb_filename = resource_filename('perses', 'data/1sb0.pdb')
        import pdbfixer
        from simtk.openmm.app import PDBFile, Modeller
        topologies = dict()
        positions = dict()
        #pdbfile = PDBFile(pdb_filename)
        [fixer_topology, fixer_positions] = load_via_pdbfixer(pdb_filename)
        topologies['complex'] = fixer_topology
        positions['complex'] = fixer_positions
        # Deleting chain A (Myb) from the complex leaves the bare peptide.
        modeller = Modeller(topologies['complex'], positions['complex'])
        chains_to_delete = [ chain for chain in modeller.getTopology().chains() if chain.id == 'A' ] # remove chain A
        modeller.delete(chains_to_delete)
        topologies['peptide'] = modeller.getTopology()
        positions['peptide'] = modeller.getPositions()
        # Create all environments.
        for environment in ['vacuum']:
            for component in ['peptide', 'complex']:
                topologies[environment + '-' + component] = topologies[component]
                positions[environment + '-' + component] = positions[component]
        # Set up in explicit solvent.
        for component in ['peptide', 'complex']:
            modeller = app.Modeller(topologies[component], positions[component])
            modeller.addSolvent(system_generators['explicit'].forcefield, model='tip3p', padding=9.0*unit.angstrom)
            topologies['explicit' + '-' + component] = modeller.getTopology()
            positions['explicit' + '-' + component] = modeller.getPositions()
        # Set up the proposal engines.
        # Allowed mutations: every (resid, resname) combination from these two lists.
        allowed_mutations = list()
        for resid in ['91', '99', '103', '105']:
            for resname in ['ALA', 'LEU', 'VAL', 'PHE', 'CYS', 'THR', 'TRP', 'TYR', 'GLU', 'ASP', 'LYS', 'ARG', 'ASN']:
                allowed_mutations.append((resid, resname))
        from perses.rjmc.topology_proposal import PointMutationEngine
        proposal_metadata = {
            'ffxmls' : ['amber99sbildn.xml'], # take sidechain definitions from this ffxml file
            'always_change' : True # don't propose self-transitions
            }
        proposal_engines = dict()
        chain_id = 'B'
        for environment in environments:
            proposal_engines[environment] = PointMutationEngine(topologies[environment], system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations)
        # Generate systems
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].create_system(topologies[environment])
        # Define thermodynamic state of interest.
        # Explicit solvent runs at constant pressure; vacuum at constant volume.
        thermodynamic_states = dict()
        for component in ['peptide', 'complex']:
            thermodynamic_states['explicit' + '-' + component] = states.ThermodynamicState(system=systems['explicit' + '-' + component], temperature=temperature, pressure=pressure)
            #thermodynamic_states['implicit' + '-' + component] = states.ThermodynamicState(system=systems['implicit' + '-' + component], temperature=temperature)
            thermodynamic_states['vacuum' + '-' + component] = states.ThermodynamicState(system=systems['vacuum' + '-' + component], temperature=temperature)
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            if environment[0:8] == 'explicit':
                # Periodic systems carry box vectors in the sampler state.
                sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing (a stray `00` expression statement
            # preceding this comment was removed)
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':0}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler.
        from perses.samplers.samplers import MultiTargetDesign
        # Weight +1 on the complex and -1 on the peptide: the design target is
        # the binding free energy difference in vacuum.
        target_samplers = { sams_samplers['vacuum-complex'] : 1.0, sams_samplers['vacuum-peptide'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.systems = systems
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
class AblImatinibResistanceTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for testing PointMutationEngine on Abl:imatinib.
    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    themodynamic_states : dict of thermodynamic_states
        Themodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for implicit solvent hydration free energies
    Examples
    --------
    >>> from perses.tests.testsystems import AblImatinibResistanceTestSystem
    >>> testsystem = AblImatinibResistanceTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum-inhibitor'].create_system(testsystem.topologies['vacuum-inhibitor'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['vacuum-inhibitor']
    """
    def __init__(self, **kwargs):
        """
        Build resistance-mutation samplers for Abl kinase, mutating chain A
        residues near the ATP binding site in both the apo 'receptor' and the
        imatinib-bound 'complex', in explicit solvent and vacuum.
        Accepts the same keyword arguments as PersesTestSystem.
        """
        super(AblImatinibResistanceTestSystem, self).__init__(**kwargs)
        solvents = ['vacuum', 'explicit'] # TODO: Add 'implicit' once GBSA parameterization for small molecules is working
        # solvents = ['vacuum'] # DEBUG
        components = ['receptor', 'complex'] # TODO: Add 'ATP:kinase' complex to enable resistance design
        padding = 9.0*unit.angstrom
        explicit_solvent_model = 'tip3p'
        setup_path = 'data/abl-imatinib'
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Construct list of all environments
        environments = list()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                environments.append(environment)
        # Create a system generator for desired forcefields
        from pkg_resources import resource_filename
        gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')  # NOTE(review): currently unused here
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        system_generators['explicit'] = SystemGenerator(forcefields = forcefield_files, barostat = barostat,
            forcefield_kwargs = { 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None})
        # NOTE implicit solvent not supported by this SystemGenerator
        # system_generators['implicit'] = SystemGenerator(forcefields = forcefield_files,
        #     forcefield_kwargs = { 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2})
        system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
            forcefield_kwargs = { 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None})
        # Copy system generators for all environments
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                system_generators[environment] = system_generators[solvent]
        # Load topologies and positions for all components
        from simtk.openmm.app import PDBFile, Modeller
        topologies = dict()
        positions = dict()
        for component in components:
            pdb_filename = resource_filename('perses', os.path.join(setup_path, '%s.pdb' % component))
            pdbfile = PDBFile(pdb_filename)
            topologies[component] = pdbfile.topology
            positions[component] = pdbfile.positions
        # Construct positions and topologies for all solvent environments
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    # Create MODELLER object.
                    modeller = app.Modeller(topologies[component], positions[component])
                    # Use the solvent-model/padding settings declared above.
                    modeller.addSolvent(system_generators[solvent].forcefield, model=explicit_solvent_model, padding=padding)
                    topologies[environment] = modeller.getTopology()
                    positions[environment] = modeller.getPositions()
                else:
                    environment = solvent + '-' + component
                    topologies[environment] = topologies[component]
                    positions[environment] = positions[component]
        # Set up resistance mutation proposal engines
        # Allowed mutations: every (resid, resname) combination from these two lists.
        allowed_mutations = list()
        # TODO: Expand this beyond the ATP binding site
        for resid in ['22', '37', '52', '55', '65', '81', '125', '128', '147', '148']:
            for resname in ['ALA', 'CYS', 'ASP', 'GLU', 'PHE', 'HIS', 'ILE', 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL', 'TRP', 'TYR']:
                allowed_mutations.append((resid, resname))
        from perses.rjmc.topology_proposal import PointMutationEngine
        proposal_metadata = { 'ffxmls' : ['amber99sbildn.xml'] }
        proposal_engines = dict()
        chain_id = 'A'
        for solvent in solvents:
            for component in ['complex', 'receptor']: # Mutations only apply to components that contain the kinase
                environment = solvent + '-' + component
                proposal_engines[environment] = PointMutationEngine(topologies[environment], system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations)
        # Generate systems for all environments
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].create_system(topologies[environment])
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        thermodynamic_states = dict()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
                storage = None
                if self.storage:
                    storage = NetCDFStorageView(self.storage, envname=environment)
                if solvent == 'explicit':
                    # Periodic systems: constant pressure, box vectors in sampler state.
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                    sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
                else:
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature)
                    sampler_state = states.SamplerState(positions=positions[environment])
                mcmc_samplers[environment] = MCMCSampler(thermodynamic_state, sampler_state, copy.deepcopy(self._move))
                # reduce number of steps for testing
                # FIX: proposal engine is the 4th positional argument and geometry
                # engine the 5th, matching every other ExpandedEnsembleSampler call
                # in this module; the two were previously swapped here.
                exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
                exen_samplers[environment].verbose = True
                sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
                sams_samplers[environment].verbose = True
                thermodynamic_states[environment] = thermodynamic_state
        # Create test MultiTargetDesign sampler.
        # TODO: Replace this with inhibitor:kinase and ATP:kinase ratio
        target_samplers = { sams_samplers['vacuum-complex'] : 1.0, sams_samplers['vacuum-receptor'] : -1.0 }
        from perses.samplers.samplers import MultiTargetDesign
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.components = components
        self.solvents = solvents
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.systems = systems
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
        # This system must currently be minimized.
        minimize_wrapper(self)
class AblAffinityTestSystem(PersesTestSystem):
"""
Create a consistent set of SAMS samplers useful for optimizing kinase inhibitor affinity to Abl.
TODO: Generalize to standard inhibitor:protein test system and extend to T4 lysozyme small molecules.
Properties
----------
environments : list of str
Available environments: ['vacuum', 'explicit']
topologies : dict of simtk.openmm.app.Topology
Initial system Topology objects; topologies[environment] is the topology for `environment`
positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
Initial positions corresponding to initial Topology objects
system_generators : dict of SystemGenerator objects
SystemGenerator objects for environments
proposal_engines : dict of ProposalEngine
Proposal engines
themodynamic_states : dict of thermodynamic_states
Themodynamic states for each environment
mcmc_samplers : dict of MCMCSampler objects
MCMCSampler objects for environments
exen_samplers : dict of ExpandedEnsembleSampler objects
ExpandedEnsembleSampler objects for environments
sams_samplers : dict of SAMSSampler objects
SAMSSampler objects for environments
designer : MultiTargetDesign sampler
Example MultiTargetDesign sampler for implicit solvent hydration free energies
Examples
--------
>>> from perses.tests.testsystems import AblAffinityTestSystem
>>> testsystem = AblAffinityestSystem()
# Build a system
>>> system = testsystem.system_generators['vacuum-inhibitor'].create_system(testsystem.topologies['vacuum-inhibitor'])
# Retrieve a SAMSSampler
>>> sams_sampler = testsystem.sams_samplers['vacuum-inhibitor']
"""
def __init__(self, **kwargs):
super(AblAffinityTestSystem, self).__init__(**kwargs)
solvents = ['vacuum', 'explicit'] # TODO: Add 'implicit' once GBSA parameterization for small molecules is working
solvents = ['vacuum'] # DEBUG
components = ['inhibitor', 'complex'] # TODO: Add 'ATP:kinase' complex to enable resistance design
padding = 9.0*unit.angstrom
explicit_solvent_model = 'tip3p'
setup_path = 'data/abl-imatinib'
thermodynamic_states = dict()
temperature = 300*unit.kelvin
pressure = 1.0*unit.atmospheres
# Construct list of all environments
environments = list()
for solvent in solvents:
for component in components:
environment = solvent + '-' + component
environments.append(environment)
# Read SMILES from CSV file of clinical kinase inhibitors.
from pkg_resources import resource_filename
smiles_filename = resource_filename('perses', 'data/clinical-kinase-inhibitors.csv')
import csv
molecules = list()
with open(smiles_filename, 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in csvreader:
name = row[0]
smiles = row[1]
molecules.append(smiles)
# Add current molecule
molecules.append('Cc1ccc(cc1Nc2nccc(n2)c3cccnc3)NC(=O)c4ccc(cc4)C[NH+]5CCN(CC5)C')
self.molecules = molecules
# Expand molecules without explicit stereochemistry and make canonical isomeric SMILES.
molecules = sanitizeSMILES(self.molecules)
molecules = canonicalize_SMILES(molecules)
# Create a system generator for desired forcefields
from pkg_resources import resource_filename
barostat = openmm.MonteCarloBarostat(pressure, temperature)
system_generators = dict()
system_generators['explicit'] = SystemGenerator(forcefields = forcefield_files, barostat = barostat,
forcefield_kwargs = { 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None},
molecules = [Molecule.from_openeye(molecule) for molecule in molecules], small_molecule_forcefield = small_molecule_forcefield)
# NOTE implicit solvent not supported by this SystemGenerator
# system_generators['implicit'] = SystemGenerator(forcefields = forcefield_files,
# forcefield_kwargs = { 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2},
# molecules = [Molecule.from_openeye(molecule) for molecule in molecules],
# small_molecule_forcefield = small_molecule_forcefield)
system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
forcefield_kwargs = { 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None},
molecules = [Molecule.from_openeye(molecule) for molecule in molecules],
small_molecule_forcefield = small_molecule_forcefield)
# Copy system generators for all environments
for solvent in solvents:
for component in components:
environment = solvent + '-' + component
system_generators[environment] = system_generators[solvent]
# Load topologies and positions for all components
from simtk.openmm.app import PDBFile, Modeller
topologies = dict()
positions = dict()
for component in components:
pdb_filename = resource_filename('perses', os.path.join(setup_path, '%s.pdb' % component))
print(pdb_filename)
pdbfile = PDBFile(pdb_filename)
topologies[component] = pdbfile.topology
positions[component] = pdbfile.positions
# Construct positions and topologies for all solvent environments
for solvent in solvents:
for component in components:
environment = solvent + '-' + component
if solvent == 'explicit':
# Create MODELLER object.
modeller = app.Modeller(topologies[component], positions[component])
modeller.addSolvent(system_generators[solvent].forcefield, model='tip3p', padding=9.0*unit.angstrom)
topologies[environment] = modeller.getTopology()
positions[environment] = modeller.getPositions()
else:
environment = solvent + '-' + component
topologies[environment] = topologies[component]
positions[environment] = positions[component]
# Set up the proposal engines.
from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
proposal_metadata = { }
proposal_engines = dict()
from perses.utils.openeye import smiles_to_oemol
list_of_oemols = []
for smi in molecules:
mol = smiles_to_oemol(smi)
list_of_oemols.append(mol)
for environment in environments:
storage = None
if self.storage:
storage = NetCDFStorageView(self.storage, envname=environment)
proposal_engines[environment] = SmallMoleculeSetProposalEngine(list_of_oemols, system_generators[environment], residue_name='MOL', storage=storage)
# Generate systems
systems = dict()
for environment in environments:
systems[environment] = system_generators[environment].create_system(topologies[environment])
# Define thermodynamic state of interest.
thermodynamic_states = dict()
for component in components:
for solvent in solvents:
environment = solvent + '-' + component
if solvent == 'explicit':
thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
else:
thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature)
# Create SAMS samplers
from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
mcmc_samplers = dict()
exen_samplers = dict()
sams_samplers = dict()
for solvent in solvents:
for component in components:
environment = solvent + '-' + component
chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
storage = None
if self.storage:
storage = NetCDFStorageView(self.storage, envname=environment)
if solvent == 'explicit':
thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
else:
thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature)
sampler_state = states.SamplerState(positions=positions[environment])
mcmc_samplers[environment] = MCMCSampler(thermodynamic_state, sampler_state, copy.deepcopy(self._move))
# reduce number of steps for testing
exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
exen_samplers[environment].verbose = True
sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
sams_samplers[environment].verbose = True
thermodynamic_states[environment] = thermodynamic_state
# Create test MultiTargetDesign sampler.
# TODO: Replace this with inhibitor:kinase and ATP:kinase ratio
from perses.samplers.samplers import MultiTargetDesign
target_samplers = { sams_samplers['vacuum-complex'] : 1.0, sams_samplers['vacuum-inhibitor'] : -1.0 }
designer = MultiTargetDesign(target_samplers, storage=self.storage)
designer.verbose = True
# Store things.
self.molecules = molecules
self.environments = environments
self.topologies = topologies
self.positions = positions
self.system_generators = system_generators
self.systems = systems
self.proposal_engines = proposal_engines
self.thermodynamic_states = thermodynamic_states
self.mcmc_samplers = mcmc_samplers
self.exen_samplers = exen_samplers
self.sams_samplers = sams_samplers
self.designer = designer
# This system must currently be minimized.
minimize_wrapper(self)
class AblImatinibProtonationStateTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for sampling protonation states of the Abl:imatinib complex.

    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    themodynamic_states : dict of thermodynamic_states
        Themodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for implicit solvent hydration free energies

    Examples
    --------
    >>> from perses.tests.testsystems import AblImatinibProtonationStateTestSystem
    >>> testsystem = AblImatinibProtonationStateTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['explicit-inhibitor'].create_system(testsystem.topologies['explicit-inhibitor'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['explicit-inhibitor']
    """
    def __init__(self, **kwargs):
        """Build topologies, system generators, proposal engines and SAMS samplers for every solvent/component combination."""
        super(AblImatinibProtonationStateTestSystem, self).__init__(**kwargs)
        solvents = ['vacuum', 'explicit'] # TODO: Add 'implicit' once GBSA parameterization for small molecules is working
        components = ['inhibitor', 'complex'] # TODO: Add 'ATP:kinase' complex to enable resistance design
        #solvents = ['vacuum'] # DEBUG: Just try vacuum for now
        #components = ['inhibitor'] # DEBUG: Just try inhibitor for now
        # NOTE(review): `padding` and `explicit_solvent_model` appear unused below;
        # addSolvent() hard-codes model='tip3p' and padding=9.0*unit.angstrom — confirm before removing.
        padding = 9.0*unit.angstrom
        explicit_solvent_model = 'tip3p'
        setup_path = 'data/constant-pH/abl-imatinib'
        # NOTE(review): this dict is rebound to a fresh dict() later in this method,
        # so this first assignment has no lasting effect.
        thermodynamic_states = dict()
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Construct list of all environments
        environments = list()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                environments.append(environment)
        # Read mol2 file containing protonation states and extract canonical isomeric SMILES from this.
        from pkg_resources import resource_filename
        molecules = list()
        mol2_filename = resource_filename('perses', os.path.join(setup_path, 'Imatinib-epik-charged.mol2'))
        ifs = oechem.oemolistream(mol2_filename)
        mol = oechem.OEMol()
        while oechem.OEReadMolecule(ifs, mol):
            smiles = oechem.OEMolToSmiles(mol)
            molecules.append(smiles)
        # Read log probabilities
        # Penalties are matched to molecules by position: line i of the .out file
        # corresponds to molecule i of the mol2 file.
        log_state_penalties = dict()
        state_penalties_filename = resource_filename('perses', os.path.join(setup_path, 'Imatinib-state-penalties.out'))
        for (smiles, log_state_penalty) in zip(molecules, np.fromfile(state_penalties_filename, sep='\n')):
            log_state_penalties[smiles] = log_state_penalty
        # Add current molecule
        smiles = 'Cc1ccc(cc1Nc2nccc(n2)c3cccnc3)NC(=O)c4ccc(cc4)C[NH+]5CCN(CC5)C'
        molecules.append(smiles)
        self.molecules = molecules
        log_state_penalties[smiles] = 100.0 # this should have zero weight
        # Expand molecules without explicit stereochemistry and make canonical isomeric SMILES.
        molecules = sanitizeSMILES(self.molecules)
        # Create a system generator for desired forcefields
        # TODO: Debug why we can't ue pregenerated molecule ffxml parameters. This may be an openmoltools issue.
        # NOTE(review): `molecules_xml_filename` is computed but never used below.
        molecules_xml_filename = resource_filename('perses', os.path.join(setup_path, 'Imatinib-epik-charged.ffxml'))
        print('Creating system generators...')
        barostat = MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        system_generators['explicit'] = SystemGenerator(forcefields = forcefield_files, barostat = barostat,
                                                        forcefield_kwargs = { 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None},
                                                        molecules = [Molecule.from_openeye(molecule) for molecule in molecules], small_molecule_forcefield = small_molecule_forcefield)
        # NOTE implicit solvent not supported by this SystemGenerator
        # system_generators['implicit'] = SystemGenerator(forcefields = forcefield_files,
        #                                                 forcefield_kwargs = { 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2},
        #                                                 molecules = [Molecule.from_openeye(molecule) for molecule in molecules],
        #                                                 small_molecule_forcefield = small_molecule_forcefield)
        system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
                                                      forcefield_kwargs = { 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None},
                                                      molecules = [Molecule.from_openeye(molecule) for molecule in molecules],
                                                      small_molecule_forcefield = small_molecule_forcefield)
        # Copy system generators for all environments
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                system_generators[environment] = system_generators[solvent]
        # Load topologies and positions for all components
        from simtk.openmm.app import PDBFile, Modeller
        topologies = dict()
        positions = dict()
        for component in components:
            pdb_filename = resource_filename('perses', os.path.join(setup_path, '%s.pdb' % component))
            print(pdb_filename)
            pdbfile = PDBFile(pdb_filename)
            topologies[component] = pdbfile.topology
            positions[component] = pdbfile.positions
        # Construct positions and topologies for all solvent environments
        print('Constructing positions and topologies...')
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    # Create MODELLER object.
                    modeller = app.Modeller(topologies[component], positions[component])
                    modeller.addSolvent(system_generators[solvent].forcefield, model='tip3p', padding=9.0*unit.angstrom)
                    topologies[environment] = modeller.getTopology()
                    positions[environment] = modeller.getPositions()
                else:
                    environment = solvent + '-' + component
                    topologies[environment] = topologies[component]
                    positions[environment] = positions[component]
                natoms = sum( 1 for atom in topologies[environment].atoms() )
                print("System '%s' has %d atoms" % (environment, natoms))
        # Set up the proposal engines.
        print('Initializing proposal engines...')
        from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
        proposal_engines = dict()
        list_of_oemols = []
        from perses.utils.openeye import smiles_to_oemol
        for smiles in molecules:
            mol = smiles_to_oemol(smiles)
            list_of_oemols.append(mol)
        for environment in environments:
            proposal_engines[environment] = SmallMoleculeSetProposalEngine(list_of_oemols, system_generators[environment], residue_name='MOL')
        # Generate systems
        print('Building systems...')
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].create_system(topologies[environment])
        # Define thermodynamic state of interest.
        print('Defining thermodynamic states...')
        thermodynamic_states = dict()
        for component in components:
            for solvent in solvents:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                else:
                    thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature)
        # Create SAMS samplers
        print('Creating SAMS samplers...')
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
                storage = None
                if self.storage:
                    storage = NetCDFStorageView(self.storage, envname=environment)
                if solvent == 'explicit':
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                    sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
                else:
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature)
                    sampler_state = states.SamplerState(positions=positions[environment])
                mcmc_samplers[environment] = MCMCSampler(thermodynamic_state, sampler_state, copy.deepcopy(self._move))
                # reduce number of steps for testing
                exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
                exen_samplers[environment].verbose = True
                sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
                sams_samplers[environment].verbose = True
                thermodynamic_states[environment] = thermodynamic_state
        # Create a constant-pH sampler
        from perses.samplers.samplers import ProtonationStateSampler
        designer = ProtonationStateSampler(complex_sampler=exen_samplers['explicit-complex'], solvent_sampler=sams_samplers['explicit-inhibitor'], log_state_penalties=log_state_penalties, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.molecules = molecules
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.system_generators = system_generators
        self.systems = systems
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
        # This system must currently be minimized.
        minimize_wrapper(self)
        print('AblImatinibProtonationStateTestSystem initialized.')
class ImidazoleProtonationStateTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for sampling protonation states of imidazole in water.

    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    themodynamic_states : dict of thermodynamic_states
        Themodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : None
        No design sampler is configured for this test system (set to None).

    Examples
    --------
    >>> from perses.tests.testsystems import ImidazoleProtonationStateTestSystem
    >>> testsystem = ImidazoleProtonationStateTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['explicit-imidazole'].create_system(testsystem.topologies['explicit-imidazole'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['explicit-imidazole']
    """
    def __init__(self, **kwargs):
        """Build topologies, system generators, proposal engines and SAMS samplers for imidazole in vacuum and explicit solvent."""
        super(ImidazoleProtonationStateTestSystem, self).__init__(**kwargs)
        solvents = ['vacuum', 'explicit'] # TODO: Add 'implicit' once GBSA parameterization for small molecules is working
        components = ['imidazole']
        # NOTE(review): `padding` and `explicit_solvent_model` appear unused below;
        # addSolvent() hard-codes model='tip3p' and padding=9.0*unit.angstrom — confirm before removing.
        padding = 9.0*unit.angstrom
        explicit_solvent_model = 'tip3p'
        setup_path = 'data/constant-pH/imidazole/'
        # NOTE(review): this dict is rebound to a fresh dict() later in this method,
        # so this first assignment has no lasting effect.
        thermodynamic_states = dict()
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Construct list of all environments
        environments = list()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                environments.append(environment)
        # Read mol2 file containing protonation states and extract canonical isomeric SMILES from this.
        from pkg_resources import resource_filename
        molecules = list()
        # NOTE(review): setup_path already ends in 'imidazole/', so this resolves to
        # 'data/constant-pH/imidazole/imidazole/...' — verify the doubled directory is intentional.
        mol2_filename = resource_filename('perses', os.path.join(setup_path, 'imidazole/imidazole-epik-charged.mol2'))
        ifs = oechem.oemolistream(mol2_filename)
        mol = oechem.OEMol()
        while oechem.OEReadMolecule(ifs, mol):
            smiles = oechem.OEMolToSmiles(mol)
            molecules.append(smiles)
        # Read log probabilities
        # Penalties are matched to molecules by position: line i of the .out file
        # corresponds to molecule i of the mol2 file.
        log_state_penalties = dict()
        state_penalties_filename = resource_filename('perses', os.path.join(setup_path, 'imidazole/imidazole-state-penalties.out'))
        for (smiles, log_state_penalty) in zip(molecules, np.fromfile(state_penalties_filename, sep='\n')):
            log_state_penalties[smiles] = log_state_penalty
        # Add current molecule
        smiles = 'C1=CN=CN1'
        molecules.append(smiles)
        self.molecules = molecules
        log_state_penalties[smiles] = 0.0
        # Expand molecules without explicit stereochemistry and make canonical isomeric SMILES.
        molecules = sanitizeSMILES(self.molecules)
        # Create a system generator for desired forcefields
        print('Creating system generators...')
        # NOTE(review): `gaff_xml_filename` is computed but never used below.
        gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        system_generators['explicit'] = SystemGenerator(forcefields = forcefield_files, barostat = barostat,
                                                        forcefield_kwargs = {'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None},periodic_forcefield_kwargs={'nonbondedMethod' : app.CutoffPeriodic},
                                                        molecules = [Molecule.from_openeye(molecule) for molecule in molecules], small_molecule_forcefield = small_molecule_forcefield)
        # NOTE implicit solvent not supported by this SystemGenerator
        # system_generators['implicit'] = SystemGenerator(forcefields = forcefield_files,
        #                                                 forcefield_kwargs = { 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2},
        #                                                 molecules = [Molecule.from_openeye(molecule) for molecule in molecules],
        #                                                 small_molecule_forcefield = small_molecule_forcefield)
        system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
                                                      forcefield_kwargs = {'implicitSolvent' : None},nonperiodic_forcefield_kwargs={'nonbondedMethod' : app.NoCutoff},
                                                      molecules = [Molecule.from_openeye(molecule) for molecule in molecules],
                                                      small_molecule_forcefield = small_molecule_forcefield)
        # Copy system generators for all environments
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                system_generators[environment] = system_generators[solvent]
        # Load topologies and positions for all components
        from simtk.openmm.app import PDBFile, Modeller
        topologies = dict()
        positions = dict()
        for component in components:
            pdb_filename = resource_filename('perses', os.path.join(setup_path, '%s.pdb' % component))
            print(pdb_filename)
            pdbfile = PDBFile(pdb_filename)
            topologies[component] = pdbfile.topology
            positions[component] = pdbfile.positions
        # Construct positions and topologies for all solvent environments
        print('Constructing positions and topologies...')
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    # Create MODELLER object.
                    modeller = app.Modeller(topologies[component], positions[component])
                    modeller.addSolvent(system_generators[solvent].forcefield, model='tip3p', padding=9.0*unit.angstrom)
                    topologies[environment] = modeller.getTopology()
                    positions[environment] = modeller.getPositions()
                else:
                    environment = solvent + '-' + component
                    topologies[environment] = topologies[component]
                    positions[environment] = positions[component]
                natoms = sum( 1 for atom in topologies[environment].atoms() )
                print("System '%s' has %d atoms" % (environment, natoms))
                # DEBUG: Write initial PDB file
                outfile = open(environment + '.initial.pdb', 'w')
                PDBFile.writeFile(topologies[environment], positions[environment], file=outfile)
                outfile.close()
        # Set up the proposal engines.
        print('Initializing proposal engines...')
        residue_name = 'UNL' # TODO: Figure out residue name automatically
        from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
        proposal_engines = dict()
        from perses.utils.openeye import smiles_to_oemol
        list_of_oemols = []
        for smiles in molecules:
            mol = smiles_to_oemol(smiles)
            list_of_oemols.append(mol)
        for environment in environments:
            storage = None
            if self.storage is not None:
                storage = NetCDFStorageView(self.storage, envname=environment)
            proposal_engines[environment] = SmallMoleculeSetProposalEngine(list_of_oemols, system_generators[environment], residue_name=residue_name, storage=storage)
        # Generate systems
        print('Building systems...')
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].create_system(topologies[environment])
        # Define thermodynamic state of interest.
        print('Defining thermodynamic states...')
        thermodynamic_states = dict()
        for component in components:
            for solvent in solvents:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                else:
                    thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature)
        # Create SAMS samplers
        print('Creating SAMS samplers...')
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
                storage = None
                if self.storage is not None:
                    storage = NetCDFStorageView(self.storage, envname=environment)
                if solvent == 'explicit':
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                    sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
                else:
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature)
                    sampler_state = states.SamplerState(positions=positions[environment])
                mcmc_samplers[environment] = MCMCSampler(thermodynamic_state, sampler_state, copy.deepcopy(self._move))
                # reduce number of steps for testing
                exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
                exen_samplers[environment].verbose = True
                sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
                sams_samplers[environment].verbose = True
                thermodynamic_states[environment] = thermodynamic_state
        # Store things.
        self.molecules = molecules
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.system_generators = system_generators
        self.systems = systems
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = None
        print('ImidazoleProtonationStateTestSystem initialized.')
def minimize_wrapper(testsystem):
    """
    Minimize every environment of a test system in place.

    For each environment the current positions are relaxed and written back
    into both ``testsystem.positions`` and the corresponding MCMC sampler's
    sampler state.

    TODO
    ----
    Use sampler thermodynamic states instead of testsystem.systems

    Parameters
    ----------
    testystem : PersesTestSystem
        The testsystem to minimize.
    """
    for env_name in testsystem.environments:
        print("Minimizing '%s'..." % env_name)
        # Minimization is temperature-independent, so any temperature works here.
        thermo_state = ThermodynamicState(
            system=testsystem.systems[env_name],
            temperature=300.0 * unit.kelvin,
        )
        relaxed_state = SamplerState(positions=testsystem.positions[env_name])
        minimize(thermo_state, relaxed_state)
        # Propagate the minimized coordinates back into the test system.
        testsystem.positions[env_name] = relaxed_state.positions
        testsystem.mcmc_samplers[env_name].sampler_state = relaxed_state
class SmallMoleculeLibraryTestSystem(PersesTestSystem):
    """
    Create a consistent set of samplers useful for testing SmallMoleculeProposalEngine on alkanes in various solvents.
    This is useful for testing a variety of components.

    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    themodynamic_states : dict of thermodynamic_states
        Themodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for explicit solvent hydration free energies
    molecules : list
        Molecules in library. Currently only SMILES format is supported.

    Examples
    --------
    >>> from perses.tests.testsystems import AlkanesTestSystem
    >>> testsystem = AlkanesTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum'].create_system(testsystem.topologies['vacuum'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['explicit']
    """
    def __init__(self, constraints=app.HBonds, premapped_json_dict=None, **kwargs):
        """Build generators, topologies, and samplers for the molecule library in `self.molecules` (set by subclasses).

        NOTE(review): `premapped_json_dict` is accepted but not used in this method — confirm whether
        it is consumed elsewhere before removing.
        """
        super(SmallMoleculeLibraryTestSystem, self).__init__(**kwargs)
        # Expand molecules without explicit stereochemistry and make canonical isomeric SMILES.
        molecules = sanitizeSMILES(self.molecules)
        molecules = canonicalize_SMILES(molecules)
        environments = ['explicit', 'vacuum']
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Create a system generator for our desired forcefields.
        from pkg_resources import resource_filename
        system_generators = dict()
        # NOTE(review): `gaff_xml_filename` is computed but never used below.
        gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators['explicit'] = SystemGenerator(forcefields = forcefield_files, barostat = barostat,
                                                        forcefield_kwargs = {'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints': constraints}, periodic_forcefield_kwargs={'nonbondedMethod' : app.CutoffPeriodic},
                                                        small_molecule_forcefield = small_molecule_forcefield)
        system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
                                                      forcefield_kwargs = {'implicitSolvent' : None}, nonperiodic_forcefield_kwargs={'nonbondedMethod' : app.NoCutoff},
                                                      small_molecule_forcefield = small_molecule_forcefield)
        # Create topologies and positions
        topologies = dict()
        positions = dict()
        # # Parametrize and generate residue templates for small molecule set
        from openmoltools.forcefield_generators import generateForceFieldFromMolecules, generateTopologyFromOEMol, gaffTemplateGenerator
        from io import StringIO
        from perses.utils.openeye import smiles_to_oemol, extractPositionsFromOEMol, has_undefined_stereocenters
        # skipping molecules with undefined stereocenters
        d_smiles_to_oemol = {}
        good_molecules = []
        for i, smiles in enumerate(molecules):
            mol = smiles_to_oemol(smiles, f"MOL_{i}")
            if has_undefined_stereocenters(mol):
                print(f"MOL_{i} has undefined stereochemistry so leaving out of test")
            else:
                d_smiles_to_oemol[smiles] = mol
                good_molecules.append(smiles)
        for environment in ['vacuum', 'explicit']:
            system_generators[environment].add_molecules([Molecule.from_openeye(q) for q in d_smiles_to_oemol.values()])
        # Create molecule in vacuum.
        smiles = good_molecules[0] # getting the first smiles that works
        print("smiles: ", smiles)
        molecule = smiles_to_oemol(smiles)
        topologies['vacuum'] = generateTopologyFromOEMol(molecule)
        positions['vacuum'] = extractPositionsFromOEMol(molecule)
        # Create molecule in solvent.
        modeller = app.Modeller(topologies['vacuum'], positions['vacuum'])
        modeller.addSolvent(system_generators['explicit'].forcefield, model='tip3p', padding=9.0*unit.angstrom)
        topologies['explicit'] = modeller.getTopology()
        positions['explicit'] = modeller.getPositions()
        # Set up the proposal engines.
        from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
        # NOTE(review): `proposal_metadata` is never used below.
        proposal_metadata = { }
        proposal_engines = dict()
        list_of_oemols = []
        for smiles in good_molecules:
            mol = smiles_to_oemol(smiles)
            list_of_oemols.append(mol)
        for environment in environments:
            # NOTE(review): `smiles` here is the leftover loop variable, i.e. the LAST good molecule;
            # the residue name therefore comes from that single molecule's title — confirm intended.
            proposal_engines[environment] = SmallMoleculeSetProposalEngine(list_of_oemols, system_generators[environment], residue_name=d_smiles_to_oemol[smiles].GetTitle())
        # Generate systems
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].create_system(topologies[environment])
        # Define thermodynamic state of interest.
        thermodynamic_states = dict()
        thermodynamic_states['explicit'] = states.ThermodynamicState(system=systems['explicit'], temperature=temperature, pressure=pressure)
        thermodynamic_states['vacuum'] = states.ThermodynamicState(system=systems['vacuum'], temperature=temperature)
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            if environment == 'explicit':
                sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler.
        from perses.samplers.samplers import MultiTargetDesign
        target_samplers = { sams_samplers['explicit'] : 1.0, sams_samplers['vacuum'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        # Store things.
        self.molecules = molecules
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
class AlkanesTestSystem(SmallMoleculeLibraryTestSystem):
    """
    Small-molecule library test system built from the linear alkanes
    propane through hexane, sampled in the standard solvent environments.
    """
    def __init__(self, **kwargs):
        # Linear alkanes C3-C6, expressed as SMILES strings.
        self.molecules = ['C' * n_carbons for n_carbons in range(3, 7)]
        super(AlkanesTestSystem, self).__init__(**kwargs)
class KinaseInhibitorsTestSystem(SmallMoleculeLibraryTestSystem):
    """
    Library of clinical kinase inhibitors in various solvent environments.
    This set is often problematic for proposal engines and is useful as a
    stress test.
    """
    def __init__(self, **kwargs):
        import csv
        from pkg_resources import resource_filename

        # The bundled CSV has one inhibitor per row: column 0 is the name,
        # column 1 is the SMILES string (only the SMILES is used here).
        smiles_filename = resource_filename('perses', 'data/clinical-kinase-inhibitors.csv')
        with open(smiles_filename, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            self.molecules = [row[1] for row in reader]
        # Delegate sampler construction to the base class.
        super(KinaseInhibitorsTestSystem, self).__init__(**kwargs)
#TODO fix this test system
class T4LysozymeInhibitorsTestSystem(SmallMoleculeLibraryTestSystem):
    """
    Library of T4 lysozyme L99A inhibitors in various solvent environments.
    Known binders and non-binders are combined and then restricted to
    molecules containing a benzene substructure.
    """
    def read_smiles(self, filename):
        """Return the SMILES strings (second tab-separated column) from `filename`."""
        import csv
        with open(filename, 'r') as csvfile:
            rows = csv.reader(csvfile, delimiter='\t', quotechar='"')
            # Columns are: name, SMILES, literature reference.
            return [row[1] for row in rows]
    def __init__(self, **kwargs):
        from pkg_resources import resource_filename
        # Combine the binder and non-binder datasets.
        molecules = (self.read_smiles(resource_filename('perses', 'data/L99A-binders.txt'))
                     + self.read_smiles(resource_filename('perses', 'data/L99A-non-binders.txt')))
        # Keep only molecules with a benzene substructure (c1ccccc1).
        def contains_benzene(smiles):
            """Return True when the molecule matches a benzene substructure search."""
            from openeye import oechem
            mol = oechem.OEGraphMol()
            oechem.OESmilesToMol(mol, smiles)
            # create a substructure search object
            ss = oechem.OESubSearch("c1ccccc1") # benzene
            oechem.OEPrepareSearch(mol, ss)
            return bool(ss.SingleMatch(mol))
        print('Filtering out molecules that do not contain benzene substructure')
        print(f'{len(molecules)} before filtering')
        molecules = [smiles for smiles in molecules if contains_benzene(smiles)]
        print(f'{len(molecules)} remain after filtering')
        # Store molecules and delegate sampler construction to the base class.
        self.molecules = molecules
        super(T4LysozymeInhibitorsTestSystem, self).__init__(**kwargs)
class FusedRingsTestSystem(SmallMoleculeLibraryTestSystem):
    """
    Simple test system containing fused rings (benzene <--> naphtalene) in explicit solvent.
    """
    def __init__(self, **kwargs):
        # Two-molecule library: benzene and naphthalene.
        self.molecules = ['c1ccccc1', 'c1ccc2ccccc2c1']
        super(FusedRingsTestSystem, self).__init__(**kwargs)
class ValenceSmallMoleculeLibraryTestSystem(PersesTestSystem):
    """
    Create a consistent set of samplers useful for testing SmallMoleculeProposalEngine on alkanes with a valence-only forcefield.
    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of thermodynamic_states
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for explicit solvent hydration free energies
    molecules : list
        Molecules in library. Currently only SMILES format is supported.
    Examples
    --------
    >>> from perses.tests.testsystems import ValenceSmallMoleculeLibraryTestSystem
    >>> testsystem = ValenceSmallMoleculeLibraryTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum'].create_system(testsystem.topologies['vacuum'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['vacuum']
    """
    def __init__(self, **kwargs):
        super(ValenceSmallMoleculeLibraryTestSystem, self).__init__(**kwargs)
        initial_molecules = ['CCCCC','CC(C)CC', 'CCC(C)C', 'CCCCC', 'C(CC)CCC']
        molecules = self._canonicalize_smiles(initial_molecules)
        environments = ['vacuum']
        # Create a system generator for our desired forcefields.
        system_generators = dict()
        from pkg_resources import resource_filename
        from perses.utils.openeye import smiles_to_oemol,extractPositionsFromOEMol
        system_generators['vacuum'] = SystemGenerator(forcefields = forcefield_files,
            forcefield_kwargs = {'implicitSolvent' : None}, nonperiodic_forcefield_kwargs={ 'nonbondedMethod':app.NoCutoff},
            molecules = [Molecule.from_openeye(smiles_to_oemol(q)) for q in molecules],
            small_molecule_forcefield = small_molecule_forcefield)
        #
        # Create topologies and positions
        #
        topologies = dict()
        positions = dict()
        # Create molecule in vacuum.
        from openmoltools.forcefield_generators import generateTopologyFromOEMol
        smiles = molecules[0] # current sampler state
        molecule = smiles_to_oemol(smiles)
        topologies['vacuum'] = generateTopologyFromOEMol(molecule)
        positions['vacuum'] = extractPositionsFromOEMol(molecule)
        # Set up the proposal engines.
        from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
        from perses.utils.openeye import smiles_to_oemol
        list_of_oemols = []
        for smiles in molecules:
            mol = smiles_to_oemol(smiles)
            list_of_oemols.append(mol)
        proposal_engines = dict()
        for environment in environments:
            proposal_engines[environment] = SmallMoleculeSetProposalEngine(list_of_oemols, system_generators[environment])
        # Generate systems
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].create_system(topologies[environment])
        # Define thermodynamic state of interest.
        thermodynamic_states = dict()
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        thermodynamic_states['vacuum'] = states.ThermodynamicState(system=systems['vacuum'], temperature=temperature)
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            # Only 'vacuum' exists in `environments`, so the periodic-box
            # branch below is currently never taken; kept for parity with the
            # other test systems.
            if environment == 'explicit':
                sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # nsteps is kept at 0 to reduce the number of steps for testing.
            # (A stray no-op `00` literal statement was removed here.)
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':0}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler.
        from perses.samplers.samplers import MultiTargetDesign
        # NOTE(review): both entries key on the same 'vacuum' sampler, so the
        # second weight (-1.0) overwrites the first; confirm whether one key
        # was meant to be a different environment.
        target_samplers = { sams_samplers['vacuum'] : 1.0, sams_samplers['vacuum'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        # Store things.
        self.molecules = molecules
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
    def _canonicalize_smiles(self, list_of_smiles):
        """
        Turn a list of smiles strings into openeye canonical
        isomeric smiles.
        Parameters
        ----------
        list_of_smiles : list of str
            input smiles
        Returns
        -------
        list_of_canonicalized_smiles : list of str
            canonical isomeric smiles
        """
        # (A leftover DEBUG oemolostream('current.mol2') that created a stray
        # file as a side effect was removed here.)
        list_of_canonicalized_smiles = []
        for smiles in list_of_smiles:
            mol = oechem.OEMol()
            oechem.OESmilesToMol(mol, smiles)
            oechem.OEAddExplicitHydrogens(mol)
            can_smi = oechem.OECreateSmiString(mol, OESMILES_OPTIONS)
            list_of_canonicalized_smiles.append(can_smi)
        return list_of_canonicalized_smiles
def check_topologies(testsystem):
    """
    Check that all SystemGenerators can build systems for their corresponding Topology objects.

    Parameters
    ----------
    testsystem : PersesTestSystem
        Test system whose `environments`, `topologies`, and
        `system_generators` attributes are exercised.

    Raises
    ------
    Exception
        If any environment's topology cannot be built into a system.
        The original error is chained as the cause so the root failure
        is not lost.
    """
    for environment in testsystem.environments:
        topology = testsystem.topologies[environment]
        try:
            testsystem.system_generators[environment].create_system(topology)
        except Exception as e:
            msg = str(e)
            msg += '\n'
            msg += "topology for environment '%s' cannot be built into a system" % environment
            from perses.utils.smallmolecules import show_topology
            show_topology(topology)
            # Chain the original exception so the root cause is preserved.
            raise Exception(msg) from e
def checktestsystem(testsystem_class):
    """Instantiate `testsystem_class` with throwaway storage and validate its topologies."""
    # Back the test system with a temporary storage file.
    scratch = tempfile.NamedTemporaryFile()
    testsystem = testsystem_class(storage_filename=scratch.name)
    # Verify every environment's topology can be built into a system.
    check_topologies(testsystem)
def test_testsystems():
    """
    Test instantiation of all test systems.

    Yields one check callable per test-system class (nose-style generator).
    """
    testsystem_names = ['KinaseInhibitorsTestSystem', 'T4LysozymeInhibitorsTestSystem', 'AlkanesTestSystem', 'AlanineDipeptideTestSystem']
    niterations = 2  # number of iterations to run
    for testsystem_name in testsystem_names:
        import perses.tests.testsystems
        klass = getattr(perses.tests.testsystems, testsystem_name)
        check = partial(checktestsystem, klass)
        check.description = "Testing %s" % (testsystem_name)
        yield check
def run_t4_inhibitors():
    """
    Run T4 lysozyme inhibitors in solvents test system.
    """
    testsystem = T4LysozymeInhibitorsTestSystem(storage_filename='output.nc', ncmc_nsteps=5000, mcmc_nsteps=100)
    # Enable verbose reporting on both sampler layers in every environment.
    for env in ('explicit', 'vacuum'):
        testsystem.exen_samplers[env].verbose = True
        testsystem.sams_samplers[env].verbose = True
    testsystem.designer.verbose = True
    testsystem.sams_samplers['explicit'].run(niterations=50)
    # Stored data can be analyzed afterwards with perses.analysis.Analysis
    # (e.g. plot_sams_weights / plot_ncmc_work).
def run_alkanes():
    """
    Run alkanes in solvents test system.
    """
    testsystem = AlkanesTestSystem(storage_filename='output.nc', ncmc_nsteps=5000, mcmc_nsteps=100)
    # Enable verbose reporting on both sampler layers in every environment.
    for env in ('explicit', 'vacuum'):
        testsystem.exen_samplers[env].verbose = True
        testsystem.sams_samplers[env].verbose = True
    testsystem.designer.verbose = True
    testsystem.sams_samplers['explicit'].run(niterations=50)
def run_t4():
    """
    Run T4 lysozyme test system.
    """
    testsystem = T4LysozymeTestSystem(ncmc_nsteps=0)
    solvent = 'explicit'
    for component in ('complex', 'receptor'):
        key = solvent + '-' + component
        # Record accepted structures for this phase to a PDB file.
        testsystem.exen_samplers[key].pdbfile = open('t4-' + component + '.pdb', 'w')
        testsystem.sams_samplers[key].run(niterations=5)
    testsystem.designer.verbose = True
    testsystem.designer.run(niterations=5)
    # Stored data can be analyzed afterwards with perses.analysis.Analysis.
def run_myb():
    """
    Run myb test system.
    """
    testsystem = MybTestSystem(ncmc_nsteps=0, mcmc_nsteps=100)
    solvent = 'explicit'
    # Record structures for the peptide and complex phases.
    testsystem.exen_samplers[solvent + '-peptide'].pdbfile = open('myb-vacuum.pdb', 'w')
    testsystem.exen_samplers[solvent + '-complex'].pdbfile = open('myb-complex.pdb', 'w')
    testsystem.sams_samplers[solvent + '-complex'].run(niterations=5)
def run_abl_imatinib_resistance():
    """
    Run abl test system.

    Runs the Abl:imatinib resistance test system with long NCMC/MCMC
    protocols for the vacuum-complex environment, recording accepted and
    proposed geometries to PDB files.
    """
    testsystem = AblImatinibResistanceTestSystem(ncmc_nsteps=20000, mcmc_nsteps=20000)
    #for environment in testsystem.environments:
    for environment in ['vacuum-complex']:
        # NOTE(review): these file handles are stored on the samplers and are
        # never explicitly closed here; presumably closed at interpreter exit.
        testsystem.exen_samplers[environment].pdbfile = open('abl-imatinib-%s.pdb' % environment, 'w')
        testsystem.exen_samplers[environment].geometry_pdbfile = open('abl-imatinib-%s-geometry-proposals.pdb' % environment, 'w')
        #testsystem.mcmc_samplers[environment].run(niterations=5)
        testsystem.exen_samplers[environment].run(niterations=100)
        #testsystem.sams_samplers[environment].run(niterations=5)
    #testsystem.designer.verbose = True
    #testsystem.designer.run(niterations=500)
    #testsystem.exen_samplers[solvent + '-peptide'].verbose=True
    #testsystem.exen_samplers[solvent + '-peptide'].run(niterations=100)
def run_kinase_inhibitors():
    """
    Run kinase inhibitors test system.
    """
    # Load the premapped atom-map dictionary from disk.
    with open("mapperkinase3.json", 'r') as jsoninput:
        json_dict = jsoninput.read()
    testsystem = KinaseInhibitorsTestSystem(ncmc_nsteps=100, mcmc_nsteps=10, premapped_json_dict=json_dict, constraints=None)
    environment = 'vacuum'
    sampler = testsystem.exen_samplers[environment]
    sampler.pdbfile = open('kinase-inhibitors-vacuum.pdb', 'w')
    sampler.geometry_pdbfile = open('kinase-inhibitors-%s-geometry-proposals.pdb' % environment, 'w')
    sampler.geometry_engine.write_proposal_pdb = True  # write proposal PDBs
    sampler.geometry_engine.verbose = True
    testsystem.sams_samplers[environment].run(niterations=100)
def run_valence_system():
    """
    Run valence molecules test system.
    This system only has one environment (vacuum), so SAMS is used.
    """
    testsystem = ValenceSmallMoleculeLibraryTestSystem(storage_filename='output.nc', ncmc_nsteps=0, mcmc_nsteps=10)
    env = 'vacuum'
    # Record accepted structures to a PDB file, then run SAMS.
    testsystem.exen_samplers[env].pdbfile = open('valence.pdb', 'w')
    testsystem.sams_samplers[env].run(niterations=50)
def run_alanine_system(sterics=False):
    """
    Run alanine dipeptide in vacuum test system.
    If `sterics == True`, then sterics will be included.
    Otherwise, only valence terms are used.
    """
    # Pick the sterics-enabled or valence-only variant of the test system.
    system_class = AlanineDipeptideTestSystem if sterics else AlanineDipeptideValenceTestSystem
    testsystem = system_class(storage_filename='output.nc', ncmc_nsteps=0, mcmc_nsteps=100)
    environment = 'vacuum'
    print(testsystem.__class__.__name__)
    testsystem.exen_samplers[environment].pdbfile = open('valence.pdb', 'w')
    sams = testsystem.sams_samplers[environment]
    sams.update_method = 'two-stage'
    sams.second_stage_start = 100  # iteration to start second stage
    sams.run(niterations=200)
def test_valence_write_pdb_ncmc_switching():
    """
    Exercise the valence small-molecule test system with NCMC switching
    for a single expanded-ensemble iteration.
    """
    testsystem = ValenceSmallMoleculeLibraryTestSystem(ncmc_nsteps=10, mcmc_nsteps=10)
    testsystem.exen_samplers['vacuum'].run(niterations=1)
def run_abl_affinity_write_pdb_ncmc_switching():
    """
    Run abl test system.

    Runs the Abl affinity test system with long NCMC/MCMC protocols for the
    vacuum-complex environment, writing accepted and proposed geometries to
    PDB files.
    """
    testsystem = AblAffinityTestSystem(ncmc_nsteps=10000, mcmc_nsteps=10000)
    #for environment in testsystem.environments:
    for environment in ['vacuum-complex']:
        print(environment)
        # NOTE(review): these handles remain open for the samplers to write
        # to during the run and are never explicitly closed here.
        testsystem.exen_samplers[environment].pdbfile = open('abl-imatinib-%s.pdb' % environment, 'w')
        testsystem.exen_samplers[environment].geometry_pdbfile = open('abl-imatinib-%s-geometry-proposals.pdb' % environment, 'w')
        testsystem.exen_samplers[environment].verbose = True
        testsystem.sams_samplers[environment].verbose = True
        #testsystem.mcmc_samplers[environment].run(niterations=5)
        testsystem.exen_samplers[environment].run(niterations=5)
        #testsystem.sams_samplers[environment].run(niterations=5)
    #testsystem.designer.verbose = True
    #testsystem.designer.run(niterations=500)
    #testsystem.exen_samplers[solvent + '-peptide'].verbose=True
    #testsystem.exen_samplers[solvent + '-peptide'].run(niterations=100)
def run_constph_abl():
    """
    Run Abl:imatinib constant-pH test system.

    First calibrates the inhibitor-in-explicit-solvent constant-pH sampler,
    then runs the multi-target designer using the calibrated log weights.
    """
    testsystem = AblImatinibProtonationStateTestSystem(ncmc_nsteps=50, mcmc_nsteps=2500)
    for environment in testsystem.environments:
        #for environment in ['explicit-inhibitor', 'explicit-complex']:
        #for environment in ['vacuum-inhibitor', 'vacuum-complex']:
        # Some environments may not have samplers constructed yet; skip them.
        if environment not in testsystem.exen_samplers:
            print("Skipping '%s' for now..." % environment)
            continue
        print(environment)
        testsystem.exen_samplers[environment].pdbfile = open('abl-imatinib-constph-%s.pdb' % environment, 'w')
        testsystem.exen_samplers[environment].geometry_pdbfile = open('abl-imatinib-constph-%s-geometry-proposals.pdb' % environment, 'w')
        testsystem.exen_samplers[environment].verbose = True
        testsystem.exen_samplers[environment].proposal_engine.verbose = True
        testsystem.sams_samplers[environment].verbose = True
        #testsystem.mcmc_samplers[environment].run(niterations=5)
        #testsystem.exen_samplers[environment].run(niterations=5)
        #testsystem.sams_samplers[environment].run(niterations=5)
    # Run ligand in solvent constant-pH sampler calibration
    testsystem.sams_samplers['explicit-inhibitor'].verbose=True
    testsystem.sams_samplers['explicit-inhibitor'].run(niterations=100)
    #testsystem.exen_samplers['vacuum-inhibitor'].verbose=True
    #testsystem.exen_samplers['vacuum-inhibitor'].run(niterations=100)
    #testsystem.exen_samplers['explicit-complex'].verbose=True
    #testsystem.exen_samplers['explicit-complex'].run(niterations=100)
    # Run constant-pH sampler
    testsystem.designer.verbose = True
    testsystem.designer.update_target_probabilities() # update log weights from inhibitor in solvent calibration
    testsystem.designer.run(niterations=500)
def run_imidazole():
    """
    Run imidazole constant-pH test system.

    Enables verbose reporting for every constructed environment, then
    calibrates the imidazole-in-explicit-solvent constant-pH sampler.
    """
    testsystem = ImidazoleProtonationStateTestSystem(storage_filename='output.nc', ncmc_nsteps=500, mcmc_nsteps=1000)
    for environment in testsystem.environments:
        # Some environments may not have samplers constructed yet; skip them.
        if environment not in testsystem.exen_samplers:
            print("Skipping '%s' for now..." % environment)
            continue
        print(environment)
        #testsystem.exen_samplers[environment].pdbfile = open('imidazole-constph-%s.pdb' % environment, 'w')
        #testsystem.exen_samplers[environment].geometry_pdbfile = open('imidazole-constph-%s-geometry-proposals.pdb' % environment, 'w')
        testsystem.exen_samplers[environment].verbose = True
        testsystem.exen_samplers[environment].proposal_engine.verbose = True
        testsystem.sams_samplers[environment].verbose = True
    # Run ligand in solvent constant-pH sampler calibration
    testsystem.sams_samplers['explicit-imidazole'].verbose=True
    testsystem.sams_samplers['explicit-imidazole'].run(niterations=100)
def run_fused_rings():
    """
    Run fused rings test system.
    Vary number of NCMC steps.

    For each NCMC step count, builds a FusedRingsTestSystem backed by its
    own storage file, runs the designer, and plots the NCMC work.
    """
    #nsteps_to_try = [1, 10, 100, 1000, 10000, 100000] # number of NCMC steps
    nsteps_to_try = [10, 100, 1000, 10000, 100000] # number of NCMC steps
    for ncmc_steps in nsteps_to_try:
        storage_filename = 'output-%d.nc' % ncmc_steps
        # Bug fix: pass the current step count (ncmc_steps), not the whole
        # nsteps_to_try list, as ncmc_nsteps.
        testsystem = FusedRingsTestSystem(storage_filename=storage_filename, ncmc_nsteps=ncmc_steps, mcmc_nsteps=100)
        for environment in ['explicit', 'vacuum']:
            testsystem.exen_samplers[environment].ncmc_engine.verbose = True # verbose output of work
            testsystem.sams_samplers[environment].verbose = True
        testsystem.designer.verbose = True
        testsystem.designer.run(niterations=100)
        # Analyze data.
        from perses.analysis import Analysis
        analysis = Analysis(storage_filename=storage_filename)
        #analysis.plot_sams_weights('sams.pdf')
        analysis.plot_ncmc_work('ncmc-%d.pdf' % ncmc_steps)
if __name__ == '__main__':
    # Script entry point: exactly one driver function is enabled at a time;
    # the others are kept commented out for convenience while developing.
    #testsystem = PropaneTestSystem(scheme='geometry-ncmc-geometry', options = {'nsteps':10})
    #run_null_system(testsystem)
    #run_alanine_system(sterics=False)
    #run_fused_rings()
    #run_valence_system()
    run_alkanes()
    #run_imidazole()
    #run_constph_abl()
    #run_abl_affinity_write_pdb_ncmc_switching()
    #run_kinase_inhibitors()
    #run_abl_imatinib()
    #run_myb()
| [
"perses.utils.smallmolecules.sanitizeSMILES",
"csv.reader",
"perses.dispersed.utils.minimize",
"openforcefield.topology.Molecule.from_openeye",
"perses.utils.openeye.has_undefined_stereocenters",
"pkg_resources.resource_filename",
"openeye.oechem.OESmilesToMol",
"openmmtools.states.SamplerState",
"o... | [((2155, 2193), 'os.environ.get', 'os.environ.get', (['"""GITHUB_ACTIONS"""', 'None'], {}), "('GITHUB_ACTIONS', None)\n", (2169, 2193), False, 'import os\n'), ((19723, 19763), 'pdbfixer.PDBFixer', 'PDBFixer', ([], {'filename': 'filename', 'pdbid': 'pdbid'}), '(filename=filename, pdbid=pdbid)\n', (19731, 19763), False, 'from pdbfixer import PDBFixer\n'), ((106044, 106073), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (106071, 106073), False, 'import tempfile\n'), ((4305, 4342), 'perses.rjmc.geometry.FFAllAngleGeometryEngine', 'FFAllAngleGeometryEngine', ([], {'metadata': '{}'}), '(metadata={})\n', (4329, 4342), False, 'from perses.rjmc.geometry import FFAllAngleGeometryEngine\n'), ((4528, 4637), 'openmmtools.mcmc.LangevinSplittingDynamicsMove', 'LangevinSplittingDynamicsMove', ([], {'timestep': 'self._timestep', 'splitting': 'self._splitting', 'n_restart_attempts': '(10)'}), '(timestep=self._timestep, splitting=self.\n _splitting, n_restart_attempts=10)\n', (4557, 4637), False, 'from openmmtools.mcmc import MCMCSampler, LangevinSplittingDynamicsMove\n'), ((7078, 7126), 'simtk.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', (['pressure', 'temperature'], {}), '(pressure, temperature)\n', (7103, 7126), False, 'from simtk import openmm, unit\n'), ((7430, 7687), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'barostat': 'barostat', 'forcefield_kwargs': "{'nonbondedCutoff': 9.0 * unit.angstrom, 'implicitSolvent': None,\n 'constraints': constraints}", 'periodic_forcefield_kwargs': "{'nonbondedMethod': app.CutoffPeriodic}"}), "(forcefields=forcefield_files, barostat=barostat,\n forcefield_kwargs={'nonbondedCutoff': 9.0 * unit.angstrom,\n 'implicitSolvent': None, 'constraints': constraints},\n periodic_forcefield_kwargs={'nonbondedMethod': app.CutoffPeriodic})\n", (7445, 7687), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((8106, 8298), 
'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'forcefield_kwargs': "{'implicitSolvent': None, 'constraints': constraints}", 'nonperiodic_forcefield_kwargs': "{'nonbondedMethod': app.NoCutoff}"}), "(forcefields=forcefield_files, forcefield_kwargs={\n 'implicitSolvent': None, 'constraints': constraints},\n nonperiodic_forcefield_kwargs={'nonbondedMethod': app.NoCutoff})\n", (8121, 8298), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((8587, 8676), 'pkg_resources.resource_filename', 'resource_filename', (['"""openmmtools"""', '"""data/alanine-dipeptide-gbsa/alanine-dipeptide.pdb"""'], {}), "('openmmtools',\n 'data/alanine-dipeptide-gbsa/alanine-dipeptide.pdb')\n", (8604, 8676), False, 'from pkg_resources import resource_filename\n'), ((8791, 8812), 'simtk.openmm.app.PDBFile', 'PDBFile', (['pdb_filename'], {}), '(pdb_filename)\n', (8798, 8812), False, 'from simtk.openmm.app import PDBFile, Modeller\n'), ((9122, 9177), 'simtk.openmm.app.Modeller', 'app.Modeller', (["topologies['vacuum']", "positions['vacuum']"], {}), "(topologies['vacuum'], positions['vacuum'])\n", (9134, 9177), False, 'from simtk.openmm import app\n'), ((10420, 10522), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['explicit']", 'temperature': 'temperature', 'pressure': 'pressure'}), "(system=systems['explicit'], temperature=\n temperature, pressure=pressure)\n", (10445, 10522), False, 'from openmmtools import states\n'), ((10684, 10760), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['vacuum']", 'temperature': 'temperature'}), "(system=systems['vacuum'], temperature=temperature)\n", (10709, 10760), False, 'from openmmtools import states\n'), ((12549, 12605), 'perses.samplers.samplers.MultiTargetDesign', 'MultiTargetDesign', (['target_samplers'], {'storage': 'self.storage'}), '(target_samplers, storage=self.storage)\n', 
(12566, 12605), False, 'from perses.samplers.samplers import MultiTargetDesign\n'), ((15381, 15447), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/amber99sbildn-valence-only.xml"""'], {}), "('perses', 'data/amber99sbildn-valence-only.xml')\n", (15398, 15447), False, 'from pkg_resources import resource_filename\n'), ((15486, 15678), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'forcefield_kwargs': "{'implicitSolvent': None, 'constraints': constraints}", 'nonperiodic_forcefield_kwargs': "{'nonbondedMethod': app.NoCutoff}"}), "(forcefields=forcefield_files, forcefield_kwargs={\n 'implicitSolvent': None, 'constraints': constraints},\n nonperiodic_forcefield_kwargs={'nonbondedMethod': app.NoCutoff})\n", (15501, 15678), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((15975, 16064), 'pkg_resources.resource_filename', 'resource_filename', (['"""openmmtools"""', '"""data/alanine-dipeptide-gbsa/alanine-dipeptide.pdb"""'], {}), "('openmmtools',\n 'data/alanine-dipeptide-gbsa/alanine-dipeptide.pdb')\n", (15992, 16064), False, 'from pkg_resources import resource_filename\n'), ((16179, 16200), 'simtk.openmm.app.PDBFile', 'PDBFile', (['pdb_filename'], {}), '(pdb_filename)\n', (16186, 16200), False, 'from simtk.openmm.app import PDBFile, Modeller\n'), ((17460, 17536), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['vacuum']", 'temperature': 'temperature'}), "(system=systems['vacuum'], temperature=temperature)\n", (17485, 17536), False, 'from openmmtools import states\n'), ((18919, 18975), 'perses.samplers.samplers.MultiTargetDesign', 'MultiTargetDesign', (['target_samplers'], {'storage': 'self.storage'}), '(target_samplers, storage=self.storage)\n', (18936, 18975), False, 'from perses.samplers.samplers import MultiTargetDesign\n'), ((22298, 22346), 'simtk.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', 
(['pressure', 'temperature'], {}), '(pressure, temperature)\n', (22323, 22346), False, 'from simtk import openmm, unit\n'), ((22423, 22652), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'barostat': 'barostat', 'forcefield_kwargs': "{'nonbondedCutoff': 9.0 * unit.angstrom, 'implicitSolvent': None}", 'periodic_forcefield_kwargs': "{'nonbondedMethod': app.CutoffPeriodic}"}), "(forcefields=forcefield_files, barostat=barostat,\n forcefield_kwargs={'nonbondedCutoff': 9.0 * unit.angstrom,\n 'implicitSolvent': None}, periodic_forcefield_kwargs={'nonbondedMethod':\n app.CutoffPeriodic})\n", (22438, 22652), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((23039, 23204), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'forcefield_kwargs': "{'implicitSolvent': None}", 'nonperiodic_forcefield_kwargs': "{'nonbondedMethod': app.NoCutoff}"}), "(forcefields=forcefield_files, forcefield_kwargs={\n 'implicitSolvent': None}, nonperiodic_forcefield_kwargs={\n 'nonbondedMethod': app.NoCutoff})\n", (23054, 23204), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((23838, 23882), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/181L.pdb"""'], {}), "('perses', 'data/181L.pdb')\n", (23855, 23882), False, 'from pkg_resources import resource_filename\n'), ((24112, 24153), 'simtk.openmm.app.Modeller', 'Modeller', (['fixer_topology', 'fixer_positions'], {}), '(fixer_topology, fixer_positions)\n', (24120, 24153), False, 'from simtk.openmm.app import PDBFile, Modeller\n'), ((24356, 24379), 'copy.deepcopy', 'copy.deepcopy', (['modeller'], {}), '(modeller)\n', (24369, 24379), False, 'import copy\n'), ((24406, 24429), 'copy.deepcopy', 'copy.deepcopy', (['modeller'], {}), '(modeller)\n', (24419, 24429), False, 'import copy\n'), ((25283, 25310), 'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', 
(['"""c1ccccc1"""'], {}), "('c1ccccc1')\n", (25298, 25310), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((25733, 26017), 'perses.rjmc.topology_proposal.TopologyProposal', 'TopologyProposal', ([], {'new_topology': 'new_residue', 'new_system': 'bnz_new_sys', 'old_topology': 'ligand_modeller.topology', 'old_system': 'bnz_new_sys', 'logp_proposal': '(0.0)', 'new_to_old_atom_map': '{(0): 0, (1): 1, (2): 2, (3): 3, (4): 4, (5): 5}', 'old_chemical_state_key': '""""""', 'new_chemical_state_key': '""""""'}), "(new_topology=new_residue, new_system=bnz_new_sys,\n old_topology=ligand_modeller.topology, old_system=bnz_new_sys,\n logp_proposal=0.0, new_to_old_atom_map={(0): 0, (1): 1, (2): 2, (3): 3,\n (4): 4, (5): 5}, old_chemical_state_key='', new_chemical_state_key='')\n", (25749, 26017), False, 'from perses.rjmc.topology_proposal import TopologyProposal\n'), ((26014, 26049), 'perses.rjmc.geometry.FFAllAngleGeometryEngine', 'geometry.FFAllAngleGeometryEngine', ([], {}), '()\n', (26047, 26049), True, 'import perses.rjmc.geometry as geometry\n'), ((26183, 26215), 'copy.deepcopy', 'copy.deepcopy', (['receptor_modeller'], {}), '(receptor_modeller)\n', (26196, 26215), False, 'import copy\n'), ((30563, 30619), 'perses.samplers.samplers.MultiTargetDesign', 'MultiTargetDesign', (['target_samplers'], {'storage': 'self.storage'}), '(target_samplers, storage=self.storage)\n', (30580, 30619), False, 'from perses.samplers.samplers import MultiTargetDesign\n'), ((33487, 33535), 'simtk.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', (['pressure', 'temperature'], {}), '(pressure, temperature)\n', (33512, 33535), False, 'from simtk import openmm, unit\n'), ((33612, 33841), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'barostat': 'barostat', 'forcefield_kwargs': "{'nonbondedCutoff': 9.0 * unit.angstrom, 'implicitSolvent': None}", 'periodic_forcefield_kwargs': "{'nonbondedMethod': app.CutoffPeriodic}"}), 
"(forcefields=forcefield_files, barostat=barostat,\n forcefield_kwargs={'nonbondedCutoff': 9.0 * unit.angstrom,\n 'implicitSolvent': None}, periodic_forcefield_kwargs={'nonbondedMethod':\n app.CutoffPeriodic})\n", (33627, 33841), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((34228, 34393), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'forcefield_kwargs': "{'implicitSolvent': None}", 'nonperiodic_forcefield_kwargs': "{'nonbondedMethod': app.NoCutoff}"}), "(forcefields=forcefield_files, forcefield_kwargs={\n 'implicitSolvent': None}, nonperiodic_forcefield_kwargs={\n 'nonbondedMethod': app.NoCutoff})\n", (34243, 34393), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((35024, 35068), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/1sb0.pdb"""'], {}), "('perses', 'data/1sb0.pdb')\n", (35041, 35068), False, 'from pkg_resources import resource_filename\n'), ((35433, 35486), 'simtk.openmm.app.Modeller', 'Modeller', (["topologies['complex']", "positions['complex']"], {}), "(topologies['complex'], positions['complex'])\n", (35441, 35486), False, 'from simtk.openmm.app import PDBFile, Modeller\n'), ((39865, 39921), 'perses.samplers.samplers.MultiTargetDesign', 'MultiTargetDesign', (['target_samplers'], {'storage': 'self.storage'}), '(target_samplers, storage=self.storage)\n', (39882, 39921), False, 'from perses.samplers.samplers import MultiTargetDesign\n'), ((43170, 43214), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/gaff.xml"""'], {}), "('perses', 'data/gaff.xml')\n", (43187, 43214), False, 'from pkg_resources import resource_filename\n'), ((43234, 43282), 'simtk.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', (['pressure', 'temperature'], {}), '(pressure, temperature)\n', (43259, 43282), False, 'from simtk import openmm, unit\n'), ((43359, 43555), 
'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'barostat': 'barostat', 'forcefield_kwargs': "{'nonbondedMethod': app.CutoffPeriodic, 'nonbondedCutoff': 9.0 * unit.\n angstrom, 'implicitSolvent': None}"}), "(forcefields=forcefield_files, barostat=barostat,\n forcefield_kwargs={'nonbondedMethod': app.CutoffPeriodic,\n 'nonbondedCutoff': 9.0 * unit.angstrom, 'implicitSolvent': None})\n", (43374, 43555), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((43947, 44075), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'forcefield_kwargs': "{'nonbondedMethod': app.NoCutoff, 'implicitSolvent': None}"}), "(forcefields=forcefield_files, forcefield_kwargs={\n 'nonbondedMethod': app.NoCutoff, 'implicitSolvent': None})\n", (43962, 44075), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((49259, 49315), 'perses.samplers.samplers.MultiTargetDesign', 'MultiTargetDesign', (['target_samplers'], {'storage': 'self.storage'}), '(target_samplers, storage=self.storage)\n', (49276, 49315), False, 'from perses.samplers.samplers import MultiTargetDesign\n'), ((52787, 52853), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/clinical-kinase-inhibitors.csv"""'], {}), "('perses', 'data/clinical-kinase-inhibitors.csv')\n", (52804, 52853), False, 'from pkg_resources import resource_filename\n'), ((53437, 53467), 'perses.utils.smallmolecules.sanitizeSMILES', 'sanitizeSMILES', (['self.molecules'], {}), '(self.molecules)\n', (53451, 53467), False, 'from perses.utils.smallmolecules import sanitizeSMILES, canonicalize_SMILES\n'), ((53488, 53518), 'perses.utils.smallmolecules.canonicalize_SMILES', 'canonicalize_SMILES', (['molecules'], {}), '(molecules)\n', (53507, 53518), False, 'from perses.utils.smallmolecules import sanitizeSMILES, canonicalize_SMILES\n'), ((53652, 53700), 
'simtk.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', (['pressure', 'temperature'], {}), '(pressure, temperature)\n', (53677, 53700), False, 'from simtk import openmm, unit\n'), ((60551, 60607), 'perses.samplers.samplers.MultiTargetDesign', 'MultiTargetDesign', (['target_samplers'], {'storage': 'self.storage'}), '(target_samplers, storage=self.storage)\n', (60568, 60607), False, 'from perses.samplers.samplers import MultiTargetDesign\n'), ((64290, 64324), 'openeye.oechem.oemolistream', 'oechem.oemolistream', (['mol2_filename'], {}), '(mol2_filename)\n', (64309, 64324), False, 'from openeye import oechem\n'), ((64339, 64353), 'openeye.oechem.OEMol', 'oechem.OEMol', ([], {}), '()\n', (64351, 64353), False, 'from openeye import oechem\n'), ((64368, 64399), 'openeye.oechem.OEReadMolecule', 'oechem.OEReadMolecule', (['ifs', 'mol'], {}), '(ifs, mol)\n', (64389, 64399), False, 'from openeye import oechem\n'), ((65218, 65248), 'perses.utils.smallmolecules.sanitizeSMILES', 'sanitizeSMILES', (['self.molecules'], {}), '(self.molecules)\n', (65232, 65248), False, 'from perses.utils.smallmolecules import sanitizeSMILES, canonicalize_SMILES\n'), ((72527, 72725), 'perses.samplers.samplers.ProtonationStateSampler', 'ProtonationStateSampler', ([], {'complex_sampler': "exen_samplers['explicit-complex']", 'solvent_sampler': "sams_samplers['explicit-inhibitor']", 'log_state_penalties': 'log_state_penalties', 'storage': 'self.storage'}), "(complex_sampler=exen_samplers['explicit-complex'],\n solvent_sampler=sams_samplers['explicit-inhibitor'],\n log_state_penalties=log_state_penalties, storage=self.storage)\n", (72550, 72725), False, 'from perses.samplers.samplers import ProtonationStateSampler\n'), ((76259, 76293), 'openeye.oechem.oemolistream', 'oechem.oemolistream', (['mol2_filename'], {}), '(mol2_filename)\n', (76278, 76293), False, 'from openeye import oechem\n'), ((76308, 76322), 'openeye.oechem.OEMol', 'oechem.OEMol', ([], {}), '()\n', (76320, 76322), False, 'from 
openeye import oechem\n'), ((76337, 76368), 'openeye.oechem.OEReadMolecule', 'oechem.OEReadMolecule', (['ifs', 'mol'], {}), '(ifs, mol)\n', (76358, 76368), False, 'from openeye import oechem\n'), ((77112, 77142), 'perses.utils.smallmolecules.sanitizeSMILES', 'sanitizeSMILES', (['self.molecules'], {}), '(self.molecules)\n', (77126, 77142), False, 'from perses.utils.smallmolecules import sanitizeSMILES, canonicalize_SMILES\n'), ((77280, 77324), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/gaff.xml"""'], {}), "('perses', 'data/gaff.xml')\n", (77297, 77324), False, 'from pkg_resources import resource_filename\n'), ((77344, 77392), 'simtk.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', (['pressure', 'temperature'], {}), '(pressure, temperature)\n', (77369, 77392), False, 'from simtk import openmm, unit\n'), ((85686, 85782), 'openmmtools.states.ThermodynamicState', 'ThermodynamicState', ([], {'system': 'testsystem.systems[environment]', 'temperature': '(300.0 * unit.kelvin)'}), '(system=testsystem.systems[environment], temperature=\n 300.0 * unit.kelvin)\n', (85704, 85782), False, 'from openmmtools.states import ThermodynamicState, SamplerState\n'), ((85844, 85901), 'openmmtools.states.SamplerState', 'SamplerState', ([], {'positions': 'testsystem.positions[environment]'}), '(positions=testsystem.positions[environment])\n', (85856, 85901), False, 'from openmmtools.states import ThermodynamicState, SamplerState\n'), ((85912, 85948), 'perses.dispersed.utils.minimize', 'minimize', (['thermostate', 'sampler_state'], {}), '(thermostate, sampler_state)\n', (85920, 85948), False, 'from perses.dispersed.utils import minimize\n'), ((88199, 88229), 'perses.utils.smallmolecules.sanitizeSMILES', 'sanitizeSMILES', (['self.molecules'], {}), '(self.molecules)\n', (88213, 88229), False, 'from perses.utils.smallmolecules import sanitizeSMILES, canonicalize_SMILES\n'), ((88250, 88280), 'perses.utils.smallmolecules.canonicalize_SMILES', 
'canonicalize_SMILES', (['molecules'], {}), '(molecules)\n', (88269, 88280), False, 'from perses.utils.smallmolecules import sanitizeSMILES, canonicalize_SMILES\n'), ((88587, 88631), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/gaff.xml"""'], {}), "('perses', 'data/gaff.xml')\n", (88604, 88631), False, 'from pkg_resources import resource_filename\n'), ((88651, 88699), 'simtk.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', (['pressure', 'temperature'], {}), '(pressure, temperature)\n', (88676, 88699), False, 'from simtk import openmm, unit\n'), ((88740, 89054), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'barostat': 'barostat', 'forcefield_kwargs': "{'nonbondedCutoff': 9.0 * unit.angstrom, 'implicitSolvent': None,\n 'constraints': constraints}", 'periodic_forcefield_kwargs': "{'nonbondedMethod': app.CutoffPeriodic}", 'small_molecule_forcefield': 'small_molecule_forcefield'}), "(forcefields=forcefield_files, barostat=barostat,\n forcefield_kwargs={'nonbondedCutoff': 9.0 * unit.angstrom,\n 'implicitSolvent': None, 'constraints': constraints},\n periodic_forcefield_kwargs={'nonbondedMethod': app.CutoffPeriodic},\n small_molecule_forcefield=small_molecule_forcefield)\n", (88755, 89054), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((89200, 89423), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': 'forcefield_files', 'forcefield_kwargs': "{'implicitSolvent': None}", 'nonperiodic_forcefield_kwargs': "{'nonbondedMethod': app.NoCutoff}", 'small_molecule_forcefield': 'small_molecule_forcefield'}), "(forcefields=forcefield_files, forcefield_kwargs={\n 'implicitSolvent': None}, nonperiodic_forcefield_kwargs={\n 'nonbondedMethod': app.NoCutoff}, small_molecule_forcefield=\n small_molecule_forcefield)\n", (89215, 89423), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((90793, 90816), 
'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smiles'], {}), '(smiles)\n', (90808, 90816), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((90849, 90884), 'openmoltools.forcefield_generators.generateTopologyFromOEMol', 'generateTopologyFromOEMol', (['molecule'], {}), '(molecule)\n', (90874, 90884), False, 'from openmoltools.forcefield_generators import generateTopologyFromOEMol\n'), ((90915, 90950), 'perses.utils.openeye.extractPositionsFromOEMol', 'extractPositionsFromOEMol', (['molecule'], {}), '(molecule)\n', (90940, 90950), False, 'from perses.utils.openeye import smiles_to_oemol, extractPositionsFromOEMol\n'), ((91009, 91064), 'simtk.openmm.app.Modeller', 'app.Modeller', (["topologies['vacuum']", "positions['vacuum']"], {}), "(topologies['vacuum'], positions['vacuum'])\n", (91021, 91064), False, 'from simtk.openmm import app\n'), ((92172, 92274), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['explicit']", 'temperature': 'temperature', 'pressure': 'pressure'}), "(system=systems['explicit'], temperature=\n temperature, pressure=pressure)\n", (92197, 92274), False, 'from openmmtools import states\n'), ((92313, 92389), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['vacuum']", 'temperature': 'temperature'}), "(system=systems['vacuum'], temperature=temperature)\n", (92338, 92389), False, 'from openmmtools import states\n'), ((94036, 94092), 'perses.samplers.samplers.MultiTargetDesign', 'MultiTargetDesign', (['target_samplers'], {'storage': 'self.storage'}), '(target_samplers, storage=self.storage)\n', (94053, 94092), False, 'from perses.samplers.samplers import MultiTargetDesign\n'), ((95238, 95304), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/clinical-kinase-inhibitors.csv"""'], {}), "('perses', 'data/clinical-kinase-inhibitors.csv')\n", (95255, 95304), False, 'from pkg_resources import 
resource_filename\n'), ((101115, 101138), 'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smiles'], {}), '(smiles)\n', (101130, 101138), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((101170, 101205), 'openmoltools.forcefield_generators.generateTopologyFromOEMol', 'generateTopologyFromOEMol', (['molecule'], {}), '(molecule)\n', (101195, 101205), False, 'from openmoltools.forcefield_generators import generateTopologyFromOEMol\n'), ((101236, 101271), 'perses.utils.openeye.extractPositionsFromOEMol', 'extractPositionsFromOEMol', (['molecule'], {}), '(molecule)\n', (101261, 101271), False, 'from perses.utils.openeye import smiles_to_oemol, extractPositionsFromOEMol\n'), ((102201, 102277), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['vacuum']", 'temperature': 'temperature'}), "(system=systems['vacuum'], temperature=temperature)\n", (102226, 102277), False, 'from openmmtools import states\n'), ((103908, 103964), 'perses.samplers.samplers.MultiTargetDesign', 'MultiTargetDesign', (['target_samplers'], {'storage': 'self.storage'}), '(target_samplers, storage=self.storage)\n', (103925, 103964), False, 'from perses.samplers.samplers import MultiTargetDesign\n'), ((104899, 104934), 'openeye.oechem.oemolostream', 'oechem.oemolostream', (['"""current.mol2"""'], {}), "('current.mol2')\n", (104918, 104934), False, 'from openeye import oechem\n'), ((106684, 106726), 'functools.partial', 'partial', (['checktestsystem', 'testsystem_class'], {}), '(checktestsystem, testsystem_class)\n', (106691, 106726), False, 'from functools import partial\n'), ((117672, 117715), 'perses.analysis.Analysis', 'Analysis', ([], {'storage_filename': 'storage_filename'}), '(storage_filename=storage_filename)\n', (117680, 117715), False, 'from perses.analysis import Analysis\n'), ((3873, 3914), 'perses.storage.NetCDFStorage', 'NetCDFStorage', (['storage_filename'], {'mode': '"""w"""'}), "(storage_filename, mode='w')\n", 
(3886, 3914), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((9928, 10097), 'perses.rjmc.topology_proposal.PointMutationEngine', 'PointMutationEngine', (['topologies[environment]', 'system_generators[environment]', 'chain_id'], {'proposal_metadata': 'proposal_metadata', 'allowed_mutations': 'allowed_mutations'}), '(topologies[environment], system_generators[environment],\n chain_id, proposal_metadata=proposal_metadata, allowed_mutations=\n allowed_mutations)\n', (9947, 10097), False, 'from perses.rjmc.topology_proposal import PointMutationEngine\n'), ((11834, 12031), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 'self.geometry_engine'], {'options': "{'nsteps': 0}", 'storage': 'storage'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, proposal_engines[environment], self.geometry_engine,\n options={'nsteps': 0}, storage=storage)\n", (11857, 12031), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((12119, 12175), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (12130, 12175), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((16872, 17061), 'perses.rjmc.topology_proposal.PointMutationEngine', 'PointMutationEngine', (['topologies[environment]', 'system_generators[environment]', 'chain_id'], {'proposal_metadata': 'proposal_metadata', 'allowed_mutations': 'allowed_mutations', 'always_change': '(True)'}), '(topologies[environment], system_generators[environment],\n chain_id, proposal_metadata=proposal_metadata, allowed_mutations=\n allowed_mutations, always_change=True)\n', (16891, 17061), False, 'from perses.rjmc.topology_proposal import PointMutationEngine\n'), ((18055, 18108), 
'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (18074, 18108), False, 'from openmmtools import states\n'), ((18331, 18529), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 'self.geometry_engine'], {'options': "{'nsteps': 50}", 'storage': 'storage'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, proposal_engines[environment], self.geometry_engine,\n options={'nsteps': 50}, storage=storage)\n", (18354, 18529), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((18616, 18672), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (18627, 18672), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((26790, 26847), 'simtk.openmm.app.Modeller', 'app.Modeller', (['topologies[component]', 'positions[component]'], {}), '(topologies[component], positions[component])\n', (26802, 26847), False, 'from simtk.openmm import app\n'), ((27832, 28001), 'perses.rjmc.topology_proposal.PointMutationEngine', 'PointMutationEngine', (['topologies[environment]', 'system_generators[environment]', 'chain_id'], {'proposal_metadata': 'proposal_metadata', 'allowed_mutations': 'allowed_mutations'}), '(topologies[environment], system_generators[environment],\n chain_id, proposal_metadata=proposal_metadata, allowed_mutations=\n allowed_mutations)\n', (27851, 28001), False, 'from perses.rjmc.topology_proposal import PointMutationEngine\n'), ((28428, 28547), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['explicit' + '-' + component]", 'temperature': 'temperature', 'pressure': 'pressure'}), 
"(system=systems['explicit' + '-' + component],\n temperature=temperature, pressure=pressure)\n", (28453, 28547), False, 'from openmmtools import states\n'), ((28765, 28863), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['vacuum' + '-' + component]", 'temperature': 'temperature'}), "(system=systems['vacuum' + '-' + component],\n temperature=temperature)\n", (28790, 28863), False, 'from openmmtools import states\n'), ((29874, 30125), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 'self.geometry_engine'], {'options': "{'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps}", 'storage': 'storage'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, proposal_engines[environment], self.geometry_engine,\n options={'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps},\n storage=storage)\n", (29897, 30125), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((30207, 30263), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (30218, 30263), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((36158, 36215), 'simtk.openmm.app.Modeller', 'app.Modeller', (['topologies[component]', 'positions[component]'], {}), '(topologies[component], positions[component])\n', (36170, 36215), False, 'from simtk.openmm import app\n'), ((37211, 37380), 'perses.rjmc.topology_proposal.PointMutationEngine', 'PointMutationEngine', (['topologies[environment]', 'system_generators[environment]', 'chain_id'], {'proposal_metadata': 'proposal_metadata', 'allowed_mutations': 'allowed_mutations'}), '(topologies[environment], system_generators[environment],\n chain_id, 
proposal_metadata=proposal_metadata, allowed_mutations=\n allowed_mutations)\n', (37230, 37380), False, 'from perses.rjmc.topology_proposal import PointMutationEngine\n'), ((37775, 37894), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['explicit' + '-' + component]", 'temperature': 'temperature', 'pressure': 'pressure'}), "(system=systems['explicit' + '-' + component],\n temperature=temperature, pressure=pressure)\n", (37800, 37894), False, 'from openmmtools import states\n'), ((38119, 38217), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['vacuum' + '-' + component]", 'temperature': 'temperature'}), "(system=systems['vacuum' + '-' + component],\n temperature=temperature)\n", (38144, 38217), False, 'from openmmtools import states\n'), ((39230, 39427), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 'self.geometry_engine'], {'options': "{'nsteps': 0}", 'storage': 'storage'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, proposal_engines[environment], self.geometry_engine,\n options={'nsteps': 0}, storage=storage)\n", (39253, 39427), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((39514, 39570), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (39525, 39570), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((44726, 44747), 'simtk.openmm.app.PDBFile', 'PDBFile', (['pdb_filename'], {}), '(pdb_filename)\n', (44733, 44747), False, 'from simtk.openmm.app import PDBFile, Modeller\n'), ((52976, 53025), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(csvfile, 
delimiter=\',\', quotechar=\'"\')\n', (52986, 53025), False, 'import csv\n'), ((55841, 55862), 'simtk.openmm.app.PDBFile', 'PDBFile', (['pdb_filename'], {}), '(pdb_filename)\n', (55848, 55862), False, 'from simtk.openmm.app import PDBFile, Modeller\n'), ((57147, 57167), 'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smi'], {}), '(smi)\n', (57162, 57167), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((57428, 57548), 'perses.rjmc.topology_proposal.SmallMoleculeSetProposalEngine', 'SmallMoleculeSetProposalEngine', (['list_of_oemols', 'system_generators[environment]'], {'residue_name': '"""MOL"""', 'storage': 'storage'}), "(list_of_oemols, system_generators[\n environment], residue_name='MOL', storage=storage)\n", (57458, 57548), False, 'from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine\n'), ((64220, 64274), 'os.path.join', 'os.path.join', (['setup_path', '"""Imatinib-epik-charged.mol2"""'], {}), "(setup_path, 'Imatinib-epik-charged.mol2')\n", (64232, 64274), False, 'import os\n'), ((64422, 64447), 'openeye.oechem.OEMolToSmiles', 'oechem.OEMolToSmiles', (['mol'], {}), '(mol)\n', (64442, 64447), False, 'from openeye import oechem\n'), ((64618, 64674), 'os.path.join', 'os.path.join', (['setup_path', '"""Imatinib-state-penalties.out"""'], {}), "(setup_path, 'Imatinib-state-penalties.out')\n", (64630, 64674), False, 'import os\n'), ((64734, 64781), 'numpy.fromfile', 'np.fromfile', (['state_penalties_filename'], {'sep': '"""\n"""'}), "(state_penalties_filename, sep='\\n')\n", (64745, 64781), True, 'import numpy as np\n'), ((65484, 65539), 'os.path.join', 'os.path.join', (['setup_path', '"""Imatinib-epik-charged.ffxml"""'], {}), "(setup_path, 'Imatinib-epik-charged.ffxml')\n", (65496, 65539), False, 'import os\n'), ((67791, 67812), 'simtk.openmm.app.PDBFile', 'PDBFile', (['pdb_filename'], {}), '(pdb_filename)\n', (67798, 67812), False, 'from simtk.openmm.app import PDBFile, Modeller\n'), ((69329, 69352), 
'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smiles'], {}), '(smiles)\n', (69344, 69352), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((69477, 69580), 'perses.rjmc.topology_proposal.SmallMoleculeSetProposalEngine', 'SmallMoleculeSetProposalEngine', (['list_of_oemols', 'system_generators[environment]'], {'residue_name': '"""MOL"""'}), "(list_of_oemols, system_generators[\n environment], residue_name='MOL')\n", (69507, 69580), False, 'from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine\n'), ((76178, 76243), 'os.path.join', 'os.path.join', (['setup_path', '"""imidazole/imidazole-epik-charged.mol2"""'], {}), "(setup_path, 'imidazole/imidazole-epik-charged.mol2')\n", (76190, 76243), False, 'import os\n'), ((76391, 76416), 'openeye.oechem.OEMolToSmiles', 'oechem.OEMolToSmiles', (['mol'], {}), '(mol)\n', (76411, 76416), False, 'from openeye import oechem\n'), ((76587, 76654), 'os.path.join', 'os.path.join', (['setup_path', '"""imidazole/imidazole-state-penalties.out"""'], {}), "(setup_path, 'imidazole/imidazole-state-penalties.out')\n", (76599, 76654), False, 'import os\n'), ((76714, 76761), 'numpy.fromfile', 'np.fromfile', (['state_penalties_filename'], {'sep': '"""\n"""'}), "(state_penalties_filename, sep='\\n')\n", (76725, 76761), True, 'import numpy as np\n'), ((79590, 79611), 'simtk.openmm.app.PDBFile', 'PDBFile', (['pdb_filename'], {}), '(pdb_filename)\n', (79597, 79611), False, 'from simtk.openmm.app import PDBFile, Modeller\n'), ((81447, 81470), 'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smiles'], {}), '(smiles)\n', (81462, 81470), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((81742, 81869), 'perses.rjmc.topology_proposal.SmallMoleculeSetProposalEngine', 'SmallMoleculeSetProposalEngine', (['list_of_oemols', 'system_generators[environment]'], {'residue_name': 'residue_name', 'storage': 'storage'}), '(list_of_oemols, system_generators[\n environment], 
residue_name=residue_name, storage=storage)\n', (81772, 81869), False, 'from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine\n'), ((90172, 90207), 'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smiles', 'f"""MOL_{i}"""'], {}), "(smiles, f'MOL_{i}')\n", (90187, 90207), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((90223, 90255), 'perses.utils.openeye.has_undefined_stereocenters', 'has_undefined_stereocenters', (['mol'], {}), '(mol)\n', (90250, 90255), False, 'from perses.utils.openeye import smiles_to_oemol, extractPositionsFromOEMol, has_undefined_stereocenters\n'), ((91561, 91584), 'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smiles'], {}), '(smiles)\n', (91576, 91584), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((93399, 93612), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 'self.geometry_engine'], {'options': "{'nsteps': self._ncmc_nsteps}", 'storage': 'storage'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, proposal_engines[environment], self.geometry_engine,\n options={'nsteps': self._ncmc_nsteps}, storage=storage)\n", (93422, 93612), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((93699, 93755), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (93710, 93755), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((95427, 95476), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(csvfile, delimiter=\',\', quotechar=\'"\')\n', (95437, 95476), False, 'import csv\n'), ((96078, 96128), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""', 'quotechar': 
'"""\\""""'}), '(csvfile, delimiter=\'\\t\', quotechar=\'"\')\n', (96088, 96128), False, 'import csv\n'), ((96545, 96597), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/L99A-binders.txt"""'], {}), "('perses', 'data/L99A-binders.txt')\n", (96562, 96597), False, 'from pkg_resources import resource_filename\n'), ((96637, 96693), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/L99A-non-binders.txt"""'], {}), "('perses', 'data/L99A-non-binders.txt')\n", (96654, 96693), False, 'from pkg_resources import resource_filename\n'), ((96859, 96878), 'openeye.oechem.OEGraphMol', 'oechem.OEGraphMol', ([], {}), '()\n', (96876, 96878), False, 'from openeye import oechem\n'), ((96891, 96924), 'openeye.oechem.OESmilesToMol', 'oechem.OESmilesToMol', (['mol', 'smiles'], {}), '(mol, smiles)\n', (96911, 96924), False, 'from openeye import oechem\n'), ((96992, 97022), 'openeye.oechem.OESubSearch', 'oechem.OESubSearch', (['"""c1ccccc1"""'], {}), "('c1ccccc1')\n", (97010, 97022), False, 'from openeye import oechem\n'), ((97045, 97076), 'openeye.oechem.OEPrepareSearch', 'oechem.OEPrepareSearch', (['mol', 'ss'], {}), '(mol, ss)\n', (97067, 97076), False, 'from openeye import oechem\n'), ((101530, 101553), 'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smiles'], {}), '(smiles)\n', (101545, 101553), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((101712, 101790), 'perses.rjmc.topology_proposal.SmallMoleculeSetProposalEngine', 'SmallMoleculeSetProposalEngine', (['list_of_oemols', 'system_generators[environment]'], {}), '(list_of_oemols, system_generators[environment])\n', (101742, 101790), False, 'from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine\n'), ((103289, 103486), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 
'self.geometry_engine'], {'options': "{'nsteps': 0}", 'storage': 'storage'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, proposal_engines[environment], self.geometry_engine,\n options={'nsteps': 0}, storage=storage)\n", (103312, 103486), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((103573, 103629), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (103584, 103629), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((104999, 105013), 'openeye.oechem.OEMol', 'oechem.OEMol', ([], {}), '()\n', (105011, 105013), False, 'from openeye import oechem\n'), ((105026, 105059), 'openeye.oechem.OESmilesToMol', 'oechem.OESmilesToMol', (['mol', 'smiles'], {}), '(mol, smiles)\n', (105046, 105059), False, 'from openeye import oechem\n'), ((105072, 105106), 'openeye.oechem.OEAddExplicitHydrogens', 'oechem.OEAddExplicitHydrogens', (['mol'], {}), '(mol)\n', (105101, 105106), False, 'from openeye import oechem\n'), ((105129, 105176), 'openeye.oechem.OECreateSmiString', 'oechem.OECreateSmiString', (['mol', 'OESMILES_OPTIONS'], {}), '(mol, OESMILES_OPTIONS)\n', (105153, 105176), False, 'from openeye import oechem\n'), ((11091, 11143), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (11108, 11143), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((11484, 11537), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (11503, 11537), False, 'from openmmtools import states\n'), ((11641, 11666), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (11654, 11666), False, 'import copy\n'), ((17867, 17919), 'perses.storage.NetCDFStorageView', 
'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (17884, 17919), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((18212, 18237), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (18225, 18237), False, 'import copy\n'), ((29190, 29242), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (29207, 29242), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((29598, 29651), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (29617, 29651), False, 'from openmmtools import states\n'), ((29755, 29780), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (29768, 29780), False, 'import copy\n'), ((38544, 38596), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (38561, 38596), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((38952, 39005), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (38971, 39005), False, 'from openmmtools import states\n'), ((39109, 39134), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (39122, 39134), False, 'import copy\n'), ((44656, 44702), 'os.path.join', 'os.path.join', (['setup_path', "('%s.pdb' % component)"], {}), "(setup_path, '%s.pdb' % component)\n", (44668, 44702), False, 'import os\n'), ((46609, 46778), 'perses.rjmc.topology_proposal.PointMutationEngine', 'PointMutationEngine', (['topologies[environment]', 'system_generators[environment]', 'chain_id'], {'proposal_metadata': 'proposal_metadata', 'allowed_mutations': 'allowed_mutations'}), 
'(topologies[environment], system_generators[environment],\n chain_id, proposal_metadata=proposal_metadata, allowed_mutations=\n allowed_mutations)\n', (46628, 46778), False, 'from perses.rjmc.topology_proposal import PointMutationEngine\n'), ((48418, 48669), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'self.geometry_engine', 'proposal_engines[environment]'], {'options': "{'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps}", 'storage': 'storage'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, self.geometry_engine, proposal_engines[environment],\n options={'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps},\n storage=storage)\n", (48441, 48669), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((48759, 48815), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (48770, 48815), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((55739, 55785), 'os.path.join', 'os.path.join', (['setup_path', "('%s.pdb' % component)"], {}), "(setup_path, '%s.pdb' % component)\n", (55751, 55785), False, 'import os\n'), ((57331, 57383), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (57348, 57383), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((59709, 59960), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 'self.geometry_engine'], {'options': "{'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps}", 'storage': 'storage'}), "(mcmc_samplers[environment], 
topologies[environment],\n chemical_state_key, proposal_engines[environment], self.geometry_engine,\n options={'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps},\n storage=storage)\n", (59732, 59960), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((60050, 60106), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (60061, 60106), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((67689, 67735), 'os.path.join', 'os.path.join', (['setup_path', "('%s.pdb' % component)"], {}), "(setup_path, '%s.pdb' % component)\n", (67701, 67735), False, 'import os\n'), ((71871, 72122), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 'self.geometry_engine'], {'options': "{'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps}", 'storage': 'storage'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, proposal_engines[environment], self.geometry_engine,\n options={'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps},\n storage=storage)\n", (71894, 72122), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((72212, 72268), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (72223, 72268), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((79488, 79534), 'os.path.join', 'os.path.join', (['setup_path', "('%s.pdb' % component)"], {}), "(setup_path, '%s.pdb' % component)\n", (79500, 79534), False, 'import os\n'), ((80917, 81002), 'simtk.openmm.app.PDBFile.writeFile', 'PDBFile.writeFile', (['topologies[environment]', 
'positions[environment]'], {'file': 'outfile'}), '(topologies[environment], positions[environment], file=outfile\n )\n', (80934, 81002), False, 'from simtk.openmm.app import PDBFile, Modeller\n'), ((81645, 81697), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (81662, 81697), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((84170, 84421), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 'self.geometry_engine'], {'options': "{'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps}", 'storage': 'storage'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, proposal_engines[environment], self.geometry_engine,\n options={'nsteps': self._ncmc_nsteps, 'mcmc_nsteps': self._mcmc_nsteps},\n storage=storage)\n", (84193, 84421), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((84511, 84567), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (84522, 84567), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((92720, 92772), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (92737, 92772), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((93123, 93176), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (93142, 93176), False, 'from openmmtools import states\n'), ((93280, 93305), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (93293, 93305), False, 'import 
copy\n'), ((102608, 102660), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (102625, 102660), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((103011, 103064), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (103030, 103064), False, 'from openmmtools import states\n'), ((103168, 103193), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (103181, 103193), False, 'import copy\n'), ((105902, 105925), 'perses.utils.smallmolecules.show_topology', 'show_topology', (['topology'], {}), '(topology)\n', (105915, 105925), False, 'from perses.utils.smallmolecules import show_topology\n'), ((45178, 45235), 'simtk.openmm.app.Modeller', 'app.Modeller', (['topologies[component]', 'positions[component]'], {}), '(topologies[component], positions[component])\n', (45190, 45235), False, 'from simtk.openmm import app\n'), ((47571, 47623), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (47588, 47623), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((47709, 47812), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature', 'pressure': 'pressure'}), '(system=systems[environment], temperature=\n temperature, pressure=pressure)\n', (47734, 47812), False, 'from openmmtools import states\n'), ((48027, 48106), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature'}), '(system=systems[environment], temperature=temperature)\n', (48052, 48106), False, 'from openmmtools import states\n'), ((48143, 48196), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], 
{'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (48162, 48196), False, 'from openmmtools import states\n'), ((48291, 48316), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (48304, 48316), False, 'import copy\n'), ((54100, 54131), 'openforcefield.topology.Molecule.from_openeye', 'Molecule.from_openeye', (['molecule'], {}), '(molecule)\n', (54121, 54131), False, 'from openforcefield.topology import Molecule\n'), ((55046, 55077), 'openforcefield.topology.Molecule.from_openeye', 'Molecule.from_openeye', (['molecule'], {}), '(molecule)\n', (55067, 55077), False, 'from openforcefield.topology import Molecule\n'), ((56293, 56350), 'simtk.openmm.app.Modeller', 'app.Modeller', (['topologies[component]', 'positions[component]'], {}), '(topologies[component], positions[component])\n', (56305, 56350), False, 'from simtk.openmm import app\n'), ((58061, 58164), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature', 'pressure': 'pressure'}), '(system=systems[environment], temperature=\n temperature, pressure=pressure)\n', (58086, 58164), False, 'from openmmtools import states\n'), ((58240, 58319), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature'}), '(system=systems[environment], temperature=temperature)\n', (58265, 58319), False, 'from openmmtools import states\n'), ((58862, 58914), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (58879, 58914), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((59000, 59103), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature', 'pressure': 'pressure'}), '(system=systems[environment], temperature=\n 
temperature, pressure=pressure)\n', (59025, 59103), False, 'from openmmtools import states\n'), ((59318, 59397), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature'}), '(system=systems[environment], temperature=temperature)\n', (59343, 59397), False, 'from openmmtools import states\n'), ((59434, 59487), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (59453, 59487), False, 'from openmmtools import states\n'), ((59582, 59607), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (59595, 59607), False, 'import copy\n'), ((66050, 66081), 'openforcefield.topology.Molecule.from_openeye', 'Molecule.from_openeye', (['molecule'], {}), '(molecule)\n', (66071, 66081), False, 'from openforcefield.topology import Molecule\n'), ((66996, 67027), 'openforcefield.topology.Molecule.from_openeye', 'Molecule.from_openeye', (['molecule'], {}), '(molecule)\n', (67017, 67027), False, 'from openforcefield.topology import Molecule\n'), ((68301, 68358), 'simtk.openmm.app.Modeller', 'app.Modeller', (['topologies[component]', 'positions[component]'], {}), '(topologies[component], positions[component])\n', (68313, 68358), False, 'from simtk.openmm import app\n'), ((70180, 70283), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature', 'pressure': 'pressure'}), '(system=systems[environment], temperature=\n temperature, pressure=pressure)\n', (70205, 70283), False, 'from openmmtools import states\n'), ((70359, 70438), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature'}), '(system=systems[environment], temperature=temperature)\n', (70384, 70438), False, 'from openmmtools import states\n'), ((71024, 71076), 
'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (71041, 71076), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((71162, 71265), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature', 'pressure': 'pressure'}), '(system=systems[environment], temperature=\n temperature, pressure=pressure)\n', (71187, 71265), False, 'from openmmtools import states\n'), ((71480, 71559), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature'}), '(system=systems[environment], temperature=temperature)\n', (71505, 71559), False, 'from openmmtools import states\n'), ((71596, 71649), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (71615, 71649), False, 'from openmmtools import states\n'), ((71744, 71769), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (71757, 71769), False, 'import copy\n'), ((77819, 77850), 'openforcefield.topology.Molecule.from_openeye', 'Molecule.from_openeye', (['molecule'], {}), '(molecule)\n', (77840, 77850), False, 'from openforcefield.topology import Molecule\n'), ((78795, 78826), 'openforcefield.topology.Molecule.from_openeye', 'Molecule.from_openeye', (['molecule'], {}), '(molecule)\n', (78816, 78826), False, 'from openforcefield.topology import Molecule\n'), ((80100, 80157), 'simtk.openmm.app.Modeller', 'app.Modeller', (['topologies[component]', 'positions[component]'], {}), '(topologies[component], positions[component])\n', (80112, 80157), False, 'from simtk.openmm import app\n'), ((82469, 82572), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature', 'pressure': 
'pressure'}), '(system=systems[environment], temperature=\n temperature, pressure=pressure)\n', (82494, 82572), False, 'from openmmtools import states\n'), ((82646, 82725), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature'}), '(system=systems[environment], temperature=temperature)\n', (82671, 82725), False, 'from openmmtools import states\n'), ((83323, 83375), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self.storage'], {'envname': 'environment'}), '(self.storage, envname=environment)\n', (83340, 83375), False, 'from perses.storage import NetCDFStorage, NetCDFStorageView\n'), ((83461, 83564), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature', 'pressure': 'pressure'}), '(system=systems[environment], temperature=\n temperature, pressure=pressure)\n', (83486, 83564), False, 'from openmmtools import states\n'), ((83779, 83858), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': 'systems[environment]', 'temperature': 'temperature'}), '(system=systems[environment], temperature=temperature)\n', (83804, 83858), False, 'from openmmtools import states\n'), ((83895, 83948), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (83914, 83948), False, 'from openmmtools import states\n'), ((84043, 84068), 'copy.deepcopy', 'copy.deepcopy', (['self._move'], {}), '(self._move)\n', (84056, 84068), False, 'import copy\n'), ((90566, 90590), 'openforcefield.topology.Molecule.from_openeye', 'Molecule.from_openeye', (['q'], {}), '(q)\n', (90587, 90590), False, 'from openforcefield.topology import Molecule\n'), ((100653, 100671), 'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['q'], {}), '(q)\n', (100668, 100671), False, 'from perses.utils.openeye import 
smiles_to_oemol\n')] |
# Required libraries
import pandas as pd
import logging as log
import time
import numpy as np
from scipy import signal
from sklearn.feature_extraction.text import CountVectorizer
# Given an iterable (list or Series), turns it into a bag of words matrix (DataFrame)
def get_bow(iterable, vocabulary=None, prefix=''):
    """Build a bag-of-words matrix from an iterable of documents.

    Parameters
    ----------
    iterable : list or pandas.Series of str
        The documents to vectorize.
    vocabulary : list of str, optional
        If given, counting is restricted to these words (lower-cased first,
        as CountVectorizer requires a lowercase vocabulary).
    prefix : str
        String prepended to each upper-cased feature name to form the
        output column labels.

    Returns
    -------
    pandas.DataFrame
        One row per document, one column per vocabulary word, holding counts.
    """
    # CountVectorizer only accepts lowercase vocabulary entries
    if vocabulary:
        vocabulary = [word.lower() for word in vocabulary]
    vectorizer = CountVectorizer(vocabulary=vocabulary)
    # Dense count matrix; columns are '<prefix><FEATURE>' in upper case
    counts = vectorizer.fit_transform(iterable).toarray()
    columns = ['{}{}'.format(prefix, feature.upper())
               for feature in vectorizer.get_feature_names()]
    return pd.DataFrame(counts, columns=columns)
def get_bow_residues(residues, vocabulary=None, prefix='RES_NAME_'):
    """Bag-of-words over residue names; columns are prefixed with RES_NAME_.

    Bug fix: the body previously forwarded the undefined name ``structs``
    (a copy-paste from ``get_bow_structures``), which raised NameError on
    every call. It now forwards the ``residues`` parameter.
    """
    return get_bow(residues, vocabulary, prefix)
def get_bow_structures(structs, vocabulary=None, prefix='STRUCT_'):
    """Bag-of-words over secondary-structure labels; columns are prefixed with STRUCT_."""
    return get_bow(structs, vocabulary=vocabulary, prefix=prefix)
def get_bow_edge_loc(structs, vocabulary=None, prefix='EDGE_LOC_'):
    """Bag-of-words over edge-location labels; columns are prefixed with EDGE_LOC_."""
    return get_bow(structs, vocabulary=vocabulary, prefix=prefix)
def get_bow_edge_type(structs, vocabulary=None, prefix='EDGE_TYPE_'):
    """Bag-of-words over edge-type labels; columns are prefixed with EDGE_TYPE_.

    Consistency fix: the default prefix was ``'EDGE_TYPE'`` without the
    trailing underscore used by every sibling helper (``RES_NAME_``,
    ``STRUCT_``, ``EDGE_LOC_``), which produced fused column names such as
    ``EDGE_TYPEHELIX``. Callers that relied on the old fused names can pass
    ``prefix='EDGE_TYPE'`` explicitly.
    """
    return get_bow(structs, vocabulary, prefix)
# Given a DataFrame and a column, removes the column and adds BOW columns computed from the latter
def replace_bow(df, col, vocabulary=None, prefix='', drop=False):
    """Append bag-of-words columns derived from ``col`` to ``df``.

    Parameters
    ----------
    df : pandas.DataFrame
        Input frame containing the text column.
    col : str
        Name of the column to vectorize.
    vocabulary : list of str, optional
        Restrict counting to these words (forwarded to :func:`get_bow`).
    prefix : str
        Prefix for the generated BOW column names.
    drop : bool
        When True, the source column is removed from the result.

    Returns
    -------
    pandas.DataFrame
        The frame with BOW columns concatenated on the right.
    """
    # Keep a handle on the source column before (optionally) dropping it
    source = df[col]
    if drop:
        df = df.drop(col, axis=1, inplace=False)
    # Vectorize the source column and join the counts onto the frame
    bow = get_bow(source, vocabulary=vocabulary, prefix=prefix)
    return pd.concat([df, bow], axis=1)
"""
Function to apply sliding windows on a proteins dataset. It uses Gaussian Filtering
"""
def sliding_window(data, k, sd):
    """
    Smooth per-residue features along each protein chain with a Gaussian window.

    For every (PDB_ID, CHAIN_ID) pair, the feature columns (everything after
    the first three identifier columns) are filtered with a rolling Gaussian
    kernel of length ``k`` and standard deviation ``sd``. Chain ends are
    padded by symmetric mirroring so the centered window is defined at the
    borders.

    REQUIRE:
        import pandas as pd
        import numpy as np
        from scipy import signal
    INPUT:
        data = dataframe whose first three columns are PDB_ID, CHAIN_ID,
               RES_ID and whose remaining columns are numeric features
        k = the size of a window (int)
        sd = the standard deviation of the gaussian filter (float)
    OUTPUT:
        A dataframe with sliding windows applied
    """
    # Record wall-clock start so total runtime can be logged at the end
    start = time.time()
    # Work on a copy; the caller's `data` is left untouched
    df_windows = data.copy()
    # Process each protein independently
    for pdb_id in data.PDB_ID.unique():
        # ...and each chain within the protein (filtering is per chain)
        for chain in set(data.CHAIN_ID[data.PDB_ID == pdb_id].unique()):
            # Restrict to the rows of the current chain
            df_sliced = df_windows[(data.PDB_ID == pdb_id)
                                 & (data.CHAIN_ID == chain)]
            # Set PDB_ID, CHAIN_ID and RES_ID apart: the Gaussian filter is
            # applied only to the feature columns (index 3 onwards)
            info_sliced = df_sliced.iloc[:, 0:3]
            # Number of residues in the current chain
            chain_len = len(data.CHAIN_ID[(data.PDB_ID == pdb_id)
                                 & (data.CHAIN_ID == chain)])
            # Mirror the first k//2 rows (excluding row 0) before the chain
            # start, using negative index labels so sort_index() orders them
            # ahead of the real rows
            df_windows_start = pd.DataFrame(np.array(df_sliced.iloc[1:(k//2+1), ]),
                                     index=np.arange(-k//2 + 1, 0, step = 1),
                                     columns=list(data.columns)).sort_index()
            # Mirror the last k//2 rows (excluding the final row) after the
            # chain end, with index labels past chain_len-1
            df_windows_end = pd.DataFrame(np.array(df_sliced.iloc[chain_len-(k//2 + 1):chain_len-1, ]),
                                     index=np.arange(chain_len-1 + k//2,chain_len-1, step = -1),
                                     columns=list(data.columns)).sort_index()
            # Stitch the padded chain together: start mirror + chain + end mirror
            # NOTE(review): DataFrame.append is deprecated in modern pandas;
            # pd.concat is the forward-compatible equivalent — confirm pandas version
            df_with_start_sym = df_windows_start.append(df_sliced)
            df_win_k = df_with_start_sym.append(df_windows_end)
            # MAIN: apply the Gaussian filter to the feature columns only
            sliced = df_win_k.iloc[:, 3:]
            window = signal.gaussian(k, std = sd)
            # Centered rolling dot-product with the Gaussian kernel, scaled by k
            sliced = sliced.rolling(window = k, center = True).apply(lambda x: np.dot(x,window)/k, raw=True)
            # Reattach PDB_ID/CHAIN_ID/RES_ID to the filtered features,
            # aligning on the index (padding rows fall outside the join)
            tot_sliced = pd.merge(info_sliced, sliced.iloc[0:chain_len+k//2,:],
                         right_index=True, left_index=True) # slice up to chain_len + k//2
            # Write the filtered chain back into the output frame
            df_windows[(df_windows.PDB_ID == pdb_id) & (df_windows.CHAIN_ID == chain)] = tot_sliced
    # Debug time
    log.debug('Window sliding took {}'.format(time.time() - start))
    # Return "window slided" dataframe
    return df_windows
| [
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.merge",
"time.time",
"numpy.array",
"numpy.arange",
"numpy.dot",
"scipy.signal.gaussian",
"pandas.concat"
] | [((518, 556), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'vocabulary': 'vocabulary'}), '(vocabulary=vocabulary)\n', (533, 556), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1702, 1730), 'pandas.concat', 'pd.concat', (['[df, bow]'], {'axis': '(1)'}), '([df, bow], axis=1)\n', (1711, 1730), True, 'import pandas as pd\n'), ((2271, 2282), 'time.time', 'time.time', ([], {}), '()\n', (2280, 2282), False, 'import time\n'), ((4077, 4103), 'scipy.signal.gaussian', 'signal.gaussian', (['k'], {'std': 'sd'}), '(k, std=sd)\n', (4092, 4103), False, 'from scipy import signal\n'), ((4311, 4410), 'pandas.merge', 'pd.merge', (['info_sliced', 'sliced.iloc[0:chain_len + k // 2, :]'], {'right_index': '(True)', 'left_index': '(True)'}), '(info_sliced, sliced.iloc[0:chain_len + k // 2, :], right_index=\n True, left_index=True)\n', (4319, 4410), True, 'import pandas as pd\n'), ((4705, 4716), 'time.time', 'time.time', ([], {}), '()\n', (4714, 4716), False, 'import time\n'), ((3193, 3232), 'numpy.array', 'np.array', (['df_sliced.iloc[1:k // 2 + 1,]'], {}), '(df_sliced.iloc[1:k // 2 + 1,])\n', (3201, 3232), True, 'import numpy as np\n'), ((3520, 3585), 'numpy.array', 'np.array', (['df_sliced.iloc[chain_len - (k // 2 + 1):chain_len - 1,]'], {}), '(df_sliced.iloc[chain_len - (k // 2 + 1):chain_len - 1,])\n', (3528, 3585), True, 'import numpy as np\n'), ((4185, 4202), 'numpy.dot', 'np.dot', (['x', 'window'], {}), '(x, window)\n', (4191, 4202), True, 'import numpy as np\n'), ((3283, 3316), 'numpy.arange', 'np.arange', (['(-k // 2 + 1)', '(0)'], {'step': '(1)'}), '(-k // 2 + 1, 0, step=1)\n', (3292, 3316), True, 'import numpy as np\n'), ((3630, 3687), 'numpy.arange', 'np.arange', (['(chain_len - 1 + k // 2)', '(chain_len - 1)'], {'step': '(-1)'}), '(chain_len - 1 + k // 2, chain_len - 1, step=-1)\n', (3639, 3687), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from .batch_transformer import BatchTransformer
class BaseRandomCellTransform(BatchTransformer):
    """ This is a base class used for all sorts of random cell transforms: feature dropout, noise, etc
    The transform works by masked replacement of cells in a batch with some augmented version of the same batch:
    batch.loc[mask] = augmented_batch[mask]
    This transform provides the infrastructure of this transformation, while derived classes
    define their own versions of the augmented batch
    **Parameters:**
    - **n_probs** - a *list*, *tuple* or a *one dimensional numpy array* of probabilities $p_0, p_1, p_2, ... p_n$.
    $p_0$ is a probability for a row to have 0 augmented elements (no augmentation), $p_1$ - one random cells,
    $p_2$ - two random cells, etc. A parameter must have at least 2 values, scalars are not accepted
    - **cols** - a *list*, *tuple* or *one dimensional numpy array* of strings with columns names to be transformed
    Number of columns must be greater or equal the length of **n_probs** parameter simply because there must be
    enough columns to choose from to augment n elements in a row
    - **col_probs** - (optional) a *list*, *tuple* or *one dimensional numpy array* of floats
    $p_{c_0}, p_{c_1}, ... p_{c_k}$ where k is the number of columns specified in parameter cols.
    $p_{c_0}$ is the probability of column 0 from parameter *cols* to be selected in when only one column is picked
    for augmentation. $p_{c_1}$ is the same for column 1, etc. It is important to understand that when two or more
    columns, are picked for a row, actual frequencies of columns will drift towards equal distribution with every
    new item added. In a case when number of columns picked for augmentation reaches its max allowed value
    (number of columns available to choose from parameter **cols**), there will be no choice and the actual counts
    of columns will be equal. This means the actual distribution will turn into a uniform discrete distribution.
    **Default: None**
    - **data_fork** - (optional) a single string setting the transformer to process only this index at level 0
    when data has multiindex columns. The typical use scenario of this parameter is de-noising autoencoders
    when same data is fed to both inputs (x) and outputs (y) of the model, but data pushed to inputs (x)
    is augmented. In this case, data is forked by copying to different multiindex values (x and y). By using this
    parameter you can set the transformer to process only x 'fork' of data
    """
    def __init__(self, n_probs, cols, col_probs=None, data_fork=None):
        super().__init__()
        # Validate cols first: _validate_n_probs depends on self._cols.
        self._cols = self._validate_cols(cols)
        self._col_probs = self._validate_col_probs(col_probs)
        self._n_probs = self._validate_n_probs(n_probs)
        self._col_factors = self._calculate_col_weights(self._col_probs)
        self._fork = self._validate_data_fork(data_fork)
        # seed table is a technical object used for vectorised implementation of n_probs:
        # a lower-triangular 0/1 matrix whose row i has exactly i ones; rows are
        # sampled with n_probs weights to decide how many cells each batch row gets.
        self._seed = np.tril(np.ones((self._n_probs.shape[0], self._n_probs.shape[0]-1), dtype=np.int64), k=-1)
    def _validate_vector(self, vector, name, numeric=True):
        """Coerce *vector* into a one-dimensional numpy array (or None).

        Scalars (int/float) are wrapped into a one-element array. When
        *numeric* is True, a test conversion to float verifies all elements
        are numeric. Raises ValueError on any violation.
        """
        if vector is None:
            return None
        if type(vector) not in [list, tuple, int, float, np.ndarray]:
            raise ValueError(f'Error. Parameter {name} must be one of the following types: list, tuple, single int or'
                             f' float. Got {type(vector)}')
        if type(vector) in [int, float]:
            vector = [vector]
        if numeric:
            try:
                # Test conversion only; the result is intentionally discarded.
                np.array(vector, dtype=float)
            except ValueError:
                raise ValueError(f'Error. parameter {name} must be all-numeric')
        if type(vector) != np.ndarray:
            vector = np.array(vector)
        if vector.ndim > 1:
            raise ValueError(f'Error. parameter {name} must be a one-dimensional array or a scalar')
        return vector
    def _validate_n_probs(self, n_probs):
        """Validate the n_probs vector: mandatory, sums to 1, length in [2, len(cols)+1]."""
        if n_probs is None:
            raise ValueError('Error. parameter n_probs must be set')
        nb = self._validate_vector(n_probs, 'n_probs')
        # n_probs has entries for 0..n augmented cells, so at most len(cols)+1 entries.
        if nb.shape[0] > self._cols.shape[0] + 1:
            raise ValueError('Error. Length of parameter n_probs must not exceed the length of parameter'
                             ' cols + 1. There must be enough columns to choose from to fill all positions specified'
                             ' in n_probs')
        if np.abs(nb.sum() - 1.) > 0.001:
            raise ValueError('Error. n_probs do not add up to 1.')
        if nb.shape[0] < 2:
            raise ValueError('Error. Parameter n_probs must have at least 2 values')
        return nb
    def _validate_cols(self, cols):
        """Validate the column names and return them as a numpy array of strings."""
        if type(cols) == str:
            cols = [cols]
        if type(cols) in [list, tuple, np.ndarray]:
            if not all([type(s) == str for s in cols]):
                raise ValueError('Error: parameter cols can only contain strings')
        else:
            raise ValueError('Error: parameter cols must be a single column name a list of column names')
        c = np.array(cols)
        return c
    def _validate_col_probs(self, col_probs):
        """Validate col_probs (defaults to uniform weights matching self._cols)."""
        if col_probs is None:
            cp = np.ones(shape=self._cols.shape)
        else:
            cp = self._validate_vector(col_probs, 'col_probs')
            if len(cp) == 1:
                raise ValueError('Error. parameter col_probs is not accepted when only one column in augmented')
            if cp.shape != self._cols.shape:
                raise ValueError('Error. parameters cols and col_probs must have same shape')
        return cp
    def _validate_data_fork(self, fork):
        """Validate the optional data_fork value: None or a single string."""
        if fork is None:
            return fork
        if type(fork) != str:
            raise ValueError('Error. Fork must be a single string value')
        return fork
    def _make_mask(self, batch):
        """ This method creates a binary mask that marks items in an incoming batch that have to be augmented
        The elements are selected taking the following parameters into account:
        - n_probs - list of probabilities of picking 0, 1, ... n items in one row respectively
        - cols - list of columns subjected to augmentation $colname_0, colname_1, ... colname_k$. $k \lt n$ to provide
        enough choice for column picking.
        - col_probs - expected frequencies $p_0, p_1 ... p_k$ of columns to be augmented in a one-per-row basis
        **Parameters:**
        **Returns:** a numpy array of 0/1 floats with one row per batch row and one column per
        augmentable column; 1 marks elements that have to be augmented
        There is a naive way to pick columns for augmentation by using random choice for each column separately.
        This way I could use col_probs directly in a choice function. This however **is quite slow method** as it
        requires one call of random choice function per row.
        This object utilises a vectorized approach for picking rows:
        1. generate a matrix of random standard uniform floats of size (batch_size, number of cols K)
        2. argsort and pick leftmost n columns. They will contain indices of picked columns
        3. Because generally not all rows will have n items picked, these indices are multiplied by a n-picking mask,
        which nullifies some of the indices effectively de-selecting them
        3. one hot encode of the remaining indices to make mask
        ### Making n-picking mask
        this mask is used to implement random number of cells per row picking which is set by `n_probs` parameter
        It is created by sampling with replacement from a lower triangle matrix:
        ```0, 0, 0
        1, 0, 0
        1, 1, 0
        1, 1, 1
        ```
        using `n_probs` as weights. In this case, this matrix can be used for generating a mask where 0 to 3 cells
        can be augmented in each row.
        ###Performance
        the tests show 3 times performance increase when using vectorised version comparing with naive implementation
        """
        rand = np.random.uniform(size=(batch.shape[0], self._cols.shape[0]))
        # idx is a rectangular matrix of ids of columns selected randomly with column weighting;
        # ids are shifted by +1 so that 0 can mean "no column selected" after masking below.
        idx = np.argsort(np.power(rand, self._col_factors))[:, :(self._n_probs.shape[0]-1)] + 1
        # now I will create a mask implementing n_probs (randomly picking rows with 0, 1, 2 etc cells picked)
        seed_idx = np.random.choice(range(self._seed.shape[0]), size=idx.shape[0], p=self._n_probs)
        idx = idx * self._seed[seed_idx, :]
        # one-hot encode the surviving (non-zero) column ids; column 0 is the "none" bucket.
        b = np.zeros((idx.shape[0], self._cols.shape[0] + 1))
        for i in range(idx.shape[1]):
            b[np.arange(idx.shape[0]), idx[:, i]] = 1
        return b[:, 1:]
    def _calculate_col_weights(self, col_probs):
        """ Calculate power factors for transformation according to desired frequencies
        The weighed col sampler is using vectorized argsort for selecting unique ids in each row.
        The downside of this approach is that it doesn't use weighting.
        I.e. I can't make one column more preferable if there is a choice of columns in each row.
        When using uniform distribution as is, all variables become equally possible which means each
        column can be selected with equal probability when only one column is chosen
        to illustrate why this is happening, I will use CDF of a uniform distribution $X$
        For a standard uniform distribution in unit interval $[0,1]$, the CDF function is
        $$
        CDF(X) = x : 0\le x\le 1
        $$
        CDF sets the probability of a random variable to evaluate less than x
        $$
        CDF(X, x) = p(X \le x)
        $$
        I can calculate probability of one variable be less than another $p(X_1 \le X_2)$.
        For that I need to integrate the CDF:
        $$
        p(X_1 \\le X_2) = \\int_0^1 CDF(X_2) dX_1 = \\int_0^1 x dX_1 = \\int_0^1 x \\cdot 1 \\cdot dx =
        $$
        $$
        \\bigl(\\frac{x^2}{2} + C\\bigr) \\biggr\\rvert_0^1 = \\frac{1}{2}
        $$
        then 3 variables are used, I will calculate joint probability
        $$
        p(X_1 \\le X_2, X_1 \\le X_3) = \\int_0^1 CDF(X_2) \\cdot CDF(X_3) \\cdot dX_1 =
        \\int_0^1 x^2 dX_1 =
        $$
        $$
        \\int_0^1 x^2 \\cdot 1 \\cdot dx = \\bigl(\\frac{x^3}{3} + C\\bigr) \\biggr\\rvert_0^1 = \\frac{1}{3}
        $$
        #### Adding weighting
        Now, how I can skew the outcomes, so that the expectations of them being chosen are not equal,
        but some other ratios? For that, I need to modify distribution of $X$ so that integral changes
        in the way we want. The distributions must not change their intervals and must stay within unit limits $[0,1]$.
        For this reason, I will not use simple scaling.
        Instead, I will use power transformation of standard uniform distribution.
        $$ X_1 = X^{z_1} $$
        $$ X_2 = X^{z_2} $$
        $$ X_3 = X^{z_3} $$
        The power factors $z_1, z_2, z_3$ are not yet known.
        Lets see if we can find them using desired weights $[p_1, p_2, p_3]$ for these variables:
        $$
        p_1 = p(X_1 \le X_2, X_1 \le X_3) = \int_0^1 CDF(X_2) \cdot CDF(X_3) \cdot dX_1 =
        \int_0^1 x^{z_2} x^{z_3} dX_1 =
        $$
        $$
        \int_0^1 x^{z_2} x^{z_3} \frac{dX_1}{dx} dx = \int_0^1 x^{z_2} x^{z_3} (z_1\cdot x^{z_1-1}) dx =
        $$
        $$
        z_1 \int_0^1 x^{z_2} x^{z_3} x^{z_1-1} dx = z_1 \int_0^1 x^{z_1+z_2+z_3-1} dx =
        $$
        $$
        z_1 \\bigl( \\frac{x^{z_1+z_2+z_3-1}}{z_1+z_2+z_3-1} + C\\bigr) \\biggr\\rvert_0^1 =
        \\frac{z_1}{z_1+z_2+z_3}
        $$
        Using the same logic I can make formulas for $p_2$ and $p_3$. All together, they make a system of equations
        $$ p_1 = \\frac{z_1}{z_1+z_2+z_3} $$
        $$ p_2 = \\frac{z_2}{z_1+z_2+z_3} $$
        $$ p_3 = \\frac{z_3}{z_1+z_2+z_3} $$
        This is a funny system which may have infinite number of solution which can be obtained by simply scaling one solution
        vector $z_1, z_2, z_3$ if it exists. Indeed, if a vector that conformed any of the equations is scaled, both nominator
        and denominator get scaled by the same number. This basically means that all possible solutions lay on a line
        $$ (p_1 - 1) z_1+p_1z_2+p_1z_3 = 0 $$
        $$ p_2z_1+(p_2-1)z_2+p_2z_3 = 0 $$
        $$ p_3z_1+p_3z_2+(p_3-1)z_3 = 0 $$
        This is also a homogeneous system of equations which has a simple solution $Z = 0$, which means the line where all
        possible solutions lay crosses zero. Because there is no single solution, the matrix of the equations is singular.
        I will use SVD for finding one of the solution
        $$
        A Z = 0
        $$
        Matrix A can be decomposed to
        $$
        A = UDV^T
        $$
        The solution will be in the n-th column where zero diagonal element is in matrix $D$.
        For above matrix, this element will be on last position. The solution will be located in the last row of matrix V
        """
        # first, normalize p
        cp = col_probs / col_probs.sum()
        # then create a matrix A
        a = np.tile(np.expand_dims(cp, -1), (1, cp.shape[0])) - np.eye(cp.shape[0])
        u, d, v = np.linalg.svd(a)
        weights = v[-1, :]
        # a is singular and might have multiple solutions: z=0, z, and -z. We only want positive z
        if weights.sum() < 0:
            weights *= -1
        return weights
    def _make_augmented_version(self, batch):
        """Return an augmented copy of *batch*. Must be overridden by subclasses.

        Bug fix: the original `raise NotImplemented()` raised a TypeError
        because NotImplemented is a non-callable singleton, not an exception;
        NotImplementedError is the correct exception for an abstract hook.
        """
        raise NotImplementedError(
            f'{type(self).__name__} must implement _make_augmented_version')
    def transform(self, batch):
        """Replace a random selection of cells in *batch* with augmented values.

        When a data fork is configured, only the columns under that top-level
        multiindex key are touched; otherwise self._cols of the flat frame.
        The batch is modified in place and also returned.
        """
        if self._fork:
            if len(batch.columns.names) != 2:
                raise KeyError(f'Error: The data passed to {type(self).__name__} is not forked, while fork parameter '
                               f'is specified. Please add multiindex level to columns of your data or use DataFork '
                               f'batch transform before.')
            if self._fork not in batch.columns.get_level_values(0):
                raise KeyError(f"Error: fork {self._fork} specified as a parameter 'data_fork' was not found in data. "
                               f"The following forks were found: {set(batch.columns.get_level_values(0))}. Please "
                               f"make sure you are using DataFork that is configured to provide this a fork with the"
                               f"name specified.")
            # the top level of multiindex is dropped here to avoid a hassle of handling it in methods _make_mask and
            # _make_augmented_version. This dropped level will be added later when merged back with the batch
            subset = batch[self._fork][self._cols].copy()
        else:
            subset = batch[self._cols].copy()
        mask = self._make_mask(subset)
        augmented_batch = self._make_augmented_version(subset)
        transformed = subset.mask(mask.astype(bool), augmented_batch)
        if self._fork:
            # in order for loc to work, the top level index must be restored
            transformed.columns = pd.MultiIndex.from_product([[self._fork], transformed.columns])
            batch.loc[:, (self._fork, self._cols)] = transformed
        else:
            batch.loc[:, self._cols] = transformed
        return batch
    def inverse_transform(self, batch):
        """No-op: the batch is returned unchanged."""
        return batch
| [
"numpy.random.uniform",
"numpy.power",
"numpy.zeros",
"numpy.ones",
"numpy.expand_dims",
"pandas.MultiIndex.from_product",
"numpy.linalg.svd",
"numpy.array",
"numpy.arange",
"numpy.eye"
] | [((5537, 5551), 'numpy.array', 'np.array', (['cols'], {}), '(cols)\n', (5545, 5551), True, 'import numpy as np\n'), ((8533, 8594), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(batch.shape[0], self._cols.shape[0])'}), '(size=(batch.shape[0], self._cols.shape[0]))\n', (8550, 8594), True, 'import numpy as np\n'), ((9053, 9102), 'numpy.zeros', 'np.zeros', (['(idx.shape[0], self._cols.shape[0] + 1)'], {}), '((idx.shape[0], self._cols.shape[0] + 1))\n', (9061, 9102), True, 'import numpy as np\n'), ((13181, 13197), 'numpy.linalg.svd', 'np.linalg.svd', (['a'], {}), '(a)\n', (13194, 13197), True, 'import numpy as np\n'), ((3415, 3492), 'numpy.ones', 'np.ones', (['(self._n_probs.shape[0], self._n_probs.shape[0] - 1)'], {'dtype': 'np.int64'}), '((self._n_probs.shape[0], self._n_probs.shape[0] - 1), dtype=np.int64)\n', (3422, 3492), True, 'import numpy as np\n'), ((4189, 4205), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (4197, 4205), True, 'import numpy as np\n'), ((5663, 5694), 'numpy.ones', 'np.ones', ([], {'shape': 'self._cols.shape'}), '(shape=self._cols.shape)\n', (5670, 5694), True, 'import numpy as np\n'), ((13143, 13162), 'numpy.eye', 'np.eye', (['cp.shape[0]'], {}), '(cp.shape[0])\n', (13149, 13162), True, 'import numpy as np\n'), ((15024, 15087), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[[self._fork], transformed.columns]'], {}), '([[self._fork], transformed.columns])\n', (15050, 15087), True, 'import pandas as pd\n'), ((3987, 4016), 'numpy.array', 'np.array', (['vector'], {'dtype': 'float'}), '(vector, dtype=float)\n', (3995, 4016), True, 'import numpy as np\n'), ((13099, 13121), 'numpy.expand_dims', 'np.expand_dims', (['cp', '(-1)'], {}), '(cp, -1)\n', (13113, 13121), True, 'import numpy as np\n'), ((8716, 8749), 'numpy.power', 'np.power', (['rand', 'self._col_factors'], {}), '(rand, self._col_factors)\n', (8724, 8749), True, 'import numpy as np\n'), ((9155, 9178), 'numpy.arange', 'np.arange', 
(['idx.shape[0]'], {}), '(idx.shape[0])\n', (9164, 9178), True, 'import numpy as np\n')] |
from pgfutils import save, setup_figure
setup_figure(
width=0.5,
height=0.4,
preamble_substitute=True,
preamble=r"""
\usepackage{fontspec}
\setmainfont{CothamSans}[Path=${basedir}/../Cotham/,Extension=.otf]""",
)
from matplotlib import pyplot as plt
import numpy as np
t = np.linspace(-4, 4, 201)
plt.plot(t, 2 * np.sin(2 * np.pi * 2.5 * t))
save()
| [
"pgfutils.setup_figure",
"numpy.sin",
"pgfutils.save",
"numpy.linspace"
] | [((42, 238), 'pgfutils.setup_figure', 'setup_figure', ([], {'width': '(0.5)', 'height': '(0.4)', 'preamble_substitute': '(True)', 'preamble': '"""\n \\\\usepackage{fontspec}\n \\\\setmainfont{CothamSans}[Path=${basedir}/../Cotham/,Extension=.otf]"""'}), '(width=0.5, height=0.4, preamble_substitute=True, preamble=\n """\n \\\\usepackage{fontspec}\n \\\\setmainfont{CothamSans}[Path=${basedir}/../Cotham/,Extension=.otf]"""\n )\n', (54, 238), False, 'from pgfutils import save, setup_figure\n'), ((310, 333), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(201)'], {}), '(-4, 4, 201)\n', (321, 333), True, 'import numpy as np\n'), ((380, 386), 'pgfutils.save', 'save', ([], {}), '()\n', (384, 386), False, 'from pgfutils import save, setup_figure\n'), ((350, 377), 'numpy.sin', 'np.sin', (['(2 * np.pi * 2.5 * t)'], {}), '(2 * np.pi * 2.5 * t)\n', (356, 377), True, 'import numpy as np\n')] |
# This is the line list.
import re
import logging
import numpy as np
from . import utilsLists as lists
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# NOTE(review): forcing DEBUG on a library module logger overrides the
# application's logging configuration — confirm this is intentional.
logger.setLevel(logging.DEBUG)
# Drawn from Splatalogue at http://www.cv.nrao.edu/php/splat/
# Lists that combine multiple transitions for a single line
line_families = {
'co': [
'co10', 'co21', 'co32', 'co43', 'co54', 'co65'],
'13co': [
'13co10', '13co21', '13co32', '13co43', '13co54', '13co65'],
'c18o': [
'c18o10', 'c18o21', 'c18o32', 'c18o43', 'c18o54', 'c18o65'],
'hcn': [
'hcn10', 'hcn21', 'hcn32', 'hcn43', 'hcn54', 'hcn65', 'hcn76'],
'h13cn': [
'h13cn10', 'h13cn21', 'h13cn32', 'h13cn43', 'h13cn54', 'h13cn65',
'h13cn76'],
'hnc': [
'hnc10', 'hnc21', 'hnc32', 'hnc43', 'hnc54', 'hnc65', 'hnc76'],
'hn13c': [
'hn13c10', 'hn13c21', 'hn13c32', 'hn13c43', 'hn13c54', 'hn13c65',
'hn13c76'],
'hcop': [
'hcop10', 'hcop21', 'hcop32', 'hcop43', 'hcop54', 'hcop65',
'hcop76'],
'h13cop': [
'h13cop10', 'h13cop21', 'h13cop32', 'h13cop43', 'h13cop54',
'h13cop65', 'h13cop76'],
'cs': [
'cs10', 'cs21', 'cs32', 'cs43', 'cs54', 'cs65', 'cs76', 'cs87',
'cs98', 'cs109', 'cs1110', 'cs1211', 'cs1312', 'cs1413'],
'13cs': [
'13cs10', '13cs21', '13cs32', '13cs43', '13cs54', '13cs65', '13cs76',
'13cs87', '13cs98', '13cs109', '13cs1110', '13cs1211', '13cs1312',
'13cs1413'],
'sio': [
'sio10', 'sio21', 'sio32', 'sio43', 'sio54', 'sio65', 'sio76', 'sio87',
'sio98', 'sio109', 'sio1110', 'sio1211', 'sio1312', 'sio1413',
'sio1514', 'sio1615'],
'hi': ['hi21cm'],
'ci': ['ci10', 'ci21'],
'nh3': ['nh311', 'nh322', 'nh333', 'nh344'],
'n2hp': ['n2hp10', 'n2hp21', 'n2hp32', 'n2hp43'],
'halpha': [ # only those in ALMA bands (for now)
'h19alpha',
'h21alpha',
'h24alpha', 'h25alpha',
'h26alpha', 'h27alpha', 'h28alpha',
'h29alpha', 'h30alpha',
'h31alpha', 'h32alpha', 'h33alpha',
'h34alpha', 'h35alpha', 'h36alpha',
'h38alpha', 'h39alpha', 'h40alpha', 'h41alpha',
'h42alpha', 'h43alpha', 'h44alpha', 'h45alpha',
'h53alpha', 'h54alpha', 'h55alpha', 'h56alpha', 'h57alpha', 'h58alpha',
]
}
# The line list dictionary
line_list = {
'co65': 691.47308,
'co54': 576.26793,
'co43': 461.04077,
'co32': 345.79599,
'co21': 230.53800,
'co10': 115.27120,
'13co65': 661.06728,
'13co54': 550.92629,
'13co43': 440.76517,
'13co32': 330.58797,
'13co21': 220.39868,
'13co10': 110.20135,
'c18o65': 658.55328,
'c18o54': 548.83101,
'c18o43': 439.08877,
'c18o32': 329.33055,
'c18o21': 219.56035,
'c18o10': 109.78217,
'hcn10': 88.63185, # J=1-0, F=2-1
'hcn21': 177.26111, # J=2-1, F=2-1
'hcn32': 265.88618,
'hcn43': 354.50548,
'hcn54': 443.11616,
'hcn65': 531.71639,
'hcn76': 620.30410,
'h13cn10': 86.33992140,
'h13cn21': 172.67785120,
'h13cn32': 259.01179760,
'h13cn43': 345.33976930,
'h13cn54': 431.65977480,
'h13cn65': 517.96982100,
'h13cn76': 604.26791400,
'cs10': 48.99095,
'cs21': 97.98095,
'cs32': 146.96903,
'cs43': 195.95421,
'cs54': 244.93556,
'cs65': 293.91209,
'cs76': 342.88285,
'cs87': 391.84689,
'cs98': 440.80323,
'cs109': 489.75092,
'cs1110': 538.68900,
'cs1211': 587.61649,
'cs1312': 636.53246,
'cs1413': 685.43592,
'13cs10': 46.24756320,
'13cs21': 92.49430800,
'13cs32': 138.73933500,
'13cs43': 184.98177200,
'13cs54': 231.22068520,
'13cs65': 277.45540500,
'13cs76': 323.68497300,
'13cs87': 369.90855050,
'13cs98': 416.12527510,
'13cs109': 462.33429010,
'13cs1110': 508.53473910,
'13cs1211': 554.72576570,
'13cs1312': 600.90648000,
'13cs1413': 647.07615000,
'hcop10': 89.18852,
'hcop21': 178.37506,
'hcop32': 267.55763,
'hcop43': 356.73422,
'hcop54': 445.90287,
'hcop65': 535.06158,
'hcop76': 624.20836,
'h13cop10': 86.75428840,
'h13cop21': 173.50670030,
'h13cop32': 260.25533900,
'h13cop43': 346.99834400,
'h13cop54': 433.73383270,
'h13cop65': 520.45988430,
'h13cop76': 607.17464560,
'hnc10': 90.66357,
'hnc21': 181.32476,
'hnc32': 271.98114,
'hnc43': 362.63030,
'hnc54': 453.26992,
'hnc65': 543.89755,
'hnc76': 634.51083,
'hn13c10': 87.09085000,
'hn13c21': 174.17940800,
'hn13c32': 261.26331010,
'hn13c43': 348.34026950,
'hn13c54': 435.40796260,
'hn13c65': 522.46407300,
'hn13c76': 609.50628400,
'ci10': 492.16065, # 3P1-3P0
'ci21': 809.34197, # 3P2-3P1
'sio10': 43.42376,
'sio21': 86.84696,
'sio32': 130.26861,
'sio43': 173.68831,
'sio54': 217.10498,
'sio65': 260.51802,
'sio76': 303.92696,
'sio87': 347.33063,
'sio98': 390.72845,
'sio109': 434.11955,
'sio1110': 477.50310,
'sio1211': 520.87820,
'sio1312': 564.24396,
'sio1413': 607.59942,
'sio1514': 650.94359,
'sio1615': 694.27543,
'hi21cm': 1.420405751,
'nh311': 23.6944955,
'nh322': 23.72263333,
'nh333': 23.8701296,
'nh344': 24.1394169,
'n2hp10': 93.1733977,
'n2hp21': 186.3446844,
'n2hp32': 279.5117491,
'n2hp43': 372.6724808,
'h19alpha': 888.047022,
'h21alpha': 662.404162,
'h24alpha': 447.540278,
'h25alpha': 396.900834,
'h26alpha': 353.622747,
'h27alpha': 316.415425,
'h28alpha': 284.250571,
'h29alpha': 256.302035,
'h30alpha': 231.900928,
'h31alpha': 210.501771,
'h32alpha': 191.656728,
'h33alpha': 174.995805,
'h34alpha': 160.211511,
'h35alpha': 147.046878,
'h36alpha': 135.286032,
'h38alpha': 115.274399,
'h39alpha': 106.737357,
'h40alpha': 99.022952,
'h41alpha': 92.034434,
'h42alpha': 85.688390,
'h43alpha': 79.912651,
'h44alpha': 74.644562,
'h45alpha': 69.829551,
'h53alpha': 42.951968,
'h54alpha': 40.630498,
'h55alpha': 38.473358,
'h56alpha': 36.466260,
'h57alpha': 34.596383,
'h58alpha': 32.852196,
}
# Run some consistency checks
def run_checks():
    """
    Run internal consistency checks on the module tables.

    Verifies that every line referenced in ``line_families`` exists in
    ``line_list`` and that no two entries of ``line_list`` share a
    frequency. Results are printed; nothing is returned.
    """
    from itertools import combinations

    all_okay = True
    for family in line_families:
        for this_line in line_families[family]:
            if this_line not in line_list.keys():
                print(
                    "Line missing from line list but "
                    "in line families: " + this_line)
                all_okay = False
    if all_okay:
        print("All lines in line families present in line list.")
    no_repeats = True
    # combinations() visits each unordered pair exactly once, so a duplicate
    # frequency is reported a single time (the previous double loop printed
    # every offending pair twice, once per ordering).
    for line_a, line_b in combinations(line_list, 2):
        if line_list[line_a] == line_list[line_b]:
            print(
                "Duplicate frequencies for: " + line_a +
                " and " + line_b + " . Check for typos.")
            no_repeats = False
    if no_repeats:
        print("No repeat frequencies in list.")
# Find line in line list
def get_line_name_and_frequency(line, exit_on_error=True):
    """
    Resolve *line* against the module line_list and return (name, frequency_ghz).

    Matching is attempted with the name as given, then lower-cased, then
    lower-cased with non-alphanumeric characters stripped. On failure either
    an exception is raised (exit_on_error=True) or a warning is logged and
    (None, None) is returned.
    """
    candidate_names = [
        line,
        line.lower(),
        re.sub(r'[^0-9a-zA-Z]', r'', line.lower()),
    ]
    for candidate in candidate_names:
        if candidate in line_list:
            return candidate, line_list[candidate]
    # No candidate matched: report the failure.
    if exit_on_error:
        logger.error(
            'Error! Could not find the input line "' + line +
            '" in our line_list module. Candiate line names are: ' +
            str(line_list.keys()))
        raise Exception(
            'Error! Could not find the input line "' + line +
            '" in our line_list module.')
    logger.warning(
        'Could not find the input line "' + line +
        '" in our line_list module. ')
    return None, None
# Find line in line families
def get_line_names_in_line_family(line, exit_on_error=True):
    """
    Return the list of line names belonging to the line family *line*.

    The family name is matched case-insensitively with non-alphanumeric
    characters stripped. On failure either an exception is raised
    (exit_on_error=True) or a warning is logged and [] is returned.
    """
    family_key = re.sub(r'[^0-9a-zA-Z]', r'', line.lower())
    if family_key in line_families:
        # Return a fresh list so callers cannot mutate the module table.
        return list(line_families[family_key])
    # Unknown family: report the failure.
    if exit_on_error:
        logger.error(
            'Error! Could not find the input line family "' + line +
            '" in our line_list module. Candiate line families are: ' +
            str(line_families.keys()))
        raise Exception(
            'Error! Could not find the input line family "' + line +
            '" in our line_list module.')
    logger.warning(
        'Could not find the input line family "' + line +
        '" in our line_list module. ')
    return []
def is_line_family(line_tag=''):
    """Return True when *line_tag* (case/punctuation insensitive) names a line family."""
    cleaned_tag = re.sub(r'[^0-9a-zA-Z]', r'', line_tag.lower())
    return cleaned_tag in line_families
def get_ghz_range_for_line(
        line=None, restfreq_ghz=None, vsys_kms=None, vwidth_kms=None,
        vlow_kms=None, vhigh_kms=None):
    """
    Return a (low_ghz, high_ghz) frequency range for a line.

    Provide either a line code (looked up via get_line_name_and_frequency)
    or an explicit rest frequency in GHz, plus a velocity window given as
    either vsys+vwidth or vlow+vhigh [km/s]. If both windows are supplied,
    vlow+vhigh wins; if neither is supplied, a warning is logged and None
    is returned. Raises if no rest frequency can be determined.
    """
    # Physical constants
    sol_kms = 2.9979246e5  # speed of light [km/s]
    vsys_method = (vsys_kms is not None) and (vwidth_kms is not None)
    vlow_method = (vlow_kms is not None) and (vhigh_kms is not None)
    if not vsys_method and not vlow_method:
        logger.warning(
            "Neither vsys+vwidth and vlow+vhigh specified. Returning.")
        return None
    elif vlow_method:
        use_vsys = False
        if vsys_method:
            logger.warning(
                "Both vsys+vwidth and vlow+vhigh specified. "
                "Using vlow method.")
    else:
        use_vsys = True
    if restfreq_ghz is None:
        if line is None:
            # Consistency fix: use the module logger (the original called the
            # root logging.error here, unlike the rest of this module).
            logger.error(
                "Specify a line name or provide a rest frequency in GHz.")
            raise Exception("No rest frequency specified.")
        restfreq_ghz = (
            get_line_name_and_frequency(line, exit_on_error=True))[1]
    if use_vsys:
        vlow_kms = vsys_kms - vwidth_kms / 2.0
        vhigh_kms = vsys_kms + vwidth_kms / 2.0
    # Non-relativistic Doppler shift; note higher velocity -> lower frequency.
    line_edge_ghz = [restfreq_ghz * (1. - vlow_kms / sol_kms),
                     restfreq_ghz * (1. - vhigh_kms / sol_kms)]
    line_high_ghz = np.max(line_edge_ghz)
    line_low_ghz = np.min(line_edge_ghz)
    return line_low_ghz, line_high_ghz
def get_ghz_range_for_list(
        line_list=[], vsys_kms=None, vwidth_kms=None,
        vlow_kms=None, vhigh_kms=None):
    """
    Return merged (low, high) GHz windows for a list of line or line-family
    codes, given either vsys+vwidth or vlow+vhigh [km/s].
    """
    # Accept a bare scalar code as a one-element list.
    if np.isscalar(line_list):
        line_list = [line_list]
    # Expand family codes into their member lines; pass plain codes through.
    expanded = []
    for code in line_list:
        if is_line_family(code):
            expanded.extend(get_line_names_in_line_family(code))
        else:
            expanded.append(code)
    # Compute one frequency window per line, then merge overlapping windows.
    windows = []
    for code in expanded:
        low_ghz, high_ghz = get_ghz_range_for_line(
            line=code, vsys_kms=vsys_kms, vwidth_kms=vwidth_kms,
            vlow_kms=vlow_kms, vhigh_kms=vhigh_kms)
        windows.append((low_ghz, high_ghz))
    return lists.merge_pairs(windows)
| [
"logging.error",
"numpy.isscalar",
"numpy.max",
"numpy.min",
"logging.getLogger"
] | [((116, 143), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (133, 143), False, 'import logging\n'), ((11640, 11661), 'numpy.max', 'np.max', (['line_edge_ghz'], {}), '(line_edge_ghz)\n', (11646, 11661), True, 'import numpy as np\n'), ((11681, 11702), 'numpy.min', 'np.min', (['line_edge_ghz'], {}), '(line_edge_ghz)\n', (11687, 11702), True, 'import numpy as np\n'), ((12014, 12036), 'numpy.isscalar', 'np.isscalar', (['line_list'], {}), '(line_list)\n', (12025, 12036), True, 'import numpy as np\n'), ((11150, 11222), 'logging.error', 'logging.error', (['"""Specify a line name or provide a rest frequency in GHz."""'], {}), "('Specify a line name or provide a rest frequency in GHz.')\n", (11163, 11222), False, 'import logging\n')] |
"""
Visualizer classes for GOES-R series.
Authors:
<NAME>, <NAME> (2021)
"""
import argparse
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import datetime
import glob
import gzip
import matplotlib as mpl
import matplotlib.pyplot as plt
import metpy
from netCDF4 import Dataset
import numpy as np
import pandas as pd
import os
import xarray
class Visualizer(object):
    """Visualize GOES-R ABI image-navigation measurements as a vector field.

    Per-chip EW/NS offsets from a measurement CSV are matched to chip
    locations from a Chip DB file and drawn (matplotlib/cartopy) over either
    the L1B image itself or a generic land/ocean map.
    """

    def __init__(self, image_file, measurement_file, band2extract, scene2extract=None,
        vmax=0.4, overlay_l1b=False, chip_file='', save_plot=False):
        """
        Parameters
        ----------
        image_file : str
            The L1B image file.
        measurement_file : str
            The measurement file.
        band2extract : int
            The band to extract.
        scene2extract : str
            The scene to extract. E.g., 1810-07182020, meaning scene falling
            during 18:10 on 07/18/2021.
        vmax : int
            The max to stretch. Larger->less contrast.
        overlay_l1b : {True, False}
            Whether to overlay the L1B image. By default shows the generic
            land/ocean map.
        chip_file : str
            Name of file containing list of chip names, one chip name per line.
        save_plot : {True, False}
            Whether to save the plot or just show it.
        """
        self.image_file = image_file
        self.measurement_file = measurement_file
        self.band2extract = band2extract
        self.scene2extract = scene2extract
        self.vmax = float(vmax)
        self.overlay_l1b = overlay_l1b
        self.chip_file = chip_file
        self.save_plot = save_plot
        self.scene = ''
        # Set when the requested scene is "not in range" of the file.
        self.nir_flg = False

        if self.measurement_file != '':
            # Measurement file names look like <SAT>_<METRIC>_..., e.g. G16_NAV_...
            self.sat = self.measurement_file.split('/')[-1].split('_')[0]
            self.metric = self.measurement_file.split('/')[-1].split('_')[1]
            # Coverage is flagged in the file name itself.
            if 'CONUS' in self.measurement_file:
                self.coverage = 'CONUS'
            else:
                self.coverage = 'FULL'
        else:
            self.sat = ''
            self.metric = ''
            self.coverage = ''

        # Build zero-padded band name, e.g. 3 -> '03', 13 -> '13'.
        if self.band2extract / 10 < 1:
            self.band = '0' + str(self.band2extract)
        else:
            self.band = str(self.band2extract)

    def extract_geoloc(self):
        """Match measurement rows to chip lat/lons from the appropriate Chip DB.

        Returns
        -------
        pandas.DataFrame
            Measurement rows for the requested band (and scene, if any) with
            'Lat'/'Lon' columns appended; rows with no chip match are dropped.
        """
        # Parse the requested scene, e.g. '1810-07182020' -> datetime.
        if self.scene2extract != None:
            date_time = datetime.datetime.strptime(self.scene2extract, '%H%M-%m%d%Y')

        # BBR measurement files are gzipped.  Read from the open handle
        # (the original re-read the path and left the handle unused).
        if self.metric == 'BBR':
            with gzip.open(self.measurement_file) as f:
                measure_df = pd.read_csv(f)
        else:
            measure_df = pd.read_csv(self.measurement_file)

        # Create a datetime column from the activity date/time columns.
        activity_date = np.array(measure_df['ACTIVITY_DATE1'])
        activity_time = np.array(measure_df['ACTIVITY_TIME_1'])
        measure_df['DATETIME'] = [datetime.datetime.strptime(activity_date[j] + '_' + activity_time[j],
            '%m-%d-%Y_%H:%M:%S') for j in range(len(activity_time))]

        # Round the user-requested time to the nearest scene in the file.
        if self.scene2extract != None:
            t = pd.DataFrame(measure_df, columns=['DATETIME'])
            t_df = pd.DataFrame.drop_duplicates(t)
            t_df = t_df.reset_index()
            df_sort = t_df.iloc[(t_df['DATETIME'] - date_time).abs().argsort()[:1]]
            self.scene = df_sort['DATETIME'].iloc[0].strftime('%H:%M')

            # Warn if the requested scene falls outside the file's time range
            # (the closest first/last scene is used instead).
            if not (date_time >= measure_df['DATETIME'].iloc[0] and date_time <= measure_df['DATETIME'].iloc[-1]):
                print("--WARNING: Requested scene ({}) falls outside measurement file. Using closest scene ({}) instead.--"\
                    .format(self.scene2extract, df_sort['DATETIME'].iloc[0].strftime('%H%M-%m%d%Y')))
                self.nir_flg = True
            else:
                print("--Plotting closest scene in file ({})--"\
                    .format(df_sort['DATETIME'].iloc[0].strftime('%m/%d/%Y %H:%M')))

            # Keep only the band and scene of interest (single combined mask
            # instead of chained boolean selections).
            measure_df = measure_df[(measure_df['BAND_NUM'] == self.band2extract)
                                    & (measure_df['DATETIME'] == df_sort['DATETIME'].iloc[0])]
        else:
            self.scene = 'All'
            # Extract the band of interest only.
            measure_df = measure_df[measure_df['BAND_NUM'] == self.band2extract]

        print("Scene: ", self.scene)

        # Read the Chip DB file appropriate for the metric and normalize its
        # columns to chip/lat/lon.
        exe_path = os.path.dirname(os.path.realpath(__file__))
        if self.metric == 'NAV':
            chipdb_df = pd.read_csv(os.path.join(exe_path, 'data', 'other_chipdb.csv'))
            chipdb_new = chipdb_df[['LANDMARK_S24', 'NEWLAT_R', 'NEWLON_R']].copy()
            chipdb_new = chipdb_new.rename(columns={"LANDMARK_S24": "chip", "NEWLAT_R": "lat", "NEWLON_R": "lon"})
        else:
            chipdb_df = pd.read_csv(os.path.join(exe_path, 'data', 'nav_chipdb.csv'))
            chipdb_new = chipdb_df[['name_S24', 'lat_R', 'lon_R']].copy()
            chipdb_new = chipdb_new.rename(columns={"name_S24": "chip", "lat_R": "lat", "lon_R": "lon"})
        # Remove all duplicate rows from Chip DB.
        chipdb_new = chipdb_new.drop_duplicates()
        chipdb_new = chipdb_new.reset_index()

        # Map each chip name to its first (lat, lon) so the per-row lookup
        # below is O(1) instead of scanning the Chip DB for every row.
        chip_coords = {}
        for name, lat, lon in zip(chipdb_new["chip"], chipdb_new["lat"], chipdb_new["lon"]):
            chip_coords.setdefault(name, (lat, lon))

        chip_name = np.array(measure_df['CHIP_NAME'])

        # Restrict to a user-supplied chip list, if one was given.
        if self.chip_file != '':
            chip_list = self.extract_chips()
            print("--Only user-specified chips will be plotted: {}--".format(chip_list))
        else:
            chip_list = chip_name
        chip_set = set(chip_list)

        # Attach a lat/lon to every measurement row; unmatched rows get (0, 0)
        # and are dropped below.
        lat_arr = []
        lon_arr = []
        for name in chip_name:
            if name in chip_coords and name in chip_set:
                lat, lon = chip_coords[name]
                lat_arr.append(lat)
                lon_arr.append(lon)
            else:
                lat_arr.append(0)
                lon_arr.append(0)

        measure_df['Lat'] = lat_arr
        measure_df['Lon'] = lon_arr
        measure_df = measure_df[(measure_df["Lat"] != 0)]
        print("Number of vectors: ", len(measure_df["Lat"]))
        return measure_df

    def extract_chips(self):
        """Return the chip names read from ``self.chip_file`` (one per line)."""
        with open(self.chip_file) as f:
            return [line.strip('\n') for line in f]

    def visualize(self):
        """Plot the measurement offsets as a vector field.

        The field is drawn over the L1B image when ``overlay_l1b`` is set,
        otherwise over a generic land/ocean map; the plot is saved to
        'vplot.png' or shown interactively depending on ``save_plot``.
        """
        # Remove path to get just the filename for parsing purposes.
        image_file = self.image_file.split('/')[-1]

        # Extract geographic coverage; it also determines the colorbar
        # orientation.
        coverage = image_file.split('-')[2].strip('Rad')
        if coverage == 'C':
            coverage = 'CONUS'
            orientation = 'horizontal'
        elif coverage == 'F':
            coverage = 'FULL'
            orientation = 'vertical'
        else:
            # Treat all other coverages as "FULL", for now.
            coverage = 'FULL'
            orientation = 'vertical'

        # Extract satellite from the image name.
        sat = image_file.split('_')[2]

        # Scan start/end timestamps are encoded in the filename
        # (sYYYYJJJHHMMSSs ... eYYYYJJJHHMMSSs).
        start = (image_file[image_file.find("s")+1:image_file.find("_e")])
        start_formatted = start[0:4] + " Day " + start[4:7] + " - " + start[7:9] + ":" + \
            start[9:11] + ":" + start[11:13] + "." + start[13:14] + " UTC"
        end = (image_file[image_file.find("e")+1:image_file.find("_c")])
        end_formatted = end[0:4] + " Day " + end[4:7] + " - " + end[7:9] + ":" + end[9:11] + \
            ":" + end[11:13] + "." + end[13:14] + " UTC"

        # Open the image using the NetCDF4 library.
        nc = Dataset(self.image_file)

        # Projection center longitude from the image metadata.
        geo_extent = nc.variables['geospatial_lat_lon_extent']
        lon_0 = geo_extent.geospatial_lon_center
        print("Measurement file satellite: ", self.sat)
        print("Measurement file metric: ", self.metric)
        print("Measurement file band: ", self.band)
        print("Measurement file coverage: ", self.coverage)
        print("Image satellite: ", sat)
        print("Image coverage: ", coverage)
        print("Image start: ", start)
        print("Image end: ", end)

        # Import the measurements dataframe.
        if self.measurement_file != '':
            measure_df = self.extract_geoloc()
        else:
            print("No measurement file supplied.")

        # Select which NetCDF variable to display.
        if 'Rad' in image_file:
            image_kwd = 'Rad'
        elif 'ACMF' in image_file:
            image_kwd = 'BCM'
        else:
            # Previously fell through and crashed later with an unbound name.
            raise ValueError("Unrecognized image type: {}".format(image_file))

        geos = ccrs.Geostationary(central_longitude=lon_0, satellite_height=35786023.0, sweep_axis='x')

        # Start figure.
        fig = plt.figure(figsize=(12, 8))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=geos)
        open_image = xarray.open_dataset(self.image_file)
        image_data = open_image.metpy.parse_cf(image_kwd)
        image_x = image_data.x
        image_y = image_data.y

        # Set the axis bounds.
        if coverage == 'CONUS':
            ax.set_extent([image_x.min(), image_x.max(), image_y.min(), image_y.max()], crs=geos)
            info_text = 'cyan'
        elif coverage == 'FULL':
            ax.set_global()
            info_text = 'k'

        # Overlay the L1B data.
        if self.overlay_l1b:
            # De-normalize vmax from [0, 1] to the variable's natural range.
            min_range = float(nc.variables[image_kwd].valid_range[0])
            max_range = float(nc.variables[image_kwd].valid_range[1])
            vmax = self.vmax * (max_range - min_range)
            if coverage == 'CONUS':
                vmax = vmax / 3.5
            # Note: increasing vmax lowers contrast (small->black, large->white).
            ax.imshow(open_image[image_kwd][:], origin='upper', cmap='gray', transform=geos, vmax=vmax,
                extent=(image_x.min(), image_x.max(), image_y.min(), image_y.max()))
            # Draw coastlines, country borders, lakes, and grid.
            # See https://scitools.org.uk/cartopy/docs/v0.14/matplotlib/feature_interface.html
            ax.coastlines(linewidth=0.9, linestyle='solid', color='green')
            ax.add_feature(cfeature.BORDERS, linewidth=0.9, linestyle='solid',
                facecolor='none', edgecolor='green')
            ax.add_feature(cfeature.LAKES, linewidth=0.9, linestyle='solid',
                facecolor='none', edgecolor='green')
            ax.gridlines(linewidth=0.3, color='white')
        else:
            # No overlay requested: draw a generic ocean/land map instead.
            ax.stock_img()
            ax.coastlines(linewidth=0.9, linestyle='solid', color='black')
            ax.add_feature(cfeature.BORDERS, linewidth=0.9, linestyle='solid',
                facecolor='none', edgecolor='black')
            ax.add_feature(cfeature.LAKES, linewidth=0.9, linestyle='solid',
                facecolor='skyblue', edgecolor='black')
            ax.add_feature(cfeature.RIVERS, linewidth=0.9, linestyle='solid',
                facecolor='none', edgecolor='skyblue')
            ax.gridlines(linewidth=0.3, color='white')

        # Add a title to the plot.
        plt.title(self.sat + " ABI L1B Band " + self.band + " Scene " + \
            self.scene + " Metric " + self.metric + "\n" + coverage + \
            " Scan from " + start_formatted + " to " + end_formatted)

        # Geospatial extent values from the NetCDF header, for the plot text.
        center = str(geo_extent.geospatial_lon_center)
        west = str(geo_extent.geospatial_westbound_longitude)
        east = str(geo_extent.geospatial_eastbound_longitude)
        north = str(geo_extent.geospatial_northbound_latitude)
        south = str(geo_extent.geospatial_southbound_latitude)

        # Close netCDF file when finished.
        nc.close()
        nc = None

        # Put the information retrieved from the header in the final image.
        plt.text(0.01, 0.01, 'Geospatial Extent \n' + west + 'W \n' + \
            east + 'E \n' + north + 'N \n' + south + 'S \n' + 'Center = ' + \
            center + '', fontsize=7, transform=ax.transAxes, color=info_text)

        # Start time, printed large on the image.
        start_time = start[7:9] + ":" + start[9:11] + ":" + start[11:13]
        plt.text(0.78, 0.88, start_time, fontsize=24, transform=ax.transAxes, color='red')
        if self.nir_flg:
            plt.text(0.01, 0.94, "WARNING: Selected scene \n{} \nnot in measurement file"\
                .format(self.scene2extract), color='red', fontsize=8, transform=ax.transAxes)

        if self.measurement_file != '':
            # Project the coordinates from the measurements dataframe.
            x = np.array(measure_df['Lon'])
            y = np.array(measure_df['Lat'])
            # Generate the vectors.
            delta_ew = np.array(measure_df['DELTA_EW'])
            delta_ns = np.array(measure_df['DELTA_NS'])
            # Colorize by vector magnitude; arrows themselves are unit length.
            mag = (delta_ew**2 + delta_ns**2)**(0.5)
            delta_ew_norm = delta_ew / np.sqrt(delta_ew**2 + delta_ns**2)
            delta_ns_norm = delta_ns / np.sqrt(delta_ew**2 + delta_ns**2)
            # Draw the vectors.
            ax.quiver(x, y, delta_ew_norm, delta_ns_norm, mag, width=0.003,
                cmap='jet', transform=ccrs.PlateCarree())
            # Insert a colorbar keyed to the magnitude range.
            norm = mpl.colors.Normalize(vmin=min(mag), vmax=max(mag))
            cmap = plt.get_cmap('jet')
            sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
            sm.set_array([])
            plt.colorbar(sm, orientation=orientation, label='Shift Magnitude, urad')

        if 'ACMF' in image_file:
            # Plot the chips as red dots.
            exe_path = os.path.dirname(os.path.realpath(__file__))
            chipdb_df = pd.read_csv(os.path.join(exe_path, 'data', 'nav_chipdb.csv'))
            chipdb_new = chipdb_df[['LANDMARK_S24', 'NEWLAT_R', 'NEWLON_R']].copy()
            chipdb_new = chipdb_new.rename(columns={"LANDMARK_S24": "chip", "NEWLAT_R": "lat", "NEWLON_R": "lon"})
            chipdb_new = chipdb_new.drop_duplicates()
            chipdb_new = chipdb_new.reset_index()
            plt.plot(chipdb_new["lon"], chipdb_new["lat"], color='red', marker='o',
                linestyle='None', markersize=1.5, transform=ccrs.PlateCarree())

        # Show or save the plot.  Fix: the original tested the undefined
        # global 'save_plot' instead of self.save_plot (NameError at runtime).
        if self.save_plot:
            plt.savefig('vplot.png', bbox_inches='tight')
        else:
            plt.show()
        plt.close()
class MVisualizer(Visualizer):
    """Visualize mean offsets aggregated over several measurement files.

    Measurement files are either listed explicitly or discovered from a data
    specification (satellite, metric, coverage, date range); the per-chip
    EW/NS offsets are averaged over all files before plotting.
    """

    def __init__(self, image_file, band2extract, scene2extract,
        vmax, overlay_l1b, chip_file, save_plot, measurement_files, dataspec):
        """
        Parameters
        ----------
        image_file : str
            The L1B image file.
        band2extract : int
            The band to extract.
        vmax : int
            The max to stretch. Larger->less contrast.
        overlay_l1b : {True, False}
            Whether to overlay the L1B image. By default shows the generic
            land/ocean map.
        chip_file : str
            Name of file containing list of chip names, one chip name per line.
        save_plot : {True, False}
            Whether to save the plot or just show it.
        measurement_files : str
            File containing list (one per line) of measurement file names.
        dataspec : str
            The range of dates in which to search for measurement files.
        """
        measurement_file = None
        super().__init__(image_file, measurement_file, band2extract, scene2extract,
            vmax, overlay_l1b, chip_file, save_plot)

        # Build zero-padded band name, e.g. 3 -> '03', 13 -> '13'.
        if self.band2extract / 10 < 1:
            self.band = '0' + str(self.band2extract)
        else:
            self.band = str(self.band2extract)

        if measurement_files != None:
            self.measurement_files = self.extract_from_file(measurement_files)
            # Sort so that files are in order of datetime (unless files are in
            # different locations...).
            self.measurement_files = sorted(self.measurement_files)
            print("Measurement files: ", self.measurement_files)
            # First file determines the satellite, metric and start date; the
            # last file determines the end date.
            self.sat = self.measurement_files[0].split('/')[-1].split('_')[0]
            self.metric = self.measurement_files[0].split('/')[-1].split('_')[1]
            self.start_range = datetime.datetime.strptime(self.measurement_files[0]\
                .split('/')[-1].split('_')[4].split('.')[0] \
                + '-' + self.measurement_files[0].split('/')[-1].split('_')[3], '%j-%Y')
            self.end_range = datetime.datetime.strptime(self.measurement_files[-1]\
                .split('/')[-1].split('_')[4].split('.')[0] \
                + '-' + self.measurement_files[-1].split('/')[-1].split('_')[3], '%j-%Y')
            if 'CONUS' in self.measurement_files[0]:
                self.coverage = 'CONUS'
            else:
                self.coverage = 'FULL'
            print("Measurement file satellite: ", self.sat)
            print("Measurement file metric: ", self.metric)
            print("Measurement file band:", self.band)
            print("Measurement file coverage: ", self.coverage)
            print("Measurement file start date: ", self.start_range)
            print("Measurement file end date: ", self.end_range)
        elif dataspec != None:
            print("dataspec: ", dataspec)
            try:
                self.sat = dataspec.split(' ')[0].upper()
                self.metric = dataspec.split(' ')[1].upper()
                self.coverage = dataspec.split(' ')[2].upper()
                self.start_range = datetime.datetime.strptime(dataspec.split(' ')[3], '%m%d%Y')
                self.end_range = datetime.datetime.strptime(dataspec.split(' ')[4], '%m%d%Y')
                self.measurement_files = self.searchforfiles()
                print("Measurement files: ", self.measurement_files)
                if self.measurement_files == []:
                    print("Error! No measurement files found.")
                else:
                    print("Measurement file satellite: ", self.sat)
                    print("Measurement file metric: ", self.metric)
                    print("Measurement file band:", self.band)
                    print("Measurement file coverage: ", self.coverage)
                    print("Measurement file start date: ", self.start_range)
                    print("Measurement file end date: ", self.end_range)
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # are not swallowed.
            except Exception:
                print("Error! Data specification needs to be in format 'AAA BBB CCC MMDDYYYY MMDDYYYY', where AAA can be G16 or G17; BBB can be FFR, NAV, BBR or WIFR; and CCC can be FUL or CON")
        else:
            print("Error! Please provide either file listing measurement files (--m) or a data specification (satellite, metric, coverage, and date range) to search for measurement files (--d).")

    def extract_geoloc(self, measurement_file):
        """Match one measurement file's rows to chip lat/lons from the Chip DB.

        Parameters
        ----------
        measurement_file : str
            The measurement CSV (possibly gzipped) to read.

        Returns
        -------
        pandas.DataFrame
            Measurement rows for the requested band (and scene range, if any)
            with 'Lat'/'Lon' columns appended; unmatched rows are dropped.
        """
        # Parse the requested scene range, e.g. '1810 1920' (start end).
        if self.scene2extract != None:
            print("User-requested starting scene: ", self.scene2extract.split(' ')[0])
            print("User-requested ending scene: ", self.scene2extract.split(' ')[-1])
            start_time = datetime.datetime.strptime(self.scene2extract.split(' ')[0], '%H%M')
            end_time = datetime.datetime.strptime(self.scene2extract.split(' ')[-1], '%H%M')

        # Gzipped files are read through the open handle (the original
        # re-read the path and left the handle unused).
        if 'gz' in measurement_file:
            with gzip.open(measurement_file) as f:
                measure_df = pd.read_csv(f)
        else:
            measure_df = pd.read_csv(measurement_file)

        # Create a (time-of-day only) datetime column.
        activity_time = np.array(measure_df['ACTIVITY_TIME_1'])
        measure_df['DATETIME'] = [datetime.datetime.strptime(activity_time[j], '%H:%M:%S') for j in range(len(activity_time))]

        if self.scene2extract != None and start_time != end_time:
            # A scene *range* was requested: find the closest start and end
            # scenes and keep everything between them.
            t_df = pd.DataFrame(measure_df, columns=['ACTIVITY_TIME_1'])
            t_df['DATETIME'] = [datetime.datetime.strptime(i, '%H:%M:%S') for i in t_df['ACTIVITY_TIME_1']]
            df_sort_start = t_df.iloc[(t_df['DATETIME'] - start_time).abs().argsort()[:1]]
            df_sort_end = t_df.iloc[(t_df['DATETIME'] - end_time).abs().argsort()[:1]]
            self.scene = df_sort_start['ACTIVITY_TIME_1'].iloc[0] + ' to ' + df_sort_end['ACTIVITY_TIME_1'].iloc[0]
            print("--WARNING using closest found scenes as the bounds {}.".format(self.scene))
            # Single combined mask instead of chained boolean selections.
            measure_df = measure_df[(measure_df['BAND_NUM'] == self.band2extract)
                                    & (measure_df['DATETIME'] >= df_sort_start['DATETIME'].iloc[0])
                                    & (measure_df['DATETIME'] <= df_sort_end['DATETIME'].iloc[0])]
        elif self.scene2extract != None and start_time == end_time:
            # A single scene was requested: snap to the closest one in the file.
            t = pd.DataFrame(measure_df, columns=['DATETIME'])
            t_df = pd.DataFrame.drop_duplicates(t)
            t_df = t_df.reset_index()
            df_sort = t_df.iloc[(t_df['DATETIME'] - start_time).abs().argsort()[:1]]
            self.scene = df_sort['DATETIME'].iloc[0].strftime('%H:%M')
            # Warn if the requested scene falls outside the file's time range
            # (the closest first/last scene is used instead).
            if not (start_time >= measure_df['DATETIME'].iloc[0] and start_time <= measure_df['DATETIME'].iloc[-1]):
                print("--WARNING: Requested scene ({}) falls outside measurement file. Using closest scene ({}) instead.--"\
                    .format(self.scene2extract, df_sort['DATETIME'].iloc[0].strftime('%H%M-%m%d%Y')))
                self.nir_flg = True
            else:
                print("--Plotting closest scene in file ({})--".format(df_sort['DATETIME'].iloc[0].strftime('%m/%d/%Y %H:%M')))
            measure_df = measure_df[(measure_df['BAND_NUM'] == self.band2extract)
                                    & (measure_df['DATETIME'] == df_sort['DATETIME'].iloc[0])]
        else:
            self.scene = 'All'
            # Extract the band of interest only.
            measure_df = measure_df[measure_df['BAND_NUM'] == self.band2extract]

        print("Scene: ", self.scene)

        # Read the Chip DB file appropriate for the metric and normalize its
        # columns to chip/lat/lon.
        exe_path = os.path.dirname(os.path.realpath(__file__))
        if self.metric == 'NAV':
            chipdb_df = pd.read_csv(os.path.join(exe_path, 'MultiSpecDB_Jan20_2018_v1_2018_Feb13_150634.csv'))
            chipdb_new = chipdb_df[['LANDMARK_S24', 'NEWLAT_R', 'NEWLON_R']].copy()
            chipdb_new = chipdb_new.rename(columns={"LANDMARK_S24": "chip", "NEWLAT_R": "lat", "NEWLON_R": "lon"})
        else:
            chipdb_df = pd.read_csv(os.path.join(exe_path, 'EvalLoc_2018_Feb26.csv'))
            chipdb_new = chipdb_df[['name_S24', 'lat_R', 'lon_R']].copy()
            chipdb_new = chipdb_new.rename(columns={"name_S24": "chip", "lat_R": "lat", "lon_R": "lon"})
        # Remove all duplicate rows from Chip DB.
        chipdb_new = chipdb_new.drop_duplicates()
        chipdb_new = chipdb_new.reset_index()

        # Map each chip name to its first (lat, lon) so the per-row lookup
        # below is O(1) instead of scanning the Chip DB for every row.
        chip_coords = {}
        for name, lat, lon in zip(chipdb_new["chip"], chipdb_new["lat"], chipdb_new["lon"]):
            chip_coords.setdefault(name, (lat, lon))

        chip_name = np.array(measure_df['CHIP_NAME'])

        # Restrict to a user-supplied chip list, if one was given.
        if self.chip_file != '':
            chip_list = self.extract_from_file(self.chip_file)
            print("--Only user-specified chips will be plotted: {}--".format(chip_list))
        else:
            chip_list = chip_name
        chip_set = set(chip_list)

        # Attach a lat/lon to every measurement row; unmatched rows get (0, 0)
        # and are dropped below.
        lat_arr = []
        lon_arr = []
        for name in chip_name:
            if name in chip_coords and name in chip_set:
                lat, lon = chip_coords[name]
                lat_arr.append(lat)
                lon_arr.append(lon)
            else:
                lat_arr.append(0)
                lon_arr.append(0)

        measure_df['Lat'] = lat_arr
        measure_df['Lon'] = lon_arr
        measure_df = measure_df[(measure_df["Lat"] != 0)]
        return measure_df

    def extract_from_file(self, filename):
        """Return the lines of *filename* as a list, one item per line."""
        with open(filename) as f:
            return [line.strip('\n') for line in f]

    def searchforfiles(self):
        """Glob for measurement files for the satellite/metric over the date range.

        Returns
        -------
        list of str
            Matching measurement file names (may be empty).
        """
        measurement_files = []
        # Number of days between the first and last date.
        ndates = (self.end_range - self.start_range).days
        for d in range(ndates):
            # Add 'd' days to the start date.
            newdate = self.start_range + datetime.timedelta(d)
            # Convert to year and day-of-year for the filename pattern.
            year = newdate.year
            startofyear = datetime.datetime(year=year, month=1, day=1)
            days_since_startofyear = (newdate - startofyear).days
            # NOTE(review): both branches leave search_path empty (current
            # directory); fill in the real locations when known.
            if 'F' in self.coverage:
                search_path = ''
            elif 'C' in self.coverage:
                search_path = ''
            search_string = '{}_{}_measurements_{}_{}.*.csv*'.format(self.sat,
                self.metric, year, days_since_startofyear)
            # Append any found files to the list.
            measurement_files += glob.glob(os.path.join(search_path, search_string))
        return measurement_files

    def visualize(self):
        """Plot the averaged offsets as a vector field.

        The field is drawn over the L1B image when ``overlay_l1b`` is set,
        otherwise over a generic land/ocean map; the plot is saved to
        'vplot.png' or shown interactively depending on ``save_plot``.
        """
        measure_df = self.build_measurement_df()

        # Remove path to get just the filename for parsing purposes.
        image_file = self.image_file.split('/')[-1]

        # Extract geographic coverage; it also determines the colorbar
        # orientation.
        coverage = image_file.split('-')[2].strip('Rad')
        if coverage == 'C':
            coverage = 'CONUS'
            orientation = 'horizontal'
        elif coverage == 'F':
            coverage = 'FULL'
            orientation = 'vertical'
        else:
            # Treat all other coverages as "FULL" (same fallback as the parent
            # class; previously 'orientation' was left unbound here).
            coverage = 'FULL'
            orientation = 'vertical'

        # Extract satellite from the image name.
        sat = image_file.split('_')[2]

        # Scan start/end timestamps are encoded in the filename.
        start = (image_file[image_file.find("s")+1:image_file.find("_e")])
        start_formatted = start[0:4] + " Day " + start[4:7] + " - " + start[7:9] + ":" + \
            start[9:11] + ":" + start[11:13] + "." + start[13:14] + " UTC"
        end = (image_file[image_file.find("e")+1:image_file.find("_c")])
        end_formatted = end[0:4] + " Day " + end[4:7] + " - " + end[7:9] + ":" + end[9:11] + \
            ":" + end[11:13] + "." + end[13:14] + " UTC"

        # Open the image using the NetCDF4 library.
        nc = Dataset(self.image_file)

        # Projection center longitude from the image metadata.
        geo_extent = nc.variables['geospatial_lat_lon_extent']
        lon_0 = geo_extent.geospatial_lon_center
        print("Image satellite: ", sat)
        print("Image coverage: ", coverage)
        print("Image start: ", start)
        print("Image end: ", end)

        geos = ccrs.Geostationary(central_longitude=lon_0, satellite_height=35786023.0, sweep_axis='x')

        # Start figure.
        fig = plt.figure(figsize=(12, 8))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=geos)
        open_image = xarray.open_dataset(self.image_file)
        image_data = open_image.metpy.parse_cf('Rad')
        image_x = image_data.x
        image_y = image_data.y

        # Set the axis bounds.
        if coverage == 'FULL':
            ax.set_global()
            info_text = 'k'
        elif coverage == 'CONUS':
            ax.set_extent([image_x.min(), image_x.max(), image_y.min(), image_y.max()], crs=geos)
            info_text = 'cyan'

        # Overlay the L1B data.
        if self.overlay_l1b:
            # De-normalize vmax from [0, 1] to the variable's natural range.
            min_range = float(nc.variables['Rad'].valid_range[0])
            max_range = float(nc.variables['Rad'].valid_range[1])
            vmax = self.vmax * (max_range - min_range)
            if coverage == 'CONUS':
                vmax = vmax / 3.5
            # Note: increasing vmax lowers contrast (small->black, large->white).
            ax.imshow(open_image['Rad'][:], origin='upper', cmap='gray', transform=geos, vmax=vmax,
                extent=(image_x.min(), image_x.max(), image_y.min(), image_y.max()))
            # Draw coastlines, country borders, lakes, and grid.
            # See https://scitools.org.uk/cartopy/docs/v0.14/matplotlib/feature_interface.html
            ax.coastlines(linewidth=0.9, linestyle='solid', color='green')
            ax.add_feature(cfeature.BORDERS, linewidth=0.9, linestyle='solid',
                facecolor='none', edgecolor='green')
            ax.add_feature(cfeature.LAKES, linewidth=0.9, linestyle='solid',
                facecolor='none', edgecolor='green')
            ax.gridlines(linewidth=0.3, color='white')
        else:
            # No overlay requested: draw a generic ocean/land map instead.
            ax.stock_img()
            ax.coastlines(linewidth=0.9, linestyle='solid', color='black')
            ax.add_feature(cfeature.BORDERS, linewidth=0.9, linestyle='solid',
                facecolor='none', edgecolor='black')
            ax.add_feature(cfeature.LAKES, linewidth=0.9, linestyle='solid',
                facecolor='skyblue', edgecolor='black')
            ax.add_feature(cfeature.RIVERS, linewidth=0.9, linestyle='solid',
                facecolor='none', edgecolor='skyblue')
            ax.gridlines(linewidth=0.3, color='white')

        # Add a title to the plot.
        plt.title(self.sat + " ABI L1B Band " + self.band + " Metric " + self.metric + " from " + \
            self.start_range.strftime('%Y Day %j') + " to " + self.end_range.strftime('%Y Day %j') + \
            "\n" + coverage + " Scan from " + start_formatted + " to " + end_formatted)

        # Geospatial extent values from the NetCDF header, for the plot text.
        center = str(geo_extent.geospatial_lon_center)
        west = str(geo_extent.geospatial_westbound_longitude)
        east = str(geo_extent.geospatial_eastbound_longitude)
        north = str(geo_extent.geospatial_northbound_latitude)
        south = str(geo_extent.geospatial_southbound_latitude)

        # Close netCDF file when finished.
        nc.close()
        nc = None

        # Put the information retrieved from the header in the final image.
        plt.text(0.01, 0.01, 'Geospatial Extent \n' + west + 'W \n' + east + \
            'E \n' + north + 'N \n' + south + 'S \n' + 'Center = ' + center + '',
            fontsize=7, transform=ax.transAxes, color=info_text)
        if self.nir_flg:
            plt.text(0.01, 0.94, "WARNING: Selected scene \n{} \nnot in measurement file"\
                .format(self.scene2extract), color='red', fontsize=8, transform=ax.transAxes)

        # Project the coordinates from the measurements dataframe.
        x = np.array(measure_df['Lon'])
        y = np.array(measure_df['Lat'])
        # Generate the (averaged) vectors.
        delta_ew = np.array(measure_df['AVG_EW'])
        delta_ns = np.array(measure_df['AVG_NS'])
        # Colorize by vector magnitude; arrows themselves are unit length.
        mag = (delta_ew**2 + delta_ns**2)**(0.5)
        delta_ew_norm = delta_ew / np.sqrt(delta_ew**2 + delta_ns**2)
        delta_ns_norm = delta_ns / np.sqrt(delta_ew**2 + delta_ns**2)
        # Draw the vectors.
        ax.quiver(x, y, delta_ew_norm, delta_ns_norm, mag, width=0.003,
            cmap='jet', transform=ccrs.PlateCarree())
        # Insert a colorbar keyed to the magnitude range.
        norm = mpl.colors.Normalize(vmin=min(mag), vmax=max(mag))
        cmap = plt.get_cmap('jet')
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
        sm.set_array([])
        plt.colorbar(sm, orientation=orientation, label='Mean Shift Magnitude, urad')

        # Show or save the plot.  Fix: the original tested the undefined
        # global 'save_plot' instead of self.save_plot (NameError at runtime).
        if self.save_plot:
            plt.savefig('vplot.png', bbox_inches='tight')
        else:
            plt.show()
        # Release the figure (the parent class does the same).
        plt.close()

    def build_measurement_df(self):
        """Concatenate all measurement files and average offsets per chip/band.

        Returns
        -------
        pandas.DataFrame
            One row per (CHIP_NAME, BAND_NUM) with AVG_EW/AVG_NS columns
            holding the mean offsets over all measurement files.
        """
        # DataFrame.append was removed in pandas 2.0; concatenate instead.
        frames = [self.extract_geoloc(mf) for mf in self.measurement_files]
        measure_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame([])
        # Calculate the mean EW and NS offset for each chip/band.
        measure_df['AVG_EW'] = measure_df.groupby(['CHIP_NAME', 'BAND_NUM'])['DELTA_EW'].transform('mean')
        measure_df['AVG_NS'] = measure_df.groupby(['CHIP_NAME', 'BAND_NUM'])['DELTA_NS'].transform('mean')
        # Keep a single row per (chip, band).
        measure_df = measure_df.drop_duplicates(['CHIP_NAME', 'BAND_NUM'])
        return measure_df
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"os.path.join",
"netCDF4.Dataset",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.pyplot.colorbar",
"datetime.timedelta",
"cartopy.crs.Geostationary",
"matplotlib.pyplot... | [((3341, 3379), 'numpy.array', 'np.array', (["measure_df['ACTIVITY_DATE1']"], {}), "(measure_df['ACTIVITY_DATE1'])\n", (3349, 3379), True, 'import numpy as np\n'), ((3404, 3443), 'numpy.array', 'np.array', (["measure_df['ACTIVITY_TIME_1']"], {}), "(measure_df['ACTIVITY_TIME_1'])\n", (3412, 3443), True, 'import numpy as np\n'), ((6491, 6519), 'numpy.array', 'np.array', (["chipdb_new['chip']"], {}), "(chipdb_new['chip'])\n", (6499, 6519), True, 'import numpy as np\n'), ((6540, 6573), 'numpy.array', 'np.array', (["measure_df['CHIP_NAME']"], {}), "(measure_df['CHIP_NAME'])\n", (6548, 6573), True, 'import numpy as np\n'), ((9895, 9919), 'netCDF4.Dataset', 'Dataset', (['self.image_file'], {}), '(self.image_file)\n', (9902, 9919), False, 'from netCDF4 import Dataset\n'), ((10914, 11006), 'cartopy.crs.Geostationary', 'ccrs.Geostationary', ([], {'central_longitude': 'lon_0', 'satellite_height': '(35786023.0)', 'sweep_axis': '"""x"""'}), "(central_longitude=lon_0, satellite_height=35786023.0,\n sweep_axis='x')\n", (10932, 11006), True, 'import cartopy.crs as ccrs\n'), ((11039, 11066), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (11049, 11066), True, 'import matplotlib.pyplot as plt\n'), ((11149, 11185), 'xarray.open_dataset', 'xarray.open_dataset', (['self.image_file'], {}), '(self.image_file)\n', (11168, 11185), False, 'import xarray\n'), ((13593, 13780), 'matplotlib.pyplot.title', 'plt.title', (["(self.sat + ' ABI L1B Band ' + self.band + ' Scene ' + self.scene +\n ' Metric ' + self.metric + '\\n' + coverage + ' Scan from ' +\n start_formatted + ' to ' + end_formatted)"], {}), "(self.sat + ' ABI L1B Band ' + self.band + ' Scene ' + self.scene +\n ' Metric ' + self.metric + '\\n' + coverage + ' Scan from ' +\n start_formatted + ' to ' + end_formatted)\n", (13602, 13780), True, 'import matplotlib.pyplot as plt\n'), ((14357, 14556), 'matplotlib.pyplot.text', 'plt.text', (['(0.01)', '(0.01)', 
"('Geospatial Extent \\n' + west + 'W \\n' + east + 'E \\n' + north + 'N \\n' +\n south + 'S \\n' + 'Center = ' + center + '')"], {'fontsize': '(7)', 'transform': 'ax.transAxes', 'color': 'info_text'}), "(0.01, 0.01, 'Geospatial Extent \\n' + west + 'W \\n' + east + 'E \\n' +\n north + 'N \\n' + south + 'S \\n' + 'Center = ' + center + '', fontsize=7,\n transform=ax.transAxes, color=info_text)\n", (14365, 14556), True, 'import matplotlib.pyplot as plt\n'), ((14708, 14795), 'matplotlib.pyplot.text', 'plt.text', (['(0.78)', '(0.88)', 'start_time'], {'fontsize': '(24)', 'transform': 'ax.transAxes', 'color': '"""red"""'}), "(0.78, 0.88, start_time, fontsize=24, transform=ax.transAxes, color\n ='red')\n", (14716, 14795), True, 'import matplotlib.pyplot as plt\n'), ((17191, 17202), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17200, 17202), True, 'import matplotlib.pyplot as plt\n'), ((22700, 22738), 'numpy.array', 'np.array', (["measure_df['ACTIVITY_DATE1']"], {}), "(measure_df['ACTIVITY_DATE1'])\n", (22708, 22738), True, 'import numpy as np\n'), ((22763, 22802), 'numpy.array', 'np.array', (["measure_df['ACTIVITY_TIME_1']"], {}), "(measure_df['ACTIVITY_TIME_1'])\n", (22771, 22802), True, 'import numpy as np\n'), ((27002, 27030), 'numpy.array', 'np.array', (["chipdb_new['chip']"], {}), "(chipdb_new['chip'])\n", (27010, 27030), True, 'import numpy as np\n'), ((27051, 27084), 'numpy.array', 'np.array', (["measure_df['CHIP_NAME']"], {}), "(measure_df['CHIP_NAME'])\n", (27059, 27084), True, 'import numpy as np\n'), ((31598, 31622), 'netCDF4.Dataset', 'Dataset', (['self.image_file'], {}), '(self.image_file)\n', (31605, 31622), False, 'from netCDF4 import Dataset\n'), ((32064, 32156), 'cartopy.crs.Geostationary', 'ccrs.Geostationary', ([], {'central_longitude': 'lon_0', 'satellite_height': '(35786023.0)', 'sweep_axis': '"""x"""'}), "(central_longitude=lon_0, satellite_height=35786023.0,\n sweep_axis='x')\n", (32082, 32156), True, 'import cartopy.crs as ccrs\n'), 
((32189, 32216), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (32199, 32216), True, 'import matplotlib.pyplot as plt\n'), ((32299, 32335), 'xarray.open_dataset', 'xarray.open_dataset', (['self.image_file'], {}), '(self.image_file)\n', (32318, 32335), False, 'import xarray\n'), ((35750, 35949), 'matplotlib.pyplot.text', 'plt.text', (['(0.01)', '(0.01)', "('Geospatial Extent \\n' + west + 'W \\n' + east + 'E \\n' + north + 'N \\n' +\n south + 'S \\n' + 'Center = ' + center + '')"], {'fontsize': '(7)', 'transform': 'ax.transAxes', 'color': 'info_text'}), "(0.01, 0.01, 'Geospatial Extent \\n' + west + 'W \\n' + east + 'E \\n' +\n north + 'N \\n' + south + 'S \\n' + 'Center = ' + center + '', fontsize=7,\n transform=ax.transAxes, color=info_text)\n", (35758, 35949), True, 'import matplotlib.pyplot as plt\n'), ((36255, 36282), 'numpy.array', 'np.array', (["measure_df['Lon']"], {}), "(measure_df['Lon'])\n", (36263, 36282), True, 'import numpy as np\n'), ((36295, 36322), 'numpy.array', 'np.array', (["measure_df['Lat']"], {}), "(measure_df['Lat'])\n", (36303, 36322), True, 'import numpy as np\n'), ((36374, 36404), 'numpy.array', 'np.array', (["measure_df['AVG_EW']"], {}), "(measure_df['AVG_EW'])\n", (36382, 36404), True, 'import numpy as np\n'), ((36424, 36454), 'numpy.array', 'np.array', (["measure_df['AVG_NS']"], {}), "(measure_df['AVG_NS'])\n", (36432, 36454), True, 'import numpy as np\n'), ((37080, 37099), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (37092, 37099), True, 'import matplotlib.pyplot as plt\n'), ((37113, 37156), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (37134, 37156), True, 'import matplotlib.pyplot as plt\n'), ((37190, 37267), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'orientation': 'orientation', 'label': '"""Mean Shift Magnitude, urad"""'}), "(sm, 
orientation=orientation, label='Mean Shift Magnitude, urad')\n", (37202, 37267), True, 'import matplotlib.pyplot as plt\n'), ((37500, 37516), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (37512, 37516), True, 'import pandas as pd\n'), ((2873, 2934), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['self.scene2extract', '"""%H%M-%m%d%Y"""'], {}), "(self.scene2extract, '%H%M-%m%d%Y')\n", (2899, 2934), False, 'import datetime\n'), ((3245, 3279), 'pandas.read_csv', 'pd.read_csv', (['self.measurement_file'], {}), '(self.measurement_file)\n', (3256, 3279), True, 'import pandas as pd\n'), ((3478, 3572), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["(activity_date[j] + '_' + activity_time[j])", '"""%m-%d-%Y_%H:%M:%S"""'], {}), "(activity_date[j] + '_' + activity_time[j],\n '%m-%d-%Y_%H:%M:%S')\n", (3504, 3572), False, 'import datetime\n'), ((3758, 3804), 'pandas.DataFrame', 'pd.DataFrame', (['measure_df'], {'columns': "['DATETIME']"}), "(measure_df, columns=['DATETIME'])\n", (3770, 3804), True, 'import pandas as pd\n'), ((3826, 3857), 'pandas.DataFrame.drop_duplicates', 'pd.DataFrame.drop_duplicates', (['t'], {}), '(t)\n', (3854, 3857), True, 'import pandas as pd\n'), ((5321, 5347), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5337, 5347), False, 'import os\n'), ((15124, 15151), 'numpy.array', 'np.array', (["measure_df['Lon']"], {}), "(measure_df['Lon'])\n", (15132, 15151), True, 'import numpy as np\n'), ((15168, 15195), 'numpy.array', 'np.array', (["measure_df['Lat']"], {}), "(measure_df['Lat'])\n", (15176, 15195), True, 'import numpy as np\n'), ((15255, 15287), 'numpy.array', 'np.array', (["measure_df['DELTA_EW']"], {}), "(measure_df['DELTA_EW'])\n", (15263, 15287), True, 'import numpy as np\n'), ((15311, 15343), 'numpy.array', 'np.array', (["measure_df['DELTA_NS']"], {}), "(measure_df['DELTA_NS'])\n", (15319, 15343), True, 'import numpy as np\n'), ((16017, 16036), 'matplotlib.pyplot.get_cmap', 
'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (16029, 16036), True, 'import matplotlib.pyplot as plt\n'), ((16054, 16097), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (16075, 16097), True, 'import matplotlib.pyplot as plt\n'), ((16139, 16211), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'orientation': 'orientation', 'label': '"""Shift Magnitude, urad"""'}), "(sm, orientation=orientation, label='Shift Magnitude, urad')\n", (16151, 16211), True, 'import matplotlib.pyplot as plt\n'), ((17100, 17145), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""vplot.png"""'], {'bbox_inches': '"""tight"""'}), "('vplot.png', bbox_inches='tight')\n", (17111, 17145), True, 'import matplotlib.pyplot as plt\n'), ((17172, 17182), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17180, 17182), True, 'import matplotlib.pyplot as plt\n'), ((22609, 22638), 'pandas.read_csv', 'pd.read_csv', (['measurement_file'], {}), '(measurement_file)\n', (22620, 22638), True, 'import pandas as pd\n'), ((22838, 22894), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['activity_time[j]', '"""%H:%M:%S"""'], {}), "(activity_time[j], '%H:%M:%S')\n", (22864, 22894), False, 'import datetime\n'), ((23106, 23159), 'pandas.DataFrame', 'pd.DataFrame', (['measure_df'], {'columns': "['ACTIVITY_TIME_1']"}), "(measure_df, columns=['ACTIVITY_TIME_1'])\n", (23118, 23159), True, 'import pandas as pd\n'), ((25801, 25827), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (25817, 25827), False, 'import os\n'), ((29458, 29502), 'datetime.datetime', 'datetime.datetime', ([], {'year': 'year', 'month': '(1)', 'day': '(1)'}), '(year=year, month=1, day=1)\n', (29475, 29502), False, 'import datetime\n'), ((36615, 36653), 'numpy.sqrt', 'np.sqrt', (['(delta_ew ** 2 + delta_ns ** 2)'], {}), '(delta_ew ** 2 + delta_ns ** 2)\n', (36622, 36653), True, 'import numpy as np\n'), ((36683, 
36721), 'numpy.sqrt', 'np.sqrt', (['(delta_ew ** 2 + delta_ns ** 2)'], {}), '(delta_ew ** 2 + delta_ns ** 2)\n', (36690, 36721), True, 'import numpy as np\n'), ((37335, 37380), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""vplot.png"""'], {'bbox_inches': '"""tight"""'}), "('vplot.png', bbox_inches='tight')\n", (37346, 37380), True, 'import matplotlib.pyplot as plt\n'), ((37407, 37417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37415, 37417), True, 'import matplotlib.pyplot as plt\n'), ((3103, 3135), 'gzip.open', 'gzip.open', (['self.measurement_file'], {}), '(self.measurement_file)\n', (3112, 3135), False, 'import gzip\n'), ((3171, 3205), 'pandas.read_csv', 'pd.read_csv', (['self.measurement_file'], {}), '(self.measurement_file)\n', (3182, 3205), True, 'import pandas as pd\n'), ((5418, 5468), 'os.path.join', 'os.path.join', (['exe_path', '"""data"""', '"""other_chipdb.csv"""'], {}), "(exe_path, 'data', 'other_chipdb.csv')\n", (5430, 5468), False, 'import os\n'), ((5838, 5886), 'os.path.join', 'os.path.join', (['exe_path', '"""data"""', '"""nav_chipdb.csv"""'], {}), "(exe_path, 'data', 'nav_chipdb.csv')\n", (5850, 5886), False, 'import os\n'), ((7381, 7436), 'numpy.array', 'np.array', (["origlat_r[chipdb_new['chip'] == chip_name[i]]"], {}), "(origlat_r[chipdb_new['chip'] == chip_name[i]])\n", (7389, 7436), True, 'import numpy as np\n'), ((7459, 7514), 'numpy.array', 'np.array', (["origlon_r[chipdb_new['chip'] == chip_name[i]]"], {}), "(origlon_r[chipdb_new['chip'] == chip_name[i]])\n", (7467, 7514), True, 'import numpy as np\n'), ((15520, 15558), 'numpy.sqrt', 'np.sqrt', (['(delta_ew ** 2 + delta_ns ** 2)'], {}), '(delta_ew ** 2 + delta_ns ** 2)\n', (15527, 15558), True, 'import numpy as np\n'), ((15592, 15630), 'numpy.sqrt', 'np.sqrt', (['(delta_ew ** 2 + delta_ns ** 2)'], {}), '(delta_ew ** 2 + delta_ns ** 2)\n', (15599, 15630), True, 'import numpy as np\n'), ((16327, 16353), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), 
'(__file__)\n', (16343, 16353), False, 'import os\n'), ((16391, 16439), 'os.path.join', 'os.path.join', (['exe_path', '"""data"""', '"""nav_chipdb.csv"""'], {}), "(exe_path, 'data', 'nav_chipdb.csv')\n", (16403, 16439), False, 'import os\n'), ((22477, 22504), 'gzip.open', 'gzip.open', (['measurement_file'], {}), '(measurement_file)\n', (22486, 22504), False, 'import gzip\n'), ((22540, 22569), 'pandas.read_csv', 'pd.read_csv', (['measurement_file'], {}), '(measurement_file)\n', (22551, 22569), True, 'import pandas as pd\n'), ((23194, 23235), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['i', '"""%H:%M:%S"""'], {}), "(i, '%H:%M:%S')\n", (23220, 23235), False, 'import datetime\n'), ((24214, 24260), 'pandas.DataFrame', 'pd.DataFrame', (['measure_df'], {'columns': "['DATETIME']"}), "(measure_df, columns=['DATETIME'])\n", (24226, 24260), True, 'import pandas as pd\n'), ((24282, 24313), 'pandas.DataFrame.drop_duplicates', 'pd.DataFrame.drop_duplicates', (['t'], {}), '(t)\n', (24310, 24313), True, 'import pandas as pd\n'), ((25898, 25971), 'os.path.join', 'os.path.join', (['exe_path', '"""MultiSpecDB_Jan20_2018_v1_2018_Feb13_150634.csv"""'], {}), "(exe_path, 'MultiSpecDB_Jan20_2018_v1_2018_Feb13_150634.csv')\n", (25910, 25971), False, 'import os\n'), ((26345, 26393), 'os.path.join', 'os.path.join', (['exe_path', '"""EvalLoc_2018_Feb26.csv"""'], {}), "(exe_path, 'EvalLoc_2018_Feb26.csv')\n", (26357, 26393), False, 'import os\n'), ((27910, 27965), 'numpy.array', 'np.array', (["origlat_r[chipdb_new['chip'] == chip_name[i]]"], {}), "(origlat_r[chipdb_new['chip'] == chip_name[i]])\n", (27918, 27965), True, 'import numpy as np\n'), ((27988, 28043), 'numpy.array', 'np.array', (["origlon_r[chipdb_new['chip'] == chip_name[i]]"], {}), "(origlon_r[chipdb_new['chip'] == chip_name[i]])\n", (27996, 28043), True, 'import numpy as np\n'), ((29318, 29339), 'datetime.timedelta', 'datetime.timedelta', (['d'], {}), '(d)\n', (29336, 29339), False, 'import datetime\n'), ((30049, 
30089), 'os.path.join', 'os.path.join', (['search_path', 'search_string'], {}), '(search_path, search_string)\n', (30061, 30089), False, 'import os\n'), ((36853, 36871), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (36869, 36871), True, 'import cartopy.crs as ccrs\n'), ((15774, 15792), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (15790, 15792), True, 'import cartopy.crs as ccrs\n'), ((17013, 17031), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (17029, 17031), True, 'import cartopy.crs as ccrs\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds vocab files for a vertical in word/char/tag level respectively."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import sys
import tempfile
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from simpdom import constants
# Command-line flags controlling embedding size, vocab pruning and I/O paths.
FLAGS = flags.FLAGS
flags.DEFINE_integer("dim_word_glove", 100,
                     "The dimensionality of the word embeddings.")
flags.DEFINE_integer("word_frequence_cutoff", 3,
                     "Ignore the words whose frequence is under this.")
flags.DEFINE_string(
    "domtree_path", "",
    "The path of the json file containing node and features from swde dataset.")
flags.DEFINE_string(
    "word_embedding_path", "",
    "The path of word embedding file, which should be in GloVe format.")
def get_leaf_type(xpath):
  """Returns the html tag of the leaf element addressed by an xpath.

  A trailing "tail" component refers to the text after its parent element,
  so the parent's tag is returned in that case. Positional indices such as
  "[2]" are stripped.

  Examples:
    "/div[1]/span[2]/br[1]" --> "br"
    "/div[1]/span[2]/tail"  --> "span"
    "/div[1]/span[2]/a"     --> "a"
  """
  parts = xpath.split("/")
  leaf = parts[-2] if parts[-1].startswith("tail") else parts[-1]
  bracket = leaf.find("[")
  # Drop the positional index, e.g. "br[1]" -> "br".
  return leaf[:bracket] if bracket >= 0 else leaf
def split_xpath(xpath):
  """Returns the list of html tag names along an xpath, without indices.

  Examples:
    "/div[1]/span[2]/br[1]" --> ["div", "span", "br"]
    "/div[1]/span[2]/tail"  --> ["div", "span", "tail"]
  """
  tags = []
  for component in xpath.split("/"):
    bracket = component.find("[")
    if bracket >= 0:
      component = component[:bracket]
    component = component.strip()
    if component:  # Skip the empty components produced by leading slashes.
      tags.append(component)
  return tags
def build_vocab(json_data, vertical_to_process):
  """Collects the vocabularies over all pages of a single vertical.

  Args:
    json_data: parsed json whose "features" entry is a list of pages, each a
      list of node dicts with "html_path", "text", "prev_text", "label" and
      "xpath" keys.
    vertical_to_process: name of the vertical to restrict the counting to.

  Returns:
    A 5-tuple of (words, labels, chars, leaf html tags, xpath html tags).
    Words are those occurring at least FLAGS.word_frequence_cutoff times;
    chars are the characters of the kept words.
  """
  word_counts = collections.Counter()
  labels = set()
  leaf_tags = set()
  xpath_tags = set()
  for page in json_data["features"]:
    for node in page:
      # The vertical name is the first component of the html path.
      if node["html_path"].split("/")[0] != vertical_to_process:
        continue
      word_counts.update(node["text"])
      word_counts.update(itertools.chain.from_iterable(node["prev_text"]))
      labels.add(node["label"])
      leaf_tags.add(get_leaf_type(node["xpath"]))
      xpath_tags.update(split_xpath(node["xpath"]))
  # Keep only sufficiently frequent words; characters come from kept words.
  words = {
      w for w, count in word_counts.items()
      if count >= FLAGS.word_frequence_cutoff
  }
  chars = set()
  for w in words:
    chars.update(w)
  return words, labels, chars, leaf_tags, xpath_tags
def get_emebeddings(vocab_words, embedding_file_lines, vertical_to_process):
  """Extracts the GloVe vectors for a vocabulary and saves them as .npz.

  Words are sorted so that row i of the matrix corresponds to the i-th word
  in sorted order. A word missing from GloVe falls back to its lowercased
  form; words found in neither stay as all-zero rows.

  Args:
    vocab_words: set of vocabulary words to look up.
    embedding_file_lines: the GloVe file split into lines
      ("word v1 ... vD" per line).
    vertical_to_process: vertical name used in the output file name.
  """
  sorted_words = sorted(vocab_words)
  word_to_id = {word: idx for idx, word in enumerate(sorted_words)}
  num_words = len(word_to_id)
  embeddings = np.zeros((num_words, FLAGS.dim_word_glove))
  print("Writing word embedding file (may take a while)...")
  # Parse the GloVe lines: one token followed by dim_word_glove numbers.
  glove = {}
  for raw_line in embedding_file_lines:
    fields = raw_line.strip().split()
    if len(fields) == FLAGS.dim_word_glove + 1:
      glove[fields[0]] = fields[1:]
  found = 0
  for word in vocab_words:
    vector = glove.get(word)
    if vector is None:
      # Fall back to the lowercased form only if the exact form is absent.
      vector = glove.get(word.lower())
    if vector is not None:
      found += 1
      embeddings[word_to_id[word]] = vector
  out_path = os.path.join(FLAGS.domtree_path,
               vertical_to_process + ".%d.emb.npz" % (FLAGS.dim_word_glove))
  # np.savez needs a seekable file object, so write to a temporary file
  # first and then copy the bytes through tf.gfile.
  with tempfile.TemporaryFile() as tmp, tf.gfile.Open(out_path, "wb") as gfo:
    np.savez(tmp, embeddings=embeddings)
    tmp.seek(0)
    gfo.write(tmp.read())
  print("- done. Found {} vectors for {} words for {}".format(
      found, num_words, gfo.name))
def write_vocab(vocab_type, vocab, vertical_to_process):
  """Dumps one vocabulary, one sorted entry per line, to a text file.

  Args:
    vocab_type: label used in the file name, e.g. "words" or "tags".
    vocab: iterable of vocabulary items.
    vertical_to_process: vertical name used in the output file name.
  """
  out_path = os.path.join(FLAGS.domtree_path,
               vertical_to_process + ".vocab.%s.txt" % (vocab_type))
  with tf.gfile.Open(out_path, "w") as vocab_file:
    for entry in sorted(vocab):
      vocab_file.write("{}\n".format(entry))
  print("Saving done:", vocab_file.name, file=sys.stderr)
def main(_):
  """Builds per-vertical and over-all vocab files plus word embeddings."""
  verticals = constants.VERTICAL_WEBSITES.keys()
  # Read the whole GloVe file once; it is reused for every vertical.
  with tf.gfile.Open(FLAGS.word_embedding_path, "r") as embedding_file:
    embedding_file_lines = embedding_file.read().split("\n")
  # Accumulators for the union of all verticals' vocabularies.
  (all_vocab_words, all_vocab_labels, all_vocab_chars, all_vocab_leaf_html_tags,
   all_vocab_html_tags) = set(), set(), set(), set(), set()
  for vertical_to_process in verticals:
    print("Processing vertical:", vertical_to_process, file=sys.stderr)
    # Per-vertical accumulators, merged over all of its json shards.
    (vocab_words, vocab_labels, vocab_chars, vocab_leaf_html_tags,
     vocab_html_tags) = set(), set(), set(), set(), set()
    for json_data_path in tf.gfile.ListDirectory(FLAGS.domtree_path):
      # Only pick files named "<vertical>-....json"; the single-dash check
      # filters out other generated files for the same vertical.
      if json_data_path.endswith(".json") and json_data_path.startswith(
          vertical_to_process) and len(json_data_path.split("-")) == 2:
        print("processing %s" % (json_data_path), file=sys.stderr)
        json_data = json.load(
            tf.gfile.Open(
                os.path.join(FLAGS.domtree_path, json_data_path), "r"))
        (current_words, current_tags, current_chars, current_leaf_types,
         current_xpath_units) = build_vocab(json_data, vertical_to_process)
        vocab_words.update(current_words)
        vocab_labels.update(current_tags)
        vocab_chars.update(current_chars)
        vocab_leaf_html_tags.update(current_leaf_types)
        vocab_html_tags.update(current_xpath_units)
    # Add the current vertical's vocab to an over-all vocab.
    all_vocab_words.update(vocab_words)
    all_vocab_labels.update(vocab_labels)
    all_vocab_chars.update(vocab_chars)
    all_vocab_leaf_html_tags.update(vocab_leaf_html_tags)
    all_vocab_html_tags.update(vocab_html_tags)
    # Saving vocabs and word embeddings.
    write_vocab("words", vocab_words, vertical_to_process)
    write_vocab("tags", vocab_labels, vertical_to_process)
    write_vocab("chars", vocab_chars, vertical_to_process)
    get_emebeddings(vocab_words, embedding_file_lines, vertical_to_process)
    write_vocab("leaf_types", vocab_leaf_html_tags, vertical_to_process)
    write_vocab("xpath_units", vocab_html_tags, vertical_to_process)
  # Saving over-all vocabs and word embeddings.
  write_vocab("words", all_vocab_words, "all")
  write_vocab("tags", all_vocab_labels, "all")
  write_vocab("chars", all_vocab_chars, "all")
  get_emebeddings(all_vocab_words, embedding_file_lines, "all")
  write_vocab("leaf_types", all_vocab_leaf_html_tags, "all")
  write_vocab("xpath_units", all_vocab_html_tags, "all")
# Run via absl so flags are parsed before main executes.
if __name__ == "__main__":
  app.run(main)
| [
"itertools.chain.from_iterable",
"simpdom.constants.VERTICAL_WEBSITES.keys",
"tensorflow.compat.v1.gfile.Open",
"numpy.zeros",
"absl.flags.DEFINE_string",
"tempfile.TemporaryFile",
"tensorflow.compat.v1.gfile.ListDirectory",
"absl.app.run",
"absl.flags.DEFINE_integer",
"collections.Counter",
"nu... | [((1031, 1124), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""dim_word_glove"""', '(100)', '"""The dimensionality of the word embeddings."""'], {}), "('dim_word_glove', 100,\n 'The dimensionality of the word embeddings.')\n", (1051, 1124), False, 'from absl import flags\n'), ((1142, 1245), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""word_frequence_cutoff"""', '(3)', '"""Ignore the words whose frequence is under this."""'], {}), "('word_frequence_cutoff', 3,\n 'Ignore the words whose frequence is under this.')\n", (1162, 1245), False, 'from absl import flags\n'), ((1263, 1388), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""domtree_path"""', '""""""', '"""The path of the json file containing node and features from swde dataset."""'], {}), "('domtree_path', '',\n 'The path of the json file containing node and features from swde dataset.'\n )\n", (1282, 1388), False, 'from absl import flags\n'), ((1389, 1508), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""word_embedding_path"""', '""""""', '"""The path of word embedding file, which should be in GloVe format."""'], {}), "('word_embedding_path', '',\n 'The path of word embedding file, which should be in GloVe format.')\n", (1408, 1508), False, 'from absl import flags\n'), ((2470, 2491), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (2489, 2491), False, 'import collections\n'), ((3604, 3648), 'numpy.zeros', 'np.zeros', (['(vocab_size, FLAGS.dim_word_glove)'], {}), '((vocab_size, FLAGS.dim_word_glove))\n', (3612, 3648), True, 'import numpy as np\n'), ((5177, 5211), 'simpdom.constants.VERTICAL_WEBSITES.keys', 'constants.VERTICAL_WEBSITES.keys', ([], {}), '()\n', (5209, 5211), False, 'from simpdom import constants\n'), ((7649, 7662), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (7656, 7662), False, 'from absl import app\n'), ((4376, 4400), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {}), '()\n', (4398, 4400), False, 'import 
tempfile\n'), ((4563, 4599), 'numpy.savez', 'np.savez', (['tmp'], {'embeddings': 'embeddings'}), '(tmp, embeddings=embeddings)\n', (4571, 4599), True, 'import numpy as np\n'), ((5220, 5265), 'tensorflow.compat.v1.gfile.Open', 'tf.gfile.Open', (['FLAGS.word_embedding_path', '"""r"""'], {}), "(FLAGS.word_embedding_path, 'r')\n", (5233, 5265), True, 'import tensorflow.compat.v1 as tf\n'), ((5750, 5792), 'tensorflow.compat.v1.gfile.ListDirectory', 'tf.gfile.ListDirectory', (['FLAGS.domtree_path'], {}), '(FLAGS.domtree_path)\n', (5772, 5792), True, 'import tensorflow.compat.v1 as tf\n'), ((4430, 4526), 'os.path.join', 'os.path.join', (['FLAGS.domtree_path', "(vertical_to_process + '.%d.emb.npz' % FLAGS.dim_word_glove)"], {}), "(FLAGS.domtree_path, vertical_to_process + '.%d.emb.npz' %\n FLAGS.dim_word_glove)\n", (4442, 4526), False, 'import os\n'), ((4874, 4962), 'os.path.join', 'os.path.join', (['FLAGS.domtree_path', "(vertical_to_process + '.vocab.%s.txt' % vocab_type)"], {}), "(FLAGS.domtree_path, vertical_to_process + '.vocab.%s.txt' %\n vocab_type)\n", (4886, 4962), False, 'import os\n'), ((2853, 2901), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (["node['prev_text']"], {}), "(node['prev_text'])\n", (2882, 2901), False, 'import itertools\n'), ((6080, 6128), 'os.path.join', 'os.path.join', (['FLAGS.domtree_path', 'json_data_path'], {}), '(FLAGS.domtree_path, json_data_path)\n', (6092, 6128), False, 'import os\n')] |
#Calculate formulas for reward , q_value , R(state , action):
from config import *
from csv_handling import *
import numpy as np
def calc_reward(df , r, D):
    """Reward formula: (|r| - df) / D.

    Works elementwise when r (or df) is a numpy array.
    """
    return (np.absolute(r) - df) / D
def calc_q(action,r,total_record_input,reward,D,candidate_set,df):
    """Computes the Q-value of `action`.

    Q(action) = reward[action] + max over the other candidates c of
                (reward[c] + df[c]/D - total_record_input * r[c] / D**2).

    Args:
        action: the candidate whose Q-value is computed (must be an element
            of candidate_set).
        r: per-candidate quantity indexed by candidate element.
        total_record_input: total number of input records.
        reward: rewards aligned with candidate_set.
        D: normalization constant.
        candidate_set: list of candidate actions.
        df: per-candidate values aligned with candidate_set.
    """
    index = candidate_set.index(action)
    q_value = reward[index]
    # Sentinel lower bound, kept from the original formulation. Note that if
    # candidate_set contains only `action`, the sentinel is added unchanged.
    maxi = -10**10
    for element in candidate_set:
        if element != action:
            idx = candidate_set.index(element)
            value = reward[idx] + df[idx] / D
            # Bug fix: use the `total_record_input` parameter; the original
            # referenced an undefined `total_records_input` global name.
            value = value - ((total_record_input * r[element]) / (D**2))
            # Bug fix: track the running maximum; the original did
            # `value = maxi`, which left `maxi` stuck at the sentinel.
            if value > maxi:
                maxi = value
    return q_value + maxi
def calc_r(output_list):
    """Returns the record count of output_list and adds it to the running
    module-level ``total_records`` total (initialized elsewhere)."""
    global total_records
    count = len(output_list)
    total_records += count
    return count
| [
"numpy.absolute"
] | [((170, 184), 'numpy.absolute', 'np.absolute', (['r'], {}), '(r)\n', (181, 184), True, 'import numpy as np\n')] |
# Plots |d^3 u_l / dx^3| of the highest-order IR basis function near x = -1
# on log-log axes; requires the irlib package and its precomputed basis files.
from __future__ import print_function
import numpy
import irlib
import scipy.integrate as integrate
import matplotlib.pyplot as plt
from mpmath import *

plt.rc('text', usetex=True)

# Sample a narrow window [-1, -1 + 0.01] where the basis varies fastest.
N = 1000
xvec = numpy.linspace(-1, -1+0.01, N)

## Construct basis
idx = 0
# Plot-style cycles; `idx` indexes `colors` per Lambda value.
markers = ['o', 's', 'x', '+', 'v']
ls = ['-', '--', ':']
colors = ['r', 'b', 'g', 'k']
for Lambda in [1000.0]:
    #print("Loading basis functions...")
    # Load the precomputed basis for this cutoff Lambda.
    b = irlib.loadtxt("np10/basis_f-mp-Lambda"+str(Lambda)+".txt")
    print("dim = ", b.dim())

    # Print the interior section edges of the piecewise representation.
    edges = numpy.array([b.section_edge(s) for s in range(b.num_sections()+1)])
    for s in range(1,b.num_sections()):
        print(edges[s])

    order = 3  # third derivative
    dim = b.dim()
    plt.figure(1)
    # Only the highest basis function l = dim - 1 is plotted.
    for l in [dim-1]:
        plt.plot(xvec+1, numpy.abs(numpy.array([b.ulx_derivative(l,x,order) for x in xvec])), marker='', linestyle='-', color=colors[idx])

    idx += 1

plt.figure(1)
plt.xlabel('$x$')
plt.ylabel('$u_l(x)$')
plt.xscale("log")
plt.yscale("log")
plt.legend()
plt.tight_layout()
plt.savefig('ulx_deriv.pdf')
| [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rc",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] | [((156, 183), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (162, 183), True, 'import matplotlib.pyplot as plt\n'), ((201, 233), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(-1 + 0.01)', 'N'], {}), '(-1, -1 + 0.01, N)\n', (215, 233), False, 'import numpy\n'), ((884, 897), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (894, 897), True, 'import matplotlib.pyplot as plt\n'), ((898, 915), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (908, 915), True, 'import matplotlib.pyplot as plt\n'), ((916, 938), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$u_l(x)$"""'], {}), "('$u_l(x)$')\n", (926, 938), True, 'import matplotlib.pyplot as plt\n'), ((939, 956), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (949, 956), True, 'import matplotlib.pyplot as plt\n'), ((957, 974), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (967, 974), True, 'import matplotlib.pyplot as plt\n'), ((975, 987), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (985, 987), True, 'import matplotlib.pyplot as plt\n'), ((988, 1006), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1004, 1006), True, 'import matplotlib.pyplot as plt\n'), ((1007, 1035), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ulx_deriv.pdf"""'], {}), "('ulx_deriv.pdf')\n", (1018, 1035), True, 'import matplotlib.pyplot as plt\n'), ((694, 707), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (704, 707), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# -------------------------------------------------------------------------
# This file is part of BayesOpt, an efficient C++ library for
# Bayesian optimization.
#
# Copyright (C) 2011-2015 <NAME> <<EMAIL>>
#
# BayesOpt is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BayesOpt is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BayesOpt. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------
import bayesopt
from bayesoptmodule import BayesOptContinuous
import numpy as np
from time import clock
# Function for testing.
def testfunc(Xin):
total = 5.0
for value in Xin:
total = total + (value -0.33)*(value-0.33)
return total
# Class for OO testing.
class BayesOptTest(BayesOptContinuous):
    """Object-oriented problem definition for BayesOpt.

    The optimizer calls evaluateSample for every queried point.
    """

    def evaluateSample(self,Xin):
        """Objective callback; delegates to the module-level testfunc."""
        return testfunc(Xin)
# Let's define the parameters.
# For different options: see parameters.h and cpp.
# If a parameter is not defined, it will automatically be set
# to a default value.
import time  # Fix: time.clock() was removed in Python 3.8; use perf_counter.

params = {}
params['n_iterations'] = 50
params['n_init_samples'] = 20
params['crit_name'] = "cSum(cEI,cDistance)"
params['crit_params'] = [1, 0.5]
params['kernel_name'] = "kMaternISO3"

print("Callback implementation")

n = 2                     # number of dimensions
lb = np.zeros((n,))       # lower bounds
ub = np.ones((n,))        # upper bounds

# Time the plain-callback interface.
start = time.perf_counter()
mvalue, x_out, error = bayesopt.optimize(testfunc, n, lb, ub, params)

print("Result", x_out)
print("Seconds", time.perf_counter() - start)

# Time the object-oriented interface.
print("OO implementation")
bo_test = BayesOptTest(n)
bo_test.parameters = params
bo_test.lower_bound = lb
bo_test.upper_bound = ub
start = time.perf_counter()
mvalue, x_out, error = bo_test.optimize()

print("Result", x_out)
print("Seconds", time.perf_counter() - start)

# Time optimization over a fixed discrete candidate set.
print("Callback discrete implementation")
x_set = np.random.rand(100, n)
start = time.perf_counter()
mvalue, x_out, error = bayesopt.optimize_discrete(testfunc, x_set, params)

print("Result", x_out)
print("Seconds", time.perf_counter() - start)

# Brute-force check of the optimum over the discrete candidate set.
value = np.array([testfunc(i) for i in x_set])
print("Optimun", x_set[value.argmin()])
| [
"bayesopt.optimize_discrete",
"bayesopt.optimize",
"numpy.zeros",
"numpy.ones",
"time.clock",
"numpy.random.rand"
] | [((1805, 1819), 'numpy.zeros', 'np.zeros', (['(n,)'], {}), '((n,))\n', (1813, 1819), True, 'import numpy as np\n'), ((1825, 1838), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (1832, 1838), True, 'import numpy as np\n'), ((1848, 1855), 'time.clock', 'clock', ([], {}), '()\n', (1853, 1855), False, 'from time import clock\n'), ((1880, 1926), 'bayesopt.optimize', 'bayesopt.optimize', (['testfunc', 'n', 'lb', 'ub', 'params'], {}), '(testfunc, n, lb, ub, params)\n', (1897, 1926), False, 'import bayesopt\n'), ((2127, 2134), 'time.clock', 'clock', ([], {}), '()\n', (2132, 2134), False, 'from time import clock\n'), ((2287, 2309), 'numpy.random.rand', 'np.random.rand', (['(100)', 'n'], {}), '(100, n)\n', (2301, 2309), True, 'import numpy as np\n'), ((2317, 2324), 'time.clock', 'clock', ([], {}), '()\n', (2322, 2324), False, 'from time import clock\n'), ((2349, 2400), 'bayesopt.optimize_discrete', 'bayesopt.optimize_discrete', (['testfunc', 'x_set', 'params'], {}), '(testfunc, x_set, params)\n', (2375, 2400), False, 'import bayesopt\n'), ((1968, 1975), 'time.clock', 'clock', ([], {}), '()\n', (1973, 1975), False, 'from time import clock\n'), ((2218, 2225), 'time.clock', 'clock', ([], {}), '()\n', (2223, 2225), False, 'from time import clock\n'), ((2442, 2449), 'time.clock', 'clock', ([], {}), '()\n', (2447, 2449), False, 'from time import clock\n')] |
from copy import deepcopy
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.datasets
from sklearn import datasets
from sklearn import tree
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import plot_tree, DecisionTreeClassifier
from sklearn.utils import check_X_y, check_array
from sklearn.utils.validation import _check_sample_weight
from imodels.tree.viz_utils import DecisionTreeViz
# Render figures at higher resolution than matplotlib's default.
plt.rcParams['figure.dpi'] = 300
class Node:
    """One node of a tree in a FIGS tree-sum model.

    A node is either a split (children attached) or a leaf (children are
    None). ``value`` holds the mean prediction for a split node and the
    weight for a linear node.
    """

    def __init__(self, feature: int = None, threshold: int = None,
                 value=None, idxs=None, is_root: bool = False, left=None,
                 impurity_reduction: float = None, tree_num: int = None,
                 right=None):
        """Initializes the node; structure is attached via attributes."""
        # Bookkeeping: which tree this node belongs to, the sample mask it
        # covers, and whether it is that tree's root.
        self.is_root = is_root
        self.idxs = idxs
        self.tree_num = tree_num
        # Split definition.
        self.feature = feature
        self.threshold = threshold
        self.impurity_reduction = impurity_reduction
        # Prediction value: mean for a split node, weight for a linear node.
        self.value = value
        # Children, plus temporary children considered during tree growth.
        self.left = left
        self.right = right
        self.left_temp = None
        self.right_temp = None

    def setattrs(self, **kwargs):
        """Bulk-assigns attributes from keyword arguments."""
        for name, val in kwargs.items():
            setattr(self, name, val)

    def __str__(self):
        if self.is_root:
            return f'X_{self.feature} <= {self.threshold:0.3f} (Tree #{self.tree_num} root)'
        if self.left is None and self.right is None:
            return f'Val: {self.value[0][0]:0.3f} (leaf)'
        return f'X_{self.feature} <= {self.threshold:0.3f} (split)'

    def print_root(self, y):
        """Formats this node together with the positive-label share of y."""
        try:
            one_count = pd.Series(y).value_counts()[1.0]
        except KeyError:
            one_count = 0
        one_proportion = f' {one_count}/{y.shape[0]} ({round(100 * one_count / y.shape[0], 2)}%)'

        if self.is_root:
            return f'X_{self.feature} <= {self.threshold:0.3f}' + one_proportion
        if self.left is None and self.right is None:
            return f'ΔRisk = {self.value[0][0]:0.2f}' + one_proportion
        return f'X_{self.feature} <= {self.threshold:0.3f}' + one_proportion

    def __repr__(self):
        return str(self)
class FIGS(BaseEstimator):
    """FIGS (sum of trees) estimator.

    Fast Interpretable Greedy-Tree Sums (FIGS) is an algorithm for fitting concise rule-based models.
    Specifically, FIGS generalizes CART to simultaneously grow a flexible number of trees in a summation.
    The total number of splits across all the trees can be restricted by a pre-specified threshold, keeping the model interpretable.
    Experiments across real-world datasets show that FIGS achieves state-of-the-art prediction performance when restricted to just a few splits (e.g. less than 20).
    https://arxiv.org/abs/2201.11931
    """

    def __init__(self, max_rules: int = 12, min_impurity_decrease: float = 0.0, random_state=None):
        """
        Params
        ------
        max_rules: total split budget across all trees (None for unlimited)
        min_impurity_decrease: minimum impurity reduction required to accept a split
        random_state: kept for sklearn API compatibility (not used directly here)
        """
        super().__init__()
        self.max_rules = max_rules
        self.min_impurity_decrease = min_impurity_decrease
        self.random_state = random_state
        self._init_estimator_type()  # decides between regressor and classifier
        self._init_decision_function()

    def _init_estimator_type(self):
        """
        FIGSRegressor and FIGSClassifier override this method
        to alter the prediction task. When using this class directly,
        it is equivalent to FIGSRegressor
        """
        self._estimator_type = 'regressor'

    def _init_decision_function(self):
        """Sets decision function based on _estimator_type
        """
        # used by sklearn GridSearchCV, BaggingClassifier
        if self._estimator_type == 'classifier':
            decision_function = lambda x: self.predict_proba(x)[:, 1]
        elif self._estimator_type == 'regressor':
            decision_function = self.predict
        # bug fix: the chosen function was previously assigned only to a dead
        # local, so meta-estimators never saw a decision_function attribute
        self.decision_function = decision_function

    def _construct_node_with_stump(self, X, y, idxs, tree_num, sample_weight=None,
                                   compare_nodes_with_sample_weight=True):
        """Fit a depth-1 tree (stump) on the subset ``idxs`` and wrap it as a Node.

        Params
        ------
        compare_nodes_with_sample_weight: Deprecated
            If this is set to true and sample_weight is passed, use sample_weight to compare nodes
            Otherwise, use sample_weight only for picking a split given a particular node

        Returns
        -------
        A Node with impurity_reduction=None when no split was found; otherwise a
        split Node whose left_temp/right_temp children are populated.
        """
        # array indices into the fitted stump's tree_ arrays
        SPLIT = 0
        LEFT = 1
        RIGHT = 2

        # fit stump
        stump = tree.DecisionTreeRegressor(max_depth=1)
        sweight = None
        if sample_weight is not None:
            sweight = sample_weight[idxs]
        stump.fit(X[idxs], y[idxs], sample_weight=sweight)

        # these are all arrays, arr[0] is split node
        # note: -2 is dummy
        feature = stump.tree_.feature
        threshold = stump.tree_.threshold
        impurity = stump.tree_.impurity
        n_node_samples = stump.tree_.n_node_samples
        value = stump.tree_.value

        # no split found: return a leaf-like node flagged via impurity_reduction=None
        if len(feature) == 1:
            return Node(idxs=idxs, value=value[SPLIT], tree_num=tree_num,
                        feature=feature[SPLIT], threshold=threshold[SPLIT],
                        impurity_reduction=None)

        # manage sample weights
        idxs_split = X[:, feature[SPLIT]] <= threshold[SPLIT]
        idxs_left = idxs_split & idxs
        idxs_right = ~idxs_split & idxs
        if sample_weight is None:
            n_node_samples_left = n_node_samples[LEFT]
            n_node_samples_right = n_node_samples[RIGHT]
        else:
            n_node_samples_left = sample_weight[idxs_left].sum()
            n_node_samples_right = sample_weight[idxs_right].sum()
        n_node_samples_split = n_node_samples_left + n_node_samples_right

        # (weighted) impurity decrease of this split, scaled by the node size
        impurity_reduction = (
            impurity[SPLIT] -
            impurity[LEFT] * n_node_samples_left / n_node_samples_split -
            impurity[RIGHT] * n_node_samples_right / n_node_samples_split
        ) * n_node_samples_split

        node_split = Node(idxs=idxs, value=value[SPLIT], tree_num=tree_num,
                          feature=feature[SPLIT], threshold=threshold[SPLIT],
                          impurity_reduction=impurity_reduction)
        # children are kept in *_temp until the split is actually accepted in fit()
        node_left = Node(idxs=idxs_left, value=value[LEFT], tree_num=tree_num)
        node_right = Node(idxs=idxs_right, value=value[RIGHT], tree_num=tree_num)
        node_split.setattrs(left_temp=node_left, right_temp=node_right, )
        return node_split

    def fit(self, X, y=None, feature_names=None, verbose=False, sample_weight=None):
        """Greedily grow a sum of trees, always taking the globally best split next.

        Params
        ------
        sample_weight: array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Splits that would create child nodes with net zero or negative weight
            are ignored while searching for a split in each node.
        """
        X, y = check_X_y(X, y)
        y = y.astype(float)
        if feature_names is not None:
            self.feature_names_ = feature_names
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)

        self.trees_ = []  # list of the root nodes of added trees
        self.complexity_ = 0  # tracks the number of rules in the model
        y_predictions_per_tree = {}  # predictions for each tree
        y_residuals_per_tree = {}  # based on predictions above

        # set up initial potential_splits
        # everything in potential_splits either is_root (so it can be added directly to self.trees_)
        # or it is a child of a root node that has already been added
        idxs = np.ones(X.shape[0], dtype=bool)
        node_init = self._construct_node_with_stump(X=X, y=y, idxs=idxs, tree_num=-1, sample_weight=sample_weight)
        potential_splits = [node_init]
        for node in potential_splits:
            node.setattrs(is_root=True)
        potential_splits = sorted(potential_splits, key=lambda x: x.impurity_reduction)

        # start the greedy fitting algorithm
        finished = False
        while len(potential_splits) > 0 and not finished:
            # get node with max impurity_reduction (since the list is sorted)
            split_node = potential_splits.pop()

            # don't split on node
            if split_node.impurity_reduction < self.min_impurity_decrease:
                finished = True
                break

            # split on node
            if verbose:
                print('\nadding ' + str(split_node))
            self.complexity_ += 1

            # if added a tree root
            if split_node.is_root:
                # start a new tree
                self.trees_.append(split_node)

                # update tree_num
                for node_ in [split_node, split_node.left_temp, split_node.right_temp]:
                    if node_ is not None:
                        node_.tree_num = len(self.trees_) - 1

                # add new root potential node
                node_new_root = Node(is_root=True, idxs=np.ones(X.shape[0], dtype=bool),
                                     tree_num=-1)
                potential_splits.append(node_new_root)

            # assign left_temp, right_temp to be proper children
            # (basically adds them to tree in predict method)
            split_node.setattrs(left=split_node.left_temp, right=split_node.right_temp)

            # add children to potential_splits
            potential_splits.append(split_node.left)
            potential_splits.append(split_node.right)

            # update predictions for altered tree
            for tree_num_ in range(len(self.trees_)):
                y_predictions_per_tree[tree_num_] = self._predict_tree(self.trees_[tree_num_], X)
            y_predictions_per_tree[-1] = np.zeros(X.shape[0])  # dummy 0 preds for possible new trees

            # update residuals for each tree
            # -1 is key for potential new tree
            for tree_num_ in list(range(len(self.trees_))) + [-1]:
                y_residuals_per_tree[tree_num_] = deepcopy(y)

                # subtract predictions of all other trees
                for tree_num_other_ in range(len(self.trees_)):
                    if not tree_num_other_ == tree_num_:
                        y_residuals_per_tree[tree_num_] -= y_predictions_per_tree[tree_num_other_]

            # recompute all impurities + update potential_split children
            potential_splits_new = []
            for potential_split in potential_splits:
                y_target = y_residuals_per_tree[potential_split.tree_num]

                # re-calculate the best split
                potential_split_updated = self._construct_node_with_stump(X=X,
                                                                          y=y_target,
                                                                          idxs=potential_split.idxs,
                                                                          tree_num=potential_split.tree_num,
                                                                          sample_weight=sample_weight, )

                # need to preserve certain attributes from before (value at this split + is_root)
                # value may change because residuals may have changed, but we want it to store the value from before
                potential_split.setattrs(
                    feature=potential_split_updated.feature,
                    threshold=potential_split_updated.threshold,
                    impurity_reduction=potential_split_updated.impurity_reduction,
                    left_temp=potential_split_updated.left_temp,
                    right_temp=potential_split_updated.right_temp,
                )

                # this is a valid split
                if potential_split.impurity_reduction is not None:
                    potential_splits_new.append(potential_split)

            # sort so largest impurity reduction comes last (should probs make this a heap later)
            potential_splits = sorted(potential_splits_new, key=lambda x: x.impurity_reduction)
            if verbose:
                print(self)
            if self.max_rules is not None and self.complexity_ >= self.max_rules:
                finished = True
                break
        return self

    def _tree_to_str(self, root: Node, prefix=''):
        """Recursively render one tree as an indented string."""
        if root is None:
            return ''
        elif root.threshold is None:
            return ''
        pprefix = prefix + '\t'
        return prefix + str(root) + '\n' + self._tree_to_str(root.left, pprefix) + self._tree_to_str(root.right,
                                                                                                     pprefix)

    def _tree_to_str_with_data(self, X, y, root: Node, prefix=''):
        """Like _tree_to_str, but annotates each node with label counts for (X, y)."""
        if root is None:
            return ''
        elif root.threshold is None:
            return ''
        pprefix = prefix + '\t'
        left = X[:, root.feature] <= root.threshold
        return (
            prefix + root.print_root(y) + '\n' +
            self._tree_to_str_with_data(X[left], y[left], root.left, pprefix) +
            self._tree_to_str_with_data(X[~left], y[~left], root.right, pprefix))

    def __str__(self):
        """Pretty-print the fitted model as a sum of trees."""
        s = '> ------------------------------\n'
        s += '> FIGS-Fast Interpretable Greedy-Tree Sums:\n'
        s += '> \tPredictions are made by summing the "Val" reached by traversing each tree\n'
        s += '> ------------------------------\n'
        s += '\n\t+\n'.join([self._tree_to_str(t) for t in self.trees_])
        if hasattr(self, 'feature_names_') and self.feature_names_ is not None:
            # reverse order so X_10 is substituted before X_1
            for i in range(len(self.feature_names_))[::-1]:
                s = s.replace(f'X_{i}', self.feature_names_[i])
        return s

    def print_tree(self, X, y):
        """Pretty-print the model with per-node label counts computed on (X, y)."""
        s = '------------\n' + '\n\t+\n'.join([self._tree_to_str_with_data(X, y, t) for t in self.trees_])
        if hasattr(self, 'feature_names_') and self.feature_names_ is not None:
            for i in range(len(self.feature_names_))[::-1]:
                s = s.replace(f'X_{i}', self.feature_names_[i])
        return s

    def predict(self, X):
        """Sum the per-tree predictions; classifiers threshold the sum at 0.5."""
        X = check_array(X)
        preds = np.zeros(X.shape[0])
        for tree_ in self.trees_:
            preds += self._predict_tree(tree_, X)
        if self._estimator_type == 'regressor':
            return preds
        elif self._estimator_type == 'classifier':
            return (preds > 0.5).astype(int)

    def predict_proba(self, X):
        """Return class probabilities (classifier only; NotImplemented for regressors)."""
        X = check_array(X)
        if self._estimator_type == 'regressor':
            return NotImplemented
        preds = np.zeros(X.shape[0])
        for tree_ in self.trees_:
            preds += self._predict_tree(tree_, X)
        preds = np.clip(preds, a_min=0., a_max=1.)  # constrain to range of probabilities
        return np.vstack((1 - preds, preds)).transpose()

    def _predict_tree(self, root: Node, X):
        """Predict for a single tree
        """

        def _predict_tree_single_point(root: Node, x):
            if root.left is None and root.right is None:
                return root.value
            left = x[root.feature] <= root.threshold
            if left:
                if root.left is None:  # we don't actually have to worry about this case
                    return root.value
                else:
                    return _predict_tree_single_point(root.left, x)
            else:
                if root.right is None:  # we don't actually have to worry about this case
                    return root.value
                else:
                    return _predict_tree_single_point(root.right, x)

        preds = np.zeros(X.shape[0])
        for i in range(X.shape[0]):
            preds[i] = _predict_tree_single_point(root, X[i])
        return preds

    def plot(self, cols=2, feature_names=None, filename=None, label="all",
             impurity=False, tree_number=None, dpi=150):
        """Plot the fitted trees using sklearn's plot_tree.

        Params
        ------
        cols: kept for backward compatibility (each tree gets its own axis)
        feature_names: optional names substituted for X_i in the plots
        filename: if given, save the figure to this path instead of showing it
        tree_number: if given, plot only this tree
        """
        is_single_tree = len(self.trees_) < 2 or tree_number is not None
        n_plots = int(len(self.trees_)) if tree_number is None else 1
        _, axs = plt.subplots(n_plots, dpi=dpi)
        criterion = "squared_error" if self._estimator_type == "regressor" else "gini"
        n_classes = 1 if self._estimator_type == 'regressor' else 2
        for i in range(n_plots):
            # with a single axis, plt.subplots returns it directly rather than an array
            ax = axs if is_single_tree else axs[i]
            try:
                # `tree_` avoids shadowing the sklearn `tree` module used above
                tree_ = self.trees_[i] if tree_number is None else self.trees_[tree_number]
                plot_tree(DecisionTreeViz(tree_, criterion, n_classes),
                          ax=ax, feature_names=feature_names, label=label,
                          impurity=impurity)
            except IndexError:
                ax.axis('off')
                continue
            ax.set_title(f"Tree {i}")
        if filename is not None:
            plt.savefig(filename)
            return
        plt.show()
class FIGSRegressor(FIGS, RegressorMixin):
    # Regression variant of FIGS: predictions are the raw summed tree outputs.
    def _init_estimator_type(self):
        """Mark this estimator as a regressor for sklearn and FIGS internals."""
        self._estimator_type = 'regressor'
class FIGSClassifier(FIGS, ClassifierMixin):
    # Classification variant of FIGS: summed tree outputs are treated as probabilities.
    def _init_estimator_type(self):
        """Mark this estimator as a classifier for sklearn and FIGS internals."""
        self._estimator_type = 'classifier'
class FIGSCV:
    """Cross-validation wrapper that selects the best ``max_rules`` for a FIGS model.

    Each candidate rule budget in ``n_rules_list`` is scored with
    ``cross_val_score``; the budget with the highest mean score is then refit on
    the full data and exposed as ``self.figs``.
    """

    def __init__(self, figs,
                 n_rules_list: List[int] = (6, 12, 24, 30, 50),
                 cv: int = 3, scoring=None, *args, **kwargs):
        """
        Params
        ------
        figs: FIGS estimator class to instantiate (e.g. FIGSRegressor)
        n_rules_list: candidate values of max_rules to cross-validate over
            (default is a tuple rather than a list to avoid the
            mutable-default-argument pitfall; accepted values are unchanged)
        cv: number of cross-validation folds
        scoring: sklearn scoring string/callable passed to cross_val_score
        """
        self._figs_class = figs
        self.n_rules_list = np.array(n_rules_list)
        self.cv = cv
        self.scoring = scoring

    def fit(self, X, y):
        """Score every candidate rule budget, keep the best, and refit it on (X, y)."""
        self.scores_ = []
        for n_rules in self.n_rules_list:
            est = self._figs_class(max_rules=n_rules)
            cv_scores = cross_val_score(est, X, y, cv=self.cv, scoring=self.scoring)
            mean_score = np.mean(cv_scores)
            # keep the estimator with the best mean CV score seen so far
            if len(self.scores_) == 0 or mean_score > np.max(self.scores_):
                self.figs = est
            self.scores_.append(mean_score)
        # only the winning estimator is fit on the full data
        self.figs.fit(X=X, y=y)

    def predict_proba(self, X):
        """Delegate to the selected (refit) FIGS model."""
        return self.figs.predict_proba(X)

    def predict(self, X):
        """Delegate to the selected (refit) FIGS model."""
        return self.figs.predict(X)

    @property
    def max_rules(self):
        """max_rules of the selected FIGS model."""
        return self.figs.max_rules
class FIGSRegressorCV(FIGSCV):
    """FIGSCV specialized to regression (scored with R^2 by default)."""

    def __init__(self,
                 n_rules_list: List[int] = (6, 12, 24, 30, 50),
                 cv: int = 3, scoring='r2', *args, **kwargs):
        # tuple default avoids the mutable-default-argument pitfall
        super().__init__(figs=FIGSRegressor, n_rules_list=n_rules_list,
                         cv=cv, scoring=scoring, *args, **kwargs)
class FIGSClassifierCV(FIGSCV):
    """FIGSCV specialized to classification (scored with accuracy by default)."""

    def __init__(self,
                 n_rules_list: List[int] = (6, 12, 24, 30, 50),
                 cv: int = 3, scoring="accuracy", *args, **kwargs):
        # tuple default avoids the mutable-default-argument pitfall
        super().__init__(figs=FIGSClassifier, n_rules_list=n_rules_list,
                         cv=cv, scoring=scoring, *args, **kwargs)
if __name__ == '__main__':
    from sklearn import datasets

    # Smoke-test demo: fit each estimator variant on toy data and draw a tree.
    X_cls, Y_cls = datasets.load_breast_cancer(return_X_y=True)
    X_reg, Y_reg = datasets.make_friedman1(100)

    # plain classifier with uniform sample weights
    est = FIGSClassifier(max_rules=10)
    # est.fit(X_cls, Y_cls, sample_weight=np.arange(0, X_cls.shape[0]))
    est.fit(X_cls, Y_cls, sample_weight=[1] * X_cls.shape[0])
    est.predict(X_cls)

    # cross-validated regressor
    est = FIGSRegressorCV()
    est.fit(X_reg, Y_reg)
    est.predict(X_reg)
    print(est.max_rules)
    est.figs.plot(tree_number=0)

    # cross-validated classifier
    est = FIGSClassifierCV()
    est.fit(X_cls, Y_cls)
    est.predict(X_cls)
    print(est.max_rules)
    est.figs.plot(tree_number=0)
# %%
| [
"sklearn.model_selection.cross_val_score",
"numpy.ones",
"numpy.clip",
"numpy.mean",
"sklearn.utils.validation._check_sample_weight",
"sklearn.datasets.make_friedman1",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.utils.check_array",
"sklearn.utils.check_X_y",
"numpy.max",
"imodels.tree.viz_ut... | [((20125, 20169), 'sklearn.datasets.load_breast_cancer', 'datasets.load_breast_cancer', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (20152, 20169), False, 'from sklearn import datasets\n'), ((20189, 20217), 'sklearn.datasets.make_friedman1', 'datasets.make_friedman1', (['(100)'], {}), '(100)\n', (20212, 20217), False, 'from sklearn import datasets\n'), ((4694, 4733), 'sklearn.tree.DecisionTreeRegressor', 'tree.DecisionTreeRegressor', ([], {'max_depth': '(1)'}), '(max_depth=1)\n', (4720, 4733), False, 'from sklearn import tree\n'), ((7480, 7495), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (7489, 7495), False, 'from sklearn.utils import check_X_y, check_array\n'), ((8212, 8243), 'numpy.ones', 'np.ones', (['X.shape[0]'], {'dtype': 'bool'}), '(X.shape[0], dtype=bool)\n', (8219, 8243), True, 'import numpy as np\n'), ((14870, 14884), 'sklearn.utils.check_array', 'check_array', (['X'], {}), '(X)\n', (14881, 14884), False, 'from sklearn.utils import check_X_y, check_array\n'), ((14901, 14921), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (14909, 14921), True, 'import numpy as np\n'), ((15218, 15232), 'sklearn.utils.check_array', 'check_array', (['X'], {}), '(X)\n', (15229, 15232), False, 'from sklearn.utils import check_X_y, check_array\n'), ((15331, 15351), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (15339, 15351), True, 'import numpy as np\n'), ((15450, 15486), 'numpy.clip', 'np.clip', (['preds'], {'a_min': '(0.0)', 'a_max': '(1.0)'}), '(preds, a_min=0.0, a_max=1.0)\n', (15457, 15486), True, 'import numpy as np\n'), ((16367, 16387), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (16375, 16387), True, 'import numpy as np\n'), ((17025, 17055), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_plots'], {'dpi': 'dpi'}), '(n_plots, dpi=dpi)\n', (17037, 17055), True, 'import matplotlib.pyplot as plt\n'), ((18016, 18026), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (18024, 18026), True, 'import matplotlib.pyplot as plt\n'), ((18512, 18534), 'numpy.array', 'np.array', (['n_rules_list'], {}), '(n_rules_list)\n', (18520, 18534), True, 'import numpy as np\n'), ((7676, 7714), 'sklearn.utils.validation._check_sample_weight', '_check_sample_weight', (['sample_weight', 'X'], {}), '(sample_weight, X)\n', (7696, 7714), False, 'from sklearn.utils.validation import _check_sample_weight\n'), ((10465, 10485), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (10473, 10485), True, 'import numpy as np\n'), ((17967, 17988), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (17978, 17988), True, 'import matplotlib.pyplot as plt\n'), ((18759, 18819), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['est', 'X', 'y'], {'cv': 'self.cv', 'scoring': 'self.scoring'}), '(est, X, y, cv=self.cv, scoring=self.scoring)\n', (18774, 18819), False, 'from sklearn.model_selection import train_test_split, cross_val_score\n'), ((18845, 18863), 'numpy.mean', 'np.mean', (['cv_scores'], {}), '(cv_scores)\n', (18852, 18863), True, 'import numpy as np\n'), ((10736, 10747), 'copy.deepcopy', 'deepcopy', (['y'], {}), '(y)\n', (10744, 10747), False, 'from copy import deepcopy\n'), ((15539, 15568), 'numpy.vstack', 'np.vstack', (['(1 - preds, preds)'], {}), '((1 - preds, preds))\n', (15548, 15568), True, 'import numpy as np\n'), ((17631, 17674), 'imodels.tree.viz_utils.DecisionTreeViz', 'DecisionTreeViz', (['tree', 'criterion', 'n_classes'], {}), '(tree, criterion, n_classes)\n', (17646, 17674), False, 'from imodels.tree.viz_utils import DecisionTreeViz\n'), ((18965, 18985), 'numpy.max', 'np.max', (['self.scores_'], {}), '(self.scores_)\n', (18971, 18985), True, 'import numpy as np\n'), ((1895, 1907), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (1904, 1907), True, 'import pandas as pd\n'), ((9665, 9696), 'numpy.ones', 'np.ones', (['X.shape[0]'], {'dtype': 'bool'}), 
'(X.shape[0], dtype=bool)\n', (9672, 9696), True, 'import numpy as np\n')] |
# gui.py
#
# This file is part of scqubits.
#
# Copyright (c) 2019 and later, <NAME> and <NAME>
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import inspect
from typing import Dict, List, Tuple, Union
import numpy as np
from matplotlib.figure import Figure
try:
from ipywidgets import (
AppLayout,
HBox,
IntSlider,
Label,
Layout,
Text,
VBox,
interactive,
widgets,
)
except ImportError:
_HAS_IPYWIDGETS = False
else:
_HAS_IPYWIDGETS = True
try:
from IPython.display import display
except ImportError:
_HAS_IPYTHON = False
else:
_HAS_IPYTHON = True
from pathlib import Path
import scqubits as scq
import scqubits.utils.misc as utils
from scqubits.core.qubit_base import QubitBaseClass
class GUI:
    def __repr__(self):
        # Return an empty repr so only the displayed widgets appear in the notebook.
        return ""
    @utils.Required(ipywidgets=_HAS_IPYWIDGETS, IPython=_HAS_IPYTHON)
    def __init__(self):
        """Assemble per-qubit slider defaults, initialize the default qubit
        (Transmon), build all widgets, and display the interactive GUI.
        """
        scq.settings.PROGRESSBAR_DISABLED = False

        # Slider ranges/settings shared by all qubits; the per-qubit dicts
        # below extend or override these entries.
        global_defaults = {
            "mode_wavefunc": "real",
            "mode_matrixelem": "abs",
            "ng": {"min": 0, "max": 1},
            "flux": {"min": 0, "max": 1},
            "EJ": {"min": 1e-10, "max": 70},
            "EC": {"min": 1e-10, "max": 5},
            "int": {"min": 1, "max": 30},
            "float": {"min": 0, "max": 30},
        }
        transmon_defaults = {
            **global_defaults,
            "scan_param": "ng",
            "operator": "n_operator",
            "ncut": {"min": 10, "max": 50},
            "scale": 1,
            "num_sample": 150,
        }
        tunabletransmon_defaults = {
            **global_defaults,
            "scan_param": "flux",
            "operator": "n_operator",
            "EJmax": global_defaults["EJ"],
            "d": {"min": 0, "max": 1},
            "ncut": {"min": 10, "max": 50},
            "scale": 1,
            "num_sample": 150,
        }
        fluxonium_defaults = {
            **global_defaults,
            "scan_param": "flux",
            "operator": "n_operator",
            "EC": {"min": 1e-2, "max": 5},
            "EL": {"min": 1e-10, "max": 2},
            "cutoff": {"min": 10, "max": 120},
            "scale": 1,
            "num_sample": 150,
        }
        fluxqubit_defaults = {
            **global_defaults,
            "scan_param": "flux",
            "operator": "n_1_operator",
            "ncut": {"min": 5, "max": 30},
            "EJ1": global_defaults["EJ"],
            "EJ2": global_defaults["EJ"],
            "EJ3": global_defaults["EJ"],
            "ECJ1": global_defaults["EC"],
            "ECJ2": global_defaults["EC"],
            "ECJ3": global_defaults["EC"],
            "ECg1": global_defaults["EC"],
            "ECg2": global_defaults["EC"],
            "ng1": global_defaults["ng"],
            "ng2": global_defaults["ng"],
            "scale": None,
            "num_sample": 100,
        }
        zeropi_defaults = {
            **global_defaults,
            "scan_param": "flux",
            "operator": "n_theta_operator",
            "ncut": {"min": 5, "max": 50},
            "EL": {"min": 1e-10, "max": 3},
            "ECJ": {"min": 1e-10, "max": 30},
            "dEJ": {"min": 0, "max": 1},
            "dCJ": {"min": 0, "max": 1},
            "scale": None,
            "num_sample": 50,
        }
        fullzeropi_defaults = {
            **global_defaults,
            "scan_param": "flux",
            "operator": "n_theta_operator",
            "ncut": {"min": 5, "max": 50},
            "EL": {"min": 1e-10, "max": 3},
            "ECJ": {"min": 1e-10, "max": 30},
            "dEJ": {"min": 0, "max": 1},
            "dCJ": {"min": 0, "max": 1},
            "dEL": {"min": 0, "max": 1},
            "dC": {"min": 0, "max": 1},
            "zeropi_cutoff": {"min": 5, "max": 30},
            "zeta_cutoff": {"min": 5, "max": 30},
            "scale": None,
            "num_sample": 50,
        }
        cos2phiqubit_defaults = {
            **global_defaults,
            "scan_param": "flux",
            "operator": "phi_operator",
            "EL": {"min": 1e-10, "max": 5},
            "ECJ": {"min": 1e-10, "max": 30},
            "dEJ": {"min": 0, "max": 0.99},
            "dL": {"min": 0, "max": 0.99},
            "dCJ": {"min": 0, "max": 0.99},
            "ncut": {"min": 5, "max": 50},
            "zeta_cut": {"min": 10, "max": 50},
            "phi_cut": {"min": 5, "max": 30},
            "scale": None,
            "num_sample": 50,
        }
        self.qubit_defaults = {
            "Transmon": transmon_defaults,
            "TunableTransmon": tunabletransmon_defaults,
            "Fluxonium": fluxonium_defaults,
            "FluxQubit": fluxqubit_defaults,
            "ZeroPi": zeropi_defaults,
            "FullZeroPi": fullzeropi_defaults,
            "Cos2PhiQubit": cos2phiqubit_defaults,
        }
        # Discretization grid used by the ZeroPi-style qubits.
        self.grid_defaults = {
            "grid_min_val": -6 * np.pi,
            "grid_max_val": 6 * np.pi,
            "grid_pt_count": 50,
        }
        self.plot_choices = [
            "Energy spectrum",
            "Wavefunctions",
            "Matrix element scan",
            "Matrix elements",
        ]
        self.supported_qubits = [
            "Transmon",
            "TunableTransmon",
            "Fluxonium",
            "FluxQubit",
            "ZeroPi",
            "FullZeroPi",
            "Cos2PhiQubit",
        ]
        # gui_active gates plot callbacks while widgets are being reconfigured;
        # qubit_change signals that defaults must be reloaded on the next init.
        self.gui_active = True
        self.qubit_change = True
        # Qubits whose diagonalization is slow enough to warrant delayed updating.
        self.slow_qubits = ["FluxQubit", "ZeroPi", "FullZeroPi", "Cos2PhiQubit"]
        self.active_defaults: Dict[str, Union[str, Dict[str, Union[int, float]]]] = {}
        self.fig: Figure
        self.qubit_current_params: Dict[str, Union[int, float, None]] = {}
        self.qubit_base_params: Dict[str, Union[int, float, None]] = {}
        self.qubit_scan_params: Dict[str, Union[int, float, None]] = {}
        self.qubit_plot_options_widgets: Dict[widgets] = {}
        self.qubit_and_plot_choice_widgets: Dict[widgets] = {}
        self.qubit_params_widgets: Dict[widgets] = {}
        self.active_qubit: QubitBaseClass
        # Build everything for the default qubit and render the GUI.
        self.set_qubit("Transmon")
        self.create_qubit_and_plot_choice_widgets()
        qubit_and_plot_choice_display, plot_display = self.create_GUI()
        display(qubit_and_plot_choice_display, plot_display)
# Initialization Methods -------------------------------------------------------------------------------------------------
def initialize_qubit(self, qubit_name: str) -> None:
"""Initializes self.active_qubit to the user's choice
using the chosen qubit's default parameters.
Parameters
----------
qubit_name:
"""
QubitClass = getattr(scq, qubit_name)
if self.qubit_change:
init_params = QubitClass.default_params()
if qubit_name == "ZeroPi" or qubit_name == "FullZeroPi":
init_params["grid"] = scq.Grid1d(
min_val=self.grid_defaults["grid_min_val"],
max_val=self.grid_defaults["grid_max_val"],
pt_count=self.grid_defaults["grid_pt_count"],
)
self.qubit_current_params = init_params
self.qubit_change = False
self.active_qubit = QubitClass(**self.qubit_current_params)
    def set_qubit(self, qubit_name: str) -> None:
        """Sets up the chosen qubit to be the active qubit
        and updates the defaults and widgets accordingly.

        Parameters
        ----------
        qubit_name:
        """
        self.active_defaults = self.qubit_defaults[qubit_name]
        self.initialize_qubit(qubit_name)
        # Rebuild the parameter dicts and widgets for the new qubit's parameters.
        self.create_params_dict()
        self.create_plot_settings_widgets()
        self.create_qubit_params_widgets()
def get_operators(self) -> List[str]:
"""Returns a list of operators for the active_qubit.
Note that this list omits any operators that start with "_".
Returns
-------
List[ str ]
"""
operator_list = []
for name, val in inspect.getmembers(self.active_qubit):
if "operator" in name and name[0] != "_":
operator_list.append(name)
return operator_list
# Widget EventHandler Methods -------------------------------------------------------------------------------------------------
def scan_dropdown_eventhandler(self, change):
self.gui_active = False
self.qubit_params_widgets[change.old].disabled = False
self.qubit_params_widgets[change.new].disabled = True
self.qubit_plot_options_widgets["scan_range_slider"].min = self.active_defaults[
change.new
]["min"]
self.qubit_plot_options_widgets["scan_range_slider"].max = self.active_defaults[
change.new
]["max"]
self.qubit_plot_options_widgets["scan_range_slider"].value = [
self.active_defaults[change.new]["min"],
self.active_defaults[change.new]["max"],
]
self.gui_active = True
self.qubit_plot_options_widgets[
"scan_range_slider"
].description = "{} range".format(change.new)
    def qubit_buttons_eventhandler(self, change):
        # Mark that the qubit type changed so initialize_qubit reloads defaults.
        self.qubit_change = True
    def save_button_clicked_action(self, *args):
        # Save the most recently drawn figure under the filename in the text widget.
        self.fig.savefig(self.qubit_plot_options_widgets["filename_text"].value)
# Methods for qubit_plot_interactive -------------------------------------------------------------------------------------------------
    def update_qubit_params(self, **params):
        # Merge slider values into the cached parameter dict and push them to the qubit.
        self.qubit_current_params.update(params)
        self.active_qubit.set_params(**self.qubit_current_params)
    def update_grid_qubit_params(self, **params):
        """Update parameters for grid-based qubits (ZeroPi variants).

        The widget supplies the grid as a (min, max) tuple; it is converted to a
        scq.Grid1d for the cached parameter dict, while set_params receives the
        three scalar grid_* values it expects instead.
        """
        grid_min, grid_max = params["grid"]
        updated_grid = scq.Grid1d(
            min_val=grid_min,
            max_val=grid_max,
            pt_count=self.grid_defaults["grid_pt_count"],
        )
        params.update({"grid": updated_grid})
        self.qubit_current_params.update(params)
        # set_params does not accept a Grid1d object; pass scalar grid values instead
        del params["grid"]
        params["grid_min_val"] = grid_min
        params["grid_max_val"] = grid_max
        params["grid_pt_count"] = self.grid_defaults["grid_pt_count"]
        self.active_qubit.set_params(**params)
def evals_vs_paramvals_plot(
self,
scan_value: str,
scan_range: Tuple[float, float],
eigenvalue_state_value: int,
subtract_ground_tf: bool,
**params: Union[Tuple[float, float], float, int]
) -> None:
"""This is the method associated with qubit_plot_interactive that allows for
us to interact with plot_evals_vs_paramvals().
Parameters
----------
scan_value:
Current value of the scan parameter dropdown.
scan_range:
Sets the interval [ min, max ] through
which plot_evals_vs_paramvals() will plot over.
eigenvalue_state_value:
The number of states/eigenvalues that will be plotted.
subtract_ground_tf:
Determines whether we subtract away the ground energy or not.
Initially set to False.
**params:
Dictionary of current qubit parameter values (taken from the sliders)
"""
if not self.gui_active:
return None
scan_min, scan_max = scan_range
self.update_qubit_params(**params)
np_list = np.linspace(scan_min, scan_max, self.active_defaults["num_sample"])
self.fig, _ = self.active_qubit.plot_evals_vs_paramvals(
scan_value,
np_list,
evals_count=eigenvalue_state_value,
subtract_ground=subtract_ground_tf,
)
def grid_evals_vs_paramvals_plot(
self,
scan_value: str,
scan_range: Tuple[float, float],
eigenvalue_state_value: int,
subtract_ground_tf: bool,
**params: Union[Tuple[float, float], float, int]
) -> None:
"""This is the method associated with qubit_plot_interactive that allows for
us to interact with plot_evals_vs_paramvals(). Namely, this method is for the
qubits that require a grid option.
Parameters
----------
scan_value:
Current value of the scan parameter dropdown.
scan_range:
Sets the interval [ min, max ] through
which plot_evals_vs_paramvals() will plot over.
eigenvalue_state_value:
The number of states/eigenvalues that will be plotted.
subtract_ground_tf:
Determines whether we subtract away the ground energy or not.
Initially set to False.
**params:
Dictionary of current qubit parameter values (taken from the sliders)
"""
if not self.gui_active:
return None
self.update_grid_qubit_params(**params)
scan_min, scan_max = scan_range
np_list = np.linspace(scan_min, scan_max, self.active_defaults["num_sample"])
self.fig, _ = self.active_qubit.plot_evals_vs_paramvals(
scan_value,
np_list,
evals_count=eigenvalue_state_value,
subtract_ground=subtract_ground_tf,
)
def matelem_vs_paramvals_plot(
self,
operator_value: str,
scan_value: str,
scan_range: Tuple[float, float],
matrix_element_state_value: int,
mode_value: str,
**params: Union[Tuple[float, float], float, int]
) -> None:
"""This is the method associated with qubit_plot_interactive that allows for us to interact with plot_matelem_vs_paramvals().
Parameters
----------
operator_value:
Current value of the operator dropdown.
scan_value:
Current value of the scan parameter dropdown.
scan_range:
Sets the interval [ min, max ] through
which plot_matelem_vs_paramvals() will plot over.
matrix_element_state_value:
The number of states/elements that will be shown.
mode_value:
Current value of the mode (e.g. real, imaginary, etc.)
**params:
Dictionary of current qubit parameter values (taken from the sliders)
"""
if not self.gui_active:
return None
scan_min, scan_max = scan_range
self.update_qubit_params(**params)
np_list = np.linspace(scan_min, scan_max, self.active_defaults["num_sample"])
self.fig, _ = self.active_qubit.plot_matelem_vs_paramvals(
operator_value,
scan_value,
np_list,
select_elems=matrix_element_state_value,
mode=mode_value,
)
def grid_matelem_vs_paramvals_plot(
self,
operator_value: str,
scan_value: str,
scan_range: Tuple[float, float],
matrix_element_state_value: int,
mode_value: str,
**params: Union[Tuple[float, float], float, int]
) -> None:
"""This is the method associated with qubit_plot_interactive that allows for us to interact with plot_matelem_vs_paramvals().
Namely, this method is for the qubits that require a grid option.
Parameters
----------
operator_value:
Current value of the operator dropdown.
scan_value:
Current value of the scan parameter dropdown.
scan_range:
Sets the interval [ min, max ] through
which plot_matelem_vs_paramvals() will plot over.
matrix_element_state_value:
The number of states/elements that will be shown.
mode_value:
Current value of the mode (e.g. real, imaginary, etc.)
**params:
Dictionary of current qubit parameter values (taken from the sliders)
"""
if not self.gui_active:
return None
self.update_grid_qubit_params(**params)
scan_min, scan_max = scan_range
np_list = np.linspace(scan_min, scan_max, self.active_defaults["num_sample"])
self.fig, _ = self.active_qubit.plot_matelem_vs_paramvals(
operator_value,
scan_value,
np_list,
select_elems=matrix_element_state_value,
mode=mode_value,
)
def scaled_wavefunction_plot(
self,
eigenvalue_states: Union[List[int], int],
mode_value: str,
manual_scale_tf: bool,
scale_value: float,
**params: Union[Tuple[float, float], float, int]
) -> None:
"""This is the method associated with qubit_plot_interactive that allows for us to interact with plot_wavefunction().
Namely, this method is for the qubits that have an option for scaling the wavefunction amplitudes.
Parameters
----------
eigenvalue_states:
The number of states to be plotted
mode_value:
Current value of the mode (e.g. real, imaginary, etc.)
manual_scale_tf:
scale_value:
**params:
Dictionary of current qubit parameter values (taken from the sliders)
"""
if manual_scale_tf:
self.qubit_plot_options_widgets[
"wavefunction_scale_slider"
].disabled = False
else:
self.qubit_plot_options_widgets["wavefunction_scale_slider"].disabled = True
scale_value = None
self.update_qubit_params(**params)
self.fig, _ = self.active_qubit.plot_wavefunction(
which=eigenvalue_states, mode=mode_value, scaling=scale_value
)
def wavefunction_plot(
self,
eigenvalue_states: Union[List[int], int],
mode_value: str,
**params: Union[Tuple[float, float], float, int]
) -> None:
"""This is the method associated with qubit_plot_interactive that allows for us to interact with plot_wavefunction().
Parameters
----------
eigenvalue_states:
The number of states to be plotted
mode_value:
Current value of the mode (e.g. real, imaginary, etc.)
**params:
Dictionary of current qubit parameter values (taken from the sliders)
"""
self.update_qubit_params(**params)
self.fig, _ = self.active_qubit.plot_wavefunction(
which=eigenvalue_states, mode=mode_value
)
def grid_wavefunction_plot(
self,
eigenvalue_states: Union[List[int], int],
mode_value: str,
**params: Union[Tuple[float, float], float, int]
) -> None:
"""This is the method associated with qubit_plot_interactive that allows for us to interact with plot_wavefunction().
Namely, this method is for the qubits that require a grid option.
Parameters
----------
eigenvalue_states:
The number of states to be plotted
mode_value:
Current value of the mode (e.g. real, imaginary, etc.)
**params:
Dictionary of current qubit parameter values (taken from the sliders)
"""
self.update_grid_qubit_params(**params)
self.fig, _ = self.active_qubit.plot_wavefunction(
which=eigenvalue_states, mode=mode_value
)
def matrixelements_plot(
self,
operator_value: str,
eigenvalue_state_value: int,
mode_value: str,
show_numbers_tf: bool,
show3d_tf: bool,
**params: Union[Tuple[float, float], float, int]
):
"""This is the method associated with qubit_plot_interactive that allows for us to interact with plot_matrixelements().
Parameters
----------
operator_value:
Current value of the operator dropdown.
eigenvalue_state_value:
The number of states/eigenvalues that will be plotted
mode_value:
Current value of the mode (e.g. real, imaginary, etc.)
show_numbers_tf:
Determines whether the numerical values will be shown in the 2D plot.
Initially set to False.
show3d_tf:
Determines whether a 3D version of the 2D plot will be shown.
Initially set to True.
**params:
Dictionary of current qubit parameter values (taken from the sliders)
"""
self.update_qubit_params(**params)
self.fig, _ = self.active_qubit.plot_matrixelements(
operator_value,
evals_count=eigenvalue_state_value,
mode=mode_value,
show_numbers=show_numbers_tf,
show3d=show3d_tf,
)
def grid_matrixelements_plot(
self,
operator_value: str,
eigenvalue_state_value: int,
mode_value: str,
show_numbers_tf: bool,
show3d_tf: bool,
**params: Union[Tuple[float, float], float, int]
):
"""This is the method associated with qubit_plot_interactive that allows for us to interact with plot_matrixelements().
Namely, this method is for the qubits that require a grid option.
Parameters
----------
operator_value:
Current value of the operator dropdown.
eigenvalue_state_value:
The number of states/eigenvalues that will be plotted
mode_value:
Current value of the mode (e.g. real, imaginary, etc.)
show_numbers_tf:
Determines whether the numerical values will be shown in the 2D plot.
Initially set to False.
show3d_tf:
Determines whether a 3D version of the 2D plot will be shown.
Initially set to True.
**params:
Dictionary of current qubit parameter values (taken from the sliders)
"""
self.update_grid_qubit_params(**params)
self.fig, _ = self.active_qubit.plot_matrixelements(
operator_value,
evals_count=eigenvalue_state_value,
mode=mode_value,
show_numbers=show_numbers_tf,
show3d=show3d_tf,
)
# Methods for create_GUI -------------------------------------------------------------------------------------------------
def display_qubit_info(self, qubit_info: bool) -> None:
"""Displays the image that corresponds to the current qubit.
Parameters
----------
qubit_info: bool
"""
if qubit_info:
image_box = widgets.Box(layout=Layout(justify_content="center"))
image_box.children = [
self.qubit_plot_options_widgets["qubit_info_image_widget"]
]
display(image_box)
def energy_scan_interactive(self) -> widgets.interactive:
"""Returns an interactive for the evals_vs_paramvals
Returns
-------
widgets.interactive
"""
self.qubit_params_widgets[
self.qubit_plot_options_widgets["scan_dropdown"].value
].disabled = True
if isinstance(self.active_qubit, (scq.ZeroPi, scq.FullZeroPi)):
interactive_choice = self.grid_evals_vs_paramvals_plot
else:
interactive_choice = self.evals_vs_paramvals_plot
qubit_plot_interactive = widgets.interactive(
interactive_choice,
scan_value=self.qubit_plot_options_widgets["scan_dropdown"],
scan_range=self.qubit_plot_options_widgets["scan_range_slider"],
subtract_ground_tf=self.qubit_plot_options_widgets[
"subtract_ground_checkbox"
],
eigenvalue_state_value=self.qubit_plot_options_widgets[
"eigenvalue_state_slider"
],
**self.qubit_params_widgets
)
return qubit_plot_interactive
def matelem_scan_interactive(self) -> widgets.interactive:
"""Returns an interactive for the matelem_vs_paramvals plot
Returns
-------
widgets.interactive
"""
self.qubit_plot_options_widgets["mode_dropdown"].value = self.active_defaults[
"mode_matrixelem"
]
self.qubit_params_widgets[
self.qubit_plot_options_widgets["scan_dropdown"].value
].disabled = True
if isinstance(self.active_qubit, (scq.ZeroPi, scq.FullZeroPi)):
interactive_choice = self.grid_matelem_vs_paramvals_plot
else:
interactive_choice = self.matelem_vs_paramvals_plot
qubit_plot_interactive = widgets.interactive(
interactive_choice,
operator_value=self.qubit_plot_options_widgets["operator_dropdown"],
scan_value=self.qubit_plot_options_widgets["scan_dropdown"],
scan_range=self.qubit_plot_options_widgets["scan_range_slider"],
matrix_element_state_value=self.qubit_plot_options_widgets[
"matrix_element_state_slider"
],
mode_value=self.qubit_plot_options_widgets["mode_dropdown"],
**self.qubit_params_widgets
)
return qubit_plot_interactive
def wavefunction_interactive(self) -> widgets.interactive:
"""Returns an interactive for the wavefunction plot
Returns
-------
widgets.interactive
"""
if isinstance(self.active_qubit, scq.FullZeroPi):
qubit_plot_interactive = None
else:
self.qubit_plot_options_widgets[
"mode_dropdown"
].value = self.active_defaults["mode_wavefunc"]
self.qubit_params_widgets[
self.qubit_plot_options_widgets["scan_dropdown"].value
].disabled = False
if (
isinstance(self.active_qubit, scq.FluxQubit)
or isinstance(self.active_qubit, scq.ZeroPi)
or isinstance(self.active_qubit, scq.Cos2PhiQubit)
):
which_widget = self.qubit_plot_options_widgets[
"wavefunction_single_state_selector"
]
else:
which_widget = self.qubit_plot_options_widgets[
"wavefunction_multi_state_selector"
]
if isinstance(self.active_qubit, scq.ZeroPi):
interactive_choice = self.grid_wavefunction_plot
elif isinstance(self.active_qubit, (scq.FluxQubit, scq.Cos2PhiQubit)):
interactive_choice = self.wavefunction_plot
else:
interactive_choice = self.scaled_wavefunction_plot
if interactive_choice == self.scaled_wavefunction_plot:
qubit_plot_interactive = widgets.interactive(
interactive_choice,
eigenvalue_states=which_widget,
mode_value=self.qubit_plot_options_widgets["mode_dropdown"],
manual_scale_tf=self.qubit_plot_options_widgets[
"manual_scale_checkbox"
],
scale_value=self.qubit_plot_options_widgets[
"wavefunction_scale_slider"
],
**self.qubit_params_widgets
)
else:
qubit_plot_interactive = widgets.interactive(
interactive_choice,
eigenvalue_states=which_widget,
mode_value=self.qubit_plot_options_widgets["mode_dropdown"],
**self.qubit_params_widgets
)
return qubit_plot_interactive
def matelem_interactive(self) -> widgets.interactive:
"""Returns an interactive for the matrix elements plot.
Returns
-------
widgets.interactive
"""
self.qubit_plot_options_widgets["mode_dropdown"].value = self.active_defaults[
"mode_matrixelem"
]
self.qubit_params_widgets[
self.qubit_plot_options_widgets["scan_dropdown"].value
].disabled = False
if isinstance(self.active_qubit, (scq.ZeroPi, scq.FullZeroPi)):
interactive_choice = self.grid_matrixelements_plot
else:
interactive_choice = self.matrixelements_plot
qubit_plot_interactive = widgets.interactive(
interactive_choice,
operator_value=self.qubit_plot_options_widgets["operator_dropdown"],
eigenvalue_state_value=self.qubit_plot_options_widgets[
"eigenvalue_state_slider"
],
mode_value=self.qubit_plot_options_widgets["mode_dropdown"],
show_numbers_tf=self.qubit_plot_options_widgets["show_numbers_checkbox"],
show3d_tf=self.qubit_plot_options_widgets["show3d_checkbox"],
**self.qubit_params_widgets
)
return qubit_plot_interactive
def qubit_plot(self, qubit_value: str, qubit_info: bool, plot_value: str) -> None:
"""Sets up and displays qubit_plot_interactive.
Parameters
----------
qubit_value:
Current qubit chosen.
qubit_info:
Decides whether or not the image corresponding
to the qubit is shown.
plot_value:
Current plot option chosen
"""
if qubit_value in self.slow_qubits:
scq.settings.PROGRESSBAR_DISABLED = False
else:
scq.settings.PROGRESSBAR_DISABLED = True
self.set_qubit(qubit_value)
self.display_qubit_info(qubit_info)
qubit_plot_interactive = self.create_qubit_plot_interactive(plot_value)
self.display_qubit_plot_interactive(qubit_plot_interactive)
def display_qubit_plot_interactive(
self, qubit_plot_interactive: widgets.interactive
) -> None:
"""Organizes the output for qubit_plot_interactive and displays it.
Parameters
----------
qubit_plot_interactive:
"""
if qubit_plot_interactive is None:
display("FullZeroPi currently does not have Wavefunctions implemented.")
return None
output = qubit_plot_interactive.children[-1]
output.layout = Layout(align_items="center")
widget_columns = self.create_plot_option_columns(qubit_plot_interactive)
qubit_plot_interactive.children = (
widgets.HBox(widget_columns, layout=Layout(margin="2px"), box_style="info"),
widgets.HBox(
[
self.qubit_plot_options_widgets["save_button"],
self.qubit_plot_options_widgets["filename_text"],
],
layout=Layout(margin="2px", justify_content="flex-end"),
),
output,
)
display(qubit_plot_interactive)
# Create Methods -------------------------------------------------------------------------------------------------
def create_params_dict(self) -> None:
"""Initializes qubit_base_params and qubit_scan_params.
Note that qubit_scan_params will be used to create the
dropdown options.
"""
self.qubit_base_params.clear()
self.qubit_scan_params.clear()
self.qubit_base_params = dict(self.qubit_current_params)
if isinstance(self.active_qubit, (scq.ZeroPi, scq.FullZeroPi)):
self.qubit_base_params["grid"] = None
if "truncated_dim" in self.qubit_base_params.keys():
del self.qubit_base_params["truncated_dim"]
for param_name, param_val in self.qubit_base_params.items():
if "cut" in param_name or "grid" in param_name:
pass
else:
self.qubit_scan_params[param_name] = param_val
def create_plot_settings_widgets(self):
"""Creates all the widgets that will be used for general plotting options."""
self.qubit_plot_options_widgets = {}
std_layout = Layout(width="300px")
operator_dropdown_list = self.get_operators()
scan_dropdown_list = self.qubit_scan_params.keys()
mode_dropdown_list = [
("Re(·)", "real"),
("Im(·)", "imag"),
("|·|", "abs"),
(u"|\u00B7|\u00B2", "abs_sqr"),
]
file = open(self.active_qubit._image_filename, "rb")
image = file.read()
self.qubit_plot_options_widgets = {
"qubit_info_image_widget": widgets.Image(
value=image, format="jpg", layout=Layout(width="700px")
),
"save_button": widgets.Button(
icon="save", layout=widgets.Layout(width="35px")
),
"filename_text": widgets.Text(
value=str(Path.cwd().joinpath("plot.pdf")),
description="",
disabled=False,
layout=Layout(width="500px"),
),
"scan_dropdown": widgets.Dropdown(
options=scan_dropdown_list,
value=self.active_defaults["scan_param"],
description="Scan over",
disabled=False,
layout=std_layout,
),
"mode_dropdown": widgets.Dropdown(
options=mode_dropdown_list,
description="Plot as:",
disabled=False,
layout=std_layout,
),
"operator_dropdown": widgets.Dropdown(
options=operator_dropdown_list,
value=self.active_defaults["operator"],
description="Operator",
disabled=False,
layout=std_layout,
),
"scan_range_slider": widgets.FloatRangeSlider(
min=self.active_defaults[self.active_defaults["scan_param"]]["min"],
max=self.active_defaults[self.active_defaults["scan_param"]]["max"],
value=[
self.active_defaults[self.active_defaults["scan_param"]]["min"],
self.active_defaults[self.active_defaults["scan_param"]]["max"],
],
step=0.05,
description="{} range".format(self.active_defaults["scan_param"]),
continuous_update=False,
layout=std_layout,
),
"eigenvalue_state_slider": widgets.IntSlider(
min=1,
max=10,
value=7,
description="Highest state",
continuous_update=False,
layout=std_layout,
),
"matrix_element_state_slider": widgets.IntSlider(
min=1,
max=6,
value=4,
description="Highest state",
continuous_update=False,
layout=std_layout,
),
"wavefunction_single_state_selector": widgets.IntSlider(
min=0,
max=10,
value=0,
description="State no.",
continuous_update=False,
layout=std_layout,
),
"wavefunction_scale_slider": widgets.FloatSlider(
min=0.1,
max=4,
value=self.active_defaults["scale"],
description="\u03c8 ampl.",
continuous_update=False,
layout=std_layout,
),
"wavefunction_multi_state_selector": widgets.SelectMultiple(
options=range(0, 10),
value=[0, 1, 2, 3, 4],
description="States",
disabled=False,
continuous_update=False,
layout=std_layout,
),
"show_numbers_checkbox": widgets.Checkbox(
value=False, description="Show values", disabled=False
),
"show3d_checkbox": widgets.Checkbox(
value=True, description="Show 3D", disabled=False
),
"subtract_ground_checkbox": widgets.Checkbox(
value=False, description="Subtract E\u2080", disabled=False
),
"manual_scale_checkbox": widgets.Checkbox(
value=False, description="Manual Scaling", disabled=False
),
}
self.qubit_plot_options_widgets["save_button"].on_click(
self.save_button_clicked_action
)
self.qubit_plot_options_widgets["scan_dropdown"].observe(
self.scan_dropdown_eventhandler, names="value"
)
def create_qubit_params_widgets(self):
"""Creates all the widgets that will be used
for changing the parameter values for the specified qubit.
"""
# We need to clear qubit_params_widgets since the previous widgets from the
# old qubit will still be initialized otherwise.
self.qubit_params_widgets.clear()
for param_name, param_val in self.qubit_base_params.items():
if param_name == "grid":
grid_min = self.qubit_current_params["grid"].min_val
grid_max = self.qubit_current_params["grid"].max_val
self.qubit_params_widgets[param_name] = widgets.FloatRangeSlider(
min=-12 * np.pi,
max=12 * np.pi,
value=[grid_min, grid_max],
step=0.05,
description="Grid range",
continuous_update=False,
layout=Layout(width="300px"),
)
elif isinstance(param_val, int):
kwargs = (
self.active_defaults.get(param_name) or self.active_defaults["int"]
)
self.qubit_params_widgets[param_name] = widgets.IntSlider(
**kwargs,
value=param_val,
description="{}:".format(param_name),
continuous_update=False,
layout=Layout(width="300px")
)
else:
kwargs = (
self.active_defaults.get(param_name)
or self.active_defaults["float"]
)
self.qubit_params_widgets[param_name] = widgets.FloatSlider(
**kwargs,
value=param_val,
step=0.01,
description="{}:".format(param_name),
continuous_update=False,
layout=Layout(width="300px")
)
def create_qubit_and_plot_choice_widgets(self):
"""Creates all the widgets that controls
which qubit or plot the user can choose from.
"""
self.qubit_and_plot_choice_widgets = {
"qubit_buttons": widgets.ToggleButtons(
options=self.supported_qubits,
description="Qubits:",
layout=widgets.Layout(width="800px"),
),
"plot_buttons": widgets.ToggleButtons(
options=self.plot_choices,
description="Plot:",
button_style="info",
),
"show_qubitinfo_checkbox": widgets.Checkbox(
value=False, description="qubit info", disabled=False
),
}
self.qubit_and_plot_choice_widgets["qubit_buttons"].observe(
self.qubit_buttons_eventhandler, names="value"
)
def create_plot_option_columns(
self, qubit_plot_interactive: widgets.interactive
) -> List[widgets.VBox]:
"""Organizes the widgets in qubit_plot_interactive into columns.
The first column will always contain the widgets that correspond to
plotting options, whereas the remaining columns will contain the
widgets that control the qubit parameters.
Parameters
----------
qubit_plot_interactive:
Returns
-------
List[ widgets.VBox ]
"""
widgets_per_column = 7
base_index = (len(qubit_plot_interactive.children) - 1) - len(
self.qubit_base_params
)
initial_index = base_index
end_index = base_index + widgets_per_column
widget_list = [VBox([*qubit_plot_interactive.children[0:base_index]])]
while end_index < len(qubit_plot_interactive.children):
widget_list.append(
VBox([*qubit_plot_interactive.children[initial_index:end_index]])
)
initial_index += widgets_per_column
end_index += widgets_per_column
widget_list.append(VBox([*qubit_plot_interactive.children[initial_index:-1]]))
return widget_list
def create_qubit_plot_interactive(self, plot_value: str) -> widgets.interactive:
"""Creates the qubit_plot_interactive that corresponds to the
selected qubit and plot option.
Parameters
----------
plot_value:
Current plot option chosen (e.g. Energy Spectrum)
Returns
-------
widgets.interactive
"""
if plot_value == "Energy spectrum":
return self.energy_scan_interactive()
elif plot_value == "Matrix element scan":
return self.matelem_scan_interactive()
elif plot_value == "Wavefunctions":
return self.wavefunction_interactive()
elif plot_value == "Matrix elements":
return self.matelem_interactive()
def create_GUI(self) -> Tuple[widgets.VBox, widgets.interactive_output]:
"""Creates the two main components of the GUI: the qubit and plot option
buttons and the interactive_output that connects the buttons with
the main qubit plot.
Returns
-------
Tuple[ widgets.VBox, widgets.interactive_output ]
"""
qubit_choice_hbox = widgets.HBox(
[
self.qubit_and_plot_choice_widgets["qubit_buttons"],
self.qubit_and_plot_choice_widgets["show_qubitinfo_checkbox"],
]
)
plot_choice_hbox = widgets.HBox(
[self.qubit_and_plot_choice_widgets["plot_buttons"]]
)
qubit_and_plot_choice_widgets = widgets.VBox(
[qubit_choice_hbox, plot_choice_hbox]
)
qubit_and_plot_choice_interactive = widgets.interactive_output(
self.qubit_plot,
{
"qubit_value": self.qubit_and_plot_choice_widgets["qubit_buttons"],
"qubit_info": self.qubit_and_plot_choice_widgets[
"show_qubitinfo_checkbox"
],
"plot_value": self.qubit_and_plot_choice_widgets["plot_buttons"],
},
)
qubit_and_plot_choice_interactive.layout.width = "975px"
return qubit_and_plot_choice_widgets, qubit_and_plot_choice_interactive
| [
"ipywidgets.widgets.ToggleButtons",
"ipywidgets.widgets.Checkbox",
"ipywidgets.widgets.HBox",
"ipywidgets.widgets.interactive_output",
"pathlib.Path.cwd",
"ipywidgets.widgets.FloatSlider",
"IPython.display.display",
"scqubits.utils.misc.Required",
"scqubits.Grid1d",
"ipywidgets.widgets.Dropdown",
... | [((1052, 1116), 'scqubits.utils.misc.Required', 'utils.Required', ([], {'ipywidgets': '_HAS_IPYWIDGETS', 'IPython': '_HAS_IPYTHON'}), '(ipywidgets=_HAS_IPYWIDGETS, IPython=_HAS_IPYTHON)\n', (1066, 1116), True, 'import scqubits.utils.misc as utils\n'), ((6504, 6556), 'IPython.display.display', 'display', (['qubit_and_plot_choice_display', 'plot_display'], {}), '(qubit_and_plot_choice_display, plot_display)\n', (6511, 6556), False, 'from IPython.display import display\n'), ((8304, 8341), 'inspect.getmembers', 'inspect.getmembers', (['self.active_qubit'], {}), '(self.active_qubit)\n', (8322, 8341), False, 'import inspect\n'), ((10047, 10144), 'scqubits.Grid1d', 'scq.Grid1d', ([], {'min_val': 'grid_min', 'max_val': 'grid_max', 'pt_count': "self.grid_defaults['grid_pt_count']"}), "(min_val=grid_min, max_val=grid_max, pt_count=self.grid_defaults[\n 'grid_pt_count'])\n", (10057, 10144), True, 'import scqubits as scq\n'), ((11665, 11732), 'numpy.linspace', 'np.linspace', (['scan_min', 'scan_max', "self.active_defaults['num_sample']"], {}), "(scan_min, scan_max, self.active_defaults['num_sample'])\n", (11676, 11732), True, 'import numpy as np\n'), ((13188, 13255), 'numpy.linspace', 'np.linspace', (['scan_min', 'scan_max', "self.active_defaults['num_sample']"], {}), "(scan_min, scan_max, self.active_defaults['num_sample'])\n", (13199, 13255), True, 'import numpy as np\n'), ((14674, 14741), 'numpy.linspace', 'np.linspace', (['scan_min', 'scan_max', "self.active_defaults['num_sample']"], {}), "(scan_min, scan_max, self.active_defaults['num_sample'])\n", (14685, 14741), True, 'import numpy as np\n'), ((16260, 16327), 'numpy.linspace', 'np.linspace', (['scan_min', 'scan_max', "self.active_defaults['num_sample']"], {}), "(scan_min, scan_max, self.active_defaults['num_sample'])\n", (16271, 16327), True, 'import numpy as np\n'), ((23522, 23903), 'ipywidgets.widgets.interactive', 'widgets.interactive', (['interactive_choice'], {'scan_value': 
"self.qubit_plot_options_widgets['scan_dropdown']", 'scan_range': "self.qubit_plot_options_widgets['scan_range_slider']", 'subtract_ground_tf': "self.qubit_plot_options_widgets['subtract_ground_checkbox']", 'eigenvalue_state_value': "self.qubit_plot_options_widgets['eigenvalue_state_slider']"}), "(interactive_choice, scan_value=self.\n qubit_plot_options_widgets['scan_dropdown'], scan_range=self.\n qubit_plot_options_widgets['scan_range_slider'], subtract_ground_tf=\n self.qubit_plot_options_widgets['subtract_ground_checkbox'],\n eigenvalue_state_value=self.qubit_plot_options_widgets[\n 'eigenvalue_state_slider'], **self.qubit_params_widgets)\n", (23541, 23903), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((24775, 25219), 'ipywidgets.widgets.interactive', 'widgets.interactive', (['interactive_choice'], {'operator_value': "self.qubit_plot_options_widgets['operator_dropdown']", 'scan_value': "self.qubit_plot_options_widgets['scan_dropdown']", 'scan_range': "self.qubit_plot_options_widgets['scan_range_slider']", 'matrix_element_state_value': "self.qubit_plot_options_widgets['matrix_element_state_slider']", 'mode_value': "self.qubit_plot_options_widgets['mode_dropdown']"}), "(interactive_choice, operator_value=self.\n qubit_plot_options_widgets['operator_dropdown'], scan_value=self.\n qubit_plot_options_widgets['scan_dropdown'], scan_range=self.\n qubit_plot_options_widgets['scan_range_slider'],\n matrix_element_state_value=self.qubit_plot_options_widgets[\n 'matrix_element_state_slider'], mode_value=self.\n qubit_plot_options_widgets['mode_dropdown'], **self.qubit_params_widgets)\n", (24794, 25219), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((28514, 28956), 'ipywidgets.widgets.interactive', 'widgets.interactive', (['interactive_choice'], {'operator_value': "self.qubit_plot_options_widgets['operator_dropdown']", 
'eigenvalue_state_value': "self.qubit_plot_options_widgets['eigenvalue_state_slider']", 'mode_value': "self.qubit_plot_options_widgets['mode_dropdown']", 'show_numbers_tf': "self.qubit_plot_options_widgets['show_numbers_checkbox']", 'show3d_tf': "self.qubit_plot_options_widgets['show3d_checkbox']"}), "(interactive_choice, operator_value=self.\n qubit_plot_options_widgets['operator_dropdown'], eigenvalue_state_value\n =self.qubit_plot_options_widgets['eigenvalue_state_slider'], mode_value\n =self.qubit_plot_options_widgets['mode_dropdown'], show_numbers_tf=self\n .qubit_plot_options_widgets['show_numbers_checkbox'], show3d_tf=self.\n qubit_plot_options_widgets['show3d_checkbox'], **self.qubit_params_widgets)\n", (28533, 28956), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((30417, 30445), 'ipywidgets.Layout', 'Layout', ([], {'align_items': '"""center"""'}), "(align_items='center')\n", (30423, 30445), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((30987, 31018), 'IPython.display.display', 'display', (['qubit_plot_interactive'], {}), '(qubit_plot_interactive)\n', (30994, 31018), False, 'from IPython.display import display\n'), ((32157, 32178), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""300px"""'}), "(width='300px')\n", (32163, 32178), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((42066, 42201), 'ipywidgets.widgets.HBox', 'widgets.HBox', (["[self.qubit_and_plot_choice_widgets['qubit_buttons'], self.\n qubit_and_plot_choice_widgets['show_qubitinfo_checkbox']]"], {}), "([self.qubit_and_plot_choice_widgets['qubit_buttons'], self.\n qubit_and_plot_choice_widgets['show_qubitinfo_checkbox']])\n", (42078, 42201), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((42293, 42359), 'ipywidgets.widgets.HBox', 
'widgets.HBox', (["[self.qubit_and_plot_choice_widgets['plot_buttons']]"], {}), "([self.qubit_and_plot_choice_widgets['plot_buttons']])\n", (42305, 42359), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((42423, 42474), 'ipywidgets.widgets.VBox', 'widgets.VBox', (['[qubit_choice_hbox, plot_choice_hbox]'], {}), '([qubit_choice_hbox, plot_choice_hbox])\n', (42435, 42474), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((42542, 42812), 'ipywidgets.widgets.interactive_output', 'widgets.interactive_output', (['self.qubit_plot', "{'qubit_value': self.qubit_and_plot_choice_widgets['qubit_buttons'],\n 'qubit_info': self.qubit_and_plot_choice_widgets[\n 'show_qubitinfo_checkbox'], 'plot_value': self.\n qubit_and_plot_choice_widgets['plot_buttons']}"], {}), "(self.qubit_plot, {'qubit_value': self.\n qubit_and_plot_choice_widgets['qubit_buttons'], 'qubit_info': self.\n qubit_and_plot_choice_widgets['show_qubitinfo_checkbox'], 'plot_value':\n self.qubit_and_plot_choice_widgets['plot_buttons']})\n", (42568, 42812), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((22928, 22946), 'IPython.display.display', 'display', (['image_box'], {}), '(image_box)\n', (22935, 22946), False, 'from IPython.display import display\n'), ((30242, 30314), 'IPython.display.display', 'display', (['"""FullZeroPi currently does not have Wavefunctions implemented."""'], {}), "('FullZeroPi currently does not have Wavefunctions implemented.')\n", (30249, 30314), False, 'from IPython.display import display\n'), ((33123, 33274), 'ipywidgets.widgets.Dropdown', 'widgets.Dropdown', ([], {'options': 'scan_dropdown_list', 'value': "self.active_defaults['scan_param']", 'description': '"""Scan over"""', 'disabled': '(False)', 'layout': 'std_layout'}), "(options=scan_dropdown_list, value=self.active_defaults[\n 'scan_param'], 
description='Scan over', disabled=False, layout=std_layout)\n", (33139, 33274), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((33395, 33502), 'ipywidgets.widgets.Dropdown', 'widgets.Dropdown', ([], {'options': 'mode_dropdown_list', 'description': '"""Plot as:"""', 'disabled': '(False)', 'layout': 'std_layout'}), "(options=mode_dropdown_list, description='Plot as:',\n disabled=False, layout=std_layout)\n", (33411, 33502), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((33612, 33764), 'ipywidgets.widgets.Dropdown', 'widgets.Dropdown', ([], {'options': 'operator_dropdown_list', 'value': "self.active_defaults['operator']", 'description': '"""Operator"""', 'disabled': '(False)', 'layout': 'std_layout'}), "(options=operator_dropdown_list, value=self.active_defaults\n ['operator'], description='Operator', disabled=False, layout=std_layout)\n", (33628, 33764), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((34538, 34656), 'ipywidgets.widgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(1)', 'max': '(10)', 'value': '(7)', 'description': '"""Highest state"""', 'continuous_update': '(False)', 'layout': 'std_layout'}), "(min=1, max=10, value=7, description='Highest state',\n continuous_update=False, layout=std_layout)\n", (34555, 34656), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((34808, 34925), 'ipywidgets.widgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(1)', 'max': '(6)', 'value': '(4)', 'description': '"""Highest state"""', 'continuous_update': '(False)', 'layout': 'std_layout'}), "(min=1, max=6, value=4, description='Highest state',\n continuous_update=False, layout=std_layout)\n", (34825, 34925), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, 
widgets\n'), ((35084, 35198), 'ipywidgets.widgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(0)', 'max': '(10)', 'value': '(0)', 'description': '"""State no."""', 'continuous_update': '(False)', 'layout': 'std_layout'}), "(min=0, max=10, value=0, description='State no.',\n continuous_update=False, layout=std_layout)\n", (35101, 35198), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((35348, 35491), 'ipywidgets.widgets.FloatSlider', 'widgets.FloatSlider', ([], {'min': '(0.1)', 'max': '(4)', 'value': "self.active_defaults['scale']", 'description': '"""ψ ampl."""', 'continuous_update': '(False)', 'layout': 'std_layout'}), "(min=0.1, max=4, value=self.active_defaults['scale'],\n description='ψ ampl.', continuous_update=False, layout=std_layout)\n", (35367, 35491), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((35953, 36025), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Show values"""', 'disabled': '(False)'}), "(value=False, description='Show values', disabled=False)\n", (35969, 36025), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((36088, 36155), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(True)', 'description': '"""Show 3D"""', 'disabled': '(False)'}), "(value=True, description='Show 3D', disabled=False)\n", (36104, 36155), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((36227, 36299), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Subtract E₀"""', 'disabled': '(False)'}), "(value=False, description='Subtract E₀', disabled=False)\n", (36243, 36299), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((36373, 36448), 
'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Manual Scaling"""', 'disabled': '(False)'}), "(value=False, description='Manual Scaling', disabled=False)\n", (36389, 36448), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((39199, 39293), 'ipywidgets.widgets.ToggleButtons', 'widgets.ToggleButtons', ([], {'options': 'self.plot_choices', 'description': '"""Plot:"""', 'button_style': '"""info"""'}), "(options=self.plot_choices, description='Plot:',\n button_style='info')\n", (39220, 39293), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((39393, 39464), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""qubit info"""', 'disabled': '(False)'}), "(value=False, description='qubit info', disabled=False)\n", (39409, 39464), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((40443, 40497), 'ipywidgets.VBox', 'VBox', (['[*qubit_plot_interactive.children[0:base_index]]'], {}), '([*qubit_plot_interactive.children[0:base_index]])\n', (40447, 40497), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((40811, 40869), 'ipywidgets.VBox', 'VBox', (['[*qubit_plot_interactive.children[initial_index:-1]]'], {}), '([*qubit_plot_interactive.children[initial_index:-1]])\n', (40815, 40869), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((7166, 7320), 'scqubits.Grid1d', 'scq.Grid1d', ([], {'min_val': "self.grid_defaults['grid_min_val']", 'max_val': "self.grid_defaults['grid_max_val']", 'pt_count': "self.grid_defaults['grid_pt_count']"}), "(min_val=self.grid_defaults['grid_min_val'], max_val=self.\n grid_defaults['grid_max_val'], pt_count=self.grid_defaults['grid_pt_count']\n )\n", 
(7176, 7320), True, 'import scqubits as scq\n'), ((26922, 27249), 'ipywidgets.widgets.interactive', 'widgets.interactive', (['interactive_choice'], {'eigenvalue_states': 'which_widget', 'mode_value': "self.qubit_plot_options_widgets['mode_dropdown']", 'manual_scale_tf': "self.qubit_plot_options_widgets['manual_scale_checkbox']", 'scale_value': "self.qubit_plot_options_widgets['wavefunction_scale_slider']"}), "(interactive_choice, eigenvalue_states=which_widget,\n mode_value=self.qubit_plot_options_widgets['mode_dropdown'],\n manual_scale_tf=self.qubit_plot_options_widgets['manual_scale_checkbox'\n ], scale_value=self.qubit_plot_options_widgets[\n 'wavefunction_scale_slider'], **self.qubit_params_widgets)\n", (26941, 27249), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((27521, 27691), 'ipywidgets.widgets.interactive', 'widgets.interactive', (['interactive_choice'], {'eigenvalue_states': 'which_widget', 'mode_value': "self.qubit_plot_options_widgets['mode_dropdown']"}), "(interactive_choice, eigenvalue_states=which_widget,\n mode_value=self.qubit_plot_options_widgets['mode_dropdown'], **self.\n qubit_params_widgets)\n", (27540, 27691), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((40612, 40677), 'ipywidgets.VBox', 'VBox', (['[*qubit_plot_interactive.children[initial_index:end_index]]'], {}), '([*qubit_plot_interactive.children[initial_index:end_index]])\n', (40616, 40677), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((22758, 22790), 'ipywidgets.Layout', 'Layout', ([], {'justify_content': '"""center"""'}), "(justify_content='center')\n", (22764, 22790), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((30619, 30639), 'ipywidgets.Layout', 'Layout', ([], {'margin': '"""2px"""'}), "(margin='2px')\n", (30625, 
30639), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((30884, 30932), 'ipywidgets.Layout', 'Layout', ([], {'margin': '"""2px"""', 'justify_content': '"""flex-end"""'}), "(margin='2px', justify_content='flex-end')\n", (30890, 30932), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((32706, 32727), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""700px"""'}), "(width='700px')\n", (32712, 32727), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((32822, 32850), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""35px"""'}), "(width='35px')\n", (32836, 32850), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((33056, 33077), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""500px"""'}), "(width='500px')\n", (33062, 33077), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((39125, 39154), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""800px"""'}), "(width='800px')\n", (39139, 39154), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((37700, 37721), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""300px"""'}), "(width='300px')\n", (37706, 37721), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((38191, 38212), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""300px"""'}), "(width='300px')\n", (38197, 38212), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, Layout, Text, VBox, interactive, widgets\n'), ((38709, 38730), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""300px"""'}), "(width='300px')\n", (38715, 38730), False, 'from ipywidgets import AppLayout, HBox, IntSlider, Label, 
Layout, Text, VBox, interactive, widgets\n'), ((32935, 32945), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (32943, 32945), False, 'from pathlib import Path\n')] |
import tensorflow as tf
import tensorflow_datasets as tfds
from cvnn import layers
import numpy as np
import timeit
import datetime
from pdb import set_trace
import os
import plotly.graph_objects as go
import plotly
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
try:
PLOTLY = True
except ModuleNotFoundError:
PLOTLY = False
PLOTLY_CONFIG = {
'scrollZoom': True,
'editable': True
}
def cast_to_complex(image, label):
    """Cast the image tensor to complex64; the label passes through unchanged."""
    complex_image = tf.cast(image, tf.complex64)
    return complex_image, label
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in [0, 1]; label passes through."""
    scaled = tf.cast(image, tf.float32) / 255.
    return scaled, label
def get_dataset():
    """Load MNIST from tensorflow_datasets, normalize to [0, 1] floats and batch.

    Returns
    -------
    tuple of tf.data.Dataset
        (train pipeline, test pipeline), each normalized and batched by 128.
        Files are not shuffled so repeated runs see the same order.
    """
    (raw_train, raw_test), _ = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=False,
        as_supervised=True,
        with_info=True,
    )
    autotune = tf.data.experimental.AUTOTUNE
    train = raw_train.map(normalize_img, num_parallel_calls=autotune).batch(128)
    test = raw_test.map(normalize_img, num_parallel_calls=autotune).batch(128)
    return train, test
def tensorflow_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform', train_bias=True):
    """Train the real-valued Keras reference model on MNIST.

    Follows https://www.tensorflow.org/datasets/keras_example.

    Parameters
    ----------
    ds_train, ds_test: tf.data.Dataset
        Batched train / test pipelines as produced by get_dataset().
    verbose: bool
        Verbosity flag forwarded to model.fit.
    init1, init2: str
        Kernel initializers of the hidden and output Dense layers.
    train_bias: bool
        Whether the Dense layers use a bias term.

    Returns
    -------
    tuple
        (training history, elapsed seconds, dict with the initial 'weights',
        the pre-training 'loss' on one test batch and its 'gradients').
    """
    tf.random.set_seed(24)
    dense_kwargs = dict(dtype=np.float32, use_bias=train_bias)
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28, 1), dtype=np.float32),
        tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1, **dense_kwargs),
        tf.keras.layers.Dense(10, activation='softmax', kernel_initializer=init2, **dense_kwargs),
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    initial_weights = model.get_weights()
    # Snapshot the loss and gradients of one test batch before any training.
    batch, labels = next(iter(ds_test))
    with tf.GradientTape() as tape:
        pre_loss = model.compiled_loss(y_true=labels, y_pred=model(batch))
    logs = {
        'weights': initial_weights,
        'loss': pre_loss,
        'gradients': tape.gradient(pre_loss, model.trainable_weights),
    }
    t0 = timeit.default_timer()
    history = model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False
    )
    elapsed = timeit.default_timer() - t0
    return history, elapsed, logs
def own_complex_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    """Train the complex-valued cvnn model on MNIST.

    The complex layers are created with zero imaginary part and no bias so
    their real parts start out identical to the bias-free Keras baseline.

    Parameters
    ----------
    ds_train, ds_test: tf.data.Dataset
        Batched train / test pipelines as produced by get_dataset().
    verbose: bool
        Verbosity flag forwarded to model.fit.
    init1, init2: str
        Kernel initializers of the hidden and output ComplexDense layers.

    Returns
    -------
    tuple
        (training history, elapsed seconds, dict with the initial 'weights',
        the pre-training 'loss' on one test batch and its 'gradients').
    """
    tf.random.set_seed(24)
    complex_kwargs = dict(dtype=np.complex64, use_bias=False, init_technique='zero_imag')
    model = tf.keras.models.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.complex64),
        layers.ComplexDense(128, activation='cart_relu', kernel_initializer=init1, **complex_kwargs),
        layers.ComplexDense(10, activation='cast_to_real', kernel_initializer=init2, **complex_kwargs),
        tf.keras.layers.Activation('softmax'),
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    initial_weights = model.get_weights()
    # Snapshot the loss and gradients of one test batch before any training.
    batch, labels = next(iter(ds_test))
    with tf.GradientTape() as tape:
        pre_loss = model.compiled_loss(y_true=labels, y_pred=model(batch))
    logs = {
        'weights': initial_weights,
        'loss': pre_loss,
        'gradients': tape.gradient(pre_loss, model.trainable_weights),
    }
    t0 = timeit.default_timer()
    history = model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False
    )
    elapsed = timeit.default_timer() - t0
    return history, elapsed, logs
def own_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    """Train the real-valued (float32) cvnn model on MNIST.

    Parameters
    ----------
    ds_train, ds_test: tf.data.Dataset
        Batched train / test pipelines as produced by get_dataset().
    verbose: bool
        Verbosity flag forwarded to model.fit.
    init1, init2: str
        Kernel initializers of the hidden and output ComplexDense layers.

    Returns
    -------
    tuple
        (training history, elapsed seconds, dict with the initial 'weights',
        the pre-training 'loss' on one test batch and its 'gradients').
    """
    tf.random.set_seed(24)
    stack = [layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32)]
    stack.append(layers.ComplexDense(128, activation='cart_relu', dtype=np.float32, kernel_initializer=init1))
    stack.append(layers.ComplexDense(10, activation='softmax_real_with_abs', dtype=np.float32, kernel_initializer=init2))
    model = tf.keras.models.Sequential(stack)
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    initial_weights = model.get_weights()
    # Snapshot the loss and gradients of one test batch before any training.
    batch, labels = next(iter(ds_test))
    with tf.GradientTape() as tape:
        pre_loss = model.compiled_loss(y_true=labels, y_pred=model(batch))
    logs = {
        'weights': initial_weights,
        'loss': pre_loss,
        'gradients': tape.gradient(pre_loss, model.trainable_weights),
    }
    t0 = timeit.default_timer()
    history = model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False
    )
    elapsed = timeit.default_timer() - t0
    return history, elapsed, logs
def test_mnist():
    """Verify the cvnn models reproduce the TensorFlow/Keras baseline on MNIST.

    First compares the bias-free complex model (zero imaginary parts) against
    the bias-free Keras model, then the real-valued cvnn model against the
    default Keras model.

    Raises
    ------
    AssertionError
        If initial weights, initial loss, gradients or training histories
        differ between the baseline and the cvnn implementation.
    """
    assert not tf.test.gpu_device_name(), "Using GPU not good for debugging"
    ds_train, ds_test = get_dataset()
    # Don't use bias because the complex model gets a complex bias whose
    # imaginary part is not zero.
    # BUGFIX: the baseline trainer is named tensorflow_fit; the previous
    # call to keras_fit raised NameError.
    keras_hist, keras_time, keras_logs = tensorflow_fit(ds_train, ds_test, train_bias=False)
    keras_weigths = keras_logs['weights']
    own_cvnn_hist, own_cvnn_time, own_cvnn_logs = own_complex_fit(ds_train, ds_test)
    own_cvnn_weigths = own_cvnn_logs['weights']
    # Real parts of the complex kernels must equal the Keras kernels, and
    # the imaginary parts (odd-indexed weight arrays) must be zero.
    assert np.all([np.all(k_w == o_w) for k_w, o_w in zip(keras_weigths, own_cvnn_weigths[::2])])
    assert np.all([np.all(o_w == 0) for o_w in own_cvnn_weigths[1::2]])
    assert own_cvnn_logs['loss'] == keras_logs['loss']
    assert np.allclose(own_cvnn_logs['gradients'][2], keras_logs['gradients'][1])
    # Repeat with bias enabled against the real-valued cvnn model.
    keras_hist, keras_time, keras_logs = tensorflow_fit(ds_train, ds_test)
    keras_weigths = keras_logs['weights']
    own_hist, own_time, own_logs = own_fit(ds_train, ds_test)
    own_weigths = own_logs['weights']
    # BUGFIX: asserting a non-empty list is always True; wrap in np.all so
    # a single mismatching weight array actually fails the test.
    assert np.all([np.all(k_w == o_w) for k_w, o_w in zip(keras_weigths, own_weigths)])
    assert keras_hist.history == own_hist.history, f"\n{keras_hist.history}\n !=\n{own_hist.history}"
    assert own_logs['loss'] == keras_logs['loss']
if __name__ == "__main__":
    # Manual smoke run: train the Keras baseline (bias-free) and the cvnn
    # model on the same MNIST pipelines. The commented lines below were
    # used for interactive re-loading and for running the full comparison.
    # from importlib import reload
    # import os
    # import tensorflow
    # reload(tensorflow)
    # test_mnist()
    # test_mnist_montecarlo()
    ds_train, ds_test = get_dataset()
    tensorflow_fit(ds_train, ds_test, train_bias=False)
    own_fit(ds_train, ds_test)
| [
"tensorflow.random.set_seed",
"cvnn.layers.ComplexDense",
"tensorflow_datasets.load",
"tensorflow.keras.layers.Dense",
"timeit.default_timer",
"numpy.allclose",
"tensorflow.cast",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.optimizers.Adam",
"cvnn.layers.ComplexFlatten",
"tensorflow.... | [((675, 779), 'tensorflow_datasets.load', 'tfds.load', (['"""mnist"""'], {'split': "['train', 'test']", 'shuffle_files': '(False)', 'as_supervised': '(True)', 'with_info': '(True)'}), "('mnist', split=['train', 'test'], shuffle_files=False,\n as_supervised=True, with_info=True)\n", (684, 779), True, 'import tensorflow_datasets as tfds\n'), ((1501, 1523), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(24)'], {}), '(24)\n', (1519, 1523), True, 'import tensorflow as tf\n'), ((2542, 2564), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2562, 2564), False, 'import timeit\n'), ((2715, 2737), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2735, 2737), False, 'import timeit\n'), ((2885, 2907), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(24)'], {}), '(24)\n', (2903, 2907), True, 'import tensorflow as tf\n'), ((4119, 4141), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4139, 4141), False, 'import timeit\n'), ((4292, 4314), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4312, 4314), False, 'import timeit\n'), ((4454, 4476), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(24)'], {}), '(24)\n', (4472, 4476), True, 'import tensorflow as tf\n'), ((5416, 5438), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5436, 5438), False, 'import timeit\n'), ((5589, 5611), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5609, 5611), False, 'import timeit\n'), ((6367, 6437), 'numpy.allclose', 'np.allclose', (["own_cvnn_logs['gradients'][2]", "keras_logs['gradients'][1]"], {}), "(own_cvnn_logs['gradients'][2], keras_logs['gradients'][1])\n", (6378, 6437), True, 'import numpy as np\n'), ((445, 473), 'tensorflow.cast', 'tf.cast', (['image', 'tf.complex64'], {}), '(image, tf.complex64)\n', (452, 473), True, 'import tensorflow as tf\n'), ((2136, 2153), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2151, 2153), True, 
'import tensorflow as tf\n'), ((3721, 3738), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3736, 3738), True, 'import tensorflow as tf\n'), ((5010, 5027), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5025, 5027), True, 'import tensorflow as tf\n'), ((5686, 5711), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (5709, 5711), True, 'import tensorflow as tf\n'), ((6865, 6883), 'numpy.all', 'np.all', (['(k_w == o_w)'], {}), '(k_w == o_w)\n', (6871, 6883), True, 'import numpy as np\n'), ((578, 604), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (585, 604), True, 'import tensorflow as tf\n'), ((1627, 1693), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': '(28, 28, 1)', 'dtype': 'np.float32'}), '(input_shape=(28, 28, 1), dtype=np.float32)\n', (1650, 1693), True, 'import tensorflow as tf\n'), ((1701, 1815), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""', 'kernel_initializer': 'init1', 'dtype': 'np.float32', 'use_bias': 'train_bias'}), "(128, activation='relu', kernel_initializer=init1,\n dtype=np.float32, use_bias=train_bias)\n", (1722, 1815), True, 'import tensorflow as tf\n'), ((1819, 1935), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""', 'kernel_initializer': 'init2', 'dtype': 'np.float32', 'use_bias': 'train_bias'}), "(10, activation='softmax', kernel_initializer=init2,\n dtype=np.float32, use_bias=train_bias)\n", (1840, 1935), True, 'import tensorflow as tf\n'), ((2024, 2055), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (2048, 2055), True, 'import tensorflow as tf\n'), ((2957, 3023), 'cvnn.layers.ComplexFlatten', 'layers.ComplexFlatten', ([], {'input_shape': '(28, 28, 1)', 'dtype': 'np.complex64'}), '(input_shape=(28, 28, 1), dtype=np.complex64)\n', (2978, 3023), False, 
'from cvnn import layers\n'), ((3033, 3175), 'cvnn.layers.ComplexDense', 'layers.ComplexDense', (['(128)'], {'activation': '"""cart_relu"""', 'dtype': 'np.complex64', 'kernel_initializer': 'init1', 'use_bias': '(False)', 'init_technique': '"""zero_imag"""'}), "(128, activation='cart_relu', dtype=np.complex64,\n kernel_initializer=init1, use_bias=False, init_technique='zero_imag')\n", (3052, 3175), False, 'from cvnn import layers\n'), ((3209, 3353), 'cvnn.layers.ComplexDense', 'layers.ComplexDense', (['(10)'], {'activation': '"""cast_to_real"""', 'dtype': 'np.complex64', 'kernel_initializer': 'init2', 'use_bias': '(False)', 'init_technique': '"""zero_imag"""'}), "(10, activation='cast_to_real', dtype=np.complex64,\n kernel_initializer=init2, use_bias=False, init_technique='zero_imag')\n", (3228, 3353), False, 'from cvnn import layers\n'), ((3387, 3424), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""softmax"""'], {}), "('softmax')\n", (3413, 3424), True, 'import tensorflow as tf\n'), ((3517, 3548), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (3541, 3548), True, 'import tensorflow as tf\n'), ((4526, 4590), 'cvnn.layers.ComplexFlatten', 'layers.ComplexFlatten', ([], {'input_shape': '(28, 28, 1)', 'dtype': 'np.float32'}), '(input_shape=(28, 28, 1), dtype=np.float32)\n', (4547, 4590), False, 'from cvnn import layers\n'), ((4600, 4696), 'cvnn.layers.ComplexDense', 'layers.ComplexDense', (['(128)'], {'activation': '"""cart_relu"""', 'dtype': 'np.float32', 'kernel_initializer': 'init1'}), "(128, activation='cart_relu', dtype=np.float32,\n kernel_initializer=init1)\n", (4619, 4696), False, 'from cvnn import layers\n'), ((4702, 4810), 'cvnn.layers.ComplexDense', 'layers.ComplexDense', (['(10)'], {'activation': '"""softmax_real_with_abs"""', 'dtype': 'np.float32', 'kernel_initializer': 'init2'}), "(10, activation='softmax_real_with_abs', dtype=np.\n float32, kernel_initializer=init2)\n", (4721, 
4810), False, 'from cvnn import layers\n'), ((4898, 4929), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (4922, 4929), True, 'import tensorflow as tf\n'), ((6150, 6168), 'numpy.all', 'np.all', (['(k_w == o_w)'], {}), '(k_w == o_w)\n', (6156, 6168), True, 'import numpy as np\n'), ((6248, 6264), 'numpy.all', 'np.all', (['(o_w == 0)'], {}), '(o_w == 0)\n', (6254, 6264), True, 'import numpy as np\n')] |
# %%
from nltk.lm import MLE
from nltk.util import ngrams
from nltk.lm.preprocessing import pad_both_ends, padded_everygram_pipeline
from sklearn.model_selection import KFold
from sklearn import metrics
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import seaborn as sns
import simplejson as json
import math
sns.set()
# %%
def split_chars(arr):
    """Return the elements of *arr* (e.g. the characters of a string) as a list."""
    return list(arr)
# %%
# Map each logged control-event name to a numeric code. The integer part
# groups related events (0.x = session/task markers, 1.x = look, 2.x = turn,
# 3.x = move, 4.x = lift, 5.x = arm, 6.x = wrist, 7.x = gripper); negative
# codes mark meta events.
event_mapping = {
    'SessionStarted': 0.0,
    'Task1Started': 0.1,
    'Task1Ended': 0.2,
    'Task2Started': 0.3,
    'Task2Ended': 0.4,
    'Task3Started': 0.5,
    'Task3Ended': 0.6,
    'Task4Started': 0.7,
    'Task4Ended': 0.8,
    'LookUp': 1.0,
    'LookRight': 1.1,
    'LookDown': 1.2,
    'LookLeft': 1.3,
    'TurnRight': 2.0,
    'TurnLeft': 2.1,
    'MoveForward': 3.0,
    'MoveBackward': 3.1,
    'LiftUp': 4.0,
    'LiftDown': 4.1,
    'ArmRetract': 5.0,
    'ArmExtend': 5.1,
    'WristIn': 6.0,
    'WristOut': 6.1,
    'GripperClose': 7.0,
    'GripperOpen': 7.1,
    'ModeChange': -1.0,
    'SetCameraView': -1.1,
    'SpeedChange': -1.2
}
# char_mapping translates the numeric codes above into single characters so
# event sequences can be handled as strings of "words" by nltk.
with open('data/mapping.json', 'r') as f:
    char_mapping = json.load(f)
# Character encoding ModeChange; it is stripped from every sequence below.
mode_change_char = char_mapping[str(event_mapping['ModeChange'])]
# Sequences grouped as task -> user -> completion (lists of characters).
with open('data/grouped.json', 'r') as f:
    events_grouped = json.load(f)
# Flat character stream of all events, with ModeChange removed.
with open('data/simplified_all.txt', 'r') as f:
    events_all = f.read().replace(mode_change_char, "")
# Ngram hyperparameters: ngram order n is swept over [min_n, max_n)
min_n = 2
max_n = 20
# Confidence Threshold hyperparameters: swept over [min_ct, max_ct) in ct_step increments
min_ct = 0
max_ct = 1
ct_step = 0.05
# %%
# Each task has a different number of completions
# Task 1: 16
# Task 2: 10
# Task 3: 9
# Task 4: 7
# Grid search over ngram order and confidence threshold, per task, with
# 5-fold cross validation; one result row per (task, n, threshold).
ngram_results = []
for task in events_grouped:
    # Get data for this task: one filtered event sequence per completion.
    event_list = []
    for user in events_grouped[task]:
        for completion in events_grouped[task][user]:
            event_list.append(list(filter(lambda a: a != mode_change_char, completion)))
    # Set hyperparameters
    for n in range(min_n, max_n):
        for confidence_threshold in np.arange(min_ct, max_ct, ct_step):
            # Run cross validation with the given hyperparameters
            k = 5
            kf = KFold(n_splits = k, shuffle=True) # 5 fold cross validation is used so that the test/train split is 20%
            total_accuracy = 0
            total_precision = 0
            total_recall = 0
            total_f1 = 0
            total_threshold_accuracy = 0
            total_grams = 0
            total_above_threshold = 0
            total_threshold_correct = 0
            for i, split in enumerate(kf.split(event_list)):
                # NOTE(review): event_list is rebound to an ndarray inside
                # the fold loop while kf.split iterates the original list —
                # works, but would be cleaner hoisted above kf.split.
                event_list = np.array(event_list)
                train_data = event_list[split[0]]
                test_data = event_list[split[1]]
                train_processced, train_vocab = padded_everygram_pipeline(n, train_data)
                # Create and train model
                model = MLE(n)
                model.fit(train_processced, train_vocab)
                # Test model
                test_processced, test_vocab = padded_everygram_pipeline(n, test_data)
                test_vocab_unique = set(list(test_vocab))
                total_checked = 0
                total_correct = 0
                threshold_checked = 0
                threshold_correct = 0
                y_true = []
                y_pred = []
                for everygram in test_processced:
                    # I think there is one everygram generated per input seqence
                    for gram in everygram:
                        # We only want grams of length n
                        if len(gram) == n:
                            # Predict the next token from the (n-1)-gram
                            # history by scoring every candidate token.
                            history = list(gram[:n-1])
                            answer = gram[-1]
                            max_probability = -1
                            max_gram = "failed"
                            for g in test_vocab_unique:
                                s = model.score(g, history)
                                #print(gram, g, history, s)
                                if s > max_probability:
                                    max_probability = s
                                    max_gram = g
                            total_checked += 1
                            if max_probability >= confidence_threshold:
                                threshold_checked += 1
                            if max_gram == answer:
                                total_correct += 1
                                if max_probability >= confidence_threshold:
                                    threshold_correct += 1
                            y_true += [answer]
                            y_pred += [max_gram]
                            #print(gram, max_gram, max_probability)
                total_metrics = metrics.classification_report(y_true, y_pred, output_dict=True)
                total_precision += total_metrics['weighted avg']['precision']
                total_recall += total_metrics['weighted avg']['recall']
                total_f1 += total_metrics['weighted avg']['f1-score']
                total_grams += total_checked
                total_above_threshold += threshold_checked
                total_accuracy += total_correct / total_checked
                # NOTE(review): if no prediction clears the confidence
                # threshold in a fold, threshold_checked is 0 and this
                # raises ZeroDivisionError — TODO confirm this cannot occur.
                total_threshold_accuracy += threshold_correct / threshold_checked
                total_threshold_correct += threshold_correct
            # Averages over the k folds for this hyperparameter combination.
            ngram_results.append([task, n, confidence_threshold, total_accuracy/k, total_precision/k, total_recall/k, total_f1/k, total_threshold_accuracy/k, total_grams/k, total_above_threshold/k, total_threshold_correct/k])
# %%
# Collect the sweep results into a DataFrame and persist them as CSV.
df = pd.DataFrame(ngram_results, columns=['task_number', 'n', 'confidence_threshold', 'total_accuracy', 'total_precision', 'total_recall', 'total_f1-score', 'total_threshold_accuracy', 'total_grams', 'total_above_threshold', 'total_threshold_correct'])
#df['normalized_prediction_accuracy'] = (df['total_threshold_correct'] / df['total_above_threshold']) * df['total_above_threshold']
df.to_csv('data/ngram_results.csv', index=False)
df.head(100)
#%%
# Reload the CSV and derive additional columns, then overwrite the file.
df = pd.read_csv("data/ngram_results.csv")
df['total_threshold_incorrect'] = df['total_above_threshold'] - df['total_threshold_correct']
# Ratio of correct to incorrect above-threshold predictions.
df['normalized_accuracy'] = df['total_threshold_correct'] / df['total_threshold_incorrect']
#df['fixed_accuracy'] = (df['total_above_threshold'] * df['total_threshold_accuracy']) / df['total_above_threshold']
#df['normalized_predicted_accuracy'] = (df['fixed_accuracy'] * df['total_above_threshold'])
df.to_csv('data/ngram_results.csv', index=False)
# %%
# Fraction of test grams for which a prediction cleared the threshold.
df['percent_predictions_total'] = df['total_above_threshold'] / df['total_grams']
r = 4
c = 4
# One labelled figure row per task, four panels per task.
fig, big_axes = plt.subplots(nrows=r, ncols=1, figsize=(30, 25))
plt.subplots_adjust(hspace=0.4)
for row, big_ax in enumerate(big_axes, start=1):
    big_ax.set_title(f"Task {row}", fontsize=16, y=1.08)
    # Turn off axis lines and ticks of the big subplot
    # obs alpha is 0 in RGBA string!
    big_ax.tick_params(labelcolor=(1.,1.,1., 0.0), top='off', bottom='off', left='off', right='off')
    # removes the white frame
    big_ax._frameon = False
for i in range(0, 4):
    filtered_data = df[df.task_number == i+1].round({'confidence_threshold': 2})
    # Pivot to (ngram size x confidence threshold) grids for the heatmaps.
    n_confidence_accuracy = filtered_data.pivot(index='n', columns='confidence_threshold', values='total_threshold_accuracy')
    n_confidence_removed = filtered_data.pivot(index='n', columns='confidence_threshold', values='total_above_threshold')
    # Log color scale for the prediction counts, with decade tick marks.
    log_norm = LogNorm(vmin=n_confidence_removed.min().min(), vmax=n_confidence_removed.max().max())
    cbar_ticks = [math.pow(10, i) for i in range(math.floor(math.log10(n_confidence_removed.min().min())), 1+math.ceil(math.log10(n_confidence_removed.max().max())))]
    # Panel 1: accuracy heatmap.
    g = sns.heatmap(n_confidence_accuracy, ax=fig.add_subplot(r,c,i*c+1))
    g.set_title('N & Confidence Threshold vs. Accuracy')
    g.set_xticklabels(g.get_xticklabels(), rotation=45)
    g.set_ylabel('Ngram Size')
    g.set_xlabel('Confidence Threshold')
    # Panel 2: number of predictions made, log-scaled.
    h = sns.heatmap(n_confidence_removed, ax=fig.add_subplot(r,c,i*c+2), norm=log_norm, cbar_kws={"ticks": cbar_ticks})
    h.set_title('N & Confidence Threshold vs. # of Predictions Made')
    h.set_xticklabels(h.get_xticklabels(), rotation=45)
    h.set_ylabel('Ngram Size')
    h.set_xlabel('Confidence Threshold')
    # Panel 3: accuracy vs. absolute number of correct predictions.
    j = sns.scatterplot(data=filtered_data, x='total_threshold_accuracy', y='total_threshold_correct', hue='n', ax=fig.add_subplot(r,c,i*c+3))
    j.set_title('Accuracy vs. # of Correct Predictions Made')
    j.set_xlabel('Accuracy')
    j.set_ylabel('# of Correct Predictions Made')
    # Panel 4: accuracy vs. fraction of grams that got a prediction.
    k = sns.scatterplot(data=filtered_data, x='total_threshold_accuracy', y='percent_predictions_total', hue='n', ax=fig.add_subplot(r,c,i*c+4))
    k.set_title(f'Accuracy vs. % of Predictions Made')
    k.set_xlabel('Accuracy')
    k.set_ylabel('% of Predictions Made')
plt.show()
fig.savefig("data/ngram_results.png")
# %%
| [
"pandas.DataFrame",
"matplotlib.pyplot.show",
"math.pow",
"pandas.read_csv",
"nltk.lm.preprocessing.padded_everygram_pipeline",
"nltk.lm.MLE",
"matplotlib.pyplot.subplots",
"sklearn.model_selection.KFold",
"simplejson.load",
"sklearn.metrics.classification_report",
"numpy.arange",
"numpy.array... | [((375, 384), 'seaborn.set', 'sns.set', ([], {}), '()\n', (382, 384), True, 'import seaborn as sns\n'), ((5692, 5951), 'pandas.DataFrame', 'pd.DataFrame', (['ngram_results'], {'columns': "['task_number', 'n', 'confidence_threshold', 'total_accuracy',\n 'total_precision', 'total_recall', 'total_f1-score',\n 'total_threshold_accuracy', 'total_grams', 'total_above_threshold',\n 'total_threshold_correct']"}), "(ngram_results, columns=['task_number', 'n',\n 'confidence_threshold', 'total_accuracy', 'total_precision',\n 'total_recall', 'total_f1-score', 'total_threshold_accuracy',\n 'total_grams', 'total_above_threshold', 'total_threshold_correct'])\n", (5704, 5951), True, 'import pandas as pd\n'), ((6144, 6181), 'pandas.read_csv', 'pd.read_csv', (['"""data/ngram_results.csv"""'], {}), "('data/ngram_results.csv')\n", (6155, 6181), True, 'import pandas as pd\n'), ((6743, 6791), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'r', 'ncols': '(1)', 'figsize': '(30, 25)'}), '(nrows=r, ncols=1, figsize=(30, 25))\n', (6755, 6791), True, 'import matplotlib.pyplot as plt\n'), ((6792, 6823), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.4)'}), '(hspace=0.4)\n', (6811, 6823), True, 'import matplotlib.pyplot as plt\n'), ((8954, 8964), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8962, 8964), True, 'import matplotlib.pyplot as plt\n'), ((1173, 1185), 'simplejson.load', 'json.load', (['f'], {}), '(f)\n', (1182, 1185), True, 'import simplejson as json\n'), ((1317, 1329), 'simplejson.load', 'json.load', (['f'], {}), '(f)\n', (1326, 1329), True, 'import simplejson as json\n'), ((2043, 2077), 'numpy.arange', 'np.arange', (['min_ct', 'max_ct', 'ct_step'], {}), '(min_ct, max_ct, ct_step)\n', (2052, 2077), True, 'import numpy as np\n'), ((7657, 7672), 'math.pow', 'math.pow', (['(10)', 'i'], {}), '(10, i)\n', (7665, 7672), False, 'import math\n'), ((2180, 2211), 'sklearn.model_selection.KFold', 'KFold', ([], 
{'n_splits': 'k', 'shuffle': '(True)'}), '(n_splits=k, shuffle=True)\n', (2185, 2211), False, 'from sklearn.model_selection import KFold\n'), ((2641, 2661), 'numpy.array', 'np.array', (['event_list'], {}), '(event_list)\n', (2649, 2661), True, 'import numpy as np\n'), ((2811, 2851), 'nltk.lm.preprocessing.padded_everygram_pipeline', 'padded_everygram_pipeline', (['n', 'train_data'], {}), '(n, train_data)\n', (2836, 2851), False, 'from nltk.lm.preprocessing import pad_both_ends, padded_everygram_pipeline\n'), ((2918, 2924), 'nltk.lm.MLE', 'MLE', (['n'], {}), '(n)\n', (2921, 2924), False, 'from nltk.lm import MLE\n'), ((3058, 3097), 'nltk.lm.preprocessing.padded_everygram_pipeline', 'padded_everygram_pipeline', (['n', 'test_data'], {}), '(n, test_data)\n', (3083, 3097), False, 'from nltk.lm.preprocessing import pad_both_ends, padded_everygram_pipeline\n'), ((4857, 4920), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['y_true', 'y_pred'], {'output_dict': '(True)'}), '(y_true, y_pred, output_dict=True)\n', (4886, 4920), False, 'from sklearn import metrics\n')] |
# -*- coding: utf-8 -*-
from numpy import array
def comp_height(self):
    """Compute the height of the Magnet.

    Caution, the bottom of the Magnet is an Arc, so the height is taken
    numerically as the radial extent of the discretized outline.

    Parameters
    ----------
    self : Magnet
        A Magnet object

    Returns
    -------
    Htot: float
        Height of the Magnet [m]
    """
    surface = self.build_geometry()
    # Discretize the first surface outline into complex points and take
    # the spread of their distances from the origin.
    points = array(surface[0].discretize(200))
    radii = abs(points)
    return max(radii) - min(radii)
| [
"numpy.array"
] | [((440, 457), 'numpy.array', 'array', (['point_list'], {}), '(point_list)\n', (445, 457), False, 'from numpy import array\n')] |
"""
製作者:TODA
学習時に逐次行うテストクラスの定義
"""
import wave
import chainer
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from vrc_project.world_and_wave import wave2world, world2wave
from world_and_wave import fft
class TestModel(chainer.training.Extension):
    """
    Trainer extension that converts a fixed source utterance with the
    current generator and reports spectral scores (and optionally saves
    the converted wave plus a summary figure).
    """
    def __init__(self, _trainer, _args, data, _sp_input_length, only_score):
        """
        Pre-compute everything that stays constant across evaluations.

        Parameters
        ----------
        _trainer: chainer.training.Trainer
            Trainer whose updater exposes the generator under test (gen_ab).
        _args: dict
            Run configuration; "version" (model name) and "wave_otp_dir"
            (output directory) are used here.
        data: tuple
            (source_wave, target_wave, voice_profile). Waves are float
            arrays in [-1.0, 1.0]; voice_profile supplies the f0 conversion
            constants "pre_sub", "pitch_rate" and "post_add".
        _sp_input_length: int
            Time length (frames) of one model input window.
        only_score: str or None
            When None, skip saving audio/images and only report scores.
        """
        self.model_name = _args["version"]
        mpl.rcParams["agg.path.chunksize"] = 100000
        self.dir = _args["wave_otp_dir"]
        self.model_en = _trainer.updater.gen_ab
        self.target = data[1]
        # WORLD analysis of the source and target utterances.
        source_f0, source_sp, source_ap = wave2world(data[0].astype(np.float64))
        _, self.source_sp_l, _ = wave2world(data[1].astype(np.float64))
        self.image_power_l = fft(data[1])
        self.source_ap = source_ap
        self.length = source_f0.shape[0]
        # Left-pad (edge mode) so the frame count is a multiple of the
        # model's input window length.
        padding_size = abs(_sp_input_length - source_sp.shape[0] % _sp_input_length)
        ch = source_sp.shape[1]
        source_sp = np.pad(source_sp, ((padding_size, 0), (0, 0)), "edge").reshape(-1, _sp_input_length, ch)
        source_sp = source_sp.astype(np.float32).reshape(-1, _sp_input_length, ch, 1)
        self.bs_sp = source_sp.shape[0]
        # Zero-pad the batch axis up to the next power of two; the extra
        # rows are dropped again in convert().
        r = int(2 ** np.ceil(np.log2(source_sp.shape[0]))) - source_sp.shape[0]
        source_sp = np.pad(source_sp, ((0, r), (0, 0), (0, 0), (0, 0)), "constant")
        source_ap = np.pad(source_ap, ((padding_size, 0), (0, 0)), "edge").reshape(-1, _sp_input_length, 1025)
        source_ap = np.transpose(source_ap, [0, 2, 1]).astype(np.float32).reshape(-1, 1025, _sp_input_length, 1)
        # Pad the target envelope the same way for frame-wise comparison.
        padding_size = abs(_sp_input_length - self.source_sp_l.shape[0] % _sp_input_length)
        self.source_sp_l = np.pad(self.source_sp_l, ((padding_size, 0), (0, 0)), "edge").reshape(-1, _sp_input_length, ch)
        self.source_sp_l = self.source_sp_l.astype(np.float32).reshape(-1, ch)
        self.source_pp = chainer.backends.cuda.to_gpu(source_sp)
        # Linear f0 conversion from the speaker profile; multiplying by
        # sign(f0) keeps unvoiced frames (f0 == 0) at zero.
        self.source_f0 = (source_f0 - data[2]["pre_sub"]) * np.sign(source_f0) * data[2]["pitch_rate"] + data[2]["post_add"] * np.sign(source_f0)
        self.wave_len = data[0].shape[0]
        self.only_score = only_score
        super(TestModel, self).initialize(_trainer)
    def convert(self):
        """
        Run the generator on the pre-processed source and synthesize a wave.

        Returns
        -------
        otp: np.ndarray
            Converted waveform, trimmed to the source wave length.
        score_m: float
            Mean squared error between converted and target envelopes.
        result: np.ndarray
            Converted spectral envelope, shape (frames, channels).
        """
        # BUGFIX: chainer.using_config returns a context manager; calling it
        # without `with` never changed the configuration, so train mode was
        # never actually disabled during inference.
        with chainer.using_config("train", False):
            result = self.model_en(self.source_pp)
        result = chainer.backends.cuda.to_cpu(result.data)
        result = result[:self.bs_sp]  # drop the power-of-two batch padding
        ch = result.shape[2]
        result = result.reshape(-1, ch)
        score_m = np.mean((result - self.source_sp_l)**2)
        result_wave = world2wave(self.source_f0, result[-self.length:], self.source_ap)
        otp = result_wave.reshape(-1)
        # Trim any synthesis overhang from the front of the wave.
        head_cut_num = otp.shape[0]-self.wave_len
        if head_cut_num > 0:
            otp = otp[head_cut_num:]
        return otp, score_m, result
    def __call__(self, _trainer):
        """
        Evaluate the generator: report losses and optionally save outputs.

        Reports "env_test_loss" (spectral-envelope MSE) always, and when
        only_score is not None also "test_loss" (power-spectrum MSE), a
        summary PNG and 16-bit mono 44.1 kHz WAV files.

        Parameters
        ----------
        _trainer: chainer.training.Trainer
            Trainer providing the current iteration counter.
        """
        out_put, score_raw, _ = self.convert()
        chainer.report({"env_test_loss": score_raw})
        if self.only_score is not None:
            out_puts = (out_put*32767).astype(np.int16)
            # Power spectrum of the converted wave.
            # BUGFIX: this was previously computed twice back-to-back.
            image_power_spec = fft(out_put)
            n = min(image_power_spec.shape[0], self.image_power_l.shape[0])
            score_fft = np.mean((image_power_spec[:n]-self.image_power_l[:n]) ** 2)
            chainer.report({"test_loss": score_fft})
            # Summary figure: spectrogram (top), waveform (middle), and
            # mean/std spectral differences against the target (bottom).
            figure = plt.figure(figsize=(8, 5))
            gs = mpl.gridspec.GridSpec(nrows=5, ncols=2)
            plt.subplots_adjust(hspace=0)
            figure.add_subplot(gs[:3, :])
            plt.subplots_adjust(top=0.95)
            plt.title(self.model_name, pad=0.2)
            _insert_image = np.transpose(image_power_spec, (1, 0))
            plt.tick_params(labelbottom=False)
            plt.imshow(_insert_image, vmin=-1.0, vmax=1.0, aspect="auto")
            ax = figure.add_subplot(gs[3:4, :])
            plt.tick_params(labeltop=False, labelbottom=False)
            plt.margins(x=0)
            plt.ylim(-1, 1)
            ax.grid(which="major", axis="x", color="blue", alpha=0.8, linestyle="--", linewidth=1)
            _t = out_put.shape[0] / 44100
            _x = np.linspace(0, _t, out_put.shape[0])
            plt.plot(_x, out_put)
            figure.add_subplot(gs[4:, :])
            plt.plot(np.abs(np.mean(image_power_spec, axis=0)-np.mean(self.image_power_l, axis=0)))
            plt.plot(np.abs(np.std(image_power_spec, axis=0)-np.std(self.image_power_l, axis=0)))
            plt.tick_params(labelbottom=False)
            # BUGFIX: header cell typo "iteraiton" -> "iteration".
            table = plt.table(cellText=[["iteration", "fft_diff", "spenv_diff"], ["%d (%s)" % (_trainer.updater.iteration, self.only_score), "%f" % score_fft, "%f" % score_raw]])
            table.auto_set_font_size(False)
            table.set_fontsize(8)
            plt.savefig("%s%05d.png" % (self.dir, _trainer.updater.iteration))
            plt.savefig("./latest.png")
            # Save converted audio as 16-bit mono 44.1 kHz WAV files, both
            # under a versioned name and as "latest.wav".
            path_save = self.dir + str(self.model_name)+"_"+ str(_trainer.updater.iteration).zfill(5)
            voiced = out_puts.astype(np.int16)
            wave_data = wave.open(path_save + ".wav", 'wb')
            wave_data.setnchannels(1)
            wave_data.setsampwidth(2)
            wave_data.setframerate(44100)
            wave_data.writeframes(voiced.reshape(-1).tobytes())
            wave_data.close()
            wave_data = wave.open("latest.wav", 'wb')
            wave_data.setnchannels(1)
            wave_data.setsampwidth(2)
            wave_data.setframerate(44100)
            wave_data.writeframes(voiced.reshape(-1).tobytes())
            wave_data.close()
            plt.clf()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.clf",
"vrc_project.world_and_wave.world2wave",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.tick_params",
"numpy.pad",
"numpy.std",
"matplotlib.pyplot.imshow",
"numpy.transpose",
"world_and_wave.fft",
... | [((1334, 1346), 'world_and_wave.fft', 'fft', (['data[1]'], {}), '(data[1])\n', (1337, 1346), False, 'from world_and_wave import fft\n'), ((1875, 1938), 'numpy.pad', 'np.pad', (['source_sp', '((0, r), (0, 0), (0, 0), (0, 0))', '"""constant"""'], {}), "(source_sp, ((0, r), (0, 0), (0, 0), (0, 0)), 'constant')\n", (1881, 1938), True, 'import numpy as np\n'), ((2482, 2521), 'chainer.backends.cuda.to_gpu', 'chainer.backends.cuda.to_gpu', (['source_sp'], {}), '(source_sp)\n', (2510, 2521), False, 'import chainer\n'), ((2947, 2983), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (2967, 2983), False, 'import chainer\n'), ((3048, 3089), 'chainer.backends.cuda.to_cpu', 'chainer.backends.cuda.to_cpu', (['result.data'], {}), '(result.data)\n', (3076, 3089), False, 'import chainer\n'), ((3214, 3255), 'numpy.mean', 'np.mean', (['((result - self.source_sp_l) ** 2)'], {}), '((result - self.source_sp_l) ** 2)\n', (3221, 3255), True, 'import numpy as np\n'), ((3276, 3341), 'vrc_project.world_and_wave.world2wave', 'world2wave', (['self.source_f0', 'result[-self.length:]', 'self.source_ap'], {}), '(self.source_f0, result[-self.length:], self.source_ap)\n', (3286, 3341), False, 'from vrc_project.world_and_wave import wave2world, world2wave\n'), ((3504, 3539), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(True)'], {}), "('train', True)\n", (3524, 3539), False, 'import chainer\n'), ((3885, 3929), 'chainer.report', 'chainer.report', (["{'env_test_loss': score_raw}"], {}), "({'env_test_loss': score_raw})\n", (3899, 3929), False, 'import chainer\n'), ((4057, 4069), 'world_and_wave.fft', 'fft', (['out_put'], {}), '(out_put)\n', (4060, 4069), False, 'from world_and_wave import fft\n'), ((4142, 4154), 'world_and_wave.fft', 'fft', (['out_put'], {}), '(out_put)\n', (4145, 4154), False, 'from world_and_wave import fft\n'), ((4255, 4316), 'numpy.mean', 'np.mean', (['((image_power_spec[:n] - self.image_power_l[:n]) 
** 2)'], {}), '((image_power_spec[:n] - self.image_power_l[:n]) ** 2)\n', (4262, 4316), True, 'import numpy as np\n'), ((4327, 4367), 'chainer.report', 'chainer.report', (["{'test_loss': score_fft}"], {}), "({'test_loss': score_fft})\n", (4341, 4367), False, 'import chainer\n'), ((4431, 4457), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (4441, 4457), True, 'import matplotlib.pyplot as plt\n'), ((4475, 4514), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', ([], {'nrows': '(5)', 'ncols': '(2)'}), '(nrows=5, ncols=2)\n', (4496, 4514), True, 'import matplotlib as mpl\n'), ((4527, 4556), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (4546, 4556), True, 'import matplotlib.pyplot as plt\n'), ((4611, 4640), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)'}), '(top=0.95)\n', (4630, 4640), True, 'import matplotlib.pyplot as plt\n'), ((4653, 4688), 'matplotlib.pyplot.title', 'plt.title', (['self.model_name'], {'pad': '(0.2)'}), '(self.model_name, pad=0.2)\n', (4662, 4688), True, 'import matplotlib.pyplot as plt\n'), ((4717, 4755), 'numpy.transpose', 'np.transpose', (['image_power_spec', '(1, 0)'], {}), '(image_power_spec, (1, 0))\n', (4729, 4755), True, 'import numpy as np\n'), ((4768, 4802), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelbottom': '(False)'}), '(labelbottom=False)\n', (4783, 4802), True, 'import matplotlib.pyplot as plt\n'), ((4815, 4876), 'matplotlib.pyplot.imshow', 'plt.imshow', (['_insert_image'], {'vmin': '(-1.0)', 'vmax': '(1.0)', 'aspect': '"""auto"""'}), "(_insert_image, vmin=-1.0, vmax=1.0, aspect='auto')\n", (4825, 4876), True, 'import matplotlib.pyplot as plt\n'), ((4937, 4987), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labeltop': '(False)', 'labelbottom': '(False)'}), '(labeltop=False, labelbottom=False)\n', (4952, 4987), True, 'import matplotlib.pyplot as plt\n'), 
((5000, 5016), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)'}), '(x=0)\n', (5011, 5016), True, 'import matplotlib.pyplot as plt\n'), ((5029, 5044), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (5037, 5044), True, 'import matplotlib.pyplot as plt\n'), ((5203, 5239), 'numpy.linspace', 'np.linspace', (['(0)', '_t', 'out_put.shape[0]'], {}), '(0, _t, out_put.shape[0])\n', (5214, 5239), True, 'import numpy as np\n'), ((5252, 5273), 'matplotlib.pyplot.plot', 'plt.plot', (['_x', 'out_put'], {}), '(_x, out_put)\n', (5260, 5273), True, 'import matplotlib.pyplot as plt\n'), ((5526, 5560), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelbottom': '(False)'}), '(labelbottom=False)\n', (5541, 5560), True, 'import matplotlib.pyplot as plt\n'), ((5581, 5748), 'matplotlib.pyplot.table', 'plt.table', ([], {'cellText': "[['iteraiton', 'fft_diff', 'spenv_diff'], ['%d (%s)' % (_trainer.updater.\n iteration, self.only_score), '%f' % score_fft, '%f' % score_raw]]"}), "(cellText=[['iteraiton', 'fft_diff', 'spenv_diff'], ['%d (%s)' % (\n _trainer.updater.iteration, self.only_score), '%f' % score_fft, '%f' %\n score_raw]])\n", (5590, 5748), True, 'import matplotlib.pyplot as plt\n'), ((5830, 5896), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s%05d.png' % (self.dir, _trainer.updater.iteration))"], {}), "('%s%05d.png' % (self.dir, _trainer.updater.iteration))\n", (5841, 5896), True, 'import matplotlib.pyplot as plt\n'), ((5909, 5936), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./latest.png"""'], {}), "('./latest.png')\n", (5920, 5936), True, 'import matplotlib.pyplot as plt\n'), ((6141, 6176), 'wave.open', 'wave.open', (["(path_save + '.wav')", '"""wb"""'], {}), "(path_save + '.wav', 'wb')\n", (6150, 6176), False, 'import wave\n'), ((6413, 6442), 'wave.open', 'wave.open', (['"""latest.wav"""', '"""wb"""'], {}), "('latest.wav', 'wb')\n", (6422, 6442), False, 'import wave\n'), ((6667, 6676), 'matplotlib.pyplot.clf', 
'plt.clf', ([], {}), '()\n', (6674, 6676), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1614), 'numpy.pad', 'np.pad', (['source_sp', '((padding_size, 0), (0, 0))', '"""edge"""'], {}), "(source_sp, ((padding_size, 0), (0, 0)), 'edge')\n", (1566, 1614), True, 'import numpy as np\n'), ((1959, 2013), 'numpy.pad', 'np.pad', (['source_ap', '((padding_size, 0), (0, 0))', '"""edge"""'], {}), "(source_ap, ((padding_size, 0), (0, 0)), 'edge')\n", (1965, 2013), True, 'import numpy as np\n'), ((2282, 2343), 'numpy.pad', 'np.pad', (['self.source_sp_l', '((padding_size, 0), (0, 0))', '"""edge"""'], {}), "(self.source_sp_l, ((padding_size, 0), (0, 0)), 'edge')\n", (2288, 2343), True, 'import numpy as np\n'), ((2649, 2667), 'numpy.sign', 'np.sign', (['source_f0'], {}), '(source_f0)\n', (2656, 2667), True, 'import numpy as np\n'), ((2582, 2600), 'numpy.sign', 'np.sign', (['source_f0'], {}), '(source_f0)\n', (2589, 2600), True, 'import numpy as np\n'), ((1804, 1831), 'numpy.log2', 'np.log2', (['source_sp.shape[0]'], {}), '(source_sp.shape[0])\n', (1811, 1831), True, 'import numpy as np\n'), ((2070, 2104), 'numpy.transpose', 'np.transpose', (['source_ap', '[0, 2, 1]'], {}), '(source_ap, [0, 2, 1])\n', (2082, 2104), True, 'import numpy as np\n'), ((5344, 5377), 'numpy.mean', 'np.mean', (['image_power_spec'], {'axis': '(0)'}), '(image_power_spec, axis=0)\n', (5351, 5377), True, 'import numpy as np\n'), ((5378, 5413), 'numpy.mean', 'np.mean', (['self.image_power_l'], {'axis': '(0)'}), '(self.image_power_l, axis=0)\n', (5385, 5413), True, 'import numpy as np\n'), ((5444, 5476), 'numpy.std', 'np.std', (['image_power_spec'], {'axis': '(0)'}), '(image_power_spec, axis=0)\n', (5450, 5476), True, 'import numpy as np\n'), ((5477, 5511), 'numpy.std', 'np.std', (['self.image_power_l'], {'axis': '(0)'}), '(self.image_power_l, axis=0)\n', (5483, 5511), True, 'import numpy as np\n')] |
import numpy as np
import cv2
class moving_detector:
    """Optical-flow based motion detector built on OpenCV's Farneback flow."""

    def __init__(self):
        # No state is required; the original assigned an unused dummy local.
        pass

    def draw_flow(self, img, flow, step=16):
        """Draw the flow field on a grayscale image as a grid of green vectors.

        The first parameter was misspelled ``slef`` in the original; it is
        positional, so renaming it to ``self`` is call-compatible.
        """
        h, w = img.shape[:2]
        # BUG FIX: step / 2 is a float in Python 3, making np.mgrid yield float
        # coordinates, which are invalid as array indices. Use floor division.
        y, x = np.mgrid[step // 2:h:step, step // 2:w:step].reshape(2, -1)
        fx, fy = flow[y, x].T
        lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
        lines = np.int32(lines + 0.5)
        vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        cv2.polylines(vis, lines, 0, (0, 255, 0))
        for (x1, y1), (x2, y2) in lines:
            cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
        return vis

    def warp_flow(self, img, flow):
        """Warp *img* backwards along *flow* (per-pixel remap)."""
        h, w = flow.shape[:2]
        flow = -flow
        flow[:, :, 0] += np.arange(w)
        flow[:, :, 1] += np.arange(h)[:, np.newaxis]
        res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
        return res

    def draw_hsv(self, flow):
        """Visualise flow as an HSV image: hue = direction, value = magnitude."""
        h, w = flow.shape[:2]
        fx, fy = flow[:, :, 0], flow[:, :, 1]
        ang = np.arctan2(fy, fx) + np.pi
        v = np.sqrt(fx * fx + fy * fy)
        hsv = np.zeros((h, w, 3), np.uint8)
        hsv[..., 0] = ang * (180 / np.pi / 2)
        hsv[..., 1] = 255
        hsv[..., 2] = np.minimum(v * 4, 255)
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        return bgr

    def show_flow(self, prev, cur):
        """Compute dense flow between two frames; return (flow image, frame
        with bounding boxes around plausible moving objects)."""
        prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
        img = cur
        vis = img.copy()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # apply color mask here
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, 0.5, 5, 15, 3, 5, 1.1, cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
        flow_img = self.draw_flow(gray, flow)
        gray1 = cv2.cvtColor(self.draw_hsv(flow), cv2.COLOR_BGR2GRAY)
        thresh = cv2.threshold(gray1, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # NOTE(review): 2-tuple unpacking matches OpenCV 2.x/4.x;
        # OpenCV 3.x returns three values here — confirm the installed version.
        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            # modify parameters for detecting close objects.
            (x, y, w, h) = cv2.boundingRect(c)
            if w > 100 and h > 100 and w < 200 and h < 300:
                cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 0, 255), 4)
        return (flow_img, vis)
| [
"numpy.minimum",
"cv2.circle",
"cv2.polylines",
"numpy.arctan2",
"cv2.dilate",
"cv2.cvtColor",
"cv2.threshold",
"numpy.zeros",
"cv2.remap",
"cv2.rectangle",
"numpy.arange",
"numpy.int32",
"cv2.calcOpticalFlowFarneback",
"cv2.boundingRect",
"numpy.vstack",
"numpy.sqrt"
] | [((355, 376), 'numpy.int32', 'np.int32', (['(lines + 0.5)'], {}), '(lines + 0.5)\n', (363, 376), True, 'import numpy as np\n'), ((391, 428), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (403, 428), False, 'import cv2\n'), ((437, 478), 'cv2.polylines', 'cv2.polylines', (['vis', 'lines', '(0)', '(0, 255, 0)'], {}), '(vis, lines, 0, (0, 255, 0))\n', (450, 478), False, 'import cv2\n'), ((709, 721), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (718, 721), True, 'import numpy as np\n'), ((789, 833), 'cv2.remap', 'cv2.remap', (['img', 'flow', 'None', 'cv2.INTER_LINEAR'], {}), '(img, flow, None, cv2.INTER_LINEAR)\n', (798, 833), False, 'import cv2\n'), ((1012, 1038), 'numpy.sqrt', 'np.sqrt', (['(fx * fx + fy * fy)'], {}), '(fx * fx + fy * fy)\n', (1019, 1038), True, 'import numpy as np\n'), ((1053, 1082), 'numpy.zeros', 'np.zeros', (['(h, w, 3)', 'np.uint8'], {}), '((h, w, 3), np.uint8)\n', (1061, 1082), True, 'import numpy as np\n'), ((1177, 1199), 'numpy.minimum', 'np.minimum', (['(v * 4)', '(255)'], {}), '(v * 4, 255)\n', (1187, 1199), True, 'import numpy as np\n'), ((1214, 1250), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (1226, 1250), False, 'import cv2\n'), ((1326, 1364), 'cv2.cvtColor', 'cv2.cvtColor', (['prev', 'cv2.COLOR_BGR2GRAY'], {}), '(prev, cv2.COLOR_BGR2GRAY)\n', (1338, 1364), False, 'import cv2\n'), ((1423, 1460), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1435, 1460), False, 'import cv2\n'), ((1508, 1612), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prevgray', 'gray', '(0.5)', '(5)', '(15)', '(3)', '(5)', '(1.1)', 'cv2.OPTFLOW_FARNEBACK_GAUSSIAN'], {}), '(prevgray, gray, 0.5, 5, 15, 3, 5, 1.1, cv2.\n OPTFLOW_FARNEBACK_GAUSSIAN)\n', (1536, 1612), False, 'import cv2\n'), ((1810, 1848), 'cv2.dilate', 'cv2.dilate', (['thresh', 'None'], {'iterations': '(2)'}), 
'(thresh, None, iterations=2)\n', (1820, 1848), False, 'import cv2\n'), ((532, 577), 'cv2.circle', 'cv2.circle', (['vis', '(x1, y1)', '(1)', '(0, 255, 0)', '(-1)'], {}), '(vis, (x1, y1), 1, (0, 255, 0), -1)\n', (542, 577), False, 'import cv2\n'), ((747, 759), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (756, 759), True, 'import numpy as np\n'), ((973, 991), 'numpy.arctan2', 'np.arctan2', (['fy', 'fx'], {}), '(fy, fx)\n', (983, 991), True, 'import numpy as np\n'), ((1741, 1789), 'cv2.threshold', 'cv2.threshold', (['gray1', '(25)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray1, 25, 255, cv2.THRESH_BINARY)\n', (1754, 1789), False, 'import cv2\n'), ((2143, 2162), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (2159, 2162), False, 'import cv2\n'), ((2239, 2297), 'cv2.rectangle', 'cv2.rectangle', (['vis', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(4)'], {}), '(vis, (x, y), (x + w, y + h), (0, 0, 255), 4)\n', (2252, 2297), False, 'import cv2\n'), ((285, 318), 'numpy.vstack', 'np.vstack', (['[x, y, x + fx, y + fy]'], {}), '([x, y, x + fx, y + fy])\n', (294, 318), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod,ABCMeta
import sys
import os
import math
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from data_fetcher import downloader
from datetime import datetime
from collections import OrderedDict,Set
import numpy as np
import matplotlib.pyplot as plt
def companies():
    """Load the company roster from data/dow30.csv as a DataFrame."""
    return pd.read_csv(os.path.join("data", "dow30.csv"))
def symbol_list():
    """Return the ticker symbols from data/dow30.csv as a plain list."""
    roster = pd.read_csv(os.path.join("data", "dow30.csv"))
    return roster['Symbol'].tolist()
class BaseData(object):
    """Base class holding a ticker symbol and best-effort CSV persistence.

    Errors during saving are printed rather than raised, so callers can
    treat persistence as optional.
    """

    def __init__(self, symbol: str):
        self.__symbol = symbol

    @property
    def symbol(self):
        """Ticker symbol this object refers to."""
        return self.__symbol

    def save(self, file_dir: str, file_name: str, data: pd.DataFrame):
        """Write *data* to file_dir/file_name, creating the directory if needed.

        The index is written only when it carries a name (set by callers
        such as Feature_Selection). ``None`` data is silently skipped.
        """
        try:
            if data is None:
                return
            full_path = os.path.join(file_dir, file_name)
            # `is None` instead of the original `== None`; persist the index
            # only when it is named.
            include_index = data.index.name is not None
            # exist_ok avoids the original's isdir-check/makedirs race and
            # the duplicated to_csv call in both branches.
            os.makedirs(file_dir, exist_ok=True)
            data.to_csv(full_path, index=include_index)
        except OSError as err:
            print("OS error for symbol {} : {}".format(self.symbol, err))
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate
            print("Unexpected error for symbol {} : {}".format(self.symbol, sys.exc_info()[0]))
class Downloader(BaseData):
    """Download daily OHLCV quotes for one symbol from Yahoo.

    On a download failure the error is printed, ``data`` stays None and
    ``size`` stays 0 (the original left ``size`` undefined in that case,
    causing an AttributeError on later access).
    """

    def __init__(self, symbol: str, start_date: str, end_date: str):
        try:
            BaseData.__init__(self, symbol)
            self.__start_date = datetime.strptime(start_date, '%Y%m%d')
            self.__end_date = datetime.strptime(end_date, '%Y%m%d')
            self.__data = None
            self.__size = 0  # ROBUSTNESS: defined even when the download fails
            # Download data from Yahoo.
            yah = downloader.load_yahoo_quote(symbol, start_date, end_date)
            header = yah[0].split(',')
            # Map header names onto each CSV row. dict(zip(...)) tolerates
            # short rows, unlike the original's fixed seven-column copy that
            # raised IndexError on malformed lines.
            table = [dict(zip(header, fields))
                     for fields in (line.split(',') for line in yah[1:])
                     if len(fields) > 1]
            self.__data = pd.DataFrame(table)
            self.__size = len(self.__data)
        except OSError as err:
            print("OS error for symbol {} : {}".format(symbol, err))

    def save(self):
        """Persist the quotes to ./data/<symbol>/quotes.csv."""
        file_dir = os.path.join("./data", self.symbol)
        BaseData.save(self, file_dir, "quotes.csv", self.__data)

    @property
    def start_date(self):
        return self.__start_date

    @property
    def end_date(self):
        return self.__end_date

    @property
    def data(self):
        """Downloaded quotes DataFrame, or None if the download failed."""
        return self.__data

    @property
    def size(self):
        """Number of downloaded rows (0 on failure)."""
        return self.__size
class Feature_Selection(BaseData):
    """Compute model features (log returns, money-flow index) and their
    normalized versions from a quote DataFrame produced by Downloader."""

    def __init__(self, symbol: str, data: pd.DataFrame, mfi_days=14):
        BaseData.__init__(self, symbol)
        self.__days = mfi_days  # look-back window for the money-flow index
        self.__data = None
        self.__data_normal = None
        required = "Date,Open,High,Low,Close,Adj Close,Volume".split(',')
        # BUG FIX: the original looped over the frame's own columns and flagged
        # any *extra* column as "missing", while genuinely missing required
        # columns went undetected. Check the required set directly instead.
        missing = [name for name in required if name not in data.columns]
        for name in missing:
            print("The column {} is missing.".format(name))
        if not missing:
            self.__data = data
            self.__data['Date'] = pd.to_datetime(self.__data['Date'])
            self.__data.sort_values('Date', inplace=True)
            self.__data.reset_index(drop=True, inplace=True)
            self.__data.index.name = 'index'

    @classmethod
    def read_csv(cls, symbol: str, file_loc: str):
        """Build a Feature_Selection from a CSV file; returns None on I/O error."""
        try:
            data = pd.read_csv(file_loc)
            return cls(symbol, data)
        except OSError as err:
            print("OS error {}".format(err))
            return None

    @property
    def data(self):
        """Raw quote DataFrame (plus feature columns after calculate_features)."""
        return self.__data

    @property
    def data_normal(self):
        """Normalized feature table (built by normalize_data), or None."""
        return self.__data_normal

    def calculate_features(self):
        """Add log-return and MFI columns to the raw data."""
        self.__cal_log_return("Adj Close")
        self.__cal_mfi()

    def __scale_data(self, col_Name: str):
        # Scale the rows after the MFI warm-up period to [-1, 1].
        values = self.__data[col_Name].iloc[self.__days:].values.reshape(-1, 1)
        scaler = MinMaxScaler(feature_range=(-1, 1))
        return scaler.fit_transform(values).flatten()

    def __flatten_data(self, col_Name: str):
        return self.__data[col_Name].iloc[self.__days:].values.flatten()

    def normalize_data(self):
        """Build the normalized feature table, dropping the MFI warm-up rows."""
        index = self.__data.index.values[self.__days:]
        table = OrderedDict()
        table['close'] = self.__flatten_data('Adj Close')
        table['returns'] = self.__flatten_data('Adj Close_log_returns')
        table['mfi'] = self.__flatten_data('mfi_index')
        table['normal_close'] = self.__scale_data('Adj Close')
        table['normal_returns'] = self.__scale_data('Adj Close_log_returns')
        table['normal_mfi'] = self.__scale_data('mfi_index')
        self.__data_normal = pd.DataFrame(table, index=index)
        self.__data_normal.index.name = 'index'

    def __cal_log_return(self, col_name: str):
        # log(p_t / p_{t-1}); the first row keeps a 0 return.
        values = self.__data[col_name].values
        log_returns = np.zeros_like(values)
        for idx in range(1, len(values)):
            log_returns[idx] = math.log(values[idx] / values[idx - 1])
        self.__data[col_name + "_log_returns"] = pd.Series(log_returns, index=self.__data.index)

    def save_stock_data(self):
        # NOTE(review): the original also saved __data_normal here, so
        # "quote_processed.csv" never contained the raw feature table;
        # behavior preserved — confirm intent before changing.
        file_dir = os.path.join("./data", self.symbol)
        BaseData.save(self, file_dir, "quote_processed.csv", self.__data_normal)

    def save_normalized_data(self):
        """Persist the normalized table to ./data/<symbol>/normalized.csv."""
        file_dir = os.path.join("./data", self.symbol)
        BaseData.save(self, file_dir, "normalized.csv", self.__data_normal)

    def __cal_mfi(self):
        """Compute the money-flow index over self.__days periods ('mfi_index')."""
        typ_price = pd.DataFrame((self.__data["High"] + self.__data["Low"] + self.__data["Adj Close"]) / 3, columns=["price"])
        typ_price['volume'] = self.__data["Volume"]
        typ_price['pos'] = 0
        typ_price['neg'] = 0
        typ_price['mfi_index'] = 0.0
        for idx in range(1, len(typ_price)):
            # Classify money flow as positive/negative by price direction.
            if typ_price['price'].iloc[idx] > typ_price['price'].iloc[idx - 1]:
                typ_price.at[idx, 'pos'] = typ_price['price'].iloc[idx] * typ_price['volume'].iloc[idx]
            else:
                typ_price.at[idx, 'neg'] = typ_price['price'].iloc[idx] * typ_price['volume'].iloc[idx]
        pointer = 1
        for idx in range(self.__days, len(typ_price)):
            pos = typ_price['pos'].iloc[pointer:idx + 1].sum()
            neg = typ_price['neg'].iloc[pointer:idx + 1].sum()
            # When there is no negative flow the ratio is undefined; the
            # original pinned base to 1.0 (yielding MFI 0.0) — preserved.
            base = (1.0 + (pos / neg)) if neg != 0 else 1.0
            typ_price.at[idx, 'mfi_index'] = 100.0 - (100.0 / base)
            pointer += 1
        self.__data["mfi_index"] = pd.Series(typ_price["mfi_index"].values, index=typ_price.index)
class Volatility(object):
    """Annualized volatility computed from a symbol's normalized returns file."""

    def __init__(self, symbol: str):
        try:
            path_norm_data = "./data/{}/normalized.csv".format(symbol)
            dataset = pd.read_csv(path_norm_data, index_col='index')
            # Daily standard deviation scaled by sqrt(252 trading days).
            self.__volatility = dataset['returns'].std() * math.sqrt(252)
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # propagate; -1 is the caller-facing failure sentinel
            self.__volatility = -1

    @property
    def annual(self):
        """Annualized volatility, or -1 when the data could not be loaded."""
        return self.__volatility
class SequenceBase(ABC):
    """Load a symbol's normalized feature file and expose windowed sequences.

    Subclasses implement ``X``/``y`` to turn the normalized series into
    model inputs of shape (samples, window_size, features) and targets of
    shape (samples, target_length).
    """

    def __init__(self, symbol: str, window_size: int, target_length: int):
        try:
            self.__window_size = window_size
            self.__target_length = target_length
            path_norm_data = "./data/{}/normalized.csv".format(symbol)
            self.__data_normal = pd.read_csv(path_norm_data, index_col='index')
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # propagate; on failure __data_normal stays unset, as before
            print("Unexpected error for symbol {} : {}".format(symbol, sys.exc_info()[0]))

    @property
    def data(self):
        """Full normalized feature DataFrame."""
        return self.__data_normal

    @property
    def original_data(self):
        """The normalized close series as a flat ndarray."""
        return self.__data_normal['normal_close'].values

    @property
    def window_size(self):
        return self.__window_size

    @property
    def target_length(self):
        return self.__target_length

    @property
    @abstractmethod
    def X(self):
        """Model inputs of shape (samples, window_size, features)."""
        pass

    @property
    @abstractmethod
    def y(self):
        """Targets of shape (samples, target_length)."""
        pass
class SimpleSequence(SequenceBase):
    """Univariate sliding-window sequences over the normalized close price."""

    def __init__(self, symbol: str, window_size: int, target_length: int):
        SequenceBase.__init__(self, symbol, window_size, target_length)
        self.__sequence_data()

    def __sequence_data(self):
        # Build every (window, target) pair that fits inside the series.
        series = self.data['normal_close'].values
        span = self.window_size + self.target_length
        windows, targets = [], []
        for start in range(0, len(series) - span + 1):
            cut = start + self.window_size
            windows.append(series[start:cut])
            targets.append(series[cut:start + span])
        inputs = np.asarray(windows)
        # Add a trailing feature axis: (samples, window, 1).
        self.__X = inputs.reshape((-1, inputs.shape[-1], 1))
        self.__y = np.asarray(targets)

    @property
    def X(self):
        return self.__X

    @property
    def y(self):
        return self.__y
class MultiSequence(SequenceBase):
    """Three-feature (close, returns, mfi) sliding-window sequences."""

    def __init__(self, symbol: str, window_size: int, target_length: int):
        SequenceBase.__init__(self, symbol, window_size, target_length)
        self.__sequence_data()

    def __sequence_data(self):
        close = self.data['normal_close'].values
        returns = self.data['normal_returns'].values
        mfi = self.data['normal_mfi'].values
        span = self.window_size + self.target_length
        inputs, targets = [], []
        for start in range(0, len(close) - span + 1):
            cut = start + self.window_size
            # Stack the three feature columns into a (window, 3) slice.
            inputs.append(np.column_stack((close[start:cut],
                                             returns[start:cut],
                                             mfi[start:cut])))
            targets.append(close[cut:start + span])
        self.__X = np.asarray(inputs)
        self.__y = np.asarray(targets)

    @property
    def X(self):
        return self.__X

    @property
    def y(self):
        return self.__y
def split_data(seq_obj: SequenceBase, split_rate=0.2):
    """Chronological train/test split of a sequence object's X and y.

    Returns (X_train, y_train, X_test, y_test); the first
    ``1 - split_rate`` fraction of samples forms the training part.
    """
    cut = int(len(seq_obj.X) * (1 - split_rate))
    return (seq_obj.X[:cut, :], seq_obj.y[:cut],
            seq_obj.X[cut:, :], seq_obj.y[cut:])
def graph_prediction(trained_model, X_train, X_test, original, window_size):
    """Plot the original series with training/testing predictions overlaid.

    The training fit is drawn in blue starting at ``window_size`` (the first
    predictable point) and the testing fit in red immediately after it.
    """
    pred_train = trained_model.predict(X_train)
    pred_test = trained_model.predict(X_test)
    plt.plot(original, color='k')
    train_end = len(X_train) + window_size
    plt.plot(np.arange(window_size, train_end, 1), pred_train, color='b')
    plt.plot(np.arange(train_end, train_end + len(pred_test), 1), pred_test, color='r')
    plt.xlabel('day')
    plt.ylabel('(normalized) price of stock')
    plt.legend(['original series', 'training fit', 'testing fit'],
               loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"numpy.arange",
"sys.exc_info",
"os.path.join",
"pandas.DataFrame",
"numpy.zeros_like",
"data_fetcher.downloader.load_yahoo_quote",
"numpy.append",
"math.log",
"matplotlib.pyplot.show",
"math.sqrt",
"matplotlib.pyplot.legend",
"numpy... | [((11430, 11459), 'matplotlib.pyplot.plot', 'plt.plot', (['original'], {'color': '"""k"""'}), "(original, color='k')\n", (11438, 11459), True, 'import matplotlib.pyplot as plt\n'), ((11537, 11572), 'numpy.arange', 'np.arange', (['window_size', 'split_pt', '(1)'], {}), '(window_size, split_pt, 1)\n', (11546, 11572), True, 'import numpy as np\n'), ((11576, 11620), 'matplotlib.pyplot.plot', 'plt.plot', (['train_in', 'train_predict'], {'color': '"""b"""'}), "(train_in, train_predict, color='b')\n", (11584, 11620), True, 'import matplotlib.pyplot as plt\n'), ((11688, 11730), 'matplotlib.pyplot.plot', 'plt.plot', (['test_in', 'test_predict'], {'color': '"""r"""'}), "(test_in, test_predict, color='r')\n", (11696, 11730), True, 'import matplotlib.pyplot as plt\n'), ((11736, 11753), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""day"""'], {}), "('day')\n", (11746, 11753), True, 'import matplotlib.pyplot as plt\n'), ((11759, 11800), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""(normalized) price of stock"""'], {}), "('(normalized) price of stock')\n", (11769, 11800), True, 'import matplotlib.pyplot as plt\n'), ((11806, 11917), 'matplotlib.pyplot.legend', 'plt.legend', (["['original series', 'training fit', 'testing fit']"], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)'}), "(['original series', 'training fit', 'testing fit'], loc=\n 'center left', bbox_to_anchor=(1, 0.5))\n", (11816, 11917), True, 'import matplotlib.pyplot as plt\n'), ((11915, 11925), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11923, 11925), True, 'import matplotlib.pyplot as plt\n'), ((358, 391), 'os.path.join', 'os.path.join', (['"""data"""', '"""dow30.csv"""'], {}), "('data', 'dow30.csv')\n", (370, 391), False, 'import os\n'), ((461, 494), 'os.path.join', 'os.path.join', (['"""data"""', '"""dow30.csv"""'], {}), "('data', 'dow30.csv')\n", (473, 494), False, 'import os\n'), ((2650, 2685), 'os.path.join', 'os.path.join', (['"""./data"""', 'self.symbol'], {}), 
"('./data', self.symbol)\n", (2662, 2685), False, 'import os\n'), ((4707, 4742), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (4719, 4742), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((5023, 5036), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5034, 5036), False, 'from collections import OrderedDict, Set\n'), ((5460, 5492), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {'index': 'index'}), '(table, index=index)\n', (5472, 5492), True, 'import pandas as pd\n'), ((5659, 5680), 'numpy.zeros_like', 'np.zeros_like', (['values'], {}), '(values)\n', (5672, 5680), True, 'import numpy as np\n'), ((5839, 5886), 'pandas.Series', 'pd.Series', (['log_returns'], {'index': 'self.__data.index'}), '(log_returns, index=self.__data.index)\n', (5848, 5886), True, 'import pandas as pd\n'), ((5943, 5978), 'os.path.join', 'os.path.join', (['"""./data"""', 'self.symbol'], {}), "('./data', self.symbol)\n", (5955, 5978), False, 'import os\n'), ((6116, 6151), 'os.path.join', 'os.path.join', (['"""./data"""', 'self.symbol'], {}), "('./data', self.symbol)\n", (6128, 6151), False, 'import os\n'), ((6274, 6385), 'pandas.DataFrame', 'pd.DataFrame', (["((self.__data['High'] + self.__data['Low'] + self.__data['Adj Close']) / 3)"], {'columns': "['price']"}), "((self.__data['High'] + self.__data['Low'] + self.__data[\n 'Adj Close']) / 3, columns=['price'])\n", (6286, 6385), True, 'import pandas as pd\n'), ((7353, 7416), 'pandas.Series', 'pd.Series', (["typ_price['mfi_index'].values"], {'index': 'typ_price.index'}), "(typ_price['mfi_index'].values, index=typ_price.index)\n", (7362, 7416), True, 'import pandas as pd\n'), ((9469, 9482), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (9479, 9482), True, 'import numpy as np\n'), ((9567, 9580), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (9577, 9580), True, 'import numpy as np\n'), ((10789, 10802), 'numpy.asarray', 'np.asarray', 
(['X'], {}), '(X)\n', (10799, 10802), True, 'import numpy as np\n'), ((10823, 10836), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (10833, 10836), True, 'import numpy as np\n'), ((871, 904), 'os.path.join', 'os.path.join', (['file_dir', 'file_name'], {}), '(file_dir, file_name)\n', (883, 904), False, 'import os\n'), ((992, 1015), 'os.path.isdir', 'os.path.isdir', (['file_dir'], {}), '(file_dir)\n', (1005, 1015), False, 'import os\n'), ((1604, 1643), 'datetime.datetime.strptime', 'datetime.strptime', (['start_date', '"""%Y%m%d"""'], {}), "(start_date, '%Y%m%d')\n", (1621, 1643), False, 'from datetime import datetime\n'), ((1674, 1711), 'datetime.datetime.strptime', 'datetime.strptime', (['end_date', '"""%Y%m%d"""'], {}), "(end_date, '%Y%m%d')\n", (1691, 1711), False, 'from datetime import datetime\n'), ((1804, 1861), 'data_fetcher.downloader.load_yahoo_quote', 'downloader.load_yahoo_quote', (['symbol', 'start_date', 'end_date'], {}), '(symbol, start_date, end_date)\n', (1831, 1861), False, 'from data_fetcher import downloader\n'), ((2438, 2457), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {}), '(table)\n', (2450, 2457), True, 'import pandas as pd\n'), ((3839, 3874), 'pandas.to_datetime', 'pd.to_datetime', (["self.__data['Date']"], {}), "(self.__data['Date'])\n", (3853, 3874), True, 'import pandas as pd\n'), ((4146, 4167), 'pandas.read_csv', 'pd.read_csv', (['file_loc'], {}), '(file_loc)\n', (4157, 4167), True, 'import pandas as pd\n'), ((5755, 5794), 'math.log', 'math.log', (['(values[idx] / values[idx - 1])'], {}), '(values[idx] / values[idx - 1])\n', (5763, 5794), False, 'import math\n'), ((7599, 7645), 'pandas.read_csv', 'pd.read_csv', (['path_norm_data'], {'index_col': '"""index"""'}), "(path_norm_data, index_col='index')\n", (7610, 7645), True, 'import pandas as pd\n'), ((8161, 8207), 'pandas.read_csv', 'pd.read_csv', (['path_norm_data'], {'index_col': '"""index"""'}), "(path_norm_data, index_col='index')\n", (8172, 8207), True, 'import pandas as 
pd\n'), ((10529, 10566), 'numpy.append', 'np.append', (['x_close', 'x_returns'], {'axis': '(1)'}), '(x_close, x_returns, axis=1)\n', (10538, 10566), True, 'import numpy as np\n'), ((10584, 10612), 'numpy.append', 'np.append', (['x_', 'x_mfi'], {'axis': '(1)'}), '(x_, x_mfi, axis=1)\n', (10593, 10612), True, 'import numpy as np\n'), ((1113, 1134), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (1124, 1134), False, 'import os\n'), ((7705, 7719), 'math.sqrt', 'math.sqrt', (['(252)'], {}), '(252)\n', (7714, 7719), False, 'import math\n'), ((1395, 1409), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1407, 1409), False, 'import sys\n'), ((8295, 8309), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8307, 8309), False, 'import sys\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.