code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Real-time object detection with OpenCV's DNN module and MobileNet-SSD.
import numpy as np
import imutils
import cv2
import time

# Caffe network definition and pre-trained weights.
prototxt = './MobileNetSSD_deploy.prototxt.txt'
model = 'MobileNetSSD_deploy.caffemodel'
# Minimum detection confidence required to draw a box.
confThresh = 0.2
# MobileNet-SSD (Pascal VOC) class labels; order must match the model output.
# Fix: index 17 is 'sheep', not 'sheet'.
CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
           'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep',
           'sofa', 'train', 'tvmonitor']
# One random BGR colour per class for drawing boxes/labels.
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

print('Loading model...')
net = cv2.dnn.readNetFromCaffe(prototxt, model)
print('Model Loaded')
print('Starting Camera Feed...')
cam = cv2.VideoCapture(0)
time.sleep(2.0)  # give the camera sensor time to warm up

while True:
    _, frame = cam.read()
    frame = imutils.resize(frame, width=500)
    (h, w) = frame.shape[:2]
    imResizeBlod = cv2.resize(frame, (300, 300))
    # 0.007843 = 1/127.5 scale factor with 127.5 mean subtraction,
    # matching the preprocessing the model was trained with.
    blob = cv2.dnn.blobFromImage(imResizeBlod, 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()
    detShape = detections.shape[2]
    for i in np.arange(0, detShape):
        confidence = detections[0, 0, i, 2]
        if confidence > confThresh:
            idx = int(detections[0, 0, i, 1])
            # Scale the normalised box coordinates back to frame pixels.
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype('int')
            label = "{}: {:.2f}%".format(CLASSES[idx],
                                         confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
            # Put the label just above the box; when the box touches the
            # top of the frame, drop it below the top edge instead.
            # Fix: the original computed `startY + 15` and discarded it.
            if startY - 15 > 15:
                y = startY - 15
            else:
                y = startY + 15
            cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        print("quit")
        break

cam.release()
cv2.destroyAllWindows()
| [
"cv2.putText",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"cv2.imshow",
"time.sleep",
"cv2.VideoCapture",
"cv2.rectangle",
"numpy.arange",
"numpy.array",
"cv2.dnn.readNetFromCaffe",
"imutils.resize",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((505, 546), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['prototxt', 'model'], {}), '(prototxt, model)\n', (529, 546), False, 'import cv2\n'), ((607, 626), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (623, 626), False, 'import cv2\n'), ((627, 642), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (637, 642), False, 'import time\n'), ((1661, 1684), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1682, 1684), False, 'import cv2\n'), ((687, 719), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(500)'}), '(frame, width=500)\n', (701, 719), False, 'import imutils\n'), ((761, 790), 'cv2.resize', 'cv2.resize', (['frame', '(300, 300)'], {}), '(frame, (300, 300))\n', (771, 790), False, 'import cv2\n'), ((798, 862), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['imResizeBlod', '(0.007843)', '(300, 300)', '(127.5)'], {}), '(imResizeBlod, 0.007843, (300, 300), 127.5)\n', (819, 862), False, 'import cv2\n'), ((952, 974), 'numpy.arange', 'np.arange', (['(0)', 'detShape'], {}), '(0, detShape)\n', (961, 974), True, 'import numpy as np\n'), ((1547, 1573), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (1557, 1573), False, 'import cv2\n'), ((1580, 1594), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1591, 1594), False, 'import cv2\n'), ((1292, 1360), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(startX, startY)', '(endX, endY)', 'COLORS[idx]', '(2)'], {}), '(frame, (startX, startY), (endX, endY), COLORS[idx], 2)\n', (1305, 1360), False, 'import cv2\n'), ((1460, 1549), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'COLORS[idx]', '(2)'], {}), '(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n COLORS[idx], 2)\n', (1471, 1549), False, 'import cv2\n'), ((1149, 1171), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1157, 1171), True, 'import numpy as np\n')] |
"""
This Python script generates strokes from the line type ESRI shapefiles, mainly roads.
Author: <NAME>
Date: 29 February 2020
Version: 0.2
The script is a supplementary material to the full length article:
Title:
An open-source tool to extract natural continuity and hierarchy of urban street networks
Journal:
Environment and Planning B: Urban Analytics and City Science
Authors:
<NAME>, <NAME>, <NAME>, <NAME>
Citation:
<NAME>., <NAME>., <NAME>., & <NAME>. (2020). An open-source tool to extract natural continuity and hierarchy of urban street networks. Environment and Planning B: Urban Analytics and City Science. https://doi.org/10.1177%2F2399808320967680
GitHub repository:
https://github.com/PratyushTripathy/NetworkContinuity
"""
########################################################################################
########################################################################################
################# PLEASE DO NOT EDIT THE BELOW PART OF THE CODE ###################
##### SCROLL DOWN TO THE LOWER EXTREME OF THE SCRIPT TO CHANGE INPUT FILE NAME #####
########################################################################################
########################################################################################
import os, sys, math, time, multiprocessing
from functools import partial
import numpy as np
import shapefile as shp
# Raise the recursion depth limit: line.addLine() recurses once per chained
# segment, which can exceed Python's default limit on long strokes.
sys.setrecursionlimit(10000)
"""
The imported shapefile lines comes as tuple, whereas
the export requires list, this finction converts tuple
inside lines to list
"""
def tupleToList(line):
    """Convert every tuple vertex of *line* to a list, in place.

    Returns the same (mutated) sequence for convenience.
    """
    for idx, vertex in enumerate(line):
        line[idx] = list(vertex)
    return line
def listToTuple(line):
    """Convert every list vertex of *line* to a tuple, in place, then
    return the whole sequence as a tuple (the shape shapefile export needs).
    """
    for idx, vertex in enumerate(line):
        line[idx] = tuple(vertex)
    return tuple(line)
"""
This function rounds up the coordinates of the input
raw shapefile. The decimal places up to which round
up is expected can be changed from here.
"""
def roundCoordinates(edge, decimal=4):
    """Round an (x, y) coordinate pair to *decimal* places (default 4)."""
    x, y = edge
    rounded_x = round(x, decimal)
    rounded_y = round(y, decimal)
    return rounded_x, rounded_y
"""
The below function takes a line as an input and splits
it at every point.
"""
def listToPairs(inList):
    """Split a polyline into its consecutive two-point segments.

    Each output element is ``[[x1, y1], [x2, y2]]`` with coordinates
    rounded via roundCoordinates().
    """
    pairs = []
    for start, end in zip(inList, inList[1:]):
        pairs.append([list(roundCoordinates(start)), list(roundCoordinates(end))])
    return pairs
"""
The function below calculates the angle between two points in space.
"""
def computeAngle(point1, point2):
    """Return the unsigned slope angle (degrees, 3 decimals) of the
    segment from *point1* to *point2*.

    Fix: uses math.atan2 instead of math.atan(height/base), so a vertical
    segment (base == 0) yields 90.0 instead of raising ZeroDivisionError.
    For base > 0 the result is identical to the original atan form.
    """
    height = abs(point2[1] - point1[1])
    base = abs(point2[0] - point1[0])
    angle = round(math.degrees(math.atan2(height, base)), 3)
    return angle
"""
This function calculates the orientation of a line segment.
Point1 is the lower one on the y-axes and vice-cersa for
Point2.
"""
def computeOrientation(line):
    """Return the signed orientation (degrees) of a segment.

    Leftward-leaning segments get a negative angle, horizontal ones 0,
    vertical ones 90, and rightward-leaning ones a positive angle.
    Note: point1 is the second vertex and point2 the first, mirroring
    the convention of the original implementation.
    """
    point1 = line[1]
    point2 = line[0]
    dx = point2[0] - point1[0]
    dy = point2[1] - point1[1]
    # Opposite signs of dx and dy mean the segment leans leftward,
    # so the angle is reported as negative.
    if dx * dy < 0:
        return -computeAngle(point1, point2)
    # Same latitude: horizontal segment.
    if dy == 0:
        return 0
    # Same longitude: vertical segment.
    if dx == 0:
        return 90
    return computeAngle(point1, point2)
"""
This below function calculates the acute joining angle between
two given set of points.
"""
def pointsSetAngle(line1, line2):
    """Return the acute joining angle (degrees) between two segments,
    derived purely from the sign combination of their orientations.
    """
    l1orien = computeOrientation(line1)
    l2orien = computeOrientation(line2)
    # Opposite signs: the segments lean opposite ways, so the joining
    # angle is the sum of the two magnitudes.
    if ((l1orien>0) and (l2orien<0)) or ((l1orien<0) and (l2orien>0)):
        return(abs(l1orien)+abs(l2orien))
    # Same non-zero sign: two supplementary combinations are possible;
    # keep the smaller one.
    elif ((l1orien>0) and (l2orien>0)) or ((l1orien<0) and (l2orien<0)):
        theta1 = abs(l1orien) + 180 - abs(l2orien)
        theta2 = abs(l2orien) + 180 - abs(l1orien)
        if theta1 < theta2:
            return(theta1)
        else:
            return(theta2)
    # At least one segment is horizontal (orientation 0).
    elif (l1orien==0) or (l2orien==0):
        if l1orien<0:
            return(180-abs(l1orien))
        elif l2orien<0:
            return(180-abs(l2orien))
        else:
            return(180 - (abs(computeOrientation(line1)) + abs(computeOrientation(line2))))
    # NOTE(review): this branch looks unreachable -- equal non-zero
    # orientations are handled by the same-sign branch above, and two
    # zero orientations by the zero branch; kept for safety.
    elif (l1orien==l2orien):
        return(180)
"""
The below function calculates the joining angle between
two line segments.
"""
def angleBetweenTwoLines(line1, line2):
    """Return the joining (deflection) angle in degrees between two
    adjacent segments, measured at their shared endpoint.
    """
    l1p1, l1p2 = line1
    l2p1, l2p2 = line2
    l1orien = computeOrientation(line1)
    l2orien = computeOrientation(line2)
    """
    If both lines have same orientation, return 180
    If one of the lines is zero, exception for that
    If both the lines are on same side of the horizontal plane, calculate 180-(sumOfOrientation)
    If both the lines are on same side of the vertical plane, calculate pointSetAngle
    """
    if (l1orien==l2orien):
        angle = 180
    elif (l1orien==0) or (l2orien==0):
        angle = pointsSetAngle(line1, line2)
    # The four cases below identify which endpoints coincide; when the
    # shared point is the top-most or bottom-most of the three involved
    # points, the angle is 180 minus the two orientation magnitudes,
    # otherwise the computation is delegated to pointsSetAngle with the
    # lines reordered so the shared endpoint comes first in both.
    elif l1p1 == l2p1:
        if ((l1p1[1] > l1p2[1]) and (l1p1[1] > l2p2[1])) or ((l1p1[1] < l1p2[1]) and (l1p1[1] < l2p2[1])):
            angle = 180 - (abs(l1orien) + abs(l2orien))
        else:
            angle = pointsSetAngle([l1p1, l1p2], [l2p1,l2p2])
    elif l1p1 == l2p2:
        if ((l1p1[1] > l2p1[1]) and (l1p1[1] > l1p2[1])) or ((l1p1[1] < l2p1[1]) and (l1p1[1] < l1p2[1])):
            angle = 180 - (abs(l1orien) + abs(l2orien))
        else:
            angle = pointsSetAngle([l1p1, l1p2], [l2p2,l2p1])
    elif l1p2 == l2p1:
        if ((l1p2[1] > l1p1[1]) and (l1p2[1] > l2p2[1])) or ((l1p2[1] < l1p1[1]) and (l1p2[1] < l2p2[1])):
            angle = 180 - (abs(l1orien) + abs(l2orien))
        else:
            angle = pointsSetAngle([l1p2, l1p1], [l2p1,l2p2])
    elif l1p2 == l2p2:
        if ((l1p2[1] > l1p1[1]) and (l1p2[1] > l2p1[1])) or ((l1p2[1] < l1p1[1]) and (l1p2[1] < l2p1[1])):
            angle = 180 - (abs(l1orien) + abs(l2orien))
        else:
            angle = pointsSetAngle([l1p2, l1p1], [l2p2,l2p1])
    # NOTE(review): if the two segments share no endpoint (and neither of
    # the first two branches fires), `angle` is never assigned and this
    # raises UnboundLocalError -- callers only pass adjacent segments.
    return(angle)
def getLinksMultiprocessing(n, total, tempArray):
    """Find the segment ids adjacent to segment *n* at each of its endpoints.

    *tempArray* rows are ``[uniqueID, endpoint1_key, endpoint2_key]`` where
    the endpoint keys are coordinate strings. Returns a triple
    ``(n, ids_sharing_endpoint1, ids_sharing_endpoint2)`` excluding *n* itself.
    """
    # Emit a 50-step progress bar every 1000 segments (percentage is
    # halved into 50 steps, then doubled back for display).
    if n % 1000 == 0:
        done = math.floor(100 * n / total / 2)
        todo = 50 - done
        print('>'*done + '-' * todo + ' [%d/%d] '%(n,total) + '%d%%'%(done*2), end='\r')
    endpoint1 = tempArray[n, 1]
    endpoint2 = tempArray[n, 2]
    # A segment is adjacent at an endpoint when either of its own
    # endpoints matches that endpoint key (boolean addition == logical or).
    touches1 = (tempArray[:, 1] == endpoint1) + (tempArray[:, 2] == endpoint1)
    touches2 = (tempArray[:, 1] == endpoint2) + (tempArray[:, 2] == endpoint2)
    ids1 = tempArray[:, 0][~(touches1 == 0)]
    ids2 = tempArray[:, 0][~(touches2 == 0)]
    # Drop the segment itself from both adjacency lists.
    return (n, list(ids1[ids1 != n]), list(ids2[ids2 != n]))
def mergeLinesMultiprocessing(n, total, uniqueDict):
    """Collect the stroke (chain of segment ids) that segment *n* belongs to.

    Follows the finalised links stored at index 6 (endpoint 1) and index 7
    (endpoint 2) of each ``uniqueDict`` entry; only integer links are
    followed, string markers ('LineBreak'/'DeadEnd') terminate a walk.
    Returns the sorted list of member ids.
    """
    # Printing the progress bar
    if n%1000==0:
        """
        Dividing by two to have 50 progress steps
        Subtracting from 50, and not hundred to have less progress steps
        """
        currentProgress = math.floor(100*n/total/2)
        remainingProgress = 50 - currentProgress
        print('>'*currentProgress + '-' * remainingProgress + ' [%d/%d] '%(n,total) + '%d%%'%(currentProgress*2), end='\r')
    outlist = set()
    currentEdge1 = n
    outlist.add(currentEdge1)
    # First pass: walk away from n, preferring the endpoint-1 link; the
    # visited-set check prevents cycling back along the chain.
    while True:
        if type(uniqueDict[currentEdge1][6]) == type(1) and \
                uniqueDict[currentEdge1][6] not in outlist:
            currentEdge1 = uniqueDict[currentEdge1][6]
            outlist.add(currentEdge1)
        elif type(uniqueDict[currentEdge1][7]) == type(1) and \
                uniqueDict[currentEdge1][7] not in outlist:
            currentEdge1 = uniqueDict[currentEdge1][7]
            outlist.add(currentEdge1)
        else:
            break
    currentEdge1 = n
    # Second pass: restart at n and walk the opposite direction,
    # preferring the endpoint-2 link this time.
    while True:
        if type(uniqueDict[currentEdge1][7]) == type(1) and \
                uniqueDict[currentEdge1][7] not in outlist:
            currentEdge1 = uniqueDict[currentEdge1][7]
            outlist.add(currentEdge1)
        elif type(uniqueDict[currentEdge1][6]) == type(1) and \
                uniqueDict[currentEdge1][6] not in outlist:
            currentEdge1 = uniqueDict[currentEdge1][6]
            outlist.add(currentEdge1)
        else:
            break
    outlist = list(outlist)
    outlist.sort()
    return(outlist)
class line():
    """Reads a line-type ESRI shapefile, splits its polylines into unit
    segments, links adjacent segments by deflection angle, and merges the
    best links into continuous strokes that can be exported back to a
    shapefile.

    Per-segment record layout in ``self.unique[id]``:
        [0] segment geometry, [1] orientation, [2]/[3] adjacent ids at
        endpoint 1/2, [4]/[5] best candidate (id, angle) per endpoint,
        [6]/[7] finalised link id or 'LineBreak'/'DeadEnd' marker.
    """
    def __init__(self, inFile):
        # Derive base name (used for the .prj sidecar) and load geometries.
        self.name, self.ext = os.path.splitext(inFile)
        self.sf = shp.Reader(inFile)
        self.shape = self.sf.shapes()
        self.getProjection()
        self.getLines()
    def getProjection(self):
        """Read and cache the WKT projection from the .prj sidecar file."""
        with open(self.name+".prj", "r") as stream:
            self.projection = stream.read()
        return(self.projection)
    def getLines(self):
        """Collect the point list of every shape into ``self.lines``."""
        self.lines = []
        for parts in self.shape:
            self.lines.append(parts.points)
    def splitLines(self):
        """Split every polyline at every vertex into two-point segments."""
        outLine = []
        tempLine = []
        self.tempArray = []
        n = 0
        #Iterate through the lines and split the edges
        for line in self.lines:
            for part in listToPairs(line):
                outLine.append([part, computeOrientation(part), list(), list(), list(), list(), list(), list()])
                # Merge the coordinates as string, this will help in finding adjacent edges in the function below
                self.tempArray.append([n, '%.4f_%.4f'%(part[0][0], part[0][1]), '%.4f_%.4f'%(part[1][0], part[1][1])])
                n += 1
        self.split = outLine
    def uniqueID(self):
        #Loop through split lines, assign unique ID and
        #store inside a list along with the connectivity dictionary
        self.unique = dict(enumerate(self.split))
    def getLinks(self):
        """Fill the adjacency lists (indices 2 and 3) of every segment
        using a multiprocessing pool over getLinksMultiprocessing."""
        # `result` is made global so the worker output stays reachable
        # module-wide (a side effect of the original design).
        global result
        print("Finding adjacent segments...")
        self.tempArray = np.array(self.tempArray, dtype=object)
        iterations = [n for n in range(0,len(self.unique))]
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        constantParameterFunction = partial(getLinksMultiprocessing, total=len(self.unique), tempArray=self.tempArray)
        result = pool.map(constantParameterFunction, iterations)
        pool.close()
        pool.join()
        iterations = None
        for a in result:
            n = a[0]
            self.unique[n][2] = a[1]
            self.unique[n][3] = a[2]
        print('>'*50 + ' [%d/%d] '%(len(self.unique),len(self.unique)) + '100%' + '\n', end='\r')
    def bestLink(self):
        """For every segment, pick the adjacent segment with the largest
        deflection angle at each endpoint (indices 4 and 5)."""
        self.anglePairs = dict()
        for edge in range(0,len(self.unique)):
            p1AngleSet = []
            p2AngleSet = []
            """
            Instead of computing the angle between the two segments twice, the method calculates
            it once and stores in the dictionary for both the keys. So that it does not calculate
            the second time because the key is already present in the dictionary.
            """
            for link1 in self.unique[edge][2]:
                self.anglePairs["%d_%d" % (edge, link1)] = angleBetweenTwoLines(self.unique[edge][0], self.unique[link1][0])
                p1AngleSet.append(self.anglePairs["%d_%d" % (edge, link1)])
            for link2 in self.unique[edge][3]:
                self.anglePairs["%d_%d" % (edge, link2)] = angleBetweenTwoLines(self.unique[edge][0], self.unique[link2][0])
                p2AngleSet.append(self.anglePairs["%d_%d" % (edge, link2)])
            """
            Among the adjacent segments deflection angle values, check for the maximum value
            at both the ends. The segment with the maximum angle is stored in the attributes
            to be cross-checked later for before finalising the segments at both the ends.
            """
            if len(p1AngleSet)!=0:
                val1, idx1 = max((val, idx) for (idx, val) in enumerate(p1AngleSet))
                self.unique[edge][4] = self.unique[edge][2][idx1], val1
            else:
                self.unique[edge][4] = 'DeadEnd'
            if len(p2AngleSet)!=0:
                val2, idx2 = max((val, idx) for (idx, val) in enumerate(p2AngleSet))
                self.unique[edge][5] = self.unique[edge][3][idx2], val2
            else:
                self.unique[edge][5] = 'DeadEnd'
    def crossCheckLinks(self, angleThreshold=0):
        """Finalise a link (indices 6 and 7) only when it is mutual --
        i.e. each segment is also the other's best candidate -- and its
        angle exceeds *angleThreshold*; otherwise mark 'LineBreak'."""
        # NOTE(review): these globals leak loop state at module level,
        # mirroring the original design.
        global edge, bestP1, bestP2
        print("Cross-checking and finalising the links...")
        for edge in range(0,len(self.unique)):
            # Printing the progress bar
            if edge%1000==0:
                """
                Dividing by two to have 50 progress steps
                Subtracting from 50, and not hundred to have less progress steps
                """
                currentProgress = math.floor(100*edge/len(self.unique)/2)
                remainingProgress = 50 - currentProgress
                print('>'*currentProgress + '-' * remainingProgress + ' [%d/%d] '%(edge,len(self.unique)) + '%d%%'%(currentProgress*2), end='\r')
            bestP1 = self.unique[edge][4][0]
            bestP2 = self.unique[edge][5][0]
            if type(bestP1) == type(1) and \
                    edge in [self.unique[bestP1][4][0], self.unique[bestP1][5][0]] and \
                    self.anglePairs["%d_%d" % (edge, bestP1)] > angleThreshold:
                self.unique[edge][6] = bestP1
            else:
                self.unique[edge][6] = 'LineBreak'
            if type(bestP2) == type(1) and \
                    edge in [self.unique[bestP2][4][0], self.unique[bestP2][5][0]] and \
                    self.anglePairs["%d_%d" % (edge, bestP2)] > angleThreshold:
                self.unique[edge][7] = bestP2
            else:
                self.unique[edge][7] = 'LineBreak'
        print('>'*50 + ' [%d/%d] '%(edge+1,len(self.unique)) + '100%' + '\n', end='\r')
    def addLine(self, edge, parent=None, child='Undefined'):
        """Recursively gather *edge* and everything linked to it into one
        merged stroke (see sys.setrecursionlimit at the top of the file)."""
        if child=='Undefined':
            self.mainEdge = len(self.merged)
        if not edge in self.assignedList:
            if parent==None:
                currentid = len(self.merged)
                self.merged[currentid] = set()
            else:
                currentid = self.mainEdge
            self.merged[currentid].add(listToTuple(self.unique[edge][0]))
            self.assignedList.append(edge)
            link1 = self.unique[edge][6]
            link2 = self.unique[edge][7]
            if type(1) == type(link1):
                self.addLine(link1, parent=edge, child=self.mainEdge)
            if type(1) == type(link2):
                self.addLine(link2, parent=edge, child=self.mainEdge)
    def mergeLines(self):
        """Merge the finalised links into strokes with a multiprocessing
        pool over mergeLinesMultiprocessing; deduplicated chains end up in
        the ``self.merged`` dict of geometry sets."""
        print('Merging Lines...')
        self.mergingList = list()
        self.merged = list()
        iterations = [n for n in range(0,len(self.unique))]
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        constantParameterFunction = partial(mergeLinesMultiprocessing, total=len(self.unique), uniqueDict=self.unique)
        result = pool.map(constantParameterFunction, iterations)
        pool.close()
        pool.join()
        iterations = None
        # Every segment of a stroke yields the same sorted chain, so the
        # membership test keeps each stroke only once.
        for tempList in result:
            if not tempList in self.mergingList:
                self.mergingList.append(tempList)
                self.merged.append({listToTuple(self.unique[key][0]) for key in tempList})
        self.merged = dict(enumerate(self.merged))
        print('>'*50 + ' [%d/%d] '%(len(self.unique),len(self.unique)) + '100%' + '\n', end='\r')
    #Export requires 3 brackets, all in list form,
    #Whereas it reads in 3 brackets, inner one as tuple
    def exportPreMerge(self, outFile=None, unique = True):
        """Export the per-segment diagnostic layer (orientation, adjacency
        and link attributes) to a shapefile; default name is date-prefixed."""
        if outFile == None:
            outFile = "%s_%s_pythonScriptHierarchy.shp" % (time.strftime('%Y%m%d')[2:], self.name)
        with shp.Writer(outFile) as w:
            fields = ['UniqueID', 'Orientation', 'linksP1', 'linksP2', 'bestP1', 'bestP2', 'P1Final', 'P2Final']
            for f in fields:
                w.field(f, 'C')
            for parts in range(0,len(self.unique)):
                lineList = tupleToList(self.unique[parts][0])
                w.line([lineList])
                w.record(parts, self.unique[parts][1], self.unique[parts][2], self.unique[parts][3], self.unique[parts][4], self.unique[parts][5], self.unique[parts][6], self.unique[parts][7])
        self.setProjection(outFile)
    def exportStrokes(self, outFile=None):
        """Export the merged strokes (one record per stroke, with its
        segment count) to a shapefile; default name is date-prefixed."""
        if outFile == None:
            outFile = "%s_%s_pythonScriptHierarchy.shp" % (time.strftime('%Y%m%d')[2:], self.name)
        with shp.Writer(outFile) as w:
            fields = ['ID', 'nSegments']
            for field in fields:
                w.field(field, 'C')
            for a in self.merged:
                w.record(a, len(self.merged[a]))
                linelist = tupleToList(list(self.merged[a]))
                w.line(linelist)
        self.setProjection(outFile)
    def setProjection(self, outFile):
        """Copy the cached input projection to the output's .prj sidecar."""
        outName, ext = os.path.splitext(outFile)
        with open(outName + ".prj", "w") as stream:
            stream.write(self.projection)
#######################################################
#######################################################
################ ALGORITHM ENDS HERE #############
##### PLEASE PROVIDE THE INPUT FILE DIRECTORY #####
#######################################################
#######################################################
#Set the path to input shapefile/shapefiles
myDir = r"E:\StreetHierarchy\Cities_OSMNX_Boundary\Chennai\edges"
os.chdir(myDir)
import glob
if __name__ == '__main__':
    # If you wish to process one file only, change the name in the line below
    for file in glob.glob("*.shp"):
        t1 = time.time()
        print('Processing file..\n%s\n' % (file))
        #Read Shapefile
        myStreet = line(file)
        #Split lines at every vertex
        # (fix: splitLines/uniqueID return None; the original bound their
        # results to misleading `tempArray`/`iterations` variables)
        myStreet.splitLines()
        #Create unique ID
        myStreet.uniqueID()
        #Compute connectivity table
        myStreet.getLinks()
        #Find best link at every point for both lines
        myStreet.bestLink()
        #Cross check best links
        #Enter the angle threshold for connectivity here
        myStreet.crossCheckLinks(angleThreshold=0)
        #Merge finalised links
        myStreet.mergeLines()
        #Export lines
        #If you wish to export the premerge file,
        #otherwise, feel free to comment the line below (None exports default name)
        myStreet.exportPreMerge(outFile=None)
        #Exporting the strokes (None exports default name)
        myStreet.exportStrokes(outFile=None)
        t2 = time.time()
        minutes = math.floor((t2-t1) / 60)
        seconds = (t2 - t1) % 60
        print("Processing complete in %d minutes %.2f seconds." % (minutes, seconds))
| [
"math.atan",
"math.floor",
"time.strftime",
"time.time",
"shapefile.Writer",
"numpy.array",
"os.path.splitext",
"glob.glob",
"sys.setrecursionlimit",
"os.chdir",
"shapefile.Reader",
"multiprocessing.cpu_count"
] | [((1464, 1492), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (1485, 1492), False, 'import os, sys, math, time, multiprocessing\n'), ((18322, 18337), 'os.chdir', 'os.chdir', (['myDir'], {}), '(myDir)\n', (18330, 18337), False, 'import os, sys, math, time, multiprocessing\n'), ((18472, 18490), 'glob.glob', 'glob.glob', (['"""*.shp"""'], {}), "('*.shp')\n", (18481, 18490), False, 'import glob\n'), ((6525, 6556), 'math.floor', 'math.floor', (['(100 * n / total / 2)'], {}), '(100 * n / total / 2)\n', (6535, 6556), False, 'import os, sys, math, time, multiprocessing\n'), ((7670, 7701), 'math.floor', 'math.floor', (['(100 * n / total / 2)'], {}), '(100 * n / total / 2)\n', (7680, 7701), False, 'import os, sys, math, time, multiprocessing\n'), ((9078, 9102), 'os.path.splitext', 'os.path.splitext', (['inFile'], {}), '(inFile)\n', (9094, 9102), False, 'import os, sys, math, time, multiprocessing\n'), ((9121, 9139), 'shapefile.Reader', 'shp.Reader', (['inFile'], {}), '(inFile)\n', (9131, 9139), True, 'import shapefile as shp\n'), ((10477, 10515), 'numpy.array', 'np.array', (['self.tempArray'], {'dtype': 'object'}), '(self.tempArray, dtype=object)\n', (10485, 10515), True, 'import numpy as np\n'), ((17753, 17778), 'os.path.splitext', 'os.path.splitext', (['outFile'], {}), '(outFile)\n', (17769, 17778), False, 'import os, sys, math, time, multiprocessing\n'), ((18505, 18516), 'time.time', 'time.time', ([], {}), '()\n', (18514, 18516), False, 'import os, sys, math, time, multiprocessing\n'), ((18588, 18610), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (18604, 18610), False, 'import os, sys, math, time, multiprocessing\n'), ((19470, 19481), 'time.time', 'time.time', ([], {}), '()\n', (19479, 19481), False, 'import os, sys, math, time, multiprocessing\n'), ((19509, 19535), 'math.floor', 'math.floor', (['((t2 - t1) / 60)'], {}), '((t2 - t1) / 60)\n', (19519, 19535), False, 'import os, sys, math, time, 
multiprocessing\n'), ((2663, 2687), 'math.atan', 'math.atan', (['(height / base)'], {}), '(height / base)\n', (2672, 2687), False, 'import os, sys, math, time, multiprocessing\n'), ((10630, 10657), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (10655, 10657), False, 'import os, sys, math, time, multiprocessing\n'), ((15585, 15612), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (15610, 15612), False, 'import os, sys, math, time, multiprocessing\n'), ((16563, 16582), 'shapefile.Writer', 'shp.Writer', (['outFile'], {}), '(outFile)\n', (16573, 16582), True, 'import shapefile as shp\n'), ((17329, 17348), 'shapefile.Writer', 'shp.Writer', (['outFile'], {}), '(outFile)\n', (17339, 17348), True, 'import shapefile as shp\n'), ((16510, 16533), 'time.strftime', 'time.strftime', (['"""%Y%m%d"""'], {}), "('%Y%m%d')\n", (16523, 16533), False, 'import os, sys, math, time, multiprocessing\n'), ((17276, 17299), 'time.strftime', 'time.strftime', (['"""%Y%m%d"""'], {}), "('%Y%m%d')\n", (17289, 17299), False, 'import os, sys, math, time, multiprocessing\n')] |
# -*- coding: utf-8 -*-
"""A collection of analyses possible on gene lists (of HGNC identifiers)."""
from typing import Dict, Iterable, List, Optional, Set, Tuple
import numpy as np
import pandas as pd
from scipy.stats import fisher_exact
from statsmodels.stats.multitest import multipletests
from indra_cogex.client.enrichment.utils import (
get_entity_to_regulators,
get_entity_to_targets,
get_go,
get_reactome,
get_wikipathways,
)
from indra_cogex.client.neo4j_client import Neo4jClient, autoclient
from indra_cogex.client.queries import get_genes_for_go_term
# Names exported by ``from ... import *``.
__all__ = [
    "go_ora",
    "wikipathways_ora",
    "reactome_ora",
    "indra_downstream_ora",
    "indra_upstream_ora",
]
# fmt: off
#: This example list comes from human genes associated with COVID-19
#: (https://bgee.org/?page=top_anat#/result/9bbddda9dea22c21edcada56ad552a35cb8e29a7/)
# The identifiers are HGNC ids given as strings.
EXAMPLE_GENE_IDS = [
    "613", "1116", "1119", "1697", "7067", "2537", "2734", "29517", "8568", "4910", "4931", "4932", "4962", "4983",
    "18873", "5432", "5433", "5981", "16404", "5985", "18358", "6018", "6019", "6021", "6118", "6120", "6122",
    "6148", "6374", "6378", "6395", "6727", "14374", "8004", "18669", "8912", "30306", "23785", "9253", "9788",
    "10498", "10819", "6769", "11120", "11133", "11432", "11584", "18348", "11849", "28948", "11876", "11878",
    "11985", "20820", "12647", "20593", "12713"
]
# fmt: on
def _prepare_hypergeometric_test(
    query_gene_set: Set[str],
    pathway_gene_set: Set[str],
    gene_universe: int,
) -> np.ndarray:
    """Prepare the matrix for hypergeometric test calculations.

    Parameters
    ----------
    query_gene_set:
        gene set to test against pathway
    pathway_gene_set:
        pathway gene set
    gene_universe:
        number of HGNC symbols

    Returns
    -------
    :
        A 2x2 contingency matrix: [[in both, query only],
        [pathway only, in neither]].
    """
    in_both = len(query_gene_set & pathway_gene_set)
    query_only = len(query_gene_set - pathway_gene_set)
    pathway_only = len(pathway_gene_set - query_gene_set)
    in_neither = gene_universe - len(query_gene_set | pathway_gene_set)
    return np.array([[in_both, query_only], [pathway_only, in_neither]])
@autoclient(cache=True)
def count_human_genes(client: Neo4jClient) -> int:
    """Count the number of HGNC genes in neo4j.

    Parameters
    ----------
    client:
        The Neo4j client.

    Returns
    -------
    :
        The number of BioEntity nodes whose id is in the 'hgnc' namespace.
    """
    # Plain string: the query interpolates nothing (the original used an
    # f-string with no placeholders, lint F541).
    query = """\
    MATCH (n:BioEntity)
    WHERE n.id STARTS WITH 'hgnc'
    RETURN count(n) as count
    """
    results = client.query_tx(query)
    return results[0][0]
def gene_ontology_single_ora(
    client: Neo4jClient, go_term: Tuple[str, str], gene_ids: List[str]
) -> float:
    """Get the *p*-value for the Fisher exact test a given GO term.

    1. Look up genes associated with GO term or child terms
    2. Run ORA and return results
    """
    universe = count_human_genes(client=client)
    annotated_ids = {
        gene.db_id
        for gene in get_genes_for_go_term(
            client=client, go_term=go_term, include_indirect=True
        )
    }
    contingency = _prepare_hypergeometric_test(
        query_gene_set=set(gene_ids),
        pathway_gene_set=annotated_ids,
        gene_universe=universe,
    )
    # fisher_exact returns (odds ratio, p-value); only the p-value is needed.
    _, pvalue = fisher_exact(contingency, alternative="greater")
    return pvalue
def _do_ora(
    curie_to_hgnc_ids: Dict[Tuple[str, str], Set[str]],
    gene_ids: Iterable[str],
    count: int,
    method: Optional[str] = "fdr_bh",
    alpha: Optional[float] = None,
    keep_insignificant: bool = True,
) -> pd.DataFrame:
    """Run a one-sided Fisher exact ORA for every pathway.

    Returns a DataFrame with columns curie/name/p/mlp (and q/mlq when a
    multiple-testing *method* is given), sorted by significance.
    """
    if alpha is None:
        alpha = 0.05
    query_gene_set = set(gene_ids)
    records = []
    for (curie, name), pathway_hgnc_ids in curie_to_hgnc_ids.items():
        contingency = _prepare_hypergeometric_test(
            query_gene_set=query_gene_set,
            pathway_gene_set=pathway_hgnc_ids,
            gene_universe=count,
        )
        pvalue = fisher_exact(contingency, alternative="greater")[1]
        records.append((curie, name, pvalue))
    df = pd.DataFrame(records, columns=["curie", "name", "p"]).sort_values(
        "p", ascending=True
    )
    # Minus-log10 p for plotting convenience.
    df["mlp"] = -np.log10(df["p"])
    if method:
        correction_results = multipletests(
            df["p"],
            method=method,
            is_sorted=True,
            alpha=alpha,
        )
        df["q"] = correction_results[1]
        df["mlq"] = -np.log10(df["q"])
        df = df.sort_values("q", ascending=True)
    # NOTE: filtering relies on the "q" column, so keep_insignificant=False
    # requires a truthy *method* (same contract as the original).
    if not keep_insignificant:
        df = df[df["q"] < alpha]
    return df
def go_ora(client: Neo4jClient, gene_ids: Iterable[str], **kwargs) -> pd.DataFrame:
    """Calculate over-representation on all GO terms."""
    universe = count_human_genes(client=client)
    return _do_ora(
        get_go(client=client),
        gene_ids=gene_ids,
        count=universe,
        **kwargs,
    )
def wikipathways_ora(
    client: Neo4jClient, gene_ids: Iterable[str], **kwargs
) -> pd.DataFrame:
    """Calculate over-representation on all WikiPathway pathways."""
    universe = count_human_genes(client=client)
    return _do_ora(
        get_wikipathways(client=client),
        gene_ids=gene_ids,
        count=universe,
        **kwargs,
    )
def reactome_ora(
    client: Neo4jClient, gene_ids: Iterable[str], **kwargs
) -> pd.DataFrame:
    """Calculate over-representation on all Reactome pathways."""
    universe = count_human_genes(client=client)
    return _do_ora(
        get_reactome(client=client),
        gene_ids=gene_ids,
        count=universe,
        **kwargs,
    )
def indra_downstream_ora(
    client: Neo4jClient, gene_ids: Iterable[str], **kwargs
) -> pd.DataFrame:
    """
    Calculate a p-value for each entity in the INDRA database
    based on the genes that are causally upstream of it and how
    they compare to the query gene set.
    """
    universe = count_human_genes(client=client)
    regulator_map = get_entity_to_regulators(client=client)
    return _do_ora(regulator_map, gene_ids=gene_ids, count=universe, **kwargs)
def indra_upstream_ora(
    client: Neo4jClient, gene_ids: Iterable[str], **kwargs
) -> pd.DataFrame:
    """
    Calculate a p-value for each entity in the INDRA database
    based on the set of genes that it regulates and how
    they compare to the query gene set.
    """
    universe = count_human_genes(client=client)
    target_map = get_entity_to_targets(client=client)
    return _do_ora(target_map, gene_ids=gene_ids, count=universe, **kwargs)
def main():
    """Print the top 15 results of every ORA flavour for the COVID-19
    example gene list as markdown tables."""
    client = Neo4jClient()
    # Consistency fix: this heading previously lacked the markdown "## "
    # prefix used by every other section below.
    print("\n## GO Enrichment\n")
    print(
        go_ora(client=client, gene_ids=EXAMPLE_GENE_IDS)
        .head(15)
        .to_markdown(index=False)
    )
    print("\n## WikiPathways Enrichment\n")
    print(
        wikipathways_ora(client=client, gene_ids=EXAMPLE_GENE_IDS)
        .head(15)
        .to_markdown(index=False)
    )
    print("\n## Reactome Enrichment\n")
    print(
        reactome_ora(client=client, gene_ids=EXAMPLE_GENE_IDS)
        .head(15)
        .to_markdown(index=False)
    )
    print("\n## INDRA Upstream Enrichment\n")
    print(
        indra_upstream_ora(client=client, gene_ids=EXAMPLE_GENE_IDS)
        .head(15)
        .to_markdown(index=False)
    )
    print("\n## INDRA Downstream Enrichment\n")
    print(
        indra_downstream_ora(client=client, gene_ids=EXAMPLE_GENE_IDS)
        .head(15)
        .to_markdown(index=False)
    )
if __name__ == "__main__":
    # Run the demo when executed as a script.
    main()
| [
"pandas.DataFrame",
"indra_cogex.client.neo4j_client.autoclient",
"indra_cogex.client.enrichment.utils.get_entity_to_targets",
"statsmodels.stats.multitest.multipletests",
"indra_cogex.client.neo4j_client.Neo4jClient",
"scipy.stats.fisher_exact",
"indra_cogex.client.enrichment.utils.get_entity_to_regula... | [((2252, 2274), 'indra_cogex.client.neo4j_client.autoclient', 'autoclient', ([], {'cache': '(True)'}), '(cache=True)\n', (2262, 2274), False, 'from indra_cogex.client.neo4j_client import Neo4jClient, autoclient\n'), ((6325, 6338), 'indra_cogex.client.neo4j_client.Neo4jClient', 'Neo4jClient', ([], {}), '()\n', (6336, 6338), False, 'from indra_cogex.client.neo4j_client import Neo4jClient, autoclient\n'), ((3223, 3265), 'scipy.stats.fisher_exact', 'fisher_exact', (['table'], {'alternative': '"""greater"""'}), "(table, alternative='greater')\n", (3235, 3265), False, 'from scipy.stats import fisher_exact\n'), ((3875, 3917), 'scipy.stats.fisher_exact', 'fisher_exact', (['table'], {'alternative': '"""greater"""'}), "(table, alternative='greater')\n", (3887, 3917), False, 'from scipy.stats import fisher_exact\n'), ((4085, 4102), 'numpy.log10', 'np.log10', (["df['p']"], {}), "(df['p'])\n", (4093, 4102), True, 'import numpy as np\n'), ((4147, 4213), 'statsmodels.stats.multitest.multipletests', 'multipletests', (["df['p']"], {'method': 'method', 'is_sorted': '(True)', 'alpha': 'alpha'}), "(df['p'], method=method, is_sorted=True, alpha=alpha)\n", (4160, 4213), False, 'from statsmodels.stats.multitest import multipletests\n'), ((4686, 4707), 'indra_cogex.client.enrichment.utils.get_go', 'get_go', ([], {'client': 'client'}), '(client=client)\n', (4692, 4707), False, 'from indra_cogex.client.enrichment.utils import get_entity_to_regulators, get_entity_to_targets, get_go, get_reactome, get_wikipathways\n'), ((4995, 5026), 'indra_cogex.client.enrichment.utils.get_wikipathways', 'get_wikipathways', ([], {'client': 'client'}), '(client=client)\n', (5011, 5026), False, 'from indra_cogex.client.enrichment.utils import get_entity_to_regulators, get_entity_to_targets, get_go, get_reactome, get_wikipathways\n'), ((5312, 5339), 'indra_cogex.client.enrichment.utils.get_reactome', 'get_reactome', ([], {'client': 'client'}), 
'(client=client)\n', (5324, 5339), False, 'from indra_cogex.client.enrichment.utils import get_entity_to_regulators, get_entity_to_targets, get_go, get_reactome, get_wikipathways\n'), ((5749, 5788), 'indra_cogex.client.enrichment.utils.get_entity_to_regulators', 'get_entity_to_regulators', ([], {'client': 'client'}), '(client=client)\n', (5773, 5788), False, 'from indra_cogex.client.enrichment.utils import get_entity_to_regulators, get_entity_to_targets, get_go, get_reactome, get_wikipathways\n'), ((6213, 6249), 'indra_cogex.client.enrichment.utils.get_entity_to_targets', 'get_entity_to_targets', ([], {'client': 'client'}), '(client=client)\n', (6234, 6249), False, 'from indra_cogex.client.enrichment.utils import get_entity_to_regulators, get_entity_to_targets, get_go, get_reactome, get_wikipathways\n'), ((2954, 3030), 'indra_cogex.client.queries.get_genes_for_go_term', 'get_genes_for_go_term', ([], {'client': 'client', 'go_term': 'go_term', 'include_indirect': '(True)'}), '(client=client, go_term=go_term, include_indirect=True)\n', (2975, 3030), False, 'from indra_cogex.client.queries import get_genes_for_go_term\n'), ((3970, 4020), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': "['curie', 'name', 'p']"}), "(rows, columns=['curie', 'name', 'p'])\n", (3982, 4020), True, 'import pandas as pd\n'), ((4334, 4351), 'numpy.log10', 'np.log10', (["df['q']"], {}), "(df['q'])\n", (4342, 4351), True, 'import numpy as np\n')] |
from gym_fem.fem_wrapper import AbaqusWrapper, SimulationError
from gym_fem.helpers import CSVLogger
import gym
from gym.utils import seeding
from gym.core import Wrapper
import numpy as np
from collections.abc import Iterable
from abc import ABC
from pathlib import Path
import configparser
import inspect
import imageio
import time
import logging
import shutil
class FEMEnv(gym.Env, ABC):
    """Abstract base class for FEM-simulation-based Gym environments.

    Class variables left unset here (None / empty) have to be set by the
    specific environment subclass. Simulation results are cached on disk
    (keyed by a simulation-id built from all actions and process conditions)
    so identical trajectories are never simulated twice.
    """
    metadata = {'render.modes': ['human', 'rgb_array']}
    # environment-id: used as GYM-environment name and for template-paths and simulation-storage-paths
    ENV_ID = None
    # readable names for individual actions, used for solver-templates
    action_names = []
    # Gym Spaces
    action_space = None
    observation_space = None
    # per time-step string templates for the simulation-id (uses self.simulation_parameters)
    _simulation_id_templates = None
    # FEM-engine used (e.g. "Abaq")
    fem_engine = None
    # reward given if the simulation is not solvable
    _not_solvable_reward = 0
    # img used for rgb-, and human rendering when no image is available
    _standard_img = np.zeros((731, 914, 3), dtype=np.uint8)
    def __init__(self):
        # current episode
        self.episode = 0
        # current time-step
        self.time_step = 0
        # data returned by step(self, action) for 'FEMLogger' or for special purposes like multiobjective-Learning
        # label prefix conventions to enable generic visualization / agents etc.:
        # rt: reward-term
        # ao: actual observation (without artificial additive noise)
        self.info = {}
        # parameters filled into the solver-template before simulation and used to set the simulation-id
        self.simulation_parameters = {}
        # used for restart-simulations
        self._root_simulation_id = None
        self._base_simulation_id = None
        # stochastic environment dynamic has to be derived from this
        self._np_random_state = None
        self.seed()
        self.viewer = None
        self.state_img_path = None
        # read config (config.ini next to this module)
        config = configparser.ConfigParser()
        config.read(Path(__file__).parent.joinpath('config.ini'))
        general_config = config['general parameters']
        self.persistent_simulation = general_config.getboolean('persistent_simulation')
        self.visualize = general_config.getboolean('visualize')
        storage = general_config.get('simulation_storage', fallback=None)
        # determine paths for current environment
        if self.persistent_simulation:
            if storage in [None, '', 'None']:
                self.sim_storage = Path(f'/tmp/gym_fem/{self.ENV_ID}')
            else:
                self.sim_storage = Path(f'{storage}/{self.ENV_ID}')
        else:
            # non-persistent mode: pick the first unused numbered tmp folder
            i = 0
            self.sim_storage = Path(f'{storage}/tmp/{self.ENV_ID}_{i}')
            while self.sim_storage.exists():
                i += 1
                self.sim_storage = Path(f'{storage}/tmp/{self.ENV_ID}_{i}')
            logging.info(f'persistence off, creating temporary simulation-storage: {self.sim_storage}')
        self.sim_storage.mkdir(exist_ok=True, parents=True)
        # read abaqus specific config
        if self.fem_engine == 'Abaq':
            abaq_params = config['abaqus parameters']
            template_folder = Path(__file__).parent.joinpath(f'assets/abaqus_models/{self.ENV_ID}')
            solver_path = abaq_params.get('solver_path')
            if solver_path in ['', 'None']:
                solver_path = None
            self.fem_wrapper = AbaqusWrapper(self.sim_storage,
                                             template_folder,
                                             solver_path,
                                             abaq_params.get('abaq_version'),
                                             abaq_params.getint('cpu_kernels', fallback=4),
                                             abaq_params.getint('timeout', fallback=300),
                                             abaq_params.getint('reader_version', fallback=0),
                                             abaq_forcekill=abaq_params.getboolean('abaq_forcekill', fallback=False))
        else:
            # only the Abaqus backend is implemented so far
            raise NotImplementedError
    def step(self, action):
        """Runs (or looks up) the FEM simulation for the given action.

        Args:
            action (object): an action provided by the agent
        Returns:
            observation (object): agent's observation of the current environment
            reward (float) :
                amount of reward returned after previous action
            done (boolean):
                whether the episode has ended, in which case further step() calls will return undefined results
            info (dict):
                contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
        """
        # update simulation-parameters by the current action (supports multiple-input control cases)
        if isinstance(action, Iterable):
            for i, a in enumerate(action):
                self.simulation_parameters[f'{self.action_names[i]}_{self.time_step}'] = a
        else:
            self.simulation_parameters[f'{self.action_names[0]}_{self.time_step}'] = action
        # read out current process conditions
        process_conditions = self._sample_process_conditions()
        self.simulation_parameters.update(process_conditions)
        self.info.update(process_conditions)
        # create simulation-id (for file- and folder-names) from simulation-parameters
        simulation_id = self._simulation_id_templates[self.time_step].format(**self.simulation_parameters)
        logging.debug(simulation_id)
        i = 1
        # wait while simulation is locked (exponential backoff: 2, 4, 8, ... seconds)
        while not self.fem_wrapper.request_lock(simulation_id):
            logging.warning(f'waiting for lock release {simulation_id}')
            time.sleep(2 ** i)
            i += 1
        try:
            # check simulation store for results, if none available: simulate
            if self.fem_wrapper.simulation_results_available(simulation_id):
                pass
            else:
                # run simulation
                self.fem_wrapper.run_simulation(simulation_id,
                                                self.simulation_parameters,
                                                self.time_step,
                                                self._base_simulation_id)
        except SimulationError:
            # NOTE(review): reward is hard-coded 0 here rather than
            # self._not_solvable_reward — confirm intended.
            logging.warning(f'{simulation_id} not solvable!')
            o = np.zeros(3)
            r = 0
            done = True
        else:
            # read FEM-results
            fem_results = self.fem_wrapper.read_simulation_results(simulation_id,
                                                                   root_simulation_id=self._root_simulation_id)
            # apply reward function
            r = self._apply_reward_function(fem_results)
            # apply observation function
            o = self._apply_observation_function(fem_results)
            # visualize
            if self.visualize:
                self.state_img_path = self.fem_wrapper.get_state_visualization(simulation_id)
                self.render()
            done = self._is_done()
            if self.time_step == 0:
                self._root_simulation_id = simulation_id
            self._base_simulation_id = simulation_id
        self.time_step += 1
        if done:
            episode_string = f'{self.episode}: Reward {r}, Trajectory {simulation_id}'
            # print(colorize(episode_string, 'green', bold=True))
            logging.info(episode_string)
        # NOTE(review): release_lock is skipped if an unexpected exception
        # escapes the block above — a try/finally would guarantee release.
        self.fem_wrapper.release_lock(simulation_id)
        return o, r, done, self.info
    def _apply_reward_function(self, fem_results):
        """ to be implemented by special FEMEnv instance (use random numbers seeded in seed())
        Args:
            fem_results (tuple): tuple of pandas dataframes for element-wise- and node-wise results
        Returns:
            observation (object): reward for given simulation results
        """
        raise NotImplementedError
    def _apply_observation_function(self, fem_results):
        """ to be implemented by special FEMEnv instance (use random numbers seeded in seed())
        Args:
            fem_results (tuple): tuple of pandas dataframes for element-wise- and node-wise results
        Returns:
            observation (object): observation vector for given simulation results
        """
        raise NotImplementedError
    def _sample_process_conditions(self):
        """ to be implemented by special FEMEnv instance (uses random numbers seeded in seed())
        Returns:
            process-conditions (dict):
                dictionary with process-conditions (Keys used have to be identical with abaq-template keys)
        """
        raise NotImplementedError
    def _is_done(self):
        """ to be implemented by special FEMEnv instance, returns True if the current State is a terminal-state
        Returns:
            done (bool):
                True if the current state is a terminal state
        """
        raise NotImplementedError
    def reset(self):
        """Resets the state of the environment and returns an initial observation.
        Returns: observation (object): the initial observation of the
            space.
        NOTE(review): this base implementation returns None, although the Gym
        API expects reset() to return the initial observation — presumably
        subclasses extend this; confirm.
        """
        self.simulation_parameters = {}
        self.time_step = 0
        self.info = {}
        self._base_simulation_id = None
        self._root_simulation_id = None
        self.episode += 1
    def render(self, mode='human'):
        """Renders the environment.
        - human: render to the current display or terminal and
          return nothing. Usually for human consumption.
        - rgb_array: return an RGB image of the current state.
        Args:
            mode (str): the mode to render with
        """
        if self.visualize:
            if self.state_img_path is None:
                # no state image available yet: fall back to the blank image
                return self._standard_img
            img = imageio.imread(self.state_img_path, pilmode='RGB')
            if mode == 'rgb_array':
                return img
            if mode == 'human':
                from gym.envs.classic_control import rendering
                if self.viewer is None:
                    self.viewer = rendering.SimpleImageViewer(maxwidth=1000)
                self.viewer.imshow(img)
                return self.viewer.isopen
        super(FEMEnv, self).render(mode=mode)
    def seed(self, seed=None):
        """Sets the seed for this env's random number generator(s).
        Note:
            Some environments use multiple pseudorandom number generators.
            We want to capture all such seeds used in order to ensure that
            there aren't accidental correlations between multiple generators.
        Returns:
            list<bigint>: Returns the list of seeds used in this env's random
                number generators. The first value in the list should be the
                "main" seed, or the value which a reproducer should pass to
                'seed'. Often, the main seed equals the provided 'seed', but
                this won't be true if seed=None, for example.
        """
        self._np_random_state, seed = seeding.np_random(seed)
        return [seed]
    def close(self):
        """Override _close in your subclass to perform any necessary cleanup.
        Environments will automatically close() themselves when
        garbage collected or when the program exits.
        """
        if not self.persistent_simulation:
            # temporary storage: remove everything that was simulated
            shutil.rmtree(self.sim_storage)
        if self.viewer is not None:
            self.viewer.close()
            self.viewer = None
"""
class PseudoContinuousActions(ActionWrapper):
'''
    FEM simulations are usually computationally expensive. To allow previous simulation results to be
    reused, the continuous actions are discretized artificially. This wrapper hides that discretization
    step from the agent, so that algorithms for continuous action spaces can still be applied.
'''
def __init__(self, env):
assert (type(env) in inspect.getmro(type(env))), \
f"PseudoContinuousActions Wrapper is defined for Environments of type FEMEnv, " \
"given: {inspect.getmro(type(env))}"
logging.warning("PseudoContinuousActions Wrapper overwrites the environments action-space")
env.action_space = spaces.Box(min(env.action_values), max(env.action_values))
super().__init__(env)
def action(self, action):
return (np.abs(self.action_values - action)).argmin()
"""
class FEMCSVLogger(Wrapper):
    """
    Specific csv-file logger for fem-environments. Complements the default gym monitor / stats_recorder.

    Writes one CSV row per episode to '<outdir>/env_log.csv' containing the
    per-time-step actions and rewards, the entries of the environment's info
    dict, and per-episode aggregates (iteration, runtime, accumulated reward).
    """
    def __init__(self, env, outdir):
        """
        Args:
            env: the environment to wrap; must be (a subclass of) FEMEnv.
            outdir: directory for the csv log file (created if missing).
        """
        # Bug fix: the original check 'type(env) in inspect.getmro(type(env))'
        # was always True (every class is in its own MRO); per the assertion
        # message, the intent is to ensure the wrapped env is a FEMEnv.
        assert (FEMEnv in inspect.getmro(type(env))), \
            f"FEMLogger Wrapper is defined for Environments of type FEMEnv, " \
            f"given: {inspect.getmro(type(env))}"
        self._iteration_start = time.time()
        self._accumulated_reward = 0
        outdir = Path(outdir)
        outdir.mkdir(exist_ok=True, parents=True)
        log_file = outdir.joinpath('env_log.csv')
        if log_file.exists():
            logging.warning(f'{log_file} already existent!')
        self._logger = CSVLogger(log_file)
        super().__init__(env)
    def step(self, action):
        """Forwards to the wrapped env and records action, reward and info."""
        o, r, done, info = super().step(action)
        self._accumulated_reward += r
        # NOTE(review): FEMEnv.step increments time_step before returning, so
        # the action is logged under the post-step index — confirm intended.
        try:
            # multi-dimensional action: one logged column per component
            for i, a in enumerate(action):
                self._logger.set_value(f'action{i}_{self.time_step}', a)
        except TypeError:
            # scalar (non-iterable) action
            self._logger.set_value(f'action{self.time_step}', action)
        self._logger.set_value(f'reward{self.time_step}', r)
        self._logger.set_values(info)
        if done:
            self._set_episode_log_vals()
            self._logger.write_log()
        return o, r, done, info
    def reset(self, **kwargs):
        """Resets the per-episode accumulators and the wrapped environment."""
        if self.episode > 0:
            self._accumulated_reward = 0
        self._iteration_start = time.time()
        return self.env.reset(**kwargs)
    def close(self):
        """Flushes the log before closing the wrapped environment."""
        self._logger.write_log()
        return super().close()
    def _set_episode_log_vals(self):
        """Stores the per-episode aggregate values in the logger."""
        runtime = time.time() - self._iteration_start
        self._logger.set_value('iteration', int(self.episode))
        self._logger.set_value('runtime', runtime)
        self._logger.set_value('reward', self._accumulated_reward)
| [
"logging.debug",
"logging.warning",
"gym_fem.helpers.CSVLogger",
"imageio.imread",
"numpy.zeros",
"time.time",
"time.sleep",
"logging.info",
"pathlib.Path",
"shutil.rmtree",
"configparser.ConfigParser",
"gym.envs.classic_control.rendering.SimpleImageViewer",
"gym.utils.seeding.np_random"
] | [((1225, 1264), 'numpy.zeros', 'np.zeros', (['(731, 914, 3)'], {'dtype': 'np.uint8'}), '((731, 914, 3), dtype=np.uint8)\n', (1233, 1264), True, 'import numpy as np\n'), ((2208, 2235), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (2233, 2235), False, 'import configparser\n'), ((5769, 5797), 'logging.debug', 'logging.debug', (['simulation_id'], {}), '(simulation_id)\n', (5782, 5797), False, 'import logging\n'), ((11371, 11394), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (11388, 11394), False, 'from gym.utils import seeding\n'), ((13158, 13169), 'time.time', 'time.time', ([], {}), '()\n', (13167, 13169), False, 'import time\n'), ((13225, 13237), 'pathlib.Path', 'Path', (['outdir'], {}), '(outdir)\n', (13229, 13237), False, 'from pathlib import Path\n'), ((13452, 13471), 'gym_fem.helpers.CSVLogger', 'CSVLogger', (['log_file'], {}), '(log_file)\n', (13461, 13471), False, 'from gym_fem.helpers import CSVLogger\n'), ((2940, 2980), 'pathlib.Path', 'Path', (['f"""{storage}/tmp/{self.ENV_ID}_{i}"""'], {}), "(f'{storage}/tmp/{self.ENV_ID}_{i}')\n", (2944, 2980), False, 'from pathlib import Path\n'), ((3137, 3238), 'logging.info', 'logging.info', (['f"""persistence off, creating temporary simulation-storage: {self.sim_storage}"""'], {}), "(\n f'persistence off, creating temporary simulation-storage: {self.sim_storage}'\n )\n", (3149, 3238), False, 'import logging\n'), ((5931, 5991), 'logging.warning', 'logging.warning', (['f"""waiting for lock release {simulation_id}"""'], {}), "(f'waiting for lock release {simulation_id}')\n", (5946, 5991), False, 'import logging\n'), ((6004, 6022), 'time.sleep', 'time.sleep', (['(2 ** i)'], {}), '(2 ** i)\n', (6014, 6022), False, 'import time\n'), ((7720, 7748), 'logging.info', 'logging.info', (['episode_string'], {}), '(episode_string)\n', (7732, 7748), False, 'import logging\n'), ((10137, 10187), 'imageio.imread', 'imageio.imread', (['self.state_img_path'], {'pilmode': 
'"""RGB"""'}), "(self.state_img_path, pilmode='RGB')\n", (10151, 10187), False, 'import imageio\n'), ((11702, 11733), 'shutil.rmtree', 'shutil.rmtree', (['self.sim_storage'], {}), '(self.sim_storage)\n', (11715, 11733), False, 'import shutil\n'), ((13380, 13428), 'logging.warning', 'logging.warning', (['f"""{log_file} already existent!"""'], {}), "(f'{log_file} already existent!')\n", (13395, 13428), False, 'import logging\n'), ((14210, 14221), 'time.time', 'time.time', ([], {}), '()\n', (14219, 14221), False, 'import time\n'), ((14404, 14415), 'time.time', 'time.time', ([], {}), '()\n', (14413, 14415), False, 'import time\n'), ((2755, 2790), 'pathlib.Path', 'Path', (['f"""/tmp/gym_fem/{self.ENV_ID}"""'], {}), "(f'/tmp/gym_fem/{self.ENV_ID}')\n", (2759, 2790), False, 'from pathlib import Path\n'), ((2844, 2876), 'pathlib.Path', 'Path', (['f"""{storage}/{self.ENV_ID}"""'], {}), "(f'{storage}/{self.ENV_ID}')\n", (2848, 2876), False, 'from pathlib import Path\n'), ((3084, 3124), 'pathlib.Path', 'Path', (['f"""{storage}/tmp/{self.ENV_ID}_{i}"""'], {}), "(f'{storage}/tmp/{self.ENV_ID}_{i}')\n", (3088, 3124), False, 'from pathlib import Path\n'), ((6604, 6653), 'logging.warning', 'logging.warning', (['f"""{simulation_id} not solvable!"""'], {}), "(f'{simulation_id} not solvable!')\n", (6619, 6653), False, 'import logging\n'), ((6670, 6681), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6678, 6681), True, 'import numpy as np\n'), ((10420, 10462), 'gym.envs.classic_control.rendering.SimpleImageViewer', 'rendering.SimpleImageViewer', ([], {'maxwidth': '(1000)'}), '(maxwidth=1000)\n', (10447, 10462), False, 'from gym.envs.classic_control import rendering\n'), ((2256, 2270), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2260, 2270), False, 'from pathlib import Path\n'), ((3453, 3467), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3457, 3467), False, 'from pathlib import Path\n')] |
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various networks for Jax Dopamine agents."""
import time
from typing import Tuple, Union
from dopamine.discrete_domains import atari_lib
from flax import linen as nn
import gin
import jax
import jax.numpy as jnp
import numpy as onp
# Gin-registered constants for the classic-control Gym environments: the
# observation dtype plus per-dimension observation bounds. These are intended
# for binding in gin configs, e.g. as the min_vals/max_vals attributes of the
# ClassicControl* networks below, which rescale observations into [-1, 1].
gin.constant('jax_networks.CARTPOLE_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.CARTPOLE_MIN_VALS',
             (-2.4, -5., -onp.pi/12., -onp.pi*2.))
gin.constant('jax_networks.CARTPOLE_MAX_VALS',
             (2.4, 5., onp.pi/12., onp.pi*2.))
gin.constant('jax_networks.ACROBOT_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.ACROBOT_MIN_VALS',
             (-1., -1., -1., -1., -5., -5.))
gin.constant('jax_networks.ACROBOT_MAX_VALS',
             (1., 1., 1., 1., 5., 5.))
gin.constant('jax_networks.LUNAR_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.MOUNTAINCAR_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.MOUNTAINCAR_MIN_VALS', (-1.2, -0.07))
gin.constant('jax_networks.MOUNTAINCAR_MAX_VALS', (0.6, 0.07))
def preprocess_atari_inputs(x):
  """Input normalization for Atari 2600 input frames.

  Args:
    x: frame stack of integer pixel values in [0, 255].

  Returns:
    The frames cast to float32 and scaled into [0, 1].
  """
  return x.astype(jnp.float32) / 255.


def identity_preprocess_fn(x):
  """No-op preprocessing: returns the input unchanged.

  Replaces the former lambda assignment (PEP 8 E731): a def gives the
  callable a proper __name__ while keeping the same module-level interface.
  """
  return x
### DQN Networks ###
@gin.configurable
class NatureDQNNetwork(nn.Module):
  """The convolutional (Nature-DQN style) network producing the agent's Q-values.

  Attributes:
    num_actions: int, size of the action space (output dimension).
    inputs_preprocessed: bool, set True when the caller has already cast and
      scaled the frames; otherwise raw Atari frames are normalized here.
  """
  num_actions: int
  inputs_preprocessed: bool = False

  @nn.compact
  def __call__(self, x):
    kernel_initializer = nn.initializers.xavier_uniform()
    if not self.inputs_preprocessed:
      x = preprocess_atari_inputs(x)
    # Three conv stages, each followed by a ReLU. The modules are created in
    # the same order as before, so flax assigns identical parameter names.
    for n_features, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
      x = nn.Conv(features=n_features,
                  kernel_size=(kernel, kernel),
                  strides=(stride, stride),
                  kernel_init=kernel_initializer)(x)
      x = nn.relu(x)
    x = x.reshape(-1)  # flatten
    x = nn.relu(nn.Dense(features=512, kernel_init=kernel_initializer)(x))
    q_values = nn.Dense(features=self.num_actions,
                        kernel_init=kernel_initializer)(x)
    return atari_lib.DQNNetworkType(q_values)
@gin.configurable
class ClassicControlDQNNetwork(nn.Module):
  """Jax DQN network for classic control environments.

  A small MLP; observations may optionally be rescaled into [-1, 1] using
  per-dimension bounds (see the gin constants registered above).
  """
  # size of the action space (output dimension)
  num_actions: int
  # number of hidden (Dense + ReLU) layers
  num_layers: int = 2
  # units per hidden layer
  hidden_units: int = 512
  # optional per-dimension lower bounds; if given, max_vals is required too
  min_vals: Union[None, Tuple[float, ...]] = None
  # optional per-dimension upper bounds
  max_vals: Union[None, Tuple[float, ...]] = None
  # True when the caller already casts inputs to float32
  inputs_preprocessed: bool = False

  def setup(self):
    # Materialize the bounds as arrays once; both must be provided together.
    if self.min_vals is not None:
      assert self.max_vals is not None
      self._min_vals = jnp.array(self.min_vals)
      self._max_vals = jnp.array(self.max_vals)
    initializer = nn.initializers.xavier_uniform()
    self.layers = [
        nn.Dense(features=self.hidden_units, kernel_init=initializer)
        for _ in range(self.num_layers)]
    self.final_layer = nn.Dense(features=self.num_actions,
                              kernel_init=initializer)

  def __call__(self, x):
    """Returns DQNNetworkType(q_values) for a single observation vector."""
    if not self.inputs_preprocessed:
      x = x.astype(jnp.float32)
    x = x.reshape((-1))  # flatten
    if self.min_vals is not None:
      # Normalize to [0, 1] using the configured bounds ...
      x -= self._min_vals
      x /= self._max_vals - self._min_vals
      x = 2.0 * x - 1.0  # Rescale in range [-1, 1].
    for layer in self.layers:
      x = layer(x)
      x = nn.relu(x)
    q_values = self.final_layer(x)
    return atari_lib.DQNNetworkType(q_values)
### Rainbow Networks ###
@gin.configurable
class RainbowNetwork(nn.Module):
  """Convolutional network computing the agent's categorical return distribution.

  Attributes:
    num_actions: int, size of the action space.
    num_atoms: int, number of buckets of the value distribution.
    inputs_preprocessed: bool, whether the frames are already normalized.
  """
  num_actions: int
  num_atoms: int
  inputs_preprocessed: bool = False

  @nn.compact
  def __call__(self, x, support):
    # Uniform fan-in scaling initializer (same distribution as before).
    weight_init = nn.initializers.variance_scaling(
        scale=1.0 / jnp.sqrt(3.0),
        mode='fan_in',
        distribution='uniform')
    if not self.inputs_preprocessed:
      x = preprocess_atari_inputs(x)
    # Conv torso; creation order unchanged, so flax parameter names match.
    for n_features, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
      x = nn.Conv(features=n_features,
                  kernel_size=(kernel, kernel),
                  strides=(stride, stride),
                  kernel_init=weight_init)(x)
      x = nn.relu(x)
    x = x.reshape(-1)  # flatten
    x = nn.relu(nn.Dense(features=512, kernel_init=weight_init)(x))
    x = nn.Dense(features=self.num_actions * self.num_atoms,
                 kernel_init=weight_init)(x)
    logits = x.reshape((self.num_actions, self.num_atoms))
    probabilities = nn.softmax(logits)
    # Expected return per action under the categorical distribution.
    q_values = jnp.sum(support * probabilities, axis=1)
    return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
@gin.configurable
class ClassicControlRainbowNetwork(nn.Module):
  """Jax Rainbow network for classic control environments.

  Attributes:
    num_actions: int, number of actions the agent can take at any state.
    num_atoms: int, number of buckets of the value function distribution.
    num_layers: int, number of hidden (Dense + ReLU) layers.
    hidden_units: int, units per hidden layer.
    min_vals: optional per-dimension observation lower bounds; when given,
      max_vals must also be given and observations are rescaled to [-1, 1].
    max_vals: optional per-dimension observation upper bounds.
    inputs_preprocessed: bool, whether inputs are already float32.
  """
  num_actions: int
  num_atoms: int
  num_layers: int = 2
  hidden_units: int = 512
  min_vals: Union[None, Tuple[float, ...]] = None
  max_vals: Union[None, Tuple[float, ...]] = None
  inputs_preprocessed: bool = False

  def setup(self):
    if self.min_vals is not None:
      # Fail fast with a clear error (consistent with
      # ClassicControlDQNNetwork.setup) instead of letting
      # jnp.array(None) raise something confusing later.
      assert self.max_vals is not None
      self._min_vals = jnp.array(self.min_vals)
      self._max_vals = jnp.array(self.max_vals)
    initializer = nn.initializers.xavier_uniform()
    self.layers = [
        nn.Dense(features=self.hidden_units, kernel_init=initializer)
        for _ in range(self.num_layers)]
    self.final_layer = nn.Dense(features=self.num_actions * self.num_atoms,
                              kernel_init=initializer)

  def __call__(self, x, support):
    """Returns RainbowNetworkType(q_values, logits, probabilities)."""
    if not self.inputs_preprocessed:
      x = x.astype(jnp.float32)
    x = x.reshape((-1))  # flatten
    if self.min_vals is not None:
      x -= self._min_vals
      x /= self._max_vals - self._min_vals
      x = 2.0 * x - 1.0  # Rescale in range [-1, 1].
    for layer in self.layers:
      x = layer(x)
      x = nn.relu(x)
    x = self.final_layer(x)
    logits = x.reshape((self.num_actions, self.num_atoms))
    probabilities = nn.softmax(logits)
    # Expected return per action under the categorical distribution.
    q_values = jnp.sum(support * probabilities, axis=1)
    return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
### Implicit Quantile Networks ###
class ImplicitQuantileNetwork(nn.Module):
  """The Implicit Quantile Network (Dabney et al., 2018).

  Attributes:
    num_actions: int, number of actions the agent can take at any state.
    quantile_embedding_dim: int, size of the cosine embedding of each
      sampled quantile fraction tau.
    inputs_preprocessed: bool, whether the frames are already normalized.
  """
  num_actions: int
  quantile_embedding_dim: int
  inputs_preprocessed: bool = False

  @nn.compact
  def __call__(self, x, num_quantiles, rng):
    """Computes per-quantile action values for sampled fractions tau.

    Args:
      x: input frame stack.
      num_quantiles: int, number of quantile fractions tau to sample.
      rng: JAX PRNG key used to sample the fractions.

    Returns:
      ImplicitQuantileNetworkType with quantile_values of shape
      (num_quantiles, num_actions) and quantiles of shape (num_quantiles, 1).
    """
    # Uniform fan-in scaling initializer.
    initializer = nn.initializers.variance_scaling(
        scale=1.0 / jnp.sqrt(3.0),
        mode='fan_in',
        distribution='uniform')
    if not self.inputs_preprocessed:
      x = preprocess_atari_inputs(x)
    x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
                kernel_init=initializer)(x)
    x = nn.relu(x)
    x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
                kernel_init=initializer)(x)
    x = nn.relu(x)
    x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
                kernel_init=initializer)(x)
    x = nn.relu(x)
    x = x.reshape((-1))  # flatten
    state_vector_length = x.shape[-1]
    # Repeat the state embedding once per sampled quantile fraction.
    state_net_tiled = jnp.tile(x, [num_quantiles, 1])
    quantiles_shape = [num_quantiles, 1]
    # Sample the quantile fractions tau ~ U(0, 1).
    quantiles = jax.random.uniform(rng, shape=quantiles_shape)
    quantile_net = jnp.tile(quantiles, [1, self.quantile_embedding_dim])
    # Cosine embedding of tau: cos(pi * i * tau) for i = 1..embedding_dim.
    quantile_net = (
        jnp.arange(1, self.quantile_embedding_dim + 1, 1).astype(jnp.float32)
        * onp.pi
        * quantile_net)
    quantile_net = jnp.cos(quantile_net)
    # Project the tau embedding to the state-embedding size so the two can
    # be combined via an element-wise (Hadamard) product.
    quantile_net = nn.Dense(features=state_vector_length,
                             kernel_init=initializer)(quantile_net)
    quantile_net = nn.relu(quantile_net)
    x = state_net_tiled * quantile_net
    x = nn.Dense(features=512, kernel_init=initializer)(x)
    x = nn.relu(x)
    quantile_values = nn.Dense(features=self.num_actions,
                               kernel_init=initializer)(x)
    return atari_lib.ImplicitQuantileNetworkType(quantile_values, quantiles)
### Quantile Networks ###
@gin.configurable
class QuantileNetwork(nn.Module):
  """Convolutional network used to compute the agent's return quantiles.

  Attributes:
    num_actions: int, number of actions the agent can take at any state.
    num_atoms: int, number of quantile estimates per action.
    inputs_preprocessed: bool, whether the frames are already normalized.
  """
  num_actions: int
  num_atoms: int
  inputs_preprocessed: bool = False

  @nn.compact
  def __call__(self, x):
    # Uniform fan-in scaling initializer (same as the other value networks).
    initializer = nn.initializers.variance_scaling(
        scale=1.0 / jnp.sqrt(3.0),
        mode='fan_in',
        distribution='uniform')
    if not self.inputs_preprocessed:
      x = preprocess_atari_inputs(x)
    x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
                kernel_init=initializer)(x)
    x = nn.relu(x)
    x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
                kernel_init=initializer)(x)
    x = nn.relu(x)
    x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
                kernel_init=initializer)(x)
    x = nn.relu(x)
    x = x.reshape((-1))  # flatten
    x = nn.Dense(features=512, kernel_init=initializer)(x)
    x = nn.relu(x)
    x = nn.Dense(features=self.num_actions * self.num_atoms,
                 kernel_init=initializer)(x)
    logits = x.reshape((self.num_actions, self.num_atoms))
    # Unlike RainbowNetwork, q-values are the mean of the raw outputs
    # (treated as quantile estimates), not a softmax expectation. The
    # softmax probabilities are presumably computed only to satisfy the
    # RainbowNetworkType interface — confirm against the quantile agent.
    probabilities = nn.softmax(logits)
    q_values = jnp.mean(logits, axis=1)
    return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
### Noisy Nets for FullRainbowNetwork ###
@gin.configurable
class NoisyNetwork(nn.Module):
  """Noisy Network from Fortunato et al. (2018).

  A linear layer whose weights and biases are perturbed by learned,
  factored Gaussian noise (NoisyNets exploration).

  Attributes:
    rng_key: jax.interpreters.xla.DeviceArray, key for JAX RNG.
    eval_mode: bool, whether to turn off noise during evaluation.
  """
  rng_key: jax.interpreters.xla.DeviceArray
  eval_mode: bool = False

  @staticmethod
  def sample_noise(key, shape):
    # Standard-normal noise sample of the requested shape.
    return jax.random.normal(key, shape)

  @staticmethod
  def f(x):
    # Noise-shaping function f(x) = sign(x) * sqrt(|x|).
    # See (10) and (11) in Fortunato et al. (2018).
    return jnp.multiply(jnp.sign(x), jnp.power(jnp.abs(x), 0.5))

  @nn.compact
  def __call__(self, x, features, bias=True, kernel_init=None):
    """Applies the noisy linear layer to the 1-D input vector x.

    Args:
      x: input vector; x.shape[0] is used as the fan-in.
      features: int, number of output units.
      bias: whether to add the (noisy) bias term.
      kernel_init: unused in this body; presumably kept for signature
        compatibility with nn.Dense-style callers — confirm.

    Returns:
      The (possibly noisy) affine transform of x.
    """
    def mu_init(key, shape):
      # Initialization of mean noise parameters (Section 3.2)
      low = -1 / jnp.power(x.shape[0], 0.5)
      high = 1 / jnp.power(x.shape[0], 0.5)
      return jax.random.uniform(key, minval=low, maxval=high, shape=shape)

    def sigma_init(key, shape, dtype=jnp.float32):  # pylint: disable=unused-argument
      # Initialization of sigma noise parameters (Section 3.2)
      return jnp.ones(shape, dtype) * (0.1 / onp.sqrt(x.shape[0]))

    if self.eval_mode:
      # Turn off noise during evaluation
      w_epsilon = onp.zeros(shape=(x.shape[0], features), dtype=onp.float32)
      b_epsilon = onp.zeros(shape=(features,), dtype=onp.float32)
    else:
      # Factored gaussian noise in (10) and (11) in Fortunato et al. (2018).
      # NOTE(review): both samples reuse the same self.rng_key — confirm the
      # resulting correlation between p and q is intended.
      p = NoisyNetwork.sample_noise(self.rng_key, [x.shape[0], 1])
      q = NoisyNetwork.sample_noise(self.rng_key, [1, features])
      f_p = NoisyNetwork.f(p)
      f_q = NoisyNetwork.f(q)
      w_epsilon = f_p * f_q
      b_epsilon = jnp.squeeze(f_q)

    # See (8) and (9) in Fortunato et al. (2018) for output computation.
    w_mu = self.param('kernel_mu', mu_init, (x.shape[0], features))
    w_sigma = self.param('kernel_sigma', sigma_init, (x.shape[0], features))
    w = w_mu + jnp.multiply(w_sigma, w_epsilon)
    ret = jnp.matmul(x, w)
    b_mu = self.param('bias_mu', mu_init, (features,))
    b_sigma = self.param('bias_sigma', sigma_init, (features,))
    b = b_mu + jnp.multiply(b_sigma, b_epsilon)
    return jnp.where(bias, ret + b, ret)
### FullRainbowNetwork ###
def feature_layer(key, noisy, eval_mode=False):
  """Returns the hidden-layer constructor used by FullRainbowNetwork.

  Args:
    key: JAX PRNG key, forwarded to NoisyNetwork when noisy is True.
    noisy: bool, selects noisy linear layers instead of plain Dense ones.
    eval_mode: bool, turns the noise off at evaluation time.

  Returns:
    A function mapping (inputs, features) to the layer output.
  """
  if noisy:
    def apply_layer(x, features):
      return NoisyNetwork(rng_key=key, eval_mode=eval_mode)(x, features)
  else:
    def apply_layer(x, features):
      kernel_initializer = nn.initializers.xavier_uniform()
      return nn.Dense(features, kernel_init=kernel_initializer)(x)
  return apply_layer
@gin.configurable
class FullRainbowNetwork(nn.Module):
  """Jax Rainbow network for Full Rainbow.

  Attributes:
    num_actions: int, number of actions the agent can take at any state.
    num_atoms: int, the number of buckets of the value function distribution.
    noisy: bool, Whether to use noisy networks.
    dueling: bool, Whether to use dueling network architecture.
    distributional: bool, whether to use distributional RL.
  """
  num_actions: int
  num_atoms: int
  noisy: bool = True
  dueling: bool = True
  distributional: bool = True
  inputs_preprocessed: bool = False

  @nn.compact
  def __call__(self, x, support, eval_mode=False, key=None):
    # Generate a random number generation key if not provided
    # NOTE(review): the fallback seeds from the wall clock, so calls without
    # an explicit key are non-deterministic.
    if key is None:
      key = jax.random.PRNGKey(int(time.time() * 1e6))
    if not self.inputs_preprocessed:
      x = preprocess_atari_inputs(x)
    # Standard three-stage conv torso (features / kernel / stride per stage).
    hidden_sizes = [32, 64, 64]
    kernel_sizes = [8, 4, 3]
    stride_sizes = [4, 2, 1]
    for hidden_size, kernel_size, stride_size in zip(hidden_sizes, kernel_sizes,
                                                     stride_sizes):
      x = nn.Conv(
          features=hidden_size,
          kernel_size=(kernel_size, kernel_size),
          strides=(stride_size, stride_size),
          kernel_init=nn.initializers.xavier_uniform())(x)
      x = nn.relu(x)
    x = x.reshape((-1))  # flatten
    # 'net' is either a noisy or a plain dense layer factory.
    net = feature_layer(key, self.noisy, eval_mode=eval_mode)
    x = net(x, features=512)  # Single hidden layer of size 512
    x = nn.relu(x)
    if self.dueling:
      # Dueling heads: logits = V + (A - mean_a A), per action and atom.
      adv = net(x, features=self.num_actions * self.num_atoms)
      value = net(x, features=self.num_atoms)
      adv = adv.reshape((self.num_actions, self.num_atoms))
      value = value.reshape((1, self.num_atoms))
      logits = value + (adv - (jnp.mean(adv, axis=0, keepdims=True)))
    else:
      x = net(x, features=self.num_actions * self.num_atoms)
      logits = x.reshape((self.num_actions, self.num_atoms))
    if self.distributional:
      # Categorical distribution over the support; q-values are expectations.
      probabilities = nn.softmax(logits)
      q_values = jnp.sum(support * probabilities, axis=1)
      return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
    q_values = jnp.sum(logits, axis=1)  # Sum over all the num_atoms
    return atari_lib.DQNNetworkType(q_values)
| [
"dopamine.discrete_domains.atari_lib.RainbowNetworkType",
"dopamine.discrete_domains.atari_lib.ImplicitQuantileNetworkType",
"numpy.sqrt",
"flax.linen.softmax",
"jax.random.uniform",
"jax.numpy.tile",
"jax.numpy.squeeze",
"jax.random.normal",
"jax.numpy.where",
"dopamine.discrete_domains.atari_lib... | [((840, 908), 'gin.constant', 'gin.constant', (['"""jax_networks.CARTPOLE_OBSERVATION_DTYPE"""', 'jnp.float64'], {}), "('jax_networks.CARTPOLE_OBSERVATION_DTYPE', jnp.float64)\n", (852, 908), False, 'import gin\n'), ((909, 1004), 'gin.constant', 'gin.constant', (['"""jax_networks.CARTPOLE_MIN_VALS"""', '(-2.4, -5.0, -onp.pi / 12.0, -onp.pi * 2.0)'], {}), "('jax_networks.CARTPOLE_MIN_VALS', (-2.4, -5.0, -onp.pi / 12.0,\n -onp.pi * 2.0))\n", (921, 1004), False, 'import gin\n'), ((1007, 1099), 'gin.constant', 'gin.constant', (['"""jax_networks.CARTPOLE_MAX_VALS"""', '(2.4, 5.0, onp.pi / 12.0, onp.pi * 2.0)'], {}), "('jax_networks.CARTPOLE_MAX_VALS', (2.4, 5.0, onp.pi / 12.0, \n onp.pi * 2.0))\n", (1019, 1099), False, 'import gin\n'), ((1101, 1168), 'gin.constant', 'gin.constant', (['"""jax_networks.ACROBOT_OBSERVATION_DTYPE"""', 'jnp.float64'], {}), "('jax_networks.ACROBOT_OBSERVATION_DTYPE', jnp.float64)\n", (1113, 1168), False, 'import gin\n'), ((1169, 1256), 'gin.constant', 'gin.constant', (['"""jax_networks.ACROBOT_MIN_VALS"""', '(-1.0, -1.0, -1.0, -1.0, -5.0, -5.0)'], {}), "('jax_networks.ACROBOT_MIN_VALS', (-1.0, -1.0, -1.0, -1.0, -5.0,\n -5.0))\n", (1181, 1256), False, 'import gin\n'), ((1260, 1337), 'gin.constant', 'gin.constant', (['"""jax_networks.ACROBOT_MAX_VALS"""', '(1.0, 1.0, 1.0, 1.0, 5.0, 5.0)'], {}), "('jax_networks.ACROBOT_MAX_VALS', (1.0, 1.0, 1.0, 1.0, 5.0, 5.0))\n", (1272, 1337), False, 'import gin\n'), ((1345, 1410), 'gin.constant', 'gin.constant', (['"""jax_networks.LUNAR_OBSERVATION_DTYPE"""', 'jnp.float64'], {}), "('jax_networks.LUNAR_OBSERVATION_DTYPE', jnp.float64)\n", (1357, 1410), False, 'import gin\n'), ((1411, 1482), 'gin.constant', 'gin.constant', (['"""jax_networks.MOUNTAINCAR_OBSERVATION_DTYPE"""', 'jnp.float64'], {}), "('jax_networks.MOUNTAINCAR_OBSERVATION_DTYPE', jnp.float64)\n", (1423, 1482), False, 'import gin\n'), ((1483, 1547), 'gin.constant', 'gin.constant', 
(['"""jax_networks.MOUNTAINCAR_MIN_VALS"""', '(-1.2, -0.07)'], {}), "('jax_networks.MOUNTAINCAR_MIN_VALS', (-1.2, -0.07))\n", (1495, 1547), False, 'import gin\n'), ((1548, 1610), 'gin.constant', 'gin.constant', (['"""jax_networks.MOUNTAINCAR_MAX_VALS"""', '(0.6, 0.07)'], {}), "('jax_networks.MOUNTAINCAR_MAX_VALS', (0.6, 0.07))\n", (1560, 1610), False, 'import gin\n'), ((2040, 2072), 'flax.linen.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (2070, 2072), True, 'from flax import linen as nn\n'), ((2264, 2274), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (2271, 2274), True, 'from flax import linen as nn\n'), ((2392, 2402), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (2399, 2402), True, 'from flax import linen as nn\n'), ((2520, 2530), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (2527, 2530), True, 'from flax import linen as nn\n'), ((2633, 2643), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (2640, 2643), True, 'from flax import linen as nn\n'), ((2758, 2792), 'dopamine.discrete_domains.atari_lib.DQNNetworkType', 'atari_lib.DQNNetworkType', (['q_values'], {}), '(q_values)\n', (2782, 2792), False, 'from dopamine.discrete_domains import atari_lib\n'), ((3324, 3356), 'flax.linen.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (3354, 3356), True, 'from flax import linen as nn\n'), ((3511, 3571), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.num_actions', 'kernel_init': 'initializer'}), '(features=self.num_actions, kernel_init=initializer)\n', (3519, 3571), True, 'from flax import linen as nn\n'), ((4016, 4050), 'dopamine.discrete_domains.atari_lib.DQNNetworkType', 'atari_lib.DQNNetworkType', (['q_values'], {}), '(q_values)\n', (4040, 4050), False, 'from dopamine.discrete_domains import atari_lib\n'), ((4663, 4673), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (4670, 4673), True, 'from flax import linen as nn\n'), ((4791, 4801), 'flax.linen.relu', 'nn.relu', 
(['x'], {}), '(x)\n', (4798, 4801), True, 'from flax import linen as nn\n'), ((4919, 4929), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (4926, 4929), True, 'from flax import linen as nn\n'), ((5032, 5042), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (5039, 5042), True, 'from flax import linen as nn\n'), ((5228, 5246), 'flax.linen.softmax', 'nn.softmax', (['logits'], {}), '(logits)\n', (5238, 5246), True, 'from flax import linen as nn\n'), ((5262, 5302), 'jax.numpy.sum', 'jnp.sum', (['(support * probabilities)'], {'axis': '(1)'}), '(support * probabilities, axis=1)\n', (5269, 5302), True, 'import jax.numpy as jnp\n'), ((5314, 5375), 'dopamine.discrete_domains.atari_lib.RainbowNetworkType', 'atari_lib.RainbowNetworkType', (['q_values', 'logits', 'probabilities'], {}), '(q_values, logits, probabilities)\n', (5342, 5375), False, 'from dopamine.discrete_domains import atari_lib\n'), ((5893, 5925), 'flax.linen.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (5923, 5925), True, 'from flax import linen as nn\n'), ((6080, 6157), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(self.num_actions * self.num_atoms)', 'kernel_init': 'initializer'}), '(features=self.num_actions * self.num_atoms, kernel_init=initializer)\n', (6088, 6157), True, 'from flax import linen as nn\n'), ((6672, 6690), 'flax.linen.softmax', 'nn.softmax', (['logits'], {}), '(logits)\n', (6682, 6690), True, 'from flax import linen as nn\n'), ((6706, 6746), 'jax.numpy.sum', 'jnp.sum', (['(support * probabilities)'], {'axis': '(1)'}), '(support * probabilities, axis=1)\n', (6713, 6746), True, 'import jax.numpy as jnp\n'), ((6758, 6819), 'dopamine.discrete_domains.atari_lib.RainbowNetworkType', 'atari_lib.RainbowNetworkType', (['q_values', 'logits', 'probabilities'], {}), '(q_values, logits, probabilities)\n', (6786, 6819), False, 'from dopamine.discrete_domains import atari_lib\n'), ((7439, 7449), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (7446, 
7449), True, 'from flax import linen as nn\n'), ((7567, 7577), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (7574, 7577), True, 'from flax import linen as nn\n'), ((7695, 7705), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (7702, 7705), True, 'from flax import linen as nn\n'), ((7801, 7832), 'jax.numpy.tile', 'jnp.tile', (['x', '[num_quantiles, 1]'], {}), '(x, [num_quantiles, 1])\n', (7809, 7832), True, 'import jax.numpy as jnp\n'), ((7890, 7936), 'jax.random.uniform', 'jax.random.uniform', (['rng'], {'shape': 'quantiles_shape'}), '(rng, shape=quantiles_shape)\n', (7908, 7936), False, 'import jax\n'), ((7956, 8009), 'jax.numpy.tile', 'jnp.tile', (['quantiles', '[1, self.quantile_embedding_dim]'], {}), '(quantiles, [1, self.quantile_embedding_dim])\n', (7964, 8009), True, 'import jax.numpy as jnp\n'), ((8169, 8190), 'jax.numpy.cos', 'jnp.cos', (['quantile_net'], {}), '(quantile_net)\n', (8176, 8190), True, 'import jax.numpy as jnp\n'), ((8335, 8356), 'flax.linen.relu', 'nn.relu', (['quantile_net'], {}), '(quantile_net)\n', (8342, 8356), True, 'from flax import linen as nn\n'), ((8463, 8473), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (8470, 8473), True, 'from flax import linen as nn\n'), ((8602, 8667), 'dopamine.discrete_domains.atari_lib.ImplicitQuantileNetworkType', 'atari_lib.ImplicitQuantileNetworkType', (['quantile_values', 'quantiles'], {}), '(quantile_values, quantiles)\n', (8639, 8667), False, 'from dopamine.discrete_domains import atari_lib\n'), ((9269, 9279), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (9276, 9279), True, 'from flax import linen as nn\n'), ((9397, 9407), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (9404, 9407), True, 'from flax import linen as nn\n'), ((9525, 9535), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (9532, 9535), True, 'from flax import linen as nn\n'), ((9638, 9648), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (9645, 9648), True, 'from flax import linen as nn\n'), ((9834, 
9852), 'flax.linen.softmax', 'nn.softmax', (['logits'], {}), '(logits)\n', (9844, 9852), True, 'from flax import linen as nn\n'), ((9868, 9892), 'jax.numpy.mean', 'jnp.mean', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (9876, 9892), True, 'import jax.numpy as jnp\n'), ((9904, 9965), 'dopamine.discrete_domains.atari_lib.RainbowNetworkType', 'atari_lib.RainbowNetworkType', (['q_values', 'logits', 'probabilities'], {}), '(q_values, logits, probabilities)\n', (9932, 9965), False, 'from dopamine.discrete_domains import atari_lib\n'), ((10389, 10418), 'jax.random.normal', 'jax.random.normal', (['key', 'shape'], {}), '(key, shape)\n', (10406, 10418), False, 'import jax\n'), ((11943, 11959), 'jax.numpy.matmul', 'jnp.matmul', (['x', 'w'], {}), '(x, w)\n', (11953, 11959), True, 'import jax.numpy as jnp\n'), ((12139, 12168), 'jax.numpy.where', 'jnp.where', (['bias', '(ret + b)', 'ret'], {}), '(bias, ret + b, ret)\n', (12148, 12168), True, 'import jax.numpy as jnp\n'), ((14096, 14106), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (14103, 14106), True, 'from flax import linen as nn\n'), ((14767, 14790), 'jax.numpy.sum', 'jnp.sum', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (14774, 14790), True, 'import jax.numpy as jnp\n'), ((14832, 14866), 'dopamine.discrete_domains.atari_lib.DQNNetworkType', 'atari_lib.DQNNetworkType', (['q_values'], {}), '(q_values)\n', (14856, 14866), False, 'from dopamine.discrete_domains import atari_lib\n'), ((2155, 2241), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(32)', 'kernel_size': '(8, 8)', 'strides': '(4, 4)', 'kernel_init': 'initializer'}), '(features=32, kernel_size=(8, 8), strides=(4, 4), kernel_init=\n initializer)\n', (2162, 2241), True, 'from flax import linen as nn\n'), ((2283, 2369), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(64)', 'kernel_size': '(4, 4)', 'strides': '(2, 2)', 'kernel_init': 'initializer'}), '(features=64, kernel_size=(4, 4), strides=(2, 2), kernel_init=\n initializer)\n', (2290, 
2369), True, 'from flax import linen as nn\n'), ((2411, 2497), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(64)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'kernel_init': 'initializer'}), '(features=64, kernel_size=(3, 3), strides=(1, 1), kernel_init=\n initializer)\n', (2418, 2497), True, 'from flax import linen as nn\n'), ((2574, 2621), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(512)', 'kernel_init': 'initializer'}), '(features=512, kernel_init=initializer)\n', (2582, 2621), True, 'from flax import linen as nn\n'), ((2659, 2719), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.num_actions', 'kernel_init': 'initializer'}), '(features=self.num_actions, kernel_init=initializer)\n', (2667, 2719), True, 'from flax import linen as nn\n'), ((3233, 3257), 'jax.numpy.array', 'jnp.array', (['self.min_vals'], {}), '(self.min_vals)\n', (3242, 3257), True, 'import jax.numpy as jnp\n'), ((3281, 3305), 'jax.numpy.array', 'jnp.array', (['self.max_vals'], {}), '(self.max_vals)\n', (3290, 3305), True, 'import jax.numpy as jnp\n'), ((3385, 3446), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.hidden_units', 'kernel_init': 'initializer'}), '(features=self.hidden_units, kernel_init=initializer)\n', (3393, 3446), True, 'from flax import linen as nn\n'), ((3959, 3969), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (3966, 3969), True, 'from flax import linen as nn\n'), ((4554, 4640), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(32)', 'kernel_size': '(8, 8)', 'strides': '(4, 4)', 'kernel_init': 'initializer'}), '(features=32, kernel_size=(8, 8), strides=(4, 4), kernel_init=\n initializer)\n', (4561, 4640), True, 'from flax import linen as nn\n'), ((4682, 4768), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(64)', 'kernel_size': '(4, 4)', 'strides': '(2, 2)', 'kernel_init': 'initializer'}), '(features=64, kernel_size=(4, 4), strides=(2, 2), kernel_init=\n initializer)\n', (4689, 4768), True, 'from flax import linen as nn\n'), ((4810, 4896), 
'flax.linen.Conv', 'nn.Conv', ([], {'features': '(64)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'kernel_init': 'initializer'}), '(features=64, kernel_size=(3, 3), strides=(1, 1), kernel_init=\n initializer)\n', (4817, 4896), True, 'from flax import linen as nn\n'), ((4973, 5020), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(512)', 'kernel_init': 'initializer'}), '(features=512, kernel_init=initializer)\n', (4981, 5020), True, 'from flax import linen as nn\n'), ((5051, 5128), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(self.num_actions * self.num_atoms)', 'kernel_init': 'initializer'}), '(features=self.num_actions * self.num_atoms, kernel_init=initializer)\n', (5059, 5128), True, 'from flax import linen as nn\n'), ((5802, 5826), 'jax.numpy.array', 'jnp.array', (['self.min_vals'], {}), '(self.min_vals)\n', (5811, 5826), True, 'import jax.numpy as jnp\n'), ((5850, 5874), 'jax.numpy.array', 'jnp.array', (['self.max_vals'], {}), '(self.max_vals)\n', (5859, 5874), True, 'import jax.numpy as jnp\n'), ((5954, 6015), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.hidden_units', 'kernel_init': 'initializer'}), '(features=self.hidden_units, kernel_init=initializer)\n', (5962, 6015), True, 'from flax import linen as nn\n'), ((6554, 6564), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (6561, 6564), True, 'from flax import linen as nn\n'), ((7330, 7416), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(32)', 'kernel_size': '(8, 8)', 'strides': '(4, 4)', 'kernel_init': 'initializer'}), '(features=32, kernel_size=(8, 8), strides=(4, 4), kernel_init=\n initializer)\n', (7337, 7416), True, 'from flax import linen as nn\n'), ((7458, 7544), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(64)', 'kernel_size': '(4, 4)', 'strides': '(2, 2)', 'kernel_init': 'initializer'}), '(features=64, kernel_size=(4, 4), strides=(2, 2), kernel_init=\n initializer)\n', (7465, 7544), True, 'from flax import linen as nn\n'), ((7586, 7672), 'flax.linen.Conv', 
'nn.Conv', ([], {'features': '(64)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'kernel_init': 'initializer'}), '(features=64, kernel_size=(3, 3), strides=(1, 1), kernel_init=\n initializer)\n', (7593, 7672), True, 'from flax import linen as nn\n'), ((8210, 8273), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'state_vector_length', 'kernel_init': 'initializer'}), '(features=state_vector_length, kernel_init=initializer)\n', (8218, 8273), True, 'from flax import linen as nn\n'), ((8404, 8451), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(512)', 'kernel_init': 'initializer'}), '(features=512, kernel_init=initializer)\n', (8412, 8451), True, 'from flax import linen as nn\n'), ((8496, 8556), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.num_actions', 'kernel_init': 'initializer'}), '(features=self.num_actions, kernel_init=initializer)\n', (8504, 8556), True, 'from flax import linen as nn\n'), ((9160, 9246), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(32)', 'kernel_size': '(8, 8)', 'strides': '(4, 4)', 'kernel_init': 'initializer'}), '(features=32, kernel_size=(8, 8), strides=(4, 4), kernel_init=\n initializer)\n', (9167, 9246), True, 'from flax import linen as nn\n'), ((9288, 9374), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(64)', 'kernel_size': '(4, 4)', 'strides': '(2, 2)', 'kernel_init': 'initializer'}), '(features=64, kernel_size=(4, 4), strides=(2, 2), kernel_init=\n initializer)\n', (9295, 9374), True, 'from flax import linen as nn\n'), ((9416, 9502), 'flax.linen.Conv', 'nn.Conv', ([], {'features': '(64)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'kernel_init': 'initializer'}), '(features=64, kernel_size=(3, 3), strides=(1, 1), kernel_init=\n initializer)\n', (9423, 9502), True, 'from flax import linen as nn\n'), ((9579, 9626), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(512)', 'kernel_init': 'initializer'}), '(features=512, kernel_init=initializer)\n', (9587, 9626), True, 'from flax import linen as nn\n'), ((9657, 
9734), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(self.num_actions * self.num_atoms)', 'kernel_init': 'initializer'}), '(features=self.num_actions * self.num_atoms, kernel_init=initializer)\n', (9665, 9734), True, 'from flax import linen as nn\n'), ((10524, 10535), 'jax.numpy.sign', 'jnp.sign', (['x'], {}), '(x)\n', (10532, 10535), True, 'import jax.numpy as jnp\n'), ((10837, 10898), 'jax.random.uniform', 'jax.random.uniform', (['key'], {'minval': 'low', 'maxval': 'high', 'shape': 'shape'}), '(key, minval=low, maxval=high, shape=shape)\n', (10855, 10898), False, 'import jax\n'), ((11199, 11257), 'numpy.zeros', 'onp.zeros', ([], {'shape': '(x.shape[0], features)', 'dtype': 'onp.float32'}), '(shape=(x.shape[0], features), dtype=onp.float32)\n', (11208, 11257), True, 'import numpy as onp\n'), ((11276, 11323), 'numpy.zeros', 'onp.zeros', ([], {'shape': '(features,)', 'dtype': 'onp.float32'}), '(shape=(features,), dtype=onp.float32)\n', (11285, 11323), True, 'import numpy as onp\n'), ((11649, 11665), 'jax.numpy.squeeze', 'jnp.squeeze', (['f_q'], {}), '(f_q)\n', (11660, 11665), True, 'import jax.numpy as jnp\n'), ((11900, 11932), 'jax.numpy.multiply', 'jnp.multiply', (['w_sigma', 'w_epsilon'], {}), '(w_sigma, w_epsilon)\n', (11912, 11932), True, 'import jax.numpy as jnp\n'), ((12095, 12127), 'jax.numpy.multiply', 'jnp.multiply', (['b_sigma', 'b_epsilon'], {}), '(b_sigma, b_epsilon)\n', (12107, 12127), True, 'import jax.numpy as jnp\n'), ((13915, 13925), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (13922, 13925), True, 'from flax import linen as nn\n'), ((14600, 14618), 'flax.linen.softmax', 'nn.softmax', (['logits'], {}), '(logits)\n', (14610, 14618), True, 'from flax import linen as nn\n'), ((14636, 14676), 'jax.numpy.sum', 'jnp.sum', (['(support * probabilities)'], {'axis': '(1)'}), '(support * probabilities, axis=1)\n', (14643, 14676), True, 'import jax.numpy as jnp\n'), ((14690, 14751), 'dopamine.discrete_domains.atari_lib.RainbowNetworkType', 
'atari_lib.RainbowNetworkType', (['q_values', 'logits', 'probabilities'], {}), '(q_values, logits, probabilities)\n', (14718, 14751), False, 'from dopamine.discrete_domains import atari_lib\n'), ((10547, 10557), 'jax.numpy.abs', 'jnp.abs', (['x'], {}), '(x)\n', (10554, 10557), True, 'import jax.numpy as jnp\n'), ((10753, 10779), 'jax.numpy.power', 'jnp.power', (['x.shape[0]', '(0.5)'], {}), '(x.shape[0], 0.5)\n', (10762, 10779), True, 'import jax.numpy as jnp\n'), ((10797, 10823), 'jax.numpy.power', 'jnp.power', (['x.shape[0]', '(0.5)'], {}), '(x.shape[0], 0.5)\n', (10806, 10823), True, 'import jax.numpy as jnp\n'), ((11062, 11084), 'jax.numpy.ones', 'jnp.ones', (['shape', 'dtype'], {}), '(shape, dtype)\n', (11070, 11084), True, 'import jax.numpy as jnp\n'), ((4402, 4415), 'jax.numpy.sqrt', 'jnp.sqrt', (['(3.0)'], {}), '(3.0)\n', (4410, 4415), True, 'import jax.numpy as jnp\n'), ((7178, 7191), 'jax.numpy.sqrt', 'jnp.sqrt', (['(3.0)'], {}), '(3.0)\n', (7186, 7191), True, 'import jax.numpy as jnp\n'), ((9008, 9021), 'jax.numpy.sqrt', 'jnp.sqrt', (['(3.0)'], {}), '(3.0)\n', (9016, 9021), True, 'import jax.numpy as jnp\n'), ((11094, 11114), 'numpy.sqrt', 'onp.sqrt', (['x.shape[0]'], {}), '(x.shape[0])\n', (11102, 11114), True, 'import numpy as onp\n'), ((12501, 12533), 'flax.linen.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (12531, 12533), True, 'from flax import linen as nn\n'), ((14378, 14414), 'jax.numpy.mean', 'jnp.mean', (['adv'], {'axis': '(0)', 'keepdims': '(True)'}), '(adv, axis=0, keepdims=True)\n', (14386, 14414), True, 'import jax.numpy as jnp\n'), ((8039, 8088), 'jax.numpy.arange', 'jnp.arange', (['(1)', '(self.quantile_embedding_dim + 1)', '(1)'], {}), '(1, self.quantile_embedding_dim + 1, 1)\n', (8049, 8088), True, 'import jax.numpy as jnp\n'), ((13364, 13375), 'time.time', 'time.time', ([], {}), '()\n', (13373, 13375), False, 'import time\n'), ((13868, 13900), 'flax.linen.initializers.xavier_uniform', 
'nn.initializers.xavier_uniform', ([], {}), '()\n', (13898, 13900), True, 'from flax import linen as nn\n')] |
# Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatability header small
###############################################################################
###############################################################################
###############################################################################
import keras.models
import keras.backend as K
import numpy as np
from . import base
from .. import layers as ilayers
from .. import utils as iutils
from ..utils import keras as kutils
# Public API of this module: the wrapper base classes exported to callers.
__all__ = [
    "WrapperBase",
    "AugmentReduceBase",
    "GaussianSmoother",
    "PathIntegrator",
]
###############################################################################
###############################################################################
###############################################################################
class WrapperBase(base.AnalyzerBase):
    """Base class for analyzers that wrap another analyzer.

    The wrapper holds no model of its own; it stores the wrapped
    analyzer and forwards :meth:`analyze` calls to it.  Serialization
    round-trips the wrapped analyzer's class name and state.
    """

    def __init__(self, subanalyzer, *args, **kwargs):
        self._subanalyzer = subanalyzer
        # No own model — register the wrapper with ``None`` upstream.
        super(WrapperBase, self).__init__(None, *args, **kwargs)

    def analyze(self, *args, **kwargs):
        # Pure delegation to the wrapped analyzer.
        return self._subanalyzer.analyze(*args, **kwargs)

    def _get_state(self):
        class_name, sub_state = self._subanalyzer.save()
        return {
            "subanalyzer_class_name": class_name,
            "subanalyzer_state": sub_state,
        }

    @classmethod
    def _state_to_kwargs(clazz, state):
        class_name = state.pop("subanalyzer_class_name")
        sub_state = state.pop("subanalyzer_state")
        # All state entries must have been consumed at this point.
        assert len(state) == 0
        return {"subanalyzer": base.AnalyzerBase.load(class_name, sub_state)}
###############################################################################
###############################################################################
###############################################################################
class AugmentReduceBase(WrapperBase):
    """Wrapper that augments the input and reduces the analysis again.

    Each input sample is replicated ``augment_by_n`` times (subclasses may
    perturb the replicas), the wrapped analyzer is applied to every replica,
    and the per-replica analyses are averaged back into one result.
    """
    def __init__(self, subanalyzer, *args, **kwargs):
        # Number of augmented copies created per input sample.
        self._augment_by_n = kwargs.pop("augment_by_n", 2)
        super(AugmentReduceBase, self).__init__(subanalyzer,
                                                *args, **kwargs)
        self._keras_based_augment_reduce = False
        if isinstance(self._subanalyzer, base.AnalyzerNetworkBase):
            # Take the keras analyzer model and
            # add augment and reduce functionality.
            self._keras_based_augment_reduce = True
    def compile_analyzer(self):
        """Rebuild the subanalyzer's keras model with augment/reduce stages.

        Wraps the existing analyzer model so that inputs are replicated
        before and analyses averaged after the original graph; stores the
        new model back on the subanalyzer.
        """
        if not self._keras_based_augment_reduce:
            return
        self._subanalyzer.compile_analyzer()
        if self._subanalyzer._n_debug_output > 0:
            raise Exception("No debug output at subanalyzer is supported.")
        model = self._subanalyzer._analyzer_model
        # The augmented batch is folded back via reshape, so all non-batch
        # dimensions must be fully known at graph-construction time.
        if None in model.input_shape[1:]:
            raise ValueError("The input shape for the model needs "
                             "to be fully specified (except the batch axis). "
                             "Model input shape is: %s" % (model.input_shape,))
        inputs = model.inputs[:self._subanalyzer._n_data_input]
        extra_inputs = model.inputs[self._subanalyzer._n_data_input:]
        # todo: check this, index seems not right.
        outputs = model.outputs[:self._subanalyzer._n_data_input]
        extra_outputs = model.outputs[self._subanalyzer._n_data_input:]
        if len(extra_outputs) > 0:
            raise Exception("No extra output is allowed "
                            "with this wrapper.")
        # Augment -> run original model -> reduce, then repackage as a model.
        new_inputs = iutils.to_list(self._keras_based_augment(inputs))
        tmp = iutils.to_list(model(new_inputs))
        new_outputs = iutils.to_list(self._keras_based_reduce(tmp))
        new_constant_inputs = self._keras_get_constant_inputs()
        new_model = keras.models.Model(
            inputs=inputs+extra_inputs+new_constant_inputs,
            outputs=new_outputs+extra_outputs)
        # Compilation settings are placeholders; the model is only used
        # for prediction by the subanalyzer.
        new_model.compile(optimizer="sgd", loss="mse")
        self._subanalyzer._analyzer_model = new_model
    def analyze(self, X, *args, **kwargs):
        """Analyze ``X`` via the (lazily compiled) augmented keras model.

        Only the keras-based path is supported; the python-based fallback
        below the ``raise`` is dead code kept pending removal.
        """
        if self._keras_based_augment_reduce is True:
            if not hasattr(self._subanalyzer, "_analyzer_model"):
                self.compile_analyzer()
            return self._subanalyzer.analyze(X, *args, **kwargs)
        else:
            # todo: remove python based code.
            # also tests.
            raise DeprecationWarning("Not supported anymore.")
            # NOTE(review): everything below is unreachable (after the
            # raise) and retained only until the todo above is resolved.
            return_list = isinstance(X, list)
            X = self._python_based_augment(iutils.to_list(X))
            ret = self._subanalyzer.analyze(X, *args, **kwargs)
            ret = self._python_based_reduce(ret)
            if return_list is True:
                return ret
            else:
                return ret[0]
    def _python_based_augment(self, X):
        # Replicate every sample augment_by_n times along the batch axis.
        return [np.repeat(x, self._augment_by_n, axis=0) for x in X]
    def _python_based_reduce(self, X):
        # Fold the replicas back into (batch, augment_by_n, ...) and
        # average over the replica axis.
        tmp = [x.reshape((-1, self._augment_by_n)+x.shape[1:]) for x in X]
        tmp = [x.mean(axis=1) for x in tmp]
        return tmp
    def _keras_get_constant_inputs(self):
        # Hook for subclasses that need extra constant model inputs.
        return list()
    def _keras_based_augment(self, X):
        # Graph-level counterpart of _python_based_augment.
        repeat = ilayers.Repeat(self._augment_by_n, axis=0)
        return [repeat(x) for x in iutils.to_list(X)]
    def _keras_based_reduce(self, X):
        # Graph-level counterpart of _python_based_reduce: reshape to
        # (batch, augment_by_n, ...) and take the mean over axis 1.
        X_shape = [K.int_shape(x) for x in iutils.to_list(X)]
        reshape = [ilayers.Reshape((-1, self._augment_by_n)+shape[1:])
                   for shape in X_shape]
        mean = ilayers.Mean(axis=1)
        return [mean(reshape_x(x)) for x, reshape_x in zip(X, reshape)]
    def _get_state(self):
        state = super(AugmentReduceBase, self)._get_state()
        state.update({"augment_by_n": self._augment_by_n})
        return state
    @classmethod
    def _state_to_kwargs(clazz, state):
        augment_by_n = state.pop("augment_by_n")
        kwargs = super(AugmentReduceBase, clazz)._state_to_kwargs(state)
        kwargs.update({"augment_by_n": augment_by_n})
        return kwargs
###############################################################################
###############################################################################
###############################################################################
class GaussianSmoother(AugmentReduceBase):
    """Augment/reduce wrapper that perturbs each replica with Gaussian noise.

    The replicated inputs are shifted by zero-mean Gaussian noise of
    standard deviation ``noise_scale`` before analysis; the averaged
    result is a smoothed analysis (SmoothGrad-style).
    """

    def __init__(self, subanalyzer, *args, **kwargs):
        self._noise_scale = kwargs.pop("noise_scale", 1)
        super(GaussianSmoother, self).__init__(subanalyzer, *args, **kwargs)

    def _python_based_augment(self, X):
        replicas = super(GaussianSmoother, self)._python_based_augment(X)
        # Perturb every replica with zero-mean Gaussian noise.
        return [
            x + np.random.normal(0, self._noise_scale, size=x.shape)
            for x in replicas
        ]

    def _keras_based_augment(self, X):
        replicas = super(GaussianSmoother, self)._keras_based_augment(X)
        # Noise layer that stays active at test/analysis time.
        noise_layer = ilayers.TestPhaseGaussianNoise(stddev=self._noise_scale)
        return [noise_layer(x) for x in replicas]

    def _get_state(self):
        state = super(GaussianSmoother, self)._get_state()
        state["noise_scale"] = self._noise_scale
        return state

    @classmethod
    def _state_to_kwargs(clazz, state):
        noise_scale = state.pop("noise_scale")
        kwargs = super(GaussianSmoother, clazz)._state_to_kwargs(state)
        kwargs["noise_scale"] = noise_scale
        return kwargs
###############################################################################
###############################################################################
###############################################################################
class PathIntegrator(AugmentReduceBase):
    """Augment/reduce wrapper that integrates analyses along an input path.

    Replicas are placed on the straight line from ``reference_inputs``
    towards the actual input (``steps`` points), analyzed, averaged, and
    the average is multiplied by the input difference — the computation
    pattern of Integrated Gradients.
    """
    def __init__(self, subanalyzer, *args, **kwargs):
        # "steps" is exposed to users; internally it maps to augment_by_n.
        steps = kwargs.pop("steps", 16)
        self._reference_inputs = kwargs.pop("reference_inputs", 0)
        self._keras_constant_inputs = None
        super(PathIntegrator, self).__init__(subanalyzer,
                                             *args,
                                             augment_by_n=steps,
                                             **kwargs)
    def _python_based_compute_difference(self, X):
        # Cache reference_inputs - X on self; _python_based_reduce deletes
        # the cache after use, so augment/reduce must run as a pair.
        if getattr(self, "_difference", None) is None:
            reference_inputs = iutils.to_list(self._reference_inputs)
            difference = [ri-x for ri, x in zip(reference_inputs, X)]
            self._difference = difference
        return self._difference
    def _python_based_augment(self, X):
        difference = self._python_based_compute_difference(X)
        tmp = super(PathIntegrator, self)._python_based_augment(X)
        # View the replicated batch as (batch, steps, ...).
        tmp = [x.reshape((-1, self._augment_by_n)+x.shape[1:]) for x in tmp]
        # Make broadcastable.
        difference = [x.reshape((-1, 1)+x.shape[1:]) for x in difference]
        # Interpolation coefficients 0, 1/n, ..., (n-1)/n along the path.
        alpha = (K.cast_to_floatx(np.arange(self._augment_by_n)) /
                 self._augment_by_n)
        # Make broadcastable.
        alpha = [alpha.reshape((1, self._augment_by_n) +
                               tuple(np.ones_like(x.shape[2:])))
                 for x in difference]
        # Compute path steps.
        path_steps = [a * d for a, d in zip(alpha, difference)]
        ret = [x+p for x, p in zip(tmp, path_steps)]
        # Flatten back to a single (batch*steps, ...) batch axis.
        ret = [x.reshape((-1,)+x.shape[2:]) for x in ret]
        return ret
    def _python_based_reduce(self, X):
        tmp = super(PathIntegrator, self)._python_based_reduce(X)
        # todo: make this part nicer!
        difference = self._python_based_compute_difference(X)
        del self._difference
        # Integrated-gradients style: averaged analysis times input delta.
        return [x*d for x, d in zip(tmp, difference)]
    def _keras_set_constant_inputs(self, inputs):
        # Wrap the reference arrays as constant keras input tensors.
        tmp = [K.variable(x) for x in inputs]
        self._keras_constant_inputs = [
            keras.layers.Input(tensor=x, shape=x.shape[1:])
            for x in tmp]
    def _keras_get_constant_inputs(self):
        return self._keras_constant_inputs
    def _keras_based_compute_difference(self, X):
        """Return reference - X as graph tensors, creating the constant
        reference inputs on first call."""
        if self._keras_constant_inputs is None:
            def none_to_one(tmp):
                # Replace unknown (None) dims with 1 so broadcast_to works.
                return [1 if x is None else x for x in tmp]
            if isinstance(self._reference_inputs, list):
                tmp = [np.broadcast_to(ri, none_to_one(K.int_shape(x)))
                       for x, ri in zip(X, self._reference_inputs)]
            else:
                tmp = [np.broadcast_to(self._reference_inputs,
                                        none_to_one(K.int_shape(x)))
                       for x in X]
            self._keras_set_constant_inputs(tmp)
        reference_inputs = self._keras_get_constant_inputs()
        return [keras.layers.Subtract()([ri, x])
                for ri, x in zip(reference_inputs, X)]
    def _keras_based_augment(self, X):
        tmp = super(PathIntegrator, self)._keras_based_augment(X)
        # View the replicated batch as (batch, steps, ...).
        tmp = [ilayers.Reshape((-1, self._augment_by_n)+K.int_shape(x)[1:])(x)
               for x in tmp]
        difference = self._keras_based_compute_difference(X)
        # Stashed for _keras_based_reduce — augment/reduce must run as a pair.
        self._keras_difference = difference
        # Make broadcastable.
        difference = [ilayers.Reshape((-1, 1)+K.int_shape(x)[1:])(x)
                      for x in difference]
        # Compute path steps.
        multiply_with_linspace = ilayers.MultiplyWithLinspace(
            0, 1,
            n=self._augment_by_n,
            axis=1)
        path_steps = [multiply_with_linspace(d) for d in difference]
        ret = [keras.layers.Add()([x, p]) for x, p in zip(tmp, path_steps)]
        # Flatten back to a single (batch*steps, ...) batch axis.
        ret = [ilayers.Reshape((-1,)+K.int_shape(x)[2:])(x) for x in ret]
        return ret
    def _keras_based_reduce(self, X):
        tmp = super(PathIntegrator, self)._keras_based_reduce(X)
        difference = self._keras_difference
        del self._keras_difference
        # Integrated-gradients style: averaged analysis times input delta.
        return [keras.layers.Multiply()([x, d])
                for x, d in zip(tmp, difference)]
    def _get_state(self):
        state = super(PathIntegrator, self)._get_state()
        state.update({"reference_inputs": self._reference_inputs})
        return state
    @classmethod
    def _state_to_kwargs(clazz, state):
        reference_inputs = state.pop("reference_inputs")
        kwargs = super(PathIntegrator, clazz)._state_to_kwargs(state)
        kwargs.update({"reference_inputs": reference_inputs})
        # We use steps instead.
        kwargs.update({"steps": kwargs["augment_by_n"]})
        del kwargs["augment_by_n"]
        return kwargs
| [
"numpy.ones_like",
"numpy.arange",
"numpy.random.normal",
"builtins.zip",
"keras.backend.int_shape",
"keras.backend.variable",
"numpy.repeat"
] | [((5286, 5326), 'numpy.repeat', 'np.repeat', (['x', 'self._augment_by_n'], {'axis': '(0)'}), '(x, self._augment_by_n, axis=0)\n', (5295, 5326), True, 'import numpy as np\n'), ((5794, 5808), 'keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (5805, 5808), True, 'import keras.backend as K\n'), ((10139, 10152), 'keras.backend.variable', 'K.variable', (['x'], {}), '(x)\n', (10149, 10152), True, 'import keras.backend as K\n'), ((6041, 6056), 'builtins.zip', 'zip', (['X', 'reshape'], {}), '(X, reshape)\n', (6044, 6056), False, 'from builtins import range, map, zip, filter\n'), ((7133, 7185), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self._noise_scale'], {'size': 'x.shape'}), '(0, self._noise_scale, size=x.shape)\n', (7149, 7185), True, 'import numpy as np\n'), ((9298, 9327), 'numpy.arange', 'np.arange', (['self._augment_by_n'], {}), '(self._augment_by_n)\n', (9307, 9327), True, 'import numpy as np\n'), ((9628, 9650), 'builtins.zip', 'zip', (['alpha', 'difference'], {}), '(alpha, difference)\n', (9631, 9650), False, 'from builtins import range, map, zip, filter\n'), ((9684, 9704), 'builtins.zip', 'zip', (['tmp', 'path_steps'], {}), '(tmp, path_steps)\n', (9687, 9704), False, 'from builtins import range, map, zip, filter\n'), ((10051, 10071), 'builtins.zip', 'zip', (['tmp', 'difference'], {}), '(tmp, difference)\n', (10054, 10071), False, 'from builtins import range, map, zip, filter\n'), ((11146, 11170), 'builtins.zip', 'zip', (['reference_inputs', 'X'], {}), '(reference_inputs, X)\n', (11149, 11170), False, 'from builtins import range, map, zip, filter\n'), ((11924, 11944), 'builtins.zip', 'zip', (['tmp', 'path_steps'], {}), '(tmp, path_steps)\n', (11927, 11944), False, 'from builtins import range, map, zip, filter\n'), ((12299, 12319), 'builtins.zip', 'zip', (['tmp', 'difference'], {}), '(tmp, difference)\n', (12302, 12319), False, 'from builtins import range, map, zip, filter\n'), ((8810, 8834), 'builtins.zip', 'zip', (['reference_inputs', 
'X'], {}), '(reference_inputs, X)\n', (8813, 8834), False, 'from builtins import range, map, zip, filter\n'), ((9492, 9517), 'numpy.ones_like', 'np.ones_like', (['x.shape[2:]'], {}), '(x.shape[2:])\n', (9504, 9517), True, 'import numpy as np\n'), ((10741, 10771), 'builtins.zip', 'zip', (['X', 'self._reference_inputs'], {}), '(X, self._reference_inputs)\n', (10744, 10771), False, 'from builtins import range, map, zip, filter\n'), ((10688, 10702), 'keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (10699, 10702), True, 'import keras.backend as K\n'), ((10905, 10919), 'keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (10916, 10919), True, 'import keras.backend as K\n'), ((11334, 11348), 'keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (11345, 11348), True, 'import keras.backend as K\n'), ((11568, 11582), 'keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (11579, 11582), True, 'import keras.backend as K\n'), ((11983, 11997), 'keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (11994, 11997), True, 'import keras.backend as K\n')] |
"""Tests for processors utilities."""
import numpy as np
import rimseval.data_io.crd_utils as cu
def test_shot_to_tof_mapper():
    """Map ions per shot to all_tofs array."""
    shots = np.array([0, 0, 3, 5, 0, 7])
    # Each row is the [start, stop) slice of that shot in the flat array.
    expected = np.array([[0, 0], [0, 0], [0, 3], [3, 8], [8, 8], [8, 15]])
    received = cu.shot_to_tof_mapper(shots)
    np.testing.assert_equal(received, expected)
| [
"rimseval.data_io.crd_utils.shot_to_tof_mapper",
"numpy.array",
"numpy.testing.assert_equal"
] | [((199, 227), 'numpy.array', 'np.array', (['[0, 0, 3, 5, 0, 7]'], {}), '([0, 0, 3, 5, 0, 7])\n', (207, 227), True, 'import numpy as np\n'), ((245, 304), 'numpy.array', 'np.array', (['[[0, 0], [0, 0], [0, 3], [3, 8], [8, 8], [8, 15]]'], {}), '([[0, 0], [0, 0], [0, 3], [3, 8], [8, 8], [8, 15]])\n', (253, 304), True, 'import numpy as np\n'), ((322, 358), 'rimseval.data_io.crd_utils.shot_to_tof_mapper', 'cu.shot_to_tof_mapper', (['ions_per_shot'], {}), '(ions_per_shot)\n', (343, 358), True, 'import rimseval.data_io.crd_utils as cu\n'), ((363, 410), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['mapper_rec', 'mapper_exp'], {}), '(mapper_rec, mapper_exp)\n', (386, 410), True, 'import numpy as np\n')] |
from d3m import container
from d3m.container.numpy import ndarray
from d3m.primitive_interfaces import base
from d3m.metadata import base as metadata_base, hyperparams, params
from d3m.primitive_interfaces.base import ProbabilisticCompositionalityMixin
from d3m.primitive_interfaces.base import SamplingCompositionalityMixin
from d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.base import Gradients
from d3m.primitive_interfaces.base import CallResult
# Import config file
from primitives_ubc.config_files import config
import os
import time
import math
import random
import importlib
import numpy as np # type: ignore
import pandas as pd # type: ignore
from sklearn.impute import SimpleImputer # type: ignore
from sklearn.preprocessing import OneHotEncoder # type: ignore
from typing import Any, cast, Dict, List, Union, Sequence, Optional, Tuple
__all__ = ('LogisticRegressionPrimitive',)
Inputs = container.DataFrame
Outputs = container.DataFrame
class ImportModules:
"""
Import heavy modules after calling the primitive as not to slow down d3m.index
"""
_weight_files = []
def __init__(self):
self._initialized = False
def _import_lib(self):
if self._initialized:
return
global theano, pm, MultiTrace
global Model, Normal, Bernoulli, NUTS
global invlogit, sample, sample_ppc, find_MAP
theano = importlib.import_module('theano')
pm = importlib.import_module('pymc3')
Model = importlib.import_module('pymc3', 'Model')
Normal = importlib.import_module('pymc3', 'Normal')
Bernoulli = importlib.import_module('pymc3', 'Bernoulli')
NUTS = importlib.import_module('pymc3', 'NUTS')
invlogit = importlib.import_module('pymc3', 'invlogit')
sample = importlib.import_module('pymc3', 'sample')
sample_ppc = importlib.import_module('pymc3', 'sample_ppc')
find_MAP = importlib.import_module('pymc3', 'find_MAP')
MultiTrace = importlib.import_module('pymc3.backends.base', 'MultiTrace')
self._initialized = True
class Params(params.Params):
weights: Optional[Any]
_categories: Optional[Any]
target_names_: Optional[List[str]]
class Hyperparams(hyperparams.Hyperparams):
burnin = hyperparams.Hyperparameter[int](
default=1000,
description='The number of samples to take before storing them',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
)
mu = hyperparams.Hyperparameter[float](
default=0.0,
description='Mean of Gaussian prior on weights',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
)
sd = hyperparams.Hyperparameter[float](
default=1.0,
description='Standard deviation of Gaussian prior on weights',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
)
num_iterations = hyperparams.Hyperparameter[int](
default=1000,
description="Number of iterations to sample the model.",
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
)
class LogisticRegressionPrimitive(ProbabilisticCompositionalityMixin[Inputs, Outputs, Params, Hyperparams],
SamplingCompositionalityMixin[Inputs, Outputs, Params, Hyperparams],
SupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams],
ImportModules):
"""
-------------
Inputs: DataFrame of features of shape: NxM, where N = samples and M = features.
Outputs: DataFrame containing the target column of shape Nx1 or denormalized dataset.
-------------
"""
# Metadata
__author__ = 'UBC DARPA D3M Team, <NAME> <<EMAIL>>'
metadata = metadata_base.PrimitiveMetadata({
"id": "8a992784-3b41-4915-bb47-3ff00e51e60f",
"version": config.VERSION,
"name": "Bayesian Logistic Regression",
"description": "A bayesian approach to Logistic Regression",
"python_path": "d3m.primitives.classification.logistic_regression.UBC",
"primitive_family": metadata_base.PrimitiveFamily.CLASSIFICATION,
"algorithm_types": [metadata_base.PrimitiveAlgorithmType.LOGISTIC_REGRESSION,],
"source": {
"name": config.D3M_PERFORMER_TEAM,
"contact": config.D3M_CONTACT,
"uris": [config.REPOSITORY],
},
"keywords": ['bayesian', 'binary classification'],
"installation": [config.INSTALLATION],
})
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0, _verbose: int = 0) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed)
self.hyperparams = hyperparams
self._random_state = random_seed
self._verbose = _verbose
self._training_inputs: Inputs = None
self._training_outputs: Outputs = None
# Intialize ImportModules
ImportModules.__init__(self)
# Import other needed modules
ImportModules._import_lib(self)
# Set parameters
self._mu = self.hyperparams['mu']
self._sd = self.hyperparams['sd']
self._burnin = self.hyperparams['burnin']
self._trace = None # type: MultiTrace
self._model = None # type: Model
# Is the model fit on data
self._fitted = False
def _curate_data(self, training_inputs, training_outputs, get_labels):
# if self._training_inputs is None or self._training_outputs is None:
if training_inputs is None:
raise ValueError("Missing data.")
# Get training data and labels data
try:
feature_columns_1 = training_inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/Attribute')
except:
feature_columns_1 = None
try:
feature_columns_2 = training_inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/FileName')
except:
feature_columns_2 = None
# Remove columns if outputs present in inputs
if len(feature_columns_2) >= 1:
for fc_2 in feature_columns_2:
try:
feature_columns_1.remove(fc_2)
except ValueError:
pass
# Get labels data if present in training input
try:
label_columns = training_inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/TrueTarget')
except:
label_columns = training_inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/SuggestedTarget')
# If no error but no label-columns found, force try SuggestedTarget
if len(label_columns) == 0 or label_columns == None:
label_columns = training_inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/SuggestedTarget')
# Remove columns if outputs present in inputs
if len(label_columns) >= 1:
for lbl_c in label_columns:
try:
feature_columns_1.remove(lbl_c)
except ValueError:
pass
# Training Set
feature_columns_1 = [int(fc) for fc in feature_columns_1]
try:
new_XTrain = training_inputs.iloc[:, feature_columns_1]
new_XTrain = self._to_numeric_and_fill_missing_vals(new_XTrain)
new_XTrain = (new_XTrain.to_numpy()).astype(np.float)
except ValueError:
# Most likely Numpy ndarray series
XTrain = training_inputs.iloc[:, feature_columns_1]
XTrain_shape = XTrain.shape[0]
XTrain = ((XTrain.iloc[:, -1]).to_numpy())
# Unpack
new_XTrain = []
for arr in range(XTrain_shape):
new_XTrain.append(XTrain[arr])
new_XTrain = np.array(new_XTrain)
# del to save memory
del XTrain
# Training labels
if get_labels:
if training_outputs is None:
raise ValueError("Missing data.")
# Get label column names
label_name_columns = []
label_name_columns_ = list(training_outputs.columns)
for lbl_c in label_columns:
label_name_columns.append(label_name_columns_[lbl_c])
self.label_name_columns = label_name_columns
# Get labelled dataset
try:
label_columns = training_outputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/TrueTarget')
except ValueError:
label_columns = training_outputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/SuggestedTarget')
# If no error but no label-columns force try SuggestedTarget
if len(label_columns) == 0 or label_columns == None:
label_columns = training_outputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/SuggestedTarget')
YTrain = (training_outputs.iloc[:, label_columns])
YTrain, _categories = self._to_numeric_and_fill_missing_vals(YTrain, return_categories=True)
YTrain = (YTrain.to_numpy()).astype(np.int)
self._categories = _categories
return new_XTrain, YTrain, feature_columns_1
return new_XTrain, feature_columns_1
def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:
inputs, outputs, _ = self._curate_data(training_inputs=inputs, training_outputs=outputs, get_labels=True)
self._training_inputs = inputs
self._training_outputs = outputs
self._new_training_data = True
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
"""
Returns the MAP estimate of p(y | x = inputs; w)
inputs: (num_inputs, D) numpy array
outputs : numpy array of dimension (num_inputs)
"""
if not self._fitted:
raise Exception('Please fit the model before calling produce!')
# Curate data
XTest, feature_columns = self._curate_data(training_inputs=inputs, training_outputs=None, get_labels=False)
w = self._trace['weights']
w = np.squeeze(w, axis=2)
predictions = (np.einsum('kj,ij->i', w, XTest) > 0).astype(int)
predictions_binary = np.eye(2)[predictions]
# Inverse map categories
predictions = self._categories.inverse_transform(predictions_binary)
# Convert from ndarray from DataFrame
predictions = container.DataFrame(predictions, generate_metadata=True)
# Delete columns with path names of nested media files
outputs = inputs.remove_columns(feature_columns)
# Update Metadata for each feature vector column
for col in range(predictions.shape[1]):
col_dict = dict(predictions.metadata.query((metadata_base.ALL_ELEMENTS, col)))
col_dict['structural_type'] = type(1.0)
col_dict['name'] = self.label_name_columns[col]
col_dict["semantic_types"] = ("http://schema.org/Float", "https://metadata.datadrivendiscovery.org/types/PredictedTarget",)
predictions.metadata = predictions.metadata.update((metadata_base.ALL_ELEMENTS, col), col_dict)
# Rename Columns to match label columns
predictions.columns = self.label_name_columns
# Append to outputs
outputs = outputs.append_columns(predictions)
return base.CallResult(outputs)
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult:
"""
Sample a Bayesian Logistic Regression model using NUTS to find
some reasonable weights.
"""
if self._fitted:
return base.CallResult(None)
if self._training_inputs is None or self._training_outputs is None:
raise ValueError("Missing training data.")
# make sure that all training outputs are either 0 or 1
if not ((self._training_outputs == 0) | (self._training_outputs == 1)).all():
raise ValueError("training outputs must be either 0 or 1")
# Set all files
if iterations == None:
_iterations = self.hyperparams['num_iterations']
else:
_iterations = iterations
# training data needs to be a Theano shared variable for
# the later produce code to work
_, n_features = self._training_inputs.shape
self._training_inputs = theano.shared(self._training_inputs)
self._training_outputs = theano.shared(self._training_outputs)
# As the model depends on number of features it has to be here
# and not in __init__
with Model.Model() as model:
weights = Normal.Normal('weights', mu=self._mu, sd=self._sd, shape=(n_features, 1))
p = invlogit.invlogit(pm.math.dot(self._training_inputs, weights))
Bernoulli.Bernoulli('y', p, observed=self._training_outputs)
trace = sample.sample(_iterations,
random_seed=self.random_seed,
trace=self._trace,
tune=self._burnin, progressbar=False)
self._trace = trace
self._model = model
self._fitted = True
return CallResult(None)
def sample(self, *, inputs: Inputs, num_samples: int = 1, timeout: float = None, iterations: int = None) -> CallResult[Sequence[Outputs]]:
# Set shared variables to test data, outputs just need to be the correct shape
self._training_inputs.set_value(inputs)
self._training_outputs.set_value(np.random.binomial(1, 0.5, inputs.shape[0]))
with self._model:
post_pred = sample_ppc.sample_ppc(self._trace,
samples=num_samples,
progressbar=False)
return CallResult(post_pred['y'].astype(int))
def _to_numeric_and_fill_missing_vals(self, dataframe, return_categories=False):
# Missing values
imp1 = SimpleImputer(missing_values='', strategy="most_frequent")
imp2 = SimpleImputer(missing_values=np.nan, strategy="most_frequent")
# Encoder
enc = OneHotEncoder(handle_unknown='ignore')
i = 0
all_types = []
for col in dataframe.columns:
try:
dataframe[[col]] = imp1.fit_transform(dataframe[[col]])
dataframe[[col]] = imp2.fit_transform(dataframe[[col]])
except ValueError:
# Assuimg empty column and replace nan with 0
dataframe[col].fillna(0, inplace=True)
try:
dataframe[col] = pd.to_numeric(dataframe[col])
except ValueError:
# Assuming string
dataframe[col] = np.argmax(enc.fit_transform(dataframe[[col]]).toarray(), axis=1)
# Replace nan with 0
dataframe[col].fillna(0, inplace=True)
i += 1
if return_categories:
return dataframe, enc
return dataframe
def _log_likelihood(self, *, input: Inputs, output: Outputs) -> float:
"""
Provides a likelihood of one output given the inputs and weights
L_(y | x; w) = log(p(y | x; w)) = log(p) if y = 0 else log(1 - p)
where: p = invl(w^T * x)
invl(x) = exp(x) / 1 + exp(x)
"""
logp = self._model.logp
weights = self._trace["weights"]
self._training_inputs.set_value(input)
self._training_outputs.set_value(output)
return float(np.array([logp(dict(y=output, weights=w)) for w in weights]).mean())
def log_likelihoods(self, *, outputs: Outputs, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Sequence[float]]:
"""
Provides a likelihood of the data given the weights
"""
return CallResult(np.array([self._log_likelihood(input=[input], output=[output]) for input, output in zip(inputs, outputs)]))
def get_params(self) -> Params:
w = self._trace['weights']
return Params(weights=w,\
_categories=self._categories,\
target_names_=self.label_name_columns)
def set_params(self, *, params: Params) -> None:
self._trace = {}
self._trace['weights'] = params["weights"]
self._categories = params["_categories"]
self.label_name_columns = params["target_names_"]
self._fitted = True
def __getstate__(self) -> dict:
state = super().__getstate__()
state['random_state'] = self._random_state
return state
def __setstate__(self, state: dict) -> None:
super().__setstate__(state)
self._random_state = state['random_state']
| [
"sklearn.impute.SimpleImputer",
"numpy.random.binomial",
"importlib.import_module",
"sklearn.preprocessing.OneHotEncoder",
"numpy.einsum",
"d3m.metadata.base.PrimitiveMetadata",
"d3m.container.DataFrame",
"d3m.primitive_interfaces.base.CallResult",
"numpy.array",
"numpy.squeeze",
"numpy.eye",
... | [((3975, 4638), 'd3m.metadata.base.PrimitiveMetadata', 'metadata_base.PrimitiveMetadata', (["{'id': '8a992784-3b41-4915-bb47-3ff00e51e60f', 'version': config.VERSION,\n 'name': 'Bayesian Logistic Regression', 'description':\n 'A bayesian approach to Logistic Regression', 'python_path':\n 'd3m.primitives.classification.logistic_regression.UBC',\n 'primitive_family': metadata_base.PrimitiveFamily.CLASSIFICATION,\n 'algorithm_types': [metadata_base.PrimitiveAlgorithmType.\n LOGISTIC_REGRESSION], 'source': {'name': config.D3M_PERFORMER_TEAM,\n 'contact': config.D3M_CONTACT, 'uris': [config.REPOSITORY]}, 'keywords':\n ['bayesian', 'binary classification'], 'installation': [config.\n INSTALLATION]}"], {}), "({'id':\n '8a992784-3b41-4915-bb47-3ff00e51e60f', 'version': config.VERSION,\n 'name': 'Bayesian Logistic Regression', 'description':\n 'A bayesian approach to Logistic Regression', 'python_path':\n 'd3m.primitives.classification.logistic_regression.UBC',\n 'primitive_family': metadata_base.PrimitiveFamily.CLASSIFICATION,\n 'algorithm_types': [metadata_base.PrimitiveAlgorithmType.\n LOGISTIC_REGRESSION], 'source': {'name': config.D3M_PERFORMER_TEAM,\n 'contact': config.D3M_CONTACT, 'uris': [config.REPOSITORY]}, 'keywords':\n ['bayesian', 'binary classification'], 'installation': [config.\n INSTALLATION]})\n", (4006, 4638), True, 'from d3m.metadata import base as metadata_base, hyperparams, params\n'), ((1465, 1498), 'importlib.import_module', 'importlib.import_module', (['"""theano"""'], {}), "('theano')\n", (1488, 1498), False, 'import importlib\n'), ((1520, 1552), 'importlib.import_module', 'importlib.import_module', (['"""pymc3"""'], {}), "('pymc3')\n", (1543, 1552), False, 'import importlib\n'), ((1574, 1615), 'importlib.import_module', 'importlib.import_module', (['"""pymc3"""', '"""Model"""'], {}), "('pymc3', 'Model')\n", (1597, 1615), False, 'import importlib\n'), ((1637, 1679), 'importlib.import_module', 'importlib.import_module', (['"""pymc3"""', 
'"""Normal"""'], {}), "('pymc3', 'Normal')\n", (1660, 1679), False, 'import importlib\n'), ((1701, 1746), 'importlib.import_module', 'importlib.import_module', (['"""pymc3"""', '"""Bernoulli"""'], {}), "('pymc3', 'Bernoulli')\n", (1724, 1746), False, 'import importlib\n'), ((1768, 1808), 'importlib.import_module', 'importlib.import_module', (['"""pymc3"""', '"""NUTS"""'], {}), "('pymc3', 'NUTS')\n", (1791, 1808), False, 'import importlib\n'), ((1830, 1874), 'importlib.import_module', 'importlib.import_module', (['"""pymc3"""', '"""invlogit"""'], {}), "('pymc3', 'invlogit')\n", (1853, 1874), False, 'import importlib\n'), ((1896, 1938), 'importlib.import_module', 'importlib.import_module', (['"""pymc3"""', '"""sample"""'], {}), "('pymc3', 'sample')\n", (1919, 1938), False, 'import importlib\n'), ((1960, 2006), 'importlib.import_module', 'importlib.import_module', (['"""pymc3"""', '"""sample_ppc"""'], {}), "('pymc3', 'sample_ppc')\n", (1983, 2006), False, 'import importlib\n'), ((2028, 2072), 'importlib.import_module', 'importlib.import_module', (['"""pymc3"""', '"""find_MAP"""'], {}), "('pymc3', 'find_MAP')\n", (2051, 2072), False, 'import importlib\n'), ((2094, 2154), 'importlib.import_module', 'importlib.import_module', (['"""pymc3.backends.base"""', '"""MultiTrace"""'], {}), "('pymc3.backends.base', 'MultiTrace')\n", (2117, 2154), False, 'import importlib\n'), ((10703, 10724), 'numpy.squeeze', 'np.squeeze', (['w'], {'axis': '(2)'}), '(w, axis=2)\n', (10713, 10724), True, 'import numpy as np\n'), ((11030, 11086), 'd3m.container.DataFrame', 'container.DataFrame', (['predictions'], {'generate_metadata': '(True)'}), '(predictions, generate_metadata=True)\n', (11049, 11086), False, 'from d3m import container\n'), ((11981, 12005), 'd3m.primitive_interfaces.base.CallResult', 'base.CallResult', (['outputs'], {}), '(outputs)\n', (11996, 12005), False, 'from d3m.primitive_interfaces import base\n'), ((13832, 13848), 'd3m.primitive_interfaces.base.CallResult', 'CallResult', 
(['None'], {}), '(None)\n', (13842, 13848), False, 'from d3m.primitive_interfaces.base import CallResult\n'), ((14615, 14673), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': '""""""', 'strategy': '"""most_frequent"""'}), "(missing_values='', strategy='most_frequent')\n", (14628, 14673), False, 'from sklearn.impute import SimpleImputer\n'), ((14689, 14751), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""most_frequent"""'}), "(missing_values=np.nan, strategy='most_frequent')\n", (14702, 14751), False, 'from sklearn.impute import SimpleImputer\n'), ((14785, 14823), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (14798, 14823), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((10827, 10836), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (10833, 10836), True, 'import numpy as np\n'), ((12263, 12284), 'd3m.primitive_interfaces.base.CallResult', 'base.CallResult', (['None'], {}), '(None)\n', (12278, 12284), False, 'from d3m.primitive_interfaces import base\n'), ((14170, 14213), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)', 'inputs.shape[0]'], {}), '(1, 0.5, inputs.shape[0])\n', (14188, 14213), True, 'import numpy as np\n'), ((8216, 8236), 'numpy.array', 'np.array', (['new_XTrain'], {}), '(new_XTrain)\n', (8224, 8236), True, 'import numpy as np\n'), ((15259, 15288), 'pandas.to_numeric', 'pd.to_numeric', (['dataframe[col]'], {}), '(dataframe[col])\n', (15272, 15288), True, 'import pandas as pd\n'), ((10749, 10780), 'numpy.einsum', 'np.einsum', (['"""kj,ij->i"""', 'w', 'XTest'], {}), "('kj,ij->i', w, XTest)\n", (10758, 10780), True, 'import numpy as np\n')] |
import time
from rectgle import Rectgle
from terrain import Terrain
from queue import PriorityQueue
import numpy as np
import mayavi.mlab as ma
import matplotlib.pyplot as plt
from math import tan, sqrt
from calculateur import Astar, Calculateur, Dijkstra
RACINEDEDEUX = sqrt(2)
class Traces():
def __init__(self, terrain: Terrain, points: list, largeurs: list = []):
""" Trace attachée à terrain
points est une liste de (x,y) d'au moins 2 points (départ arrivée)
largeurs est une liste de floats largeurs de longueur len(points)-1 qui représente
la zone d'évolution autour de la droite (points[i] points[i+1]) soit
les 2 droites parallèles à -largeur[i]/2 et + largeur[i]/2
Pour définir la trace à partir de rectangles utiliser set_rects
"""
self.terrain = terrain
self.points = points
largeurmax = RACINEDEDEUX * \
max(self.terrain.ymax-self.terrain.ymin,
self.terrain.xmax-self.terrain.xmin)
if largeurs == []:
self.largeurs = [largeurmax for i in range(
len(self.points)-1)] # par défaut tout le terrain
else:
self.largeurs = largeurs
assert len(self.largeurs) == len(self.points)-1
self.generate_rects()
def set_rects(self, rectangles):
self.rects: list[Rectgle] = rectangles
def generate_rects(self):
"""calcule les rectangles d'évolution de la trace
il y en a len(self.points) -1 = len(largeurs)
le self.rects[i] a la largeur self.largeur[i] et contient self.points[i] et self.points[i+1]
le rectangle est donné par 3 de ses points A,B,D. C (non donné est tel que vec AC=vec AB+vec AD)
on rajoute une marge en longueur de self.cellsize pour avoir l'arrivée tjs dans le rectangle
même après approximation
"""
self.rects = []
for i in range(len(self.points)-1):
self.rects.append(Rectgle(
self.points[i], self.points[i+1], self.largeurs[i], self.terrain.cellsize))
def ijisinrect(self, i, j, rect: Rectgle):
"""Retourne True si le points i,j est dans rect défini par les coordonnées de A,B et D
False sinon"""
x, y = self.terrain.ijtoxy(i, j)
return rect.contains(x, y)
def calculate_trace(self, methode='Dijkstra'):
#Dans l'idée: calculer le chemin entre chaque points de la liste self.points
#sous traiter ce calcul à une une calsse de calculateurs avec des héritages
#genre class Calculateur(): (il faudra lui fournir le terrain pour les conversions)
#puis class Djikstra(Calculateur)
# et dans le calculateur une méthode .generate_path qui renvoie un chemin
# dans le calculateur, il faudrait tenir compte des largeurs avec une méthode qui
# élimine les points hors de ces largeurs (genr en les mettant float('inf') de le terrain
args = 0, 0, 0, 0
# dictionnaire avec les calculateurs
calcdict = {'Dijkstra': Dijkstra, 'Astar': Astar, 'A*': Astar}
if methode not in calcdict:
print('méthode de calcul non supportée méthode supportées:\n')
print(calcdict.keys())
return
print('Calcul de la Trace')
t1 = time.perf_counter()
for i in range(len(self.points)-1):
depart = self.points[i]
arrivee = self.points[i+1]
rectangle = self.rects[i]
terrain = self.terrain
args = depart, arrivee, rectangle, terrain
calculateur = calcdict[methode](*args)
x, y, z = calculateur.calculate_path()
if i == 0:
self.tracexyz = x, y, z
else:
debuttrace = self.tracexyz
oldx, oldy, oldz = debuttrace
self.tracexyz = np.concatenate((oldx, x)),\
np.concatenate((oldy, y)),\
np.concatenate((oldz, z))
t2 = time.perf_counter()
print('Calcul fini en {:.2f}s'.format(t2-t1))
def plot3D(self, figure, Zfactor):
"""affiche la trace sur la figure (mayaplot 3D)
TODO faire un affichage des rectangles:
-Avec une surface calculée à plot avec de l'alpha (lent ?)
-Faire un cadre avec des lignes plot3d
------
| | de alt 0 à 5000 ou zmin à zmax
------
ou
------
|////| de alt 0 à 5000 ou zmin à zmax
------
"""
x, y, z = self.tracexyz
zplot = Zfactor*z.copy()
ma.figure(figure)
ma.plot3d(x, y, zplot, tube_radius=1, color=(1, 0, 1), figure=figure)
ma.text3d(x[0], y[0], zplot[0]+Zfactor*10, 'Depart',
figure=figure, scale=(20, 20, 20), color=(1, 0, 0))
fin = len(x)-1
ma.text3d(x[fin], y[fin], zplot[fin]+Zfactor*10, 'Arrivee',
figure=figure, scale=(20, 20, 20), color=(1, 0, 0))
ma.show()
def plot2D(self, axes):
"""affiche la trace sur la figure (2D)"""
x, y, z = self.tracexyz
axes.plot(x, y, color=(1, 0, 1))
# plt.show()
| [
"mayavi.mlab.text3d",
"mayavi.mlab.figure",
"math.sqrt",
"mayavi.mlab.show",
"time.perf_counter",
"rectgle.Rectgle",
"mayavi.mlab.plot3d",
"numpy.concatenate"
] | [((282, 289), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (286, 289), False, 'from math import tan, sqrt\n'), ((3390, 3409), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3407, 3409), False, 'import time\n'), ((4113, 4132), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4130, 4132), False, 'import time\n'), ((4733, 4750), 'mayavi.mlab.figure', 'ma.figure', (['figure'], {}), '(figure)\n', (4742, 4750), True, 'import mayavi.mlab as ma\n'), ((4760, 4829), 'mayavi.mlab.plot3d', 'ma.plot3d', (['x', 'y', 'zplot'], {'tube_radius': '(1)', 'color': '(1, 0, 1)', 'figure': 'figure'}), '(x, y, zplot, tube_radius=1, color=(1, 0, 1), figure=figure)\n', (4769, 4829), True, 'import mayavi.mlab as ma\n'), ((4839, 4951), 'mayavi.mlab.text3d', 'ma.text3d', (['x[0]', 'y[0]', '(zplot[0] + Zfactor * 10)', '"""Depart"""'], {'figure': 'figure', 'scale': '(20, 20, 20)', 'color': '(1, 0, 0)'}), "(x[0], y[0], zplot[0] + Zfactor * 10, 'Depart', figure=figure,\n scale=(20, 20, 20), color=(1, 0, 0))\n", (4848, 4951), True, 'import mayavi.mlab as ma\n'), ((4996, 5116), 'mayavi.mlab.text3d', 'ma.text3d', (['x[fin]', 'y[fin]', '(zplot[fin] + Zfactor * 10)', '"""Arrivee"""'], {'figure': 'figure', 'scale': '(20, 20, 20)', 'color': '(1, 0, 0)'}), "(x[fin], y[fin], zplot[fin] + Zfactor * 10, 'Arrivee', figure=\n figure, scale=(20, 20, 20), color=(1, 0, 0))\n", (5005, 5116), True, 'import mayavi.mlab as ma\n'), ((5136, 5145), 'mayavi.mlab.show', 'ma.show', ([], {}), '()\n', (5143, 5145), True, 'import mayavi.mlab as ma\n'), ((2039, 2128), 'rectgle.Rectgle', 'Rectgle', (['self.points[i]', 'self.points[i + 1]', 'self.largeurs[i]', 'self.terrain.cellsize'], {}), '(self.points[i], self.points[i + 1], self.largeurs[i], self.terrain.\n cellsize)\n', (2046, 2128), False, 'from rectgle import Rectgle\n'), ((3975, 4000), 'numpy.concatenate', 'np.concatenate', (['(oldx, x)'], {}), '((oldx, x))\n', (3989, 4000), True, 'import numpy as np\n'), ((4024, 4049), 'numpy.concatenate', 
'np.concatenate', (['(oldy, y)'], {}), '((oldy, y))\n', (4038, 4049), True, 'import numpy as np\n'), ((4073, 4098), 'numpy.concatenate', 'np.concatenate', (['(oldz, z)'], {}), '((oldz, z))\n', (4087, 4098), True, 'import numpy as np\n')] |
import gzip
import pickle as pkl
import numpy as np
from keras.models import Model
from keras.layers import Dense, Input
from keras.layers import Dropout, GaussianNoise
from keras.layers import Embedding, concatenate
from keras.layers import Convolution1D, GlobalMaxPooling1D #, MaxPooling1D
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
import scikitplot as skplt
import matplotlib.pyplot as plt
# Model Hyperparameters
HYPERPARAMETERS = {
'filter_sizes': 3,
'num_filters': 100,
'max_num_words_in_vocabulary': 20000,
'position_dims': 50
}
# Training parameters
TRAINING_PARAMETERS = {
'batch_size': 64,
'num_epochs': 1,
'dropout_rate': 0,
'std_noise': 0
}
RESULTS = []
def extract_gzip_data(infile):
with gzip.open(infile, 'rb') as file:
data = pkl.load(file)
file.close()
return data
def extract_matrices(infile):
data = extract_gzip_data(infile)
return data['train'], data['test']
def prepare_model(max_sequence_length, embedding_matrix, max_position, n_out):
words_input = Input(shape=(max_sequence_length,), dtype='int32', name='words_input')
words = Embedding(embedding_matrix.shape[0], embedding_matrix.shape[1],
weights=[embedding_matrix], trainable=False)(words_input)
distance1_input = Input(shape=(max_sequence_length,), dtype='int32', name='distance1_input')
distance1 = Embedding(max_position, HYPERPARAMETERS['position_dims'])(distance1_input)
distance2_input = Input(shape=(max_sequence_length,), dtype='int32', name='distance2_input')
distance2 = Embedding(max_position, HYPERPARAMETERS['position_dims'])(distance2_input)
output = concatenate([words, distance1, distance2])
output = GaussianNoise(TRAINING_PARAMETERS['std_noise'])(output)
output = Convolution1D(filters=HYPERPARAMETERS['num_filters'],
kernel_size=HYPERPARAMETERS['filter_sizes'],
padding='valid',
activation='relu',
strides=1)(output)
output = GlobalMaxPooling1D()(output)
output = Dropout(TRAINING_PARAMETERS['dropout_rate'])(output)
output = Dense(n_out, activation='softmax')(output)
model = Model(inputs=[words_input, distance1_input, distance2_input], outputs=[output])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model
def train_model(model, train_data):
j = 1; k = 10; cvscores = []
skf = StratifiedKFold(n_splits=k, shuffle=True, random_state=1337)
sentence_train, yLabel_train, positionMatrix1_train, positionMatrix2_train = train_data
Y = np.argmax(yLabel_train, axis=-1)
for train, test in skf.split(sentence_train, Y):
# Fit the model
RESULTS.append(f'train {k}: {j} / {k}')
model.fit([sentence_train[train],
positionMatrix1_train[train],
positionMatrix2_train[train]],
yLabel_train[train],
epochs=TRAINING_PARAMETERS['num_epochs'],
batch_size=TRAINING_PARAMETERS['batch_size'],
verbose=1)
# evaluate the model
scores = model.evaluate([sentence_train[test], positionMatrix1_train[test], positionMatrix2_train[test]],
yLabel_train[test], verbose=0)
RESULTS.append(f'val_acc: {scores[1] * 100}%') # model.metrics_names[1]
cvscores.append(scores[1] * 100)
j = j + 1
RESULTS.append(f'{np.mean(cvscores)}% (+/-{np.std(cvscores)})')
def main(join_data_file, embeddings_file):
train_data, test_data = extract_matrices(join_data_file)
sentence_train, yLabel_train, positionMatrix1_train, positionMatrix2_train = train_data
sentence_test, yLabel_test, positionMatrix1_test, positionMatrix2_test = test_data
max_position = max(np.max(positionMatrix1_train), np.max(positionMatrix2_train)) + 1
n_out = yLabel_train.shape[1]
max_sequence_length = sentence_train.shape[1]
embedding_matrix = np.load(open(embeddings_file, 'rb'))
# prepare and test model
model = prepare_model(max_sequence_length, embedding_matrix, max_position, n_out)
train_model(model, train_data)
# test model
predicted_yLabel = model.predict(
[sentence_test, positionMatrix1_test, positionMatrix2_test],
batch_size=None,
verbose=0,
steps=None
)
predicted_yLabel = np.argmax(predicted_yLabel, axis=-1)
yLabel_test = np.argmax(yLabel_test, axis=-1)
print(f'{yLabel_test}, {predicted_yLabel}')
RESULTS.append(f'Classification report: \n {classification_report(yLabel_test, predicted_yLabel)}')
return '\n'.join(RESULTS)
| [
"gzip.open",
"keras.layers.Convolution1D",
"numpy.argmax",
"numpy.std",
"keras.layers.Dropout",
"keras.models.Model",
"sklearn.metrics.classification_report",
"numpy.max",
"pickle.load",
"sklearn.model_selection.StratifiedKFold",
"keras.layers.Embedding",
"keras.layers.Dense",
"numpy.mean",
... | [((1112, 1182), 'keras.layers.Input', 'Input', ([], {'shape': '(max_sequence_length,)', 'dtype': '"""int32"""', 'name': '"""words_input"""'}), "(shape=(max_sequence_length,), dtype='int32', name='words_input')\n", (1117, 1182), False, 'from keras.layers import Dense, Input\n'), ((1362, 1436), 'keras.layers.Input', 'Input', ([], {'shape': '(max_sequence_length,)', 'dtype': '"""int32"""', 'name': '"""distance1_input"""'}), "(shape=(max_sequence_length,), dtype='int32', name='distance1_input')\n", (1367, 1436), False, 'from keras.layers import Dense, Input\n'), ((1551, 1625), 'keras.layers.Input', 'Input', ([], {'shape': '(max_sequence_length,)', 'dtype': '"""int32"""', 'name': '"""distance2_input"""'}), "(shape=(max_sequence_length,), dtype='int32', name='distance2_input')\n", (1556, 1625), False, 'from keras.layers import Dense, Input\n'), ((1731, 1773), 'keras.layers.concatenate', 'concatenate', (['[words, distance1, distance2]'], {}), '([words, distance1, distance2])\n', (1742, 1773), False, 'from keras.layers import Embedding, concatenate\n'), ((2295, 2374), 'keras.models.Model', 'Model', ([], {'inputs': '[words_input, distance1_input, distance2_input]', 'outputs': '[output]'}), '(inputs=[words_input, distance1_input, distance2_input], outputs=[output])\n', (2300, 2374), False, 'from keras.models import Model\n'), ((2564, 2624), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k', 'shuffle': '(True)', 'random_state': '(1337)'}), '(n_splits=k, shuffle=True, random_state=1337)\n', (2579, 2624), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((2725, 2757), 'numpy.argmax', 'np.argmax', (['yLabel_train'], {'axis': '(-1)'}), '(yLabel_train, axis=-1)\n', (2734, 2757), True, 'import numpy as np\n'), ((4519, 4555), 'numpy.argmax', 'np.argmax', (['predicted_yLabel'], {'axis': '(-1)'}), '(predicted_yLabel, axis=-1)\n', (4528, 4555), True, 'import numpy as np\n'), ((4574, 4605), 'numpy.argmax', 'np.argmax', 
(['yLabel_test'], {'axis': '(-1)'}), '(yLabel_test, axis=-1)\n', (4583, 4605), True, 'import numpy as np\n'), ((805, 828), 'gzip.open', 'gzip.open', (['infile', '"""rb"""'], {}), "(infile, 'rb')\n", (814, 828), False, 'import gzip\n'), ((853, 867), 'pickle.load', 'pkl.load', (['file'], {}), '(file)\n', (861, 867), True, 'import pickle as pkl\n'), ((1195, 1308), 'keras.layers.Embedding', 'Embedding', (['embedding_matrix.shape[0]', 'embedding_matrix.shape[1]'], {'weights': '[embedding_matrix]', 'trainable': '(False)'}), '(embedding_matrix.shape[0], embedding_matrix.shape[1], weights=[\n embedding_matrix], trainable=False)\n', (1204, 1308), False, 'from keras.layers import Embedding, concatenate\n'), ((1453, 1510), 'keras.layers.Embedding', 'Embedding', (['max_position', "HYPERPARAMETERS['position_dims']"], {}), "(max_position, HYPERPARAMETERS['position_dims'])\n", (1462, 1510), False, 'from keras.layers import Embedding, concatenate\n'), ((1642, 1699), 'keras.layers.Embedding', 'Embedding', (['max_position', "HYPERPARAMETERS['position_dims']"], {}), "(max_position, HYPERPARAMETERS['position_dims'])\n", (1651, 1699), False, 'from keras.layers import Embedding, concatenate\n'), ((1787, 1834), 'keras.layers.GaussianNoise', 'GaussianNoise', (["TRAINING_PARAMETERS['std_noise']"], {}), "(TRAINING_PARAMETERS['std_noise'])\n", (1800, 1834), False, 'from keras.layers import Dropout, GaussianNoise\n'), ((1856, 2010), 'keras.layers.Convolution1D', 'Convolution1D', ([], {'filters': "HYPERPARAMETERS['num_filters']", 'kernel_size': "HYPERPARAMETERS['filter_sizes']", 'padding': '"""valid"""', 'activation': '"""relu"""', 'strides': '(1)'}), "(filters=HYPERPARAMETERS['num_filters'], kernel_size=\n HYPERPARAMETERS['filter_sizes'], padding='valid', activation='relu',\n strides=1)\n", (1869, 2010), False, 'from keras.layers import Convolution1D, GlobalMaxPooling1D\n'), ((2131, 2151), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (2149, 2151), False, 'from 
keras.layers import Convolution1D, GlobalMaxPooling1D\n'), ((2173, 2217), 'keras.layers.Dropout', 'Dropout', (["TRAINING_PARAMETERS['dropout_rate']"], {}), "(TRAINING_PARAMETERS['dropout_rate'])\n", (2180, 2217), False, 'from keras.layers import Dropout, GaussianNoise\n'), ((2239, 2273), 'keras.layers.Dense', 'Dense', (['n_out'], {'activation': '"""softmax"""'}), "(n_out, activation='softmax')\n", (2244, 2273), False, 'from keras.layers import Dense, Input\n'), ((3939, 3968), 'numpy.max', 'np.max', (['positionMatrix1_train'], {}), '(positionMatrix1_train)\n', (3945, 3968), True, 'import numpy as np\n'), ((3970, 3999), 'numpy.max', 'np.max', (['positionMatrix2_train'], {}), '(positionMatrix2_train)\n', (3976, 3999), True, 'import numpy as np\n'), ((3584, 3601), 'numpy.mean', 'np.mean', (['cvscores'], {}), '(cvscores)\n', (3591, 3601), True, 'import numpy as np\n'), ((3609, 3625), 'numpy.std', 'np.std', (['cvscores'], {}), '(cvscores)\n', (3615, 3625), True, 'import numpy as np\n'), ((4703, 4755), 'sklearn.metrics.classification_report', 'classification_report', (['yLabel_test', 'predicted_yLabel'], {}), '(yLabel_test, predicted_yLabel)\n', (4724, 4755), False, 'from sklearn.metrics import classification_report\n')] |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
from common import *
from math import log,pow
#Not sure why, I decided to make another file for this test...
from study import *
# Trace sets; the full list is narrowed to a single trace for this run.
traces = ["kth", "kthmorningquad", "kthmorningsingle", "caida18"]
traces = ["kthmorningquad"]
dolegend = True
print("Plotting tail latency according to load")
cores = range(1, 16)
fill = False
ext = 'pdf'
suffixes = ['', '-fw']
for suffix in suffixes:
    for trace in traces:
        print("Reading trace %s%s" % (trace, suffix))
        # data_s[serie][core] holds the per-run throughput samples for
        # each series at each core count.
        data_s = []
        for serie in series:
            data = []
            for core in cores:
                try:
                    d = np.genfromtxt("drop-%s%s/%s_-_Number_of_processing_cores__%dTHROUGHPUT.csv" % (trace, suffix, serie, core))
                    data.append(np.array(d, ndmin=1))
                except Exception as e:
                    # A missing result file yields an empty sample set
                    # so the point is plotted as NaN below.
                    data.append(np.asarray([]))
                    print("While reading trace %s:" % trace)
                    print(e)
            data_s.append(np.asarray(data))
        plt.clf()
        # Leave extra vertical room for the legend when it is drawn.
        y = 3.5
        if dolegend:
            y = 4.2
        plt.rcParams["figure.figsize"] = (6, y)
        fig, ax1 = plt.subplots()
        ax2 = ax1
        ax2.set_ylabel('Throughput (Gbps)')
        for i, data in enumerate(data_s):
            scolor = tcolors[i]
            X = cores
            # Median with 25/75 percentiles; empty samples become NaN.
            Y = [np.median(d) if len(d) > 0 else np.nan for d in data]
            perc25 = [np.percentile(d, 25) if len(d) > 0 else np.nan for d in data]
            perc75 = [np.percentile(d, 75) if len(d) > 0 else np.nan for d in data]
            if fill:
                ax2.plot(X, Y, color=scolor, marker=tmarkers[i], label=labels[i])
                ax2.fill_between(X, perc25, perc75, color=list(scolor) + [0.25])
            else:
                ax2.errorbar(X, Y, yerr=[np.std(d) if len(d) > 0 else np.nan for d in data], label=labels[i], color=scolor, marker=tmarkers[i], ls='none', fillstyle='none')
        ax2.xaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: "%d" % (x)))
        ax2.set_xlabel("Number of processing cores")
        plt.grid(True, axis="y")
        ax2.set_ylim(0, 100 if suffix == '-fw' else None)
        ax2.set_xlim(0.5)
        ax2.set_xticks(np.arange(int(max(cores) / 2) + 1) * 2 + 1)
        if dolegend:
            # BUGFIX: the original rebound the module-level `labels`
            # list here, corrupting the labels[i] lookups above on
            # subsequent iterations of the trace/suffix loops.
            leg_handles, leg_labels = ax2.get_legend_handles_labels()
            # Strip errorbar containers down to their line artists so
            # the legend markers render cleanly.
            leg_handles = [h[0] for h in leg_handles]
            ax2.legend(leg_handles, leg_labels, ncol=3, bbox_to_anchor=(0.5, 1), loc="lower center")
        ax2.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, pos: "%d" % (x)))
        fig.tight_layout()  # otherwise the right y-label is slightly clipped
        plt.savefig('throughput-%s%s.%s' % (trace, suffix, ext))
        plt.clf()
| [
"matplotlib.pyplot.clf",
"numpy.median",
"numpy.std",
"numpy.asarray",
"numpy.genfromtxt",
"numpy.percentile",
"matplotlib.ticker.FuncFormatter",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid"
] | [((3337, 3346), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3344, 3346), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1239), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1237, 1239), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1373), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1371, 1373), True, 'import matplotlib.pyplot as plt\n'), ((2525, 2549), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'axis': '"""y"""'}), "(True, axis='y')\n", (2533, 2549), True, 'import matplotlib.pyplot as plt\n'), ((3275, 3331), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('throughput-%s%s.%s' % (trace, suffix, ext))"], {}), "('throughput-%s%s.%s' % (trace, suffix, ext))\n", (3286, 3331), True, 'import matplotlib.pyplot as plt\n'), ((2414, 2459), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (["(lambda x, pos: '%d' % x)"], {}), "(lambda x, pos: '%d' % x)\n", (2434, 2459), True, 'import matplotlib.ticker as ticker\n'), ((3139, 3184), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (["(lambda x, pos: '%d' % x)"], {}), "(lambda x, pos: '%d' % x)\n", (3159, 3184), True, 'import matplotlib.ticker as ticker\n'), ((1203, 1219), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1213, 1219), True, 'import numpy as np\n'), ((734, 845), 'numpy.genfromtxt', 'np.genfromtxt', (["('drop-%s%s/%s_-_Number_of_processing_cores__%dTHROUGHPUT.csv' % (trace,\n suffix, serie, core))"], {}), "('drop-%s%s/%s_-_Number_of_processing_cores__%dTHROUGHPUT.csv' %\n (trace, suffix, serie, core))\n", (747, 845), True, 'import numpy as np\n'), ((1732, 1744), 'numpy.median', 'np.median', (['d'], {}), '(d)\n', (1741, 1744), True, 'import numpy as np\n'), ((1808, 1828), 'numpy.percentile', 'np.percentile', (['d', '(25)'], {}), '(d, 25)\n', (1821, 1828), True, 'import numpy as np\n'), ((1891, 1911), 'numpy.percentile', 'np.percentile', (['d', '(75)'], {}), '(d, 75)\n', (1904, 1911), True, 'import numpy as np\n'), ((874, 
894), 'numpy.array', 'np.array', (['d'], {'ndmin': '(1)'}), '(d, ndmin=1)\n', (882, 894), True, 'import numpy as np\n'), ((1070, 1084), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (1080, 1084), True, 'import numpy as np\n'), ((2215, 2224), 'numpy.std', 'np.std', (['d'], {}), '(d)\n', (2221, 2224), True, 'import numpy as np\n')] |
"""
// Copyright (c) 2008, <NAME> (casey dot duncan at gmail dot com)
// see LICENSE.txt for details
// $Id$
"""
import random
from math import floor
from typing import Optional, Sequence
import numpy as np
from ._tables import GRAD3, GRAD4, M_1_PI, PERM, SIMPLEX
"Native-code simplex noise functions"
# 2D simplex skew factors
F2 = 0.3660254037844386 # 0.5 * (sqrt(3.0) - 1.0)
G2 = 0.21132486540518713 # (3.0 - sqrt(3.0)) / 6.0
def _snoise2_impl(x: float, y: float) -> float:
    """Raw 2D simplex noise for one coordinate, roughly in [-1, 1]."""
    # Skew the input space to find the containing simplex cell.
    s = (x + y) * F2
    i = floor(x + s)
    j = floor(y + s)
    t = (i + j) * G2
    xx = [0.0, 0.0, 0.0]
    yy = [0.0, 0.0, 0.0]
    f = [0.0, 0.0, 0.0]
    noise = [0.0, 0.0, 0.0]
    g = [0, 0, 0]
    # Unskewed distances from the cell origin.
    xx[0] = x - (i - t)
    yy[0] = y - (j - t)
    # Which of the two triangles are we in? The booleans are used
    # arithmetically as 0/1 offsets below.
    i1 = xx[0] > yy[0]
    j1 = xx[0] <= yy[0]
    xx[2] = xx[0] + G2 * 2.0 - 1.0
    yy[2] = yy[0] + G2 * 2.0 - 1.0
    xx[1] = xx[0] - i1 + G2
    yy[1] = yy[0] - j1 + G2
    # Hash the cell coordinates into the permutation table to pick
    # one of the 12 gradient directions per corner.
    I = int(i & 255)
    J = int(j & 255)
    g[0] = PERM[I + PERM[J]] % 12
    g[1] = PERM[I + i1 + PERM[J + j1]] % 12
    g[2] = PERM[I + 1 + PERM[J + 1]] % 12
    # Radial attenuation per corner.
    for c in range(0, 3):
        f[c] = 0.5 - xx[c] * xx[c] - yy[c] * yy[c]
    # Sum the attenuated gradient contributions of the three corners.
    for c in range(0, 3):
        if f[c] > 0:
            noise[c] = (
                f[c] * f[c] * f[c] * f[c] * (GRAD3[g[c]][0] * xx[c] + GRAD3[g[c]][1] * yy[c])
            )
    return (noise[0] + noise[1] + noise[2]) * 70.0
def dot3(v1, v2):
    """Dot product of the first three components of `v1` and `v2`."""
    total = 0.0
    for a, b in zip(v1[:3], v2[:3]):
        total += a * b
    return total
def ASSIGN(a, v0, v1, v2):
    """Overwrite the first three slots of `a` in place (C-style helper)."""
    a[0], a[1], a[2] = v0, v1, v2
# 3D simplex skew/unskew factors.
F3 = 1.0 / 3.0
G3 = 1.0 / 6.0
def _snoise3_impl(x: float, y: float, z: float) -> float:
    """Raw 3D simplex noise for one coordinate, ported from the C code."""
    # int c, o1[3], o2[3], g[4], I, J, K;  (original C declarations)
    o1 = [0, 0, 0]
    o2 = [0, 0, 0]
    g = [0, 0, 0, 0]
    f = [0.0] * 4
    noise = [0.0] * 4
    # Skew the input space to find the containing simplex cell.
    s = (x + y + z) * F3
    # BUGFIX: np.floor returns a numpy float, so the `i & 255` hashing
    # below raised TypeError (bitwise op on a float); math.floor (already
    # imported at module top, and used by _snoise2_impl) yields an int.
    i = floor(x + s)
    j = floor(y + s)
    k = floor(z + s)
    t = (i + j + k) * G3
    pos = np.zeros((4, 3), dtype="float")
    # Unskewed distances from the cell origin for the first corner.
    pos[0][0] = x - (i - t)
    pos[0][1] = y - (j - t)
    pos[0][2] = z - (k - t)
    # Choose the corner offsets o1/o2 from the coordinate ordering
    # (which of the six tetrahedra contains the point).
    if pos[0][0] >= pos[0][1]:
        if pos[0][1] >= pos[0][2]:
            ASSIGN(o1, 1, 0, 0)
            ASSIGN(o2, 1, 1, 0)
        elif pos[0][0] >= pos[0][2]:
            ASSIGN(o1, 1, 0, 0)
            ASSIGN(o2, 1, 0, 1)
        else:
            ASSIGN(o1, 0, 0, 1)
            ASSIGN(o2, 1, 0, 1)
    else:
        if pos[0][1] < pos[0][2]:
            ASSIGN(o1, 0, 0, 1)
            ASSIGN(o2, 0, 1, 1)
        elif pos[0][0] < pos[0][2]:
            ASSIGN(o1, 0, 1, 0)
            ASSIGN(o2, 0, 1, 1)
        else:
            ASSIGN(o1, 0, 1, 0)
            ASSIGN(o2, 1, 1, 0)
    # Remaining three corner positions.
    for c in range(0, 3):
        pos[3][c] = pos[0][c] - 1.0 + 3.0 * G3
        pos[2][c] = pos[0][c] - o2[c] + 2.0 * G3
        pos[1][c] = pos[0][c] - o1[c] + G3
    # Hash the cell coordinates into gradient indices.
    I = int(i & 255)
    J = int(j & 255)
    K = int(k & 255)
    g[0] = PERM[I + PERM[J + PERM[K]]] % 12
    g[1] = PERM[I + o1[0] + PERM[J + o1[1] + PERM[o1[2] + K]]] % 12
    g[2] = PERM[I + o2[0] + PERM[J + o2[1] + PERM[o2[2] + K]]] % 12
    g[3] = PERM[I + 1 + PERM[J + 1 + PERM[K + 1]]] % 12
    # Radial attenuation per corner.
    for c in range(0, 4):
        f[c] = 0.6 - pos[c][0] * pos[c][0] - pos[c][1] * pos[c][1] - pos[c][2] * pos[c][2]
    # Sum the attenuated gradient contributions of the four corners.
    for c in range(0, 4):
        if f[c] > 0:
            noise[c] = f[c] * f[c] * f[c] * f[c] * dot3(pos[c], GRAD3[g[c]])
    return (noise[0] + noise[1] + noise[2] + noise[3]) * 32.0
def _fbm_noise3_impl(
    x: float, y: float, z: float, octaves: int, persistence: float, lacunarity: float
) -> float:
    """Fractal Brownian motion: sum `octaves` layers of 3D simplex noise."""
    frequency = 1.0
    amplitude = 1.0
    amplitude_total = 1.0
    total = _snoise3_impl(x, y, z)
    for _ in range(1, octaves):
        frequency *= lacunarity
        amplitude *= persistence
        amplitude_total += amplitude
        total += _snoise3_impl(x * frequency, y * frequency, z * frequency) * amplitude
    return total / amplitude_total
def _dot4(v1, x, y, z, w):
return v1[0] * x + v1[1] * y + v1[2] * z + v1[3] * w
F4 = 0.30901699437494745 # /* (sqrt(5.0) - 1.0) / 4.0 */
G4 = 0.1381966011250105 # /* (5.0 - sqrt(5.0)) / 20.0 */
def _noise4_impl(x: float, y: float, z: float, w: float) -> float:
    """Raw 4D simplex noise for one coordinate, ported from the C code."""
    noise = [0.0] * 5
    # Skew the input space to find the containing simplex cell.
    s = (x + y + z + w) * F4
    # BUGFIX: np.floor returns numpy floats, so the `i & 255` hashing
    # below raised TypeError (bitwise op on a float); math.floor (already
    # imported at module top) yields a plain int.
    i = floor(x + s)
    j = floor(y + s)
    k = floor(z + s)
    l = floor(w + s)
    t = (i + j + k + l) * G4
    # Unskewed distances from the cell origin.
    x0 = x - (i - t)
    y0 = y - (j - t)
    z0 = z - (k - t)
    w0 = w - (l - t)
    # Rank the coordinates to index the simplex lookup table.
    c = (
        (x0 > y0) * 32
        + (x0 > z0) * 16
        + (y0 > z0) * 8
        + (x0 > w0) * 4
        + (y0 > w0) * 2
        + (z0 > w0)
    )
    # Corner offsets; the booleans act as 0/1 integers below.
    i1 = SIMPLEX[c][0] >= 3
    j1 = SIMPLEX[c][1] >= 3
    k1 = SIMPLEX[c][2] >= 3
    l1 = SIMPLEX[c][3] >= 3
    i2 = SIMPLEX[c][0] >= 2
    j2 = SIMPLEX[c][1] >= 2
    k2 = SIMPLEX[c][2] >= 2
    l2 = SIMPLEX[c][3] >= 2
    i3 = SIMPLEX[c][0] >= 1
    j3 = SIMPLEX[c][1] >= 1
    k3 = SIMPLEX[c][2] >= 1
    l3 = SIMPLEX[c][3] >= 1
    # Positions of the remaining four simplex corners.
    x1 = x0 - i1 + G4
    y1 = y0 - j1 + G4
    z1 = z0 - k1 + G4
    w1 = w0 - l1 + G4
    x2 = x0 - i2 + 2.0 * G4
    y2 = y0 - j2 + 2.0 * G4
    z2 = z0 - k2 + 2.0 * G4
    w2 = w0 - l2 + 2.0 * G4
    x3 = x0 - i3 + 3.0 * G4
    y3 = y0 - j3 + 3.0 * G4
    z3 = z0 - k3 + 3.0 * G4
    w3 = w0 - l3 + 3.0 * G4
    x4 = x0 - 1.0 + 4.0 * G4
    y4 = y0 - 1.0 + 4.0 * G4
    z4 = z0 - 1.0 + 4.0 * G4
    w4 = w0 - 1.0 + 4.0 * G4
    # Hash the cell coordinates into gradient indices.
    I = int(i & 255)
    J = int(j & 255)
    K = int(k & 255)
    L = int(l & 255)
    gi0 = PERM[I + PERM[J + PERM[K + PERM[L]]]] & 0x1F
    gi1 = PERM[I + i1 + PERM[J + j1 + PERM[K + k1 + PERM[L + l1]]]] & 0x1F
    gi2 = PERM[I + i2 + PERM[J + j2 + PERM[K + k2 + PERM[L + l2]]]] & 0x1F
    gi3 = PERM[I + i3 + PERM[J + j3 + PERM[K + k3 + PERM[L + l3]]]] & 0x1F
    gi4 = PERM[I + 1 + PERM[J + 1 + PERM[K + 1 + PERM[L + 1]]]] & 0x1F
    # Attenuated gradient contribution from each of the five corners.
    t0 = 0.6 - x0 * x0 - y0 * y0 - z0 * z0 - w0 * w0
    if t0 >= 0.0:
        t0 *= t0
        noise[0] = t0 * t0 * _dot4(GRAD4[gi0], x0, y0, z0, w0)
    t1 = 0.6 - x1 * x1 - y1 * y1 - z1 * z1 - w1 * w1
    if t1 >= 0.0:
        t1 *= t1
        noise[1] = t1 * t1 * _dot4(GRAD4[gi1], x1, y1, z1, w1)
    t2 = 0.6 - x2 * x2 - y2 * y2 - z2 * z2 - w2 * w2
    if t2 >= 0.0:
        t2 *= t2
        noise[2] = t2 * t2 * _dot4(GRAD4[gi2], x2, y2, z2, w2)
    t3 = 0.6 - x3 * x3 - y3 * y3 - z3 * z3 - w3 * w3
    if t3 >= 0.0:
        t3 *= t3
        noise[3] = t3 * t3 * _dot4(GRAD4[gi3], x3, y3, z3, w3)
    t4 = 0.6 - x4 * x4 - y4 * y4 - z4 * z4 - w4 * w4
    if t4 >= 0.0:
        t4 *= t4
        noise[4] = t4 * t4 * _dot4(GRAD4[gi4], x4, y4, z4, w4)
    return 27.0 * (noise[0] + noise[1] + noise[2] + noise[3] + noise[4])
def _fbm_noise4_impl(
    x: float, y: float, z: float, w: float, octaves: int, persistence: float, lacunarity: float
) -> float:
    """Fractal Brownian motion: sum `octaves` layers of 4D simplex noise."""
    frequency = 1.0
    amplitude = 1.0
    amplitude_total = 1.0
    total = _noise4_impl(x, y, z, w)
    for _ in range(1, octaves):
        frequency *= lacunarity
        amplitude *= persistence
        amplitude_total += amplitude
        total += _noise4_impl(x * frequency, y * frequency, z * frequency, w * frequency) * amplitude
    return total / amplitude_total
class SNoise:
    """Seedable simplex-noise generator (2D/3D/4D, optional fBm octaves)."""
    def __init__(self, seed: Optional[int] = None):
        # Without a seed, fall back to the module's default permutation.
        if seed is not None:
            self.seed(seed)
        else:
            self._set_perm(PERM)
    def seed(self, s: int) -> None:
        """Shuffle the permutation table deterministically from `s`."""
        perm = list(PERM)
        random.Random(s).shuffle(perm)
        self._set_perm(perm)
    def _set_perm(self, perm: Sequence[int]) -> None:
        # Table is doubled so index arithmetic never needs a modulo.
        # NOTE(review): the module-level noise functions read the global
        # PERM table, not self._perm, so seeding currently has no effect
        # on generated noise — confirm the intended wiring.
        self._perm = np.array(list(perm) * 2, dtype=np.uint8)
    def noise2(
        self,
        x: float,
        y: float,
        octaves: int = 1,
        persistence: float = 0.5,
        lacunarity: float = 2.0,
        repeatx: Optional[int] = None,
        repeaty: Optional[int] = None,
        base: int = 0,
    ) -> float:
        """Return simplex noise for the 2D coordinate (x, y).

        octaves -- number of passes; 1 gives simple noise.
        persistence -- amplitude of each successive octave relative to
            the one below it (the first octave's amplitude is always 1.0).
        lacunarity -- frequency of each successive octave relative to
            the one below it.
        repeatx, repeaty -- interval along each axis at which the noise
            values repeat; usable as the tile size for seamless textures.
        base -- fixed offset for the noise coordinates; useful for
            generating different textures with the same repeat interval.
        """
        # BUGFIX: `base` was documented as a coordinate offset but the
        # original hard-coded z = 0.0, silently ignoring it.
        z = float(base)
        if octaves <= 0:
            raise ValueError("Expected octaves value > 0")
        if repeatx is None and repeaty is None:
            # Flat noise, no tiling.
            freq = 1.0
            amp = 1.0
            amp_total = 1.0  # renamed from `max` (shadowed the builtin)
            total = _snoise2_impl(x + z, y + z)
            for i in range(1, octaves):
                freq *= lacunarity
                amp *= persistence
                amp_total += amp
                total += _snoise2_impl(x * freq + z, y * freq + z) * amp
            return total / amp_total
        else:
            # Tiled noise: map each tiled axis onto a circle so the
            # noise wraps seamlessly, raising the dimensionality.
            w = z
            if repeaty is not None:
                yf = y * 2.0 / repeaty
                yr = repeaty * M_1_PI * 0.5
                vy = np.sin(yf)  # originally fast_sin
                vyz = np.cos(yf)  # originally fast_cos
                y = vy * yr
                w += vyz * yr
                if repeatx is None:
                    return _fbm_noise3_impl(x, y, w, octaves, persistence, lacunarity)
            if repeatx is not None:
                xf = x * 2.0 / repeatx
                xr = repeatx * M_1_PI * 0.5
                vx = np.sin(xf)
                vxz = np.cos(xf)
                x = vx * xr
                z += vxz * xr
                if repeaty is None:
                    return _fbm_noise3_impl(x, y, z, octaves, persistence, lacunarity)
            return _fbm_noise4_impl(x, y, z, w, octaves, persistence, lacunarity)
    def noise3(
        self,
        x: float,
        y: float,
        z: float,
        octaves: int = 1,
        persistence: float = 0.5,
        lacunarity: float = 2.0,
    ) -> float:
        """Return simplex noise for the 3D coordinate (x, y, z).

        See noise2 for the meaning of octaves/persistence/lacunarity.
        """
        if octaves == 1:
            # Single octave, return simple noise.
            return _snoise3_impl(x, y, z)
        elif octaves > 1:
            return _fbm_noise3_impl(x, y, z, octaves, persistence, lacunarity)
        else:
            raise ValueError("Expected octaves value > 0")
    def noise4(
        self,
        x: float,
        y: float,
        z: float,
        w: float,
        octaves: int = 1,
        persistence: float = 0.5,
        lacunarity: float = 2.0,
    ) -> float:
        """Return simplex noise for the 4D coordinate (x, y, z, w).

        See noise2 for the meaning of octaves/persistence/lacunarity.
        """
        if octaves == 1:
            # Single octave, return simple noise.
            return _noise4_impl(x, y, z, w)
        elif octaves > 1:
            return _fbm_noise4_impl(x, y, z, w, octaves, persistence, lacunarity)
        else:
            raise ValueError("Expected octaves value > 0")
| [
"random.Random",
"numpy.floor",
"numpy.zeros",
"math.floor",
"numpy.sin",
"numpy.cos"
] | [((514, 526), 'math.floor', 'floor', (['(x + s)'], {}), '(x + s)\n', (519, 526), False, 'from math import floor\n'), ((535, 547), 'math.floor', 'floor', (['(y + s)'], {}), '(y + s)\n', (540, 547), False, 'from math import floor\n'), ((1804, 1819), 'numpy.floor', 'np.floor', (['(x + s)'], {}), '(x + s)\n', (1812, 1819), True, 'import numpy as np\n'), ((1828, 1843), 'numpy.floor', 'np.floor', (['(y + s)'], {}), '(y + s)\n', (1836, 1843), True, 'import numpy as np\n'), ((1852, 1867), 'numpy.floor', 'np.floor', (['(z + s)'], {}), '(z + s)\n', (1860, 1867), True, 'import numpy as np\n'), ((1904, 1935), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {'dtype': '"""float"""'}), "((4, 3), dtype='float')\n", (1912, 1935), True, 'import numpy as np\n'), ((4140, 4155), 'numpy.floor', 'np.floor', (['(x + s)'], {}), '(x + s)\n', (4148, 4155), True, 'import numpy as np\n'), ((4164, 4179), 'numpy.floor', 'np.floor', (['(y + s)'], {}), '(y + s)\n', (4172, 4179), True, 'import numpy as np\n'), ((4188, 4203), 'numpy.floor', 'np.floor', (['(z + s)'], {}), '(z + s)\n', (4196, 4203), True, 'import numpy as np\n'), ((4212, 4227), 'numpy.floor', 'np.floor', (['(w + s)'], {}), '(w + s)\n', (4220, 4227), True, 'import numpy as np\n'), ((7227, 7243), 'random.Random', 'random.Random', (['s'], {}), '(s)\n', (7240, 7243), False, 'import random\n'), ((9405, 9415), 'numpy.sin', 'np.sin', (['yf'], {}), '(yf)\n', (9411, 9415), True, 'import numpy as np\n'), ((9461, 9471), 'numpy.cos', 'np.cos', (['yf'], {}), '(yf)\n', (9467, 9471), True, 'import numpy as np\n'), ((9816, 9826), 'numpy.sin', 'np.sin', (['xf'], {}), '(xf)\n', (9822, 9826), True, 'import numpy as np\n'), ((9849, 9859), 'numpy.cos', 'np.cos', (['xf'], {}), '(xf)\n', (9855, 9859), True, 'import numpy as np\n')] |
# Display robot in 3D
import numpy as np
def run(robot):
    """Animate the robot through a trivial two-stance path (in degrees)."""
    # A single joint pose as a 1x6 matrix (float32).
    stance = np.asmatrix([0, 90, 0, 90, 0, 90], dtype=np.float32)
    # Duplicate the pose so animate() receives a path with two rows.
    path = np.vstack((stance, stance))
    print(path)
    robot.animate(stances=path, frame_rate=1, unit='deg')
| [
"numpy.asmatrix",
"numpy.concatenate"
] | [((238, 290), 'numpy.asmatrix', 'np.asmatrix', (['[0, 90, 0, 90, 0, 90]'], {'dtype': 'np.float32'}), '([0, 90, 0, 90, 0, 90], dtype=np.float32)\n', (249, 290), True, 'import numpy as np\n'), ((336, 372), 'numpy.concatenate', 'np.concatenate', (['(pose, pose)'], {'axis': '(0)'}), '((pose, pose), axis=0)\n', (350, 372), True, 'import numpy as np\n')] |
from mesa.space import SingleGrid
from mesa import Model, Agent
from mesa.time import RandomActivation
from mesa.datacollection import DataCollector
import numpy as np
class SchellingAgent(Agent):
    """One Schelling agent; its grid position doubles as its unique id."""
    def __init__(self, pos, agent_type, homophily, model):
        super().__init__(pos, model)
        self.pos = pos
        self.type = agent_type
        self.homophily = homophily

    def step(self):
        # Fraction of neighbours sharing this agent's type; if it falls
        # below the homophily threshold the agent relocates.
        same_type = [
            neighbor.type == self.type
            for neighbor in self.model.grid.neighbor_iter(self.pos)
        ]
        if np.mean(same_type) < self.homophily:
            self.model.grid.move_to_empty(self)
            self.model.moved += 1
class SchellingModel(Model):
    """Schelling segregation model on a toroidal single-occupancy grid."""
    def __init__(self, width, height, density, homophily):
        self.schedule = RandomActivation(self)
        # Create the grid.
        self.grid = SingleGrid(width, height, torus=True)
        self.moved = 0
        self.running = True
        # Populate roughly `density` fraction of the cells with agents
        # of a random colour.
        for cell in self.grid.coord_iter():
            x, y = cell[1], cell[2]
            if self.random.random() < density:
                colour = np.random.choice(["Orange", "Blue"])
                agent = SchellingAgent(
                    pos=(x, y),
                    agent_type=colour,
                    homophily=homophily,
                    model=self,
                )
                self.schedule.add(agent)
                self.grid.position_agent(agent, (x, y))
        # Collect the per-step move count plus every agent's position
        # and type.
        self.datacollector = DataCollector(
            model_reporters={"num_moved": lambda m: m.moved},
            agent_reporters={
                "x": lambda a: a.pos[0],
                "y": lambda a: a.pos[1],
                "type": lambda a: a.type,
            },
        )

    def step(self):
        # Reset the move counter, advance all agents, then record data;
        # the model stops once nobody moved during a step.
        self.moved = 0
        self.schedule.step()
        print(f"{self.moved} agents moved in this timestep")
        self.datacollector.collect(self)
        self.running = self.moved != 0
| [
"mesa.time.RandomActivation",
"numpy.random.choice",
"mesa.space.SingleGrid",
"mesa.datacollection.DataCollector"
] | [((1074, 1096), 'mesa.time.RandomActivation', 'RandomActivation', (['self'], {}), '(self)\n', (1090, 1096), False, 'from mesa.time import RandomActivation\n'), ((1152, 1189), 'mesa.space.SingleGrid', 'SingleGrid', (['width', 'height'], {'torus': '(True)'}), '(width, height, torus=True)\n', (1162, 1189), False, 'from mesa.space import SingleGrid\n'), ((2045, 2210), 'mesa.datacollection.DataCollector', 'DataCollector', ([], {'model_reporters': "{'num_moved': lambda m: m.moved}", 'agent_reporters': "{'x': lambda a: a.pos[0], 'y': lambda a: a.pos[1], 'type': lambda a: a.type}"}), "(model_reporters={'num_moved': lambda m: m.moved},\n agent_reporters={'x': lambda a: a.pos[0], 'y': lambda a: a.pos[1],\n 'type': lambda a: a.type})\n", (2058, 2210), False, 'from mesa.datacollection import DataCollector\n'), ((1581, 1617), 'numpy.random.choice', 'np.random.choice', (["['Orange', 'Blue']"], {}), "(['Orange', 'Blue'])\n", (1597, 1617), True, 'import numpy as np\n')] |
import numpy as np
from comparator import comparator
import unittest
class TestBootstrap(unittest.TestCase):
    """Sanity checks for comparator's statistical comparison."""

    def test_same_distro(self):
        # Two samples from the same normal should not be flagged.
        np.random.seed(0)
        sample_a = np.random.normal(2, 3, 5000)
        sample_b = np.random.normal(2, 3, 1000)
        result = comparator(A=sample_a, B=sample_b, normaltest=True)
        self.assertFalse(result.compare(), "stat test are failing")

    def test_different_distro(self):
        # Samples from clearly different normals should be detected.
        np.random.seed(0)
        sample_a = np.random.normal(1, 2, 5000)
        sample_b = np.random.normal(2, 3, 1000)
        result = comparator(A=sample_a, B=sample_b, normaltest=True)
        self.assertTrue(result.compare(), "stat test are failing")

    def test_normal_distro(self):
        # Uniform samples exercise the normality-test code path.
        np.random.seed(0)
        sample_a = np.random.rand(1000)
        sample_b = np.random.rand(500)
        result = comparator(A=sample_a, B=sample_b, normaltest=True)
        self.assertFalse(result.compare(), "normal test is failing")
# Run the test suite when this module is executed directly.
if __name__=="__main__":
    unittest.main()
| [
"unittest.main",
"numpy.random.seed",
"numpy.random.normal",
"numpy.random.rand",
"comparator.comparator"
] | [((864, 879), 'unittest.main', 'unittest.main', ([], {}), '()\n', (877, 879), False, 'import unittest\n'), ((149, 166), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (163, 166), True, 'import numpy as np\n'), ((177, 205), 'numpy.random.normal', 'np.random.normal', (['(2)', '(3)', '(5000)'], {}), '(2, 3, 5000)\n', (193, 205), True, 'import numpy as np\n'), ((214, 242), 'numpy.random.normal', 'np.random.normal', (['(2)', '(3)', '(1000)'], {}), '(2, 3, 1000)\n', (230, 242), True, 'import numpy as np\n'), ((252, 289), 'comparator.comparator', 'comparator', ([], {'A': 'a', 'B': 'b', 'normaltest': '(True)'}), '(A=a, B=b, normaltest=True)\n', (262, 289), False, 'from comparator import comparator\n'), ((393, 410), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (407, 410), True, 'import numpy as np\n'), ((421, 449), 'numpy.random.normal', 'np.random.normal', (['(1)', '(2)', '(5000)'], {}), '(1, 2, 5000)\n', (437, 449), True, 'import numpy as np\n'), ((458, 486), 'numpy.random.normal', 'np.random.normal', (['(2)', '(3)', '(1000)'], {}), '(2, 3, 1000)\n', (474, 486), True, 'import numpy as np\n'), ((496, 533), 'comparator.comparator', 'comparator', ([], {'A': 'a', 'B': 'b', 'normaltest': '(True)'}), '(A=a, B=b, normaltest=True)\n', (506, 533), False, 'from comparator import comparator\n'), ((639, 656), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (653, 656), True, 'import numpy as np\n'), ((667, 687), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (681, 687), True, 'import numpy as np\n'), ((698, 717), 'numpy.random.rand', 'np.random.rand', (['(500)'], {}), '(500)\n', (712, 717), True, 'import numpy as np\n'), ((729, 766), 'comparator.comparator', 'comparator', ([], {'A': 'a', 'B': 'b', 'normaltest': '(True)'}), '(A=a, B=b, normaltest=True)\n', (739, 766), False, 'from comparator import comparator\n')] |
# So what about those magic parameters? How did I determine them?
# Well, I used a hyperparameter optimizer!
#
# This example introduces the optimizer I used and we will talk about how it works in some detail.
import dlib
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from math import sin,cos,pi,exp,sqrt
# Let's fine the maximizer of this horrible function:
def messy_holder_table(x,y):
    """Scaled Holder-table-style test function to maximise over [-15, 15]^2."""
    radial = sqrt(x * x + y * y)
    peak = abs(sin(x) * cos(y) * exp(abs(1 - radial / pi)))
    damping = max(9, abs(x) + abs(y)) ** 5
    return 1e5 * peak / damping
# Search for the global maximum within the given box.
xy,z = dlib.find_max_global(messy_holder_table,
                           [-15,-15],  # lower bound constraints on x and y respectively
                           [15,15],    # upper bound constraints on x and y respectively
                           100)        # number of times find_max_global() will call messy_holder_table()
print("xy: ", xy);
print("z: ", z);
# Known optimum location, used to report how close the optimizer got.
opt_z = messy_holder_table(-8.162150706931659, 0)
print("distance from optimal: ", opt_z - z)
# Now plot a 3D view of messy_holder_table() and also draw the point the optimizer located
X = np.arange(-15, 15, 0.1)
Y = np.arange(-15, 15, 0.1)
X, Y = np.meshgrid(X, Y)
# Shadow the scalar math functions with numpy's vectorised versions so
# the same formula evaluates over the whole grid at once.
from numpy import sin,cos,pi,exp,sqrt
Z = abs(sin(X)*cos(Y)*exp(abs(1-sqrt(X*X+Y*Y)/pi)))
R = np.maximum(9,abs(X) + abs(Y))**5
Z = 1e5*Z/R
# Plot the surface.
fig = plt.figure()
# NOTE(review): fig.gca(projection=...) is deprecated in newer
# matplotlib releases; works with the version this script targets.
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, rcount=70, ccount=70,
                       linewidth=0, antialiased=True)
# Put a green dot on the location found by dlib.find_max_global()
ax.scatter(xy[0],xy[1], z, s=40, c='g')
plt.show()
| [
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.cos",
"dlib.find_max_global",
"numpy.sqrt"
] | [((653, 720), 'dlib.find_max_global', 'dlib.find_max_global', (['messy_holder_table', '[-15, -15]', '[15, 15]', '(100)'], {}), '(messy_holder_table, [-15, -15], [15, 15], 100)\n', (673, 720), False, 'import dlib\n'), ((1217, 1240), 'numpy.arange', 'np.arange', (['(-15)', '(15)', '(0.1)'], {}), '(-15, 15, 0.1)\n', (1226, 1240), True, 'import numpy as np\n'), ((1245, 1268), 'numpy.arange', 'np.arange', (['(-15)', '(15)', '(0.1)'], {}), '(-15, 15, 0.1)\n', (1254, 1268), True, 'import numpy as np\n'), ((1276, 1293), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (1287, 1293), True, 'import numpy as np\n'), ((1461, 1473), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1471, 1473), True, 'import matplotlib.pyplot as plt\n'), ((1739, 1749), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1747, 1749), True, 'import matplotlib.pyplot as plt\n'), ((1341, 1347), 'numpy.sin', 'sin', (['X'], {}), '(X)\n', (1344, 1347), False, 'from numpy import sin, cos, pi, exp, sqrt\n'), ((1348, 1354), 'numpy.cos', 'cos', (['Y'], {}), '(Y)\n', (1351, 1354), False, 'from numpy import sin, cos, pi, exp, sqrt\n'), ((547, 553), 'numpy.sin', 'sin', (['x'], {}), '(x)\n', (550, 553), False, 'from numpy import sin, cos, pi, exp, sqrt\n'), ((554, 560), 'numpy.cos', 'cos', (['y'], {}), '(y)\n', (557, 560), False, 'from numpy import sin, cos, pi, exp, sqrt\n'), ((1365, 1384), 'numpy.sqrt', 'sqrt', (['(X * X + Y * Y)'], {}), '(X * X + Y * Y)\n', (1369, 1384), False, 'from numpy import sin, cos, pi, exp, sqrt\n'), ((571, 590), 'numpy.sqrt', 'sqrt', (['(x * x + y * y)'], {}), '(x * x + y * y)\n', (575, 590), False, 'from numpy import sin, cos, pi, exp, sqrt\n')] |
# Problem: https://www.hackerrank.com/challenges/np-linear-algebra/problem
# Score: 20.0
import numpy as np
# Read an n x n matrix from stdin and print its determinant rounded
# to two decimal places.
size = int(input())
matrix = np.array([input().strip().split() for _ in range(size)], float)
determinant = np.linalg.det(matrix)
print(round(determinant, 2))
| [
"numpy.linalg.det"
] | [((205, 221), 'numpy.linalg.det', 'np.linalg.det', (['a'], {}), '(a)\n', (218, 221), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean
import scipy.optimize as sp
def degreetorad(deg):
    """Convert an angle (scalar or array) from degrees to radians."""
    degrees_to_radians = np.pi / 180
    return deg * degrees_to_radians
def calculate_longitude(zodiac_indexes, degree, minute, second):
    """Combine a zodiac sign index and a DMS angle into degrees of longitude.

    Each zodiac sign spans 30 degrees; minutes and seconds are converted
    to fractional degrees. Works element-wise on numpy arrays too.
    """
    arc_minutes = minute / 60
    arc_seconds = second / 3600
    return zodiac_indexes * 30 + degree + arc_minutes + arc_seconds
def diff_logarithmean_loggeomean_radius(parameters, args):
    """Loss = log(arithmetic mean) - log(geometric mean) of the orbit radii.

    parameters -- [x, y]: orbit-centre distance and reference angle.
    args -- [alpha, bita]: per-opposition longitudes relative to the
        actual and the average sun.
    The loss is zero exactly when all radii are equal (a circular orbit).
    """
    x, y = parameters[0], parameters[1]
    alpha, bita = args[0], args[1]
    radii = []
    for i in range(len(alpha)):
        rsin_b = x * np.sin(degreetorad(bita[i] - y))
        sin_a = np.sin(degreetorad(alpha[i] - y))
        cos_ab = np.cos(degreetorad(bita[i] - alpha[i]))
        numerator = 2 * rsin_b * sin_a * cos_ab + np.square(rsin_b) + np.square(sin_a)
        denominator = 1 - np.square(cos_ab)
        radii.append(np.sqrt(numerator / denominator))
    arithmetic_mean = np.mean(radii)
    geometric_mean = gmean(radii)
    return np.log(arithmetic_mean) - np.log(geometric_mean)
def find_radius(alpha, bita, x_opt, y_opt):
    """Print and return the orbit radius implied by each opposition.

    Given the optimised orbit parameters (x_opt, y_opt), computes one
    radius per (alpha, bita) longitude pair; all values should be
    approximately equal when the fit converged.

    Generalised from the original hard-coded 12 oppositions to any
    number of observations, and now returns the list so callers can
    inspect it (the print is kept for backward compatibility).
    """
    radius_list = []
    for i in range(len(alpha)):  # was hard-coded range(12)
        rsin1 = x_opt * np.sin(degreetorad(bita[i] - y_opt))
        rsin2 = np.sin(degreetorad(alpha[i] - y_opt))
        cos3 = np.cos(degreetorad(bita[i] - alpha[i]))
        z1 = 2 * rsin1 * rsin2 * cos3 + np.square(rsin1) + np.square(rsin2)
        z2 = 1 - np.square(cos3)
        radius = np.sqrt(z1 / z2)
        radius_list.append(radius)
    print(radius_list)
    return radius_list
def minimize_loss(alpha, bita, x, y):
    """Minimise the log(AM) - log(GM) radius loss over (x, y).

    Starts from the initial guess (x, y), constrains x >= 0, and
    returns (optimised x, optimised y, final loss value).
    """
    initial_guess = [x, y]
    result = sp.minimize(
        diff_logarithmean_loggeomean_radius,
        initial_guess,
        args=[alpha, bita],
        bounds=((0, None), (None, None)),
    )
    return result.x[0], result.x[1], result.fun
if __name__ == "__main__":
    # Load the Mars opposition observations (Tycho Brahe / Kepler data).
    mars_data = pd.read_csv('./../data/01_data_mars_opposition.csv')
    # region Calculate Longitude Relative to Actual Sun(ALPHA)
    actual_sun_zodiac_indexes=mars_data['ZodiacIndex'].values
    actual_sun_degree = mars_data['Degree'].values
    actual_sun_minute = mars_data['Minute'].values
    actual_sun_second = mars_data['Second'].values
    longitude_relative_actual_sun=calculate_longitude(actual_sun_zodiac_indexes,actual_sun_degree,actual_sun_minute,actual_sun_second)
    # endregion
    # region Calculate Longitude Relative to Average Sun(BITA)
    avg_sun_zodiac_indexes = mars_data['ZodiacIndexAverageSun'].values
    avg_sun_degree = mars_data['DegreeMean'].values
    avg_sun_minute = mars_data['MinuteMean'].values
    avg_sun_second = mars_data['SecondMean'].values
    longitude_relative_avg_sun = calculate_longitude(avg_sun_zodiac_indexes, avg_sun_degree, avg_sun_minute,avg_sun_second)
    # endregion
    # Initial guess for the orbit parameters: x = 1.2, y = 120.
    x=1.2
    y=120
    print("Initial Guess of x :"+str(x)+", y :"+str(y))
    # Fit (x, y) by minimising the log-AM minus log-GM radius loss.
    x_opt, y_opt,loss=minimize_loss(longitude_relative_actual_sun,longitude_relative_avg_sun,x,y)
    print("Optimised Value of x :" + str(x_opt) + ", y :" + str(y_opt))
    print("Total Loss :"+str(loss))
    # ANSWER: Optimised Value of x :0.9662028648251194, y :148.874455333893
    #find_radius(longitude_relative_actual_sun, longitude_relative_avg_sun, x_opt, y_opt)
"scipy.optimize.minimize",
"scipy.stats.mstats.gmean",
"numpy.log",
"pandas.read_csv",
"numpy.square",
"numpy.mean",
"numpy.sqrt"
] | [((1149, 1169), 'numpy.mean', 'np.mean', (['radius_list'], {}), '(radius_list)\n', (1156, 1169), True, 'import numpy as np\n'), ((1191, 1209), 'scipy.stats.mstats.gmean', 'gmean', (['radius_list'], {}), '(radius_list)\n', (1196, 1209), False, 'from scipy.stats.mstats import gmean\n'), ((2074, 2192), 'scipy.optimize.minimize', 'sp.minimize', (['diff_logarithmean_loggeomean_radius', 'parameters'], {'args': '[alpha, bita]', 'bounds': '((0, None), (None, None))'}), '(diff_logarithmean_loggeomean_radius, parameters, args=[alpha,\n bita], bounds=((0, None), (None, None)))\n', (2085, 2192), True, 'import scipy.optimize as sp\n'), ((2296, 2348), 'pandas.read_csv', 'pd.read_csv', (['"""./../data/01_data_mars_opposition.csv"""'], {}), "('./../data/01_data_mars_opposition.csv')\n", (2307, 2348), True, 'import pandas as pd\n'), ((1075, 1091), 'numpy.sqrt', 'np.sqrt', (['(z4 / z5)'], {}), '(z4 / z5)\n', (1082, 1091), True, 'import numpy as np\n'), ((1221, 1244), 'numpy.log', 'np.log', (['arithmetic_mean'], {}), '(arithmetic_mean)\n', (1227, 1244), True, 'import numpy as np\n'), ((1247, 1269), 'numpy.log', 'np.log', (['geometric_mean'], {}), '(geometric_mean)\n', (1253, 1269), True, 'import numpy as np\n'), ((1746, 1762), 'numpy.sqrt', 'np.sqrt', (['(z1 / z2)'], {}), '(z1 / z2)\n', (1753, 1762), True, 'import numpy as np\n'), ((1013, 1026), 'numpy.square', 'np.square', (['z2'], {}), '(z2)\n', (1022, 1026), True, 'import numpy as np\n'), ((1044, 1057), 'numpy.square', 'np.square', (['z3'], {}), '(z3)\n', (1053, 1057), True, 'import numpy as np\n'), ((1679, 1695), 'numpy.square', 'np.square', (['rsin2'], {}), '(rsin2)\n', (1688, 1695), True, 'import numpy as np\n'), ((1713, 1728), 'numpy.square', 'np.square', (['cos3'], {}), '(cos3)\n', (1722, 1728), True, 'import numpy as np\n'), ((997, 1010), 'numpy.square', 'np.square', (['z1'], {}), '(z1)\n', (1006, 1010), True, 'import numpy as np\n'), ((1660, 1676), 'numpy.square', 'np.square', (['rsin1'], {}), '(rsin1)\n', (1669, 1676), 
True, 'import numpy as np\n')] |
import numpy as np
import csv
def read_csv(path, width=34, height=26, dims=1):
    """Load an eye-image dataset from a CSV file.

    Each CSV row holds an 'image' column (a stringified flat pixel list,
    e.g. "[12, 34, ...]") and a 'state' column ('open' or anything else).

    Args:
        path: path to the CSV file.
        width: image width in pixels (default 34).
        height: image height in pixels (default 26).
        dims: number of channels per pixel (default 1).

    Returns:
        (imgs, tgs): a uint8 array of shape (n, height, width, dims) and a
        float array of shape (n, 1) with 1 for 'open' and 0 otherwise.
        Both arrays are shuffled with the same permutation before returning.
    """
    with open(path, 'r') as f:
        # Read the CSV file as a sequence of dictionaries keyed by header.
        rows = list(csv.DictReader(f))
        imgs = np.empty((len(rows), height, width, dims), dtype=np.uint8)
        tgs = np.empty((len(rows), 1))
        for i, row in enumerate(rows):
            # Convert the stringified pixel list back into an image array.
            pixels = row['image'].strip('[').strip(']').split(', ')
            im = np.array(pixels, dtype=np.uint8)
            imgs[i] = np.expand_dims(im.reshape((height, width)), axis=2)
            # The tag for an open eye is 1, for a closed eye 0.
            tgs[i] = 1 if row['state'] == 'open' else 0
    # Shuffle images and tags jointly so pairs stay aligned.
    index = np.random.permutation(imgs.shape[0])
    return imgs[index], tgs[index]
"numpy.random.permutation",
"csv.DictReader",
"numpy.array",
"numpy.expand_dims"
] | [((890, 926), 'numpy.random.permutation', 'np.random.permutation', (['imgs.shape[0]'], {}), '(imgs.shape[0])\n', (911, 926), True, 'import numpy as np\n'), ((181, 198), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (195, 198), False, 'import csv\n'), ((601, 630), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (609, 630), True, 'import numpy as np\n'), ((676, 702), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(2)'}), '(im, axis=2)\n', (690, 702), True, 'import numpy as np\n')] |
#! python3
# -*- encoding: utf-8 -*-
'''
@File : equalize_hist.py
@Time : 2020/11/22 19:22:32
@Author : <NAME>
@Contact : <EMAIL>
'''
import os
import cv2
import numpy as np
import math
def equalize_gray(img):
    """Convert a BGR image to grayscale and equalize its histogram."""
    return cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
def equalize_rgb(img):
    """Histogram-equalize each of the three BGR channels independently."""
    channels = cv2.split(img)
    equalized = tuple(cv2.equalizeHist(ch) for ch in channels)
    return cv2.merge(equalized)
def equalize_yuv(img):
    """Equalize only the luma (Y) channel in YCrCb space, preserving chroma."""
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    planes = cv2.split(ycrcb)
    planes[0] = cv2.equalizeHist(planes[0])
    return cv2.cvtColor(cv2.merge(planes), cv2.COLOR_YCrCb2BGR)
def equalize_hsv(img):
    """Equalize only the value (V) channel in HSV space, preserving hue/saturation."""
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    planes = cv2.split(hsv)
    planes[2] = cv2.equalizeHist(planes[2])
    return cv2.cvtColor(cv2.merge(planes), cv2.COLOR_HSV2BGR)
def equalize_hls(img):
    """Equalize only the lightness (L) channel in HLS space, preserving hue/saturation."""
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    planes = cv2.split(hls)
    planes[1] = cv2.equalizeHist(planes[1])
    return cv2.cvtColor(cv2.merge(planes), cv2.COLOR_HLS2BGR)
def gamma_correction(img):
    """Apply automatic gamma correction to a BGR image.

    The gamma exponent is derived from the image's mean luminance via
    cal_gamma, so dark images are brightened toward a mid-level target.

    Args:
        img: input BGR image (convertible to uint8).

    Returns:
        Gamma-corrected image of the same shape as the input.
    """
    mean_luminance = np.mean(get_luminance(img))
    gamma = cal_gamma(mean_luminance)
    # Build the 256-entry lookup table in one vectorized pass (the previous
    # version looped in Python and allocated a dead `np.empty` result that
    # was immediately overwritten).
    table = ((np.arange(256) / 255.0) ** gamma * 255.0).astype("uint8")
    return cv2.LUT(np.array(img, dtype=np.uint8), table)
def get_luminance(img):
    """Return the luma (Y) plane of a 3x3-blurred BGR image.

    Uses weighted B/G/R contributions plus a +16 offset.
    """
    smoothed = cv2.blur(img, (3, 3))
    blue = smoothed[:, :, 0]
    green = smoothed[:, :, 1]
    red = smoothed[:, :, 2]
    return 0.256789 * red + 0.504129 * green + 0.097906 * blue + 16
def cal_gamma(l):
    """Gamma exponent mapping mean luminance *l* toward level 101.

    Solves (l / 255) ** gamma == 101 / 255 for gamma.
    """
    source = math.log(l / 255)
    target = math.log(101 / 255)
    return target / source
def main():
    """Gamma-correct every .jpg under 'dark' and save side-by-side results to 'dark_res'."""
    src_root = 'dark'
    dst_root = 'dark_res'
    # Collect every file below the source directory.
    paths = [
        os.path.join(dirpath, filename)
        for dirpath, _, filenames in os.walk(src_root)
        for filename in filenames
    ]
    for path in paths:
        if path[-3:] != 'jpg':
            continue
        img = cv2.imread(path, 1)
        before = np.mean(get_luminance(img))
        corrected = gamma_correction(img)
        after = np.mean(get_luminance(corrected))
        # Write the original and the corrected image side by side.
        side_by_side = cv2.hconcat([img, corrected])
        cv2.imwrite(path.replace(src_root, dst_root), side_by_side)
        print(path, before, after)
if __name__ == '__main__':
    main()
"cv2.equalizeHist",
"cv2.cvtColor",
"numpy.empty",
"os.walk",
"cv2.blur",
"math.log",
"cv2.imread",
"cv2.split",
"numpy.array",
"cv2.hconcat",
"numpy.arange",
"cv2.merge",
"os.path.join"
] | [((252, 289), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (264, 289), False, 'import cv2\n'), ((301, 323), 'cv2.equalizeHist', 'cv2.equalizeHist', (['gray'], {}), '(gray)\n', (317, 323), False, 'import cv2\n'), ((383, 397), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (392, 397), False, 'import cv2\n'), ((408, 427), 'cv2.equalizeHist', 'cv2.equalizeHist', (['b'], {}), '(b)\n', (424, 427), False, 'import cv2\n'), ((438, 457), 'cv2.equalizeHist', 'cv2.equalizeHist', (['g'], {}), '(g)\n', (454, 457), False, 'import cv2\n'), ((468, 487), 'cv2.equalizeHist', 'cv2.equalizeHist', (['r'], {}), '(r)\n', (484, 487), False, 'import cv2\n'), ((502, 525), 'cv2.merge', 'cv2.merge', (['(bH, gH, rH)'], {}), '((bH, gH, rH))\n', (511, 525), False, 'import cv2\n'), ((585, 623), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2YCrCb'], {}), '(img, cv2.COLOR_BGR2YCrCb)\n', (597, 623), False, 'import cv2\n'), ((643, 660), 'cv2.split', 'cv2.split', (['imgYUV'], {}), '(imgYUV)\n', (652, 660), False, 'import cv2\n'), ((683, 715), 'cv2.equalizeHist', 'cv2.equalizeHist', (['channelsYUV[0]'], {}), '(channelsYUV[0])\n', (699, 715), False, 'import cv2\n'), ((732, 754), 'cv2.merge', 'cv2.merge', (['channelsYUV'], {}), '(channelsYUV)\n', (741, 754), False, 'import cv2\n'), ((769, 812), 'cv2.cvtColor', 'cv2.cvtColor', (['channels', 'cv2.COLOR_YCrCb2BGR'], {}), '(channels, cv2.COLOR_YCrCb2BGR)\n', (781, 812), False, 'import cv2\n'), ((872, 908), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (884, 908), False, 'import cv2\n'), ((928, 945), 'cv2.split', 'cv2.split', (['imgHSV'], {}), '(imgHSV)\n', (937, 945), False, 'import cv2\n'), ((968, 1000), 'cv2.equalizeHist', 'cv2.equalizeHist', (['channelsHSV[2]'], {}), '(channelsHSV[2])\n', (984, 1000), False, 'import cv2\n'), ((1017, 1039), 'cv2.merge', 'cv2.merge', (['channelsHSV'], {}), '(channelsHSV)\n', (1026, 1039), False, 
'import cv2\n'), ((1054, 1095), 'cv2.cvtColor', 'cv2.cvtColor', (['channels', 'cv2.COLOR_HSV2BGR'], {}), '(channels, cv2.COLOR_HSV2BGR)\n', (1066, 1095), False, 'import cv2\n'), ((1155, 1191), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HLS'], {}), '(img, cv2.COLOR_BGR2HLS)\n', (1167, 1191), False, 'import cv2\n'), ((1211, 1228), 'cv2.split', 'cv2.split', (['imgHLS'], {}), '(imgHLS)\n', (1220, 1228), False, 'import cv2\n'), ((1251, 1283), 'cv2.equalizeHist', 'cv2.equalizeHist', (['channelsHLS[1]'], {}), '(channelsHLS[1])\n', (1267, 1283), False, 'import cv2\n'), ((1300, 1322), 'cv2.merge', 'cv2.merge', (['channelsHLS'], {}), '(channelsHLS)\n', (1309, 1322), False, 'import cv2\n'), ((1337, 1378), 'cv2.cvtColor', 'cv2.cvtColor', (['channels', 'cv2.COLOR_HLS2BGR'], {}), '(channels, cv2.COLOR_HLS2BGR)\n', (1349, 1378), False, 'import cv2\n'), ((1612, 1631), 'numpy.empty', 'np.empty', (['img.shape'], {}), '(img.shape)\n', (1620, 1631), True, 'import numpy as np\n'), ((1754, 1775), 'cv2.blur', 'cv2.blur', (['img', '(3, 3)'], {}), '(img, (3, 3))\n', (1762, 1775), False, 'import cv2\n'), ((1987, 2004), 'math.log', 'math.log', (['(l / 255)'], {}), '(l / 255)\n', (1995, 2004), False, 'import math\n'), ((2011, 2030), 'math.log', 'math.log', (['(101 / 255)'], {}), '(101 / 255)\n', (2019, 2030), False, 'import math\n'), ((2182, 2195), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (2189, 2195), False, 'import os\n'), ((1654, 1683), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (1662, 1683), True, 'import numpy as np\n'), ((2371, 2387), 'cv2.imread', 'cv2.imread', (['i', '(1)'], {}), '(i, 1)\n', (2381, 2387), False, 'import cv2\n'), ((2771, 2800), 'cv2.hconcat', 'cv2.hconcat', (['[img, dst_gamma]'], {}), '([img, dst_gamma])\n', (2782, 2800), False, 'import cv2\n'), ((2262, 2293), 'os.path.join', 'os.path.join', (['dirpath', 'filepath'], {}), '(dirpath, filepath)\n', (2274, 2293), False, 'import os\n'), ((1562, 1579), 
'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (1571, 1579), True, 'import numpy as np\n')] |
'''
Maximum predictability evaluation module described in
<NAME>, et al. "Predicting taxi demand at high spatial resolution:
Approaching the limit of predictability." Big Data (Big Data),
2016 IEEE International Conference on. IEEE, 2016.
Given the number of unique values N in the time series and its time-correlated entropy S
the module approximates the theoretical limit of the series predictability Pmax.
'''
import numpy as np
def Function(x, N, S):
    """Maximum predictability function whose root is Pmax.

    inputs:
        x: evaluated maximum predictability, a real number in (0, 1).
        N: number of unique values in the time series.
        S: time-correlated entropy of the time series (bits).
    output:
        Real value of the function at x.
    """
    binary_entropy = -x * np.log(x) - (1 - x) * np.log(1 - x)
    alphabet_spread = (1 - x) * np.log(N - 1)
    return 1.0 * (binary_entropy + alphabet_spread - S * np.log(2))
def FirstDerivative(x, N):
    """First derivative of the maximum predictability function.

    inputs:
        x: evaluated maximum predictability, a real number in (0, 1).
        N: number of unique values in the time series.
    output:
        Real value of the first derivative at x.
    """
    slope = np.log(1 - x) - np.log(x) - np.log(N - 1)
    return 1.0 * slope
def SecondDerivative(x):
    """Second derivative of the maximum predictability function.

    inputs:
        x: evaluated maximum predictability, a real number in (0, 1).
    output:
        Real value of the second derivative at x.
    """
    denominator = (x - 1) * x
    return 1.0 / denominator
def CalculateNewApproximation(x, N, S):
    """One Halley-method correction step toward the root of Function.

    inputs:
        x: current estimate of the maximum predictability, real.
        N: number of unique values in the time series.
        S: time-correlated entropy of the time series.
    output:
        Real correction to subtract from x.
    """
    f = Function(x, N, S)
    df = FirstDerivative(x, N)
    d2f = SecondDerivative(x)
    return 1.0 * f / (df - f * d2f / (2 * df))
def maximum_predictability(N, S):
    """Approximate the theoretical limit of predictability, Pmax.

    inputs:
        N: number of unique values in the time series, integer.
        S: time-correlated entropy of the time series, real (bits).
    output:
        Pmax rounded to 10 decimals, 0.999 when the entropy is near zero,
        or the string "No solutions" when S exceeds log2(N).
    """
    S = round(S, 9)
    # No root exists when the entropy exceeds the uniform limit log2(N).
    if S > round(np.log2(N), 9):
        return "No solutions"
    # Near-zero entropy: the series is essentially fully predictable.
    if S <= 0.01:
        return 0.999
    # Start just above 1/N and iterate correction steps until convergence.
    x = 1.0000000001 / N
    while abs(Function(x, N, S)) > 0.00000001:
        x = x - CalculateNewApproximation(x, N, S)
    return round(x, 10)
| [
"numpy.log2",
"numpy.log"
] | [((1220, 1233), 'numpy.log', 'np.log', (['(N - 1)'], {}), '(N - 1)\n', (1226, 1233), True, 'import numpy as np\n'), ((2445, 2455), 'numpy.log2', 'np.log2', (['N'], {}), '(N)\n', (2452, 2455), True, 'import numpy as np\n'), ((861, 870), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (867, 870), True, 'import numpy as np\n'), ((1198, 1211), 'numpy.log', 'np.log', (['(1 - x)'], {}), '(1 - x)\n', (1204, 1211), True, 'import numpy as np\n'), ((1210, 1219), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1216, 1219), True, 'import numpy as np\n'), ((847, 860), 'numpy.log', 'np.log', (['(N - 1)'], {}), '(N - 1)\n', (853, 860), True, 'import numpy as np\n'), ((813, 822), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (819, 822), True, 'import numpy as np\n'), ((829, 842), 'numpy.log', 'np.log', (['(1 - x)'], {}), '(1 - x)\n', (835, 842), True, 'import numpy as np\n')] |
# pylint: disable=line-too-long
import copy
import re
from numpy import dtype, array, invert, take
__all__ = ('generate_xdmf',)
xdmffile = """<?xml version="1.0" encoding="utf-8"?>
<Xdmf xmlns:xi="http://www.w3.org/2001/XInclude" Version="2.1">
<Domain>
<Grid Name="Structured Grid" GridType="Collection" CollectionType="Temporal">
<Time TimeType="List"><DataItem Format="XML" Dimensions="{1}"> {0} </DataItem></Time>
{2}
</Grid>
</Domain>
</Xdmf>
"""
def get_grid(geometry, topology, attrs):
    """Assemble a uniform <Grid> element from geometry, topology and attribute XML."""
    template = (
        '<Grid GridType="Uniform">\n'
        '    {0}\n'
        '    {1}\n'
        '    {2}\n'
        '    </Grid>\n'
        '    '
    )
    return template.format(geometry, topology, attrs)
def get_geometry(kind=0, dim=2):
    """Return an XDMF <Geometry> element template.

    kind=0 yields an ORIGIN_DX* geometry whose placeholders take origin and
    grid-spacing values directly; kind=1 yields a VX* geometry whose
    placeholders take precision, dimensions, the HDF5 file name, the mesh
    dataset names and the group path.  dim selects the 2D or 3D variant.
    """
    assert kind in (0, 1)
    assert dim in (2, 3)
    if dim == 2:
        if kind == 0:
            return """<Geometry Type="ORIGIN_DXDY">
          <DataItem Format="XML" NumberType="Float" Dimensions="2">
            {0} {1}
          </DataItem>
          <DataItem Format="XML" NumberType="Float" Dimensions="2">
            {2} {3}
          </DataItem>
        </Geometry>"""
        return """<Geometry Type="VXVY">
          <DataItem Format="HDF" NumberType="Float" Precision="{0}" Dimensions="{1}">
            {3}:{6}/mesh/{4}
          </DataItem>
          <DataItem Format="HDF" NumberType="Float" Precision="{0}" Dimensions="{2}">
            {3}:{6}/mesh/{5}
          </DataItem>
        </Geometry>"""
    if dim == 3:
        if kind == 0:
            return """<Geometry Type="ORIGIN_DXDYDZ">
          <DataItem Format="XML" NumberType="Float" Dimensions="3">
            {0} {1} {2}
          </DataItem>
          <DataItem Format="XML" NumberType="Float" Dimensions="3">
            {3} {4} {5}
          </DataItem>
        </Geometry>"""
        return """<Geometry Type="VXVYVZ">
          <DataItem Format="HDF" NumberType="Float" Precision="{0}" Dimensions="{3}">
            {4}:{8}/mesh/{5}
          </DataItem>
          <DataItem Format="HDF" NumberType="Float" Precision="{0}" Dimensions="{2}">
            {4}:{8}/mesh/{6}
          </DataItem>
          <DataItem Format="HDF" NumberType="Float" Precision="{0}" Dimensions="{1}">
            {4}:{8}/mesh/{7}
          </DataItem>
        </Geometry>"""
def get_topology(dims, kind=0):
    """Return the XDMF <Topology> element for a 2D/3D (co-)rectilinear mesh."""
    ndim = len(dims)
    assert ndim in (2, 3)
    mesh_prefix = 'Co' if kind == 0 else ''
    sizes = ' '.join(str(d) for d in dims)
    return '<Topology Dimensions="{0}" Type="{1}D{2}RectMesh"/>'.format(sizes, ndim, mesh_prefix)
def get_attribute(attr, h5filename, dims, prec):
    """Build an XDMF <Attribute> element pointing at a 2D or 3D HDF5 dataset."""
    name = attr.split("/")[0]
    ndim = len(dims)
    assert ndim in (2, 3)
    if ndim == 2:
        template = """<Attribute Name="{0}" Center="Node">
      <DataItem Format="HDF" NumberType="Float" Precision="{5}" Dimensions="{1} {2}">
        {3}:/{4}
      </DataItem>
    </Attribute>
    """
        return template.format(name, dims[0], dims[1], h5filename, attr, prec)
    template = """<Attribute Name="{0}" Center="Node">
      <DataItem Format="HDF" NumberType="Float" Precision="{6}" Dimensions="{1} {2} {3}">
        {4}:/{5}
      </DataItem>
    </Attribute>
    """
    return template.format(name, dims[0], dims[1], dims[2], h5filename, attr, prec)
def generate_xdmf(h5filename, periodic=True, order='paraview'):
    """Generate XDMF-files
    Parameters
    ----------
    h5filename : str
        Name of hdf5-file that we want to decorate with xdmf
    periodic : bool or dim-sequence of bools, optional
        If true along axis i, assume data is periodic.
        Only affects the calculation of the domain size and only if the
        domain is given as 2-tuple of origin+dx.
    order : str
        ``paraview`` or ``visit``
        For some reason Paraview and Visit requires the mesh stored in
        opposite order in the XDMF-file for 2D slices. Make choice of
        order here.
    """
    import h5py
    f = h5py.File(h5filename, 'a')
    keys = []
    f.visit(keys.append)
    assert order.lower() in ('paraview', 'visit')
    # Find unique scalar groups of 2D and 3D datasets
    datasets = {2:{}, 3:{}}
    for key in keys:
        # Only rank-0 (scalar) groups are decorated; vector data is skipped.
        if f[key.split('/')[0]].attrs['rank'] > 0:
            continue
        if isinstance(f[key], h5py.Dataset):
            if not ('mesh' in key or 'domain' in key or 'Vector' in key):
                tstep = int(key.split("/")[-1])
                ndim = int(key.split("/")[1][0])
                if ndim in (2, 3):
                    ds = datasets[ndim]
                    if tstep in ds:
                        ds[tstep] += [key]
                    else:
                        ds[tstep] = [key]
    # Normalize `periodic` to a per-axis 0/1 list; entries are used below as
    # (N - per), so 0 marks a periodic axis and 1 a non-periodic one.
    if periodic is True:
        periodic = [0]*5
    elif periodic is False:
        periodic = [1]*5
    else:
        assert isinstance(periodic, (tuple, list))
        periodic = list(array(invert(periodic), int))
    # Candidate coordinate dataset names, one per possible axis.
    coor = ['x0', 'x1', 'x2', 'x3', 'x4']
    for ndim, dsets in datasets.items():
        timesteps = list(dsets.keys())
        per = copy.copy(periodic)
        if not timesteps:
            continue
        timesteps.sort(key=int)
        # Space-separated list of time step values for the <Time> element.
        tt = ""
        for i in timesteps:
            tt += "%s " %i
        datatype = f[dsets[timesteps[0]][0]].dtype
        assert datatype.char not in 'FDG', "Cannot use generate_xdmf to visualize complex data."
        # Single precision (4 bytes) only for exact float32 data, else double.
        prec = 4 if datatype is dtype('float32') else 8
        xff = {}
        geometry = {}
        topology = {}
        attrs = {}
        grid = {}
        NN = {}
        # Derive geometry and topology per slice composition from the
        # datasets of the first time step.
        for name in dsets[timesteps[0]]:
            group = name.split('/')[0]
            if 'slice' in name:
                slices = name.split("/")[2]
            else:
                slices = 'whole'
            cc = copy.copy(coor)
            if slices not in xff:
                xff[slices] = copy.copy(xdmffile)
            N = list(f[name].shape)
            kk = 0
            sl = 0
            if 'slice' in slices:
                ss = slices.split("_")
                ii = []
                for i, sx in enumerate(ss):
                    if 'slice' in sx:
                        ii.append(i)
                    else:
                        if len(f[group].attrs.get('shape')) == 3: # 2D slice in 3D domain
                            kk = i
                            sl = int(sx)
                            N.insert(i, 1)
                cc = take(coor, ii)
            else:
                ii = list(range(ndim))
            NN[slices] = N
            if 'domain' in f[group].keys():
                if ndim == 2 and ('slice' not in slices or len(f[group].attrs.get('shape')) > 3):
                    geo = get_geometry(kind=0, dim=2)
                    assert len(ii) == 2
                    i, j = ii
                    if order.lower() == 'paraview':
                        data = [f[group+'/domain/{}'.format(coor[i])][0],
                                f[group+'/domain/{}'.format(coor[j])][0],
                                f[group+'/domain/{}'.format(coor[i])][1]/(N[0]-per[i]),
                                f[group+'/domain/{}'.format(coor[j])][1]/(N[1]-per[j])]
                        geometry[slices] = geo.format(*data)
                    else:
                        data = [f[group+'/domain/{}'.format(coor[j])][0],
                                f[group+'/domain/{}'.format(coor[i])][0],
                                f[group+'/domain/{}'.format(coor[j])][1]/(N[0]-per[j]),
                                f[group+'/domain/{}'.format(coor[i])][1]/(N[1]-per[i])]
                        geometry[slices] = geo.format(*data)
                else:
                    if ndim == 2:
                        ii.insert(kk, kk)
                        per[kk] = 0
                    i, j, k = ii
                    geo = get_geometry(kind=0, dim=3)
                    data = [f[group+'/domain/{}'.format(coor[i])][0],
                            f[group+'/domain/{}'.format(coor[j])][0],
                            f[group+'/domain/{}'.format(coor[k])][0],
                            f[group+'/domain/{}'.format(coor[i])][1]/(N[0]-per[i]),
                            f[group+'/domain/{}'.format(coor[j])][1]/(N[1]-per[j]),
                            f[group+'/domain/{}'.format(coor[k])][1]/(N[2]-per[k])]
                    if ndim == 2:
                        # Pin the sliced axis to the slice's physical position.
                        origin, dx = f[group+'/domain/x{}'.format(kk)]
                        M = f[group].attrs.get('shape')
                        pos = origin+dx/(M[kk]-per[kk])*sl
                        data[kk] = pos
                        data[kk+3] = pos
                    geometry[slices] = geo.format(*data)
                topology[slices] = get_topology(N, kind=0)
            elif 'mesh' in f[group].keys():
                if ndim == 2 and ('slice' not in slices or len(f[group].attrs.get('shape')) > 3):
                    geo = get_geometry(kind=1, dim=2)
                else:
                    geo = get_geometry(kind=1, dim=3)
                if ndim == 2 and ('slice' not in slices or len(f[group].attrs.get('shape')) > 3):
                    if order.lower() == 'paraview':
                        sig = (prec, N[0], N[1], h5filename, cc[0], cc[1], group)
                    else:
                        sig = (prec, N[1], N[0], h5filename, cc[1], cc[0], group)
                else:
                    if ndim == 2: # 2D slice in 3D domain
                        # Replace the sliced axis's HDF DataItem with the
                        # inline (XML) coordinate value of the slice.
                        pos = f[group+"/mesh/x{}".format(kk)][sl]
                        z = re.findall(r'<DataItem(.*?)</DataItem>', geo, re.DOTALL)
                        geo = geo.replace(z[2-kk], ' Format="XML" NumberType="Float" Precision="{0}" Dimensions="{%d}">\n          {%d}\n        '%(1+kk, 7-kk))
                        cc = list(cc)
                        cc.insert(kk, pos)
                    sig = (prec, N[0], N[1], N[2], h5filename, cc[2], cc[1], cc[0], group)
                geometry[slices] = geo.format(*sig)
                topology[slices] = get_topology(N, kind=1)
            grid[slices] = ''
        # if slice of data, need to know along which axes
        # Since there may be many different slices, we need to create
        # one xdmf-file for each composition of slices
        attrs = {}
        for tstep in timesteps:
            d = dsets[tstep]
            slx = set()
            for i, x in enumerate(d):
                slices = x.split("/")[2]
                if not 'slice' in slices:
                    slices = 'whole'
                N = NN[slices]
                if slices not in attrs:
                    attrs[slices] = ''
                attrs[slices] += get_attribute(x, h5filename, N, prec)
                slx.add(slices)
            for slices in slx:
                grid[slices] += get_grid(geometry[slices], topology[slices],
                                         attrs[slices].rstrip())
                attrs[slices] = ''
    # Write one .xdmf file per composition of slices.
    for slices, ff in xff.items():
        if 'slice' in slices:
            fname = h5filename[:-3]+"_"+slices+".xdmf"
        else:
            fname = h5filename[:-3]+".xdmf"
        xfl = open(fname, "w")
        h = ff.format(tt, len(timesteps), grid[slices].rstrip())
        xfl.write(h)
        xfl.close()
    f.close()
if __name__ == "__main__":
    # Command-line entry point: decorate the HDF5 file given as the last
    # argument with matching XDMF wrapper files.
    import sys
    generate_xdmf(sys.argv[-1])
| [
"h5py.File",
"numpy.invert",
"numpy.dtype",
"copy.copy",
"re.findall",
"numpy.take"
] | [((3976, 4002), 'h5py.File', 'h5py.File', (['h5filename', '"""a"""'], {}), "(h5filename, 'a')\n", (3985, 4002), False, 'import h5py\n'), ((5061, 5080), 'copy.copy', 'copy.copy', (['periodic'], {}), '(periodic)\n', (5070, 5080), False, 'import copy\n'), ((5773, 5788), 'copy.copy', 'copy.copy', (['coor'], {}), '(coor)\n', (5782, 5788), False, 'import copy\n'), ((5411, 5427), 'numpy.dtype', 'dtype', (['"""float32"""'], {}), "('float32')\n", (5416, 5427), False, 'from numpy import dtype, array, invert, take\n'), ((5853, 5872), 'copy.copy', 'copy.copy', (['xdmffile'], {}), '(xdmffile)\n', (5862, 5872), False, 'import copy\n'), ((4900, 4916), 'numpy.invert', 'invert', (['periodic'], {}), '(periodic)\n', (4906, 4916), False, 'from numpy import dtype, array, invert, take\n'), ((6484, 6498), 'numpy.take', 'take', (['coor', 'ii'], {}), '(coor, ii)\n', (6488, 6498), False, 'from numpy import dtype, array, invert, take\n'), ((9799, 9854), 're.findall', 're.findall', (['"""<DataItem(.*?)</DataItem>"""', 'geo', 're.DOTALL'], {}), "('<DataItem(.*?)</DataItem>', geo, re.DOTALL)\n", (9809, 9854), False, 'import re\n')] |
import os, sys
import tensorflow as tf
from tensorflow.keras.layers import MultiHeadAttention as MHA
import numpy as np
import pandas as pd
from DB.augmentation import augment
def Atomic(input_data, k_features=20):
    """Build an autoencoder over atomic feature vectors and return its encoder.

    Args:
        input_data: 2D array-like (n_atoms, n_features); the row length fixes
            the input dimension and the data adapts the normalization layer.
        k_features: size of the latent (encoded) representation.

    Returns:
        A Keras model mapping a feature vector to its k_features-dim encoding,
        with the autoencoder's binary cross-entropy reconstruction loss
        attached via add_loss.
    """
    input_dim = len(input_data[0])
    # Normalize inputs using statistics adapted from input_data.
    normalizer = tf.keras.layers.experimental.preprocessing.Normalization()
    normalizer.adapt(input_data)
    # Build model: normalize -> dense encode -> dense decode back to input_dim.
    atinput = tf.keras.layers.Input(shape=(input_dim,),name='Atomic input')
    encoded_input = normalizer(atinput)
    encoded = tf.keras.layers.Dense(k_features, activation='relu')(encoded_input)
    decoded = tf.keras.layers.Dense(input_dim, activation='sigmoid')(encoded)
    decoded = tf.keras.layers.Reshape((input_dim,))(decoded)
    ae = tf.keras.Model(atinput, decoded, name='Atomic autoencoder')
    print(ae.summary())
    # Reconstruction loss is computed against the normalized input and
    # attached to the encoder half, which is what the caller receives.
    reconstruction_loss = tf.keras.losses.binary_crossentropy(encoded_input, decoded)
    encoder = tf.keras.Model(atinput, encoded, name='Atom2vec encoder')
    encoder.add_loss(reconstruction_loss)
    print(encoder.summary())
    return encoder
class Endtoend(tf.keras.Model):
    """End-to-end model: atomic embeddings -> phase fields -> class + ranking outputs."""
    def __init__(self):
        super(Endtoend, self).__init__()
    @staticmethod
    def create_padding_mask(seq): # TODO - FIX
        """Mask padded entries of seq (currently via a Masking layer)."""
        #seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
        #return seq[:, tf.newaxis, tf.newaxis, :]
        return tf.keras.layers.Masking()(seq)
    def attention(self, x):
        """Apply 2-head self-attention to x (mask is computed but unused here)."""
        mask = self.create_padding_mask(x)
        attention = MHA(num_heads=2,
                        key_dim=2,
                        value_dim=2)
        return attention(x, x)
    def call(self, x, atoms, attention=False):
        """Build and return a Keras model with classification and ranking outputs.

        Args:
            x: phase-field tensor; axis 1 indexes elements of a phase field,
                axis 2 indexes all considered atoms.
            atoms: atomic feature matrix passed to the Atomic encoder.
            attention: when True, apply self-attention over the phase embeddings.
        """
        n = tf.shape(x)[1]        # elements in a phase field
        m = tf.shape(x)[2]        # all atoms considered
        k = 20                    # atomic k_features
        inputs = tf.keras.layers.Input(shape=(n,m,))
        encoder = Atomic(atoms, k_features=k)
        embeddings = encoder(atoms)
        # phases are one-hot encodings @ atomic embeddings:
        x_ = tf.reshape(inputs, [-1, m])
        phases = tf.reshape(x_ @ embeddings, [-1, n, k])
        #phases = tf.reshape(x_ @ embeddings, [-1, n* k]) # padding?
        if attention:
            phases = self.attention(phases)
        # Dense classification head with dropout.
        phases_in = tf.reshape(phases, [-1, n*k])
        x = tf.keras.layers.Dropout(0.1)(phases_in)
        mid = tf.keras.layers.Dense(20, activation="relu")(x)
        x = tf.keras.layers.Dropout(0.1)(mid)
        classes = tf.keras.layers.Dense(2, activation="softmax", name='classify_Tc')(x)
        # Second output: reconstructed phases, ranked by cosine similarity
        # against the input embeddings.
        phases_out = tf.keras.layers.Dense(n*k, activation="sigmoid", name='phases_out')(mid)
        cosine = tf.keras.losses.CosineSimilarity(axis=1, reduction=tf.keras.losses.Reduction.NONE)
        rankings = cosine(phases_in, phases_out)
        # Model with two outputs; NOTE(review): c_loss/re_loss are computed
        # below but never attached (add_loss calls are commented out) --
        # confirm whether that is intentional.
        m = tf.keras.Model(inputs=inputs, outputs=[classes, rankings], name='Class_and_Rank')
        c_loss = tf.keras.losses.sparse_categorical_crossentropy(inputs, classes)
        re_loss = tf.keras.losses.binary_crossentropy(phases_in, phases_out)
        # m.add_loss(c_loss)
        # m.add_loss(re_loss)
        print(m.summary())
        return m
if __name__ == '__main__':
    # Smoke test: a random phase-field tensor of shape (10, 8, 87)
    # (6960 = 10*8*87) and an 87x20 atomic matrix (1740 = 87*20).
    test = np.random.uniform(2,32,6960).reshape(10,8,87)
    env = np.random.uniform(2,32,1740).reshape(87,20)
    m = Endtoend()(test, env, attention=True)
    tf.keras.utils.plot_model(m, "single_model.png", show_shapes=True)
    print("Models are built")
| [
"numpy.random.uniform",
"tensorflow.keras.losses.binary_crossentropy",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.keras.losses.CosineSimilarity",
"tensorflow.keras.Model",
"tensorflow.keras.utils.plot_model... | [((317, 375), 'tensorflow.keras.layers.experimental.preprocessing.Normalization', 'tf.keras.layers.experimental.preprocessing.Normalization', ([], {}), '()\n', (373, 375), True, 'import tensorflow as tf\n'), ((441, 503), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(input_dim,)', 'name': '"""Atomic input"""'}), "(shape=(input_dim,), name='Atomic input')\n", (462, 503), True, 'import tensorflow as tf\n'), ((774, 833), 'tensorflow.keras.Model', 'tf.keras.Model', (['atinput', 'decoded'], {'name': '"""Atomic autoencoder"""'}), "(atinput, decoded, name='Atomic autoencoder')\n", (788, 833), True, 'import tensorflow as tf\n'), ((885, 944), 'tensorflow.keras.losses.binary_crossentropy', 'tf.keras.losses.binary_crossentropy', (['encoded_input', 'decoded'], {}), '(encoded_input, decoded)\n', (920, 944), True, 'import tensorflow as tf\n'), ((959, 1016), 'tensorflow.keras.Model', 'tf.keras.Model', (['atinput', 'encoded'], {'name': '"""Atom2vec encoder"""'}), "(atinput, encoded, name='Atom2vec encoder')\n", (973, 1016), True, 'import tensorflow as tf\n'), ((3471, 3537), 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['m', '"""single_model.png"""'], {'show_shapes': '(True)'}), "(m, 'single_model.png', show_shapes=True)\n", (3496, 3537), True, 'import tensorflow as tf\n'), ((557, 609), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['k_features'], {'activation': '"""relu"""'}), "(k_features, activation='relu')\n", (578, 609), True, 'import tensorflow as tf\n'), ((639, 693), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['input_dim'], {'activation': '"""sigmoid"""'}), "(input_dim, activation='sigmoid')\n", (660, 693), True, 'import tensorflow as tf\n'), ((717, 754), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['(input_dim,)'], {}), '((input_dim,))\n', (740, 754), True, 'import tensorflow as tf\n'), ((1520, 1560), 
'tensorflow.keras.layers.MultiHeadAttention', 'MHA', ([], {'num_heads': '(2)', 'key_dim': '(2)', 'value_dim': '(2)'}), '(num_heads=2, key_dim=2, value_dim=2)\n', (1523, 1560), True, 'from tensorflow.keras.layers import MultiHeadAttention as MHA\n'), ((1868, 1903), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(n, m)'}), '(shape=(n, m))\n', (1889, 1903), True, 'import tensorflow as tf\n'), ((2061, 2088), 'tensorflow.reshape', 'tf.reshape', (['inputs', '[-1, m]'], {}), '(inputs, [-1, m])\n', (2071, 2088), True, 'import tensorflow as tf\n'), ((2106, 2145), 'tensorflow.reshape', 'tf.reshape', (['(x_ @ embeddings)', '[-1, n, k]'], {}), '(x_ @ embeddings, [-1, n, k])\n', (2116, 2145), True, 'import tensorflow as tf\n'), ((2322, 2353), 'tensorflow.reshape', 'tf.reshape', (['phases', '[-1, n * k]'], {}), '(phases, [-1, n * k])\n', (2332, 2353), True, 'import tensorflow as tf\n'), ((2759, 2846), 'tensorflow.keras.losses.CosineSimilarity', 'tf.keras.losses.CosineSimilarity', ([], {'axis': '(1)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(axis=1, reduction=tf.keras.losses.\n Reduction.NONE)\n', (2791, 2846), True, 'import tensorflow as tf\n'), ((2946, 3032), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': '[classes, rankings]', 'name': '"""Class_and_Rank"""'}), "(inputs=inputs, outputs=[classes, rankings], name=\n 'Class_and_Rank')\n", (2960, 3032), True, 'import tensorflow as tf\n'), ((3045, 3109), 'tensorflow.keras.losses.sparse_categorical_crossentropy', 'tf.keras.losses.sparse_categorical_crossentropy', (['inputs', 'classes'], {}), '(inputs, classes)\n', (3092, 3109), True, 'import tensorflow as tf\n'), ((3128, 3186), 'tensorflow.keras.losses.binary_crossentropy', 'tf.keras.losses.binary_crossentropy', (['phases_in', 'phases_out'], {}), '(phases_in, phases_out)\n', (3163, 3186), True, 'import tensorflow as tf\n'), ((1397, 1422), 'tensorflow.keras.layers.Masking', 'tf.keras.layers.Masking', ([], {}), 
'()\n', (1420, 1422), True, 'import tensorflow as tf\n'), ((1701, 1712), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1709, 1712), True, 'import tensorflow as tf\n'), ((1756, 1767), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1764, 1767), True, 'import tensorflow as tf\n'), ((2364, 2392), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (2387, 2392), True, 'import tensorflow as tf\n'), ((2418, 2462), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (2439, 2462), True, 'import tensorflow as tf\n'), ((2478, 2506), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (2501, 2506), True, 'import tensorflow as tf\n'), ((2530, 2596), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""classify_Tc"""'}), "(2, activation='softmax', name='classify_Tc')\n", (2551, 2596), True, 'import tensorflow as tf\n'), ((2669, 2738), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(n * k)'], {'activation': '"""sigmoid"""', 'name': '"""phases_out"""'}), "(n * k, activation='sigmoid', name='phases_out')\n", (2690, 2738), True, 'import tensorflow as tf\n'), ((3327, 3357), 'numpy.random.uniform', 'np.random.uniform', (['(2)', '(32)', '(6960)'], {}), '(2, 32, 6960)\n', (3344, 3357), True, 'import numpy as np\n'), ((3381, 3411), 'numpy.random.uniform', 'np.random.uniform', (['(2)', '(32)', '(1740)'], {}), '(2, 32, 1740)\n', (3398, 3411), True, 'import numpy as np\n')] |
import unittest
import os
import numpy.testing as nptest
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4)
import numpy as np
from gmid2.basics.uai_files import read_mmap, read_vo, read_sum, read_mpe
from gmid2.basics.undirected_network import PrimalGraph
from gmid2.basics.graphical_model import GraphicalModel
from gmid2.inference.bucket import bucket_tree_decomposition, mini_bucket_tree_decomposition, join_graph_decomposition
from gmid2.inference.pgm_bte import PgmBTE
from gmid2.inference.pgm_wmbmm import PgmWMBMM
import numpy.testing as nptest
from gmid2.basics.factor import *
class PgmWMBMMTSumest(unittest.TestCase):
    """Bound tests on the simple4 .mmap instance: exact BTE vs. mini-bucket bounds."""
    def setUp(self):
        # Benchmark instance; tests load it from <cwd>/test_data/mmap/.
        self.file_name = "simple4.mmap"
        # self.file_name = "hailfinder"
        # v1 = Variable(100, 2, 'C')
        # f1 = Factor([v1], 2.0)
        # print(f1)
    def _test_sum_bte(self):
        """Exact bucket-tree elimination; leading underscore keeps unittest from running it."""
        # print(self.id())
        file_name = os.path.join(os.path.join(os.getcwd(), "test_data/mmap"), self.file_name)
        file_info = read_mmap(file_name, skip_table=False)
        gm = GraphicalModel()
        gm.build(file_info)
        gm.convert_to_log()
        read_vo(file_name + ".vo", file_info)
        vid_elim_order = [5, 3, 1, 4, 2, 0] #file_info.vo # 5 3 1 4 2 0 simple4
        bt = bucket_tree_decomposition(gm, vid_elim_order)
        bte = PgmBTE(gm, vid_elim_order)
        bte.build_message_graph(bt)
        bte.schedule(bt)
        bte.init_propagate()
        bte.propagate_iter(bw_iter=False)
        bound = bte.bounds()
        print("be:{}".format(bound))
        # Exact values: -6.72879777427 in log space, 0.00119596992 in linear.
        if gm.is_log:
            nptest.assert_almost_equal(-6.72879777427, bound)
        else:
            nptest.assert_almost_equal(0.00119596992, bound)
    def test_sum_mbte(self):
        """Mini-bucket (ibound=2) bound must upper-bound the exact value."""
        # print(self.id())
        file_name = os.path.join(os.path.join(os.getcwd(), "test_data/mmap"), self.file_name)
        file_info = read_mmap(file_name, skip_table=False)
        gm = GraphicalModel()
        gm.build(file_info)
        gm.convert_to_log()
        read_vo(file_name + ".vo", file_info)
        vid_elim_order = [5, 3, 1, 4, 2, 0] #file_info.vo # 5 3 1 4 2 0
        mbt = mini_bucket_tree_decomposition(gm, vid_elim_order, ibound=2)
        bte = PgmBTE(gm, vid_elim_order)
        bte.build_message_graph(mbt)
        bte.schedule(mbt)
        bte.init_propagate()
        bte.propagate_iter(bw_iter=False)
        bound = bte.bounds()
        print("mbe:{}".format(bound))
        if gm.is_log:
            self.assertGreaterEqual(bound, -6.72879777427) # a >= b
        else:
            self.assertGreaterEqual(bound, 0.00119596992)
        # GMID (different partitioning) reference values:
        # 0.00266450688
        # -5.74007135926
    def test_sum_wmbmm(self):
        """Weighted mini-bucket with moment matching must also upper-bound the exact value."""
        # print(self.id())
        file_name = os.path.join(os.path.join(os.getcwd(), "test_data/mmap"), self.file_name)
        file_info = read_mmap(file_name, skip_table=False)
        gm = GraphicalModel()
        gm.build(file_info)
        gm.convert_to_log()
        read_vo(file_name + ".vo", file_info)
        vid_elim_order = file_info.vo
        # vid_elim_order = [5, 3, 1, 4, 2, 0] #file_info.vo # 5 3 1 4 2 0
        mbt = mini_bucket_tree_decomposition(gm, vid_elim_order, ibound=2)
        bte = PgmWMBMM(gm, vid_elim_order)
        bte.build_message_graph(mbt)
        bte.schedule()
        bte.init_propagate()
        bte.propagate_iter()
        bound = bte.bounds()
        print("wmbmm:{}".format(bound))
        # No WMBMM implementation in GMID; higher than exact, but lower than MBE?
        # wmbmm: -6.019765162382094
        if gm.is_log:
            self.assertGreaterEqual(bound, -6.72879777427) # a >= b
        else:
            self.assertGreaterEqual(bound, 0.00119596992)
| [
"gmid2.basics.graphical_model.GraphicalModel",
"gmid2.inference.bucket.bucket_tree_decomposition",
"gmid2.basics.uai_files.read_vo",
"os.getcwd",
"numpy.testing.assert_almost_equal",
"gmid2.inference.pgm_bte.PgmBTE",
"pprint.PrettyPrinter",
"gmid2.inference.bucket.mini_bucket_tree_decomposition",
"g... | [((95, 118), 'pprint.PrettyPrinter', 'PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (108, 118), False, 'from pprint import PrettyPrinter\n'), ((1005, 1043), 'gmid2.basics.uai_files.read_mmap', 'read_mmap', (['file_name'], {'skip_table': '(False)'}), '(file_name, skip_table=False)\n', (1014, 1043), False, 'from gmid2.basics.uai_files import read_mmap, read_vo, read_sum, read_mpe\n'), ((1057, 1073), 'gmid2.basics.graphical_model.GraphicalModel', 'GraphicalModel', ([], {}), '()\n', (1071, 1073), False, 'from gmid2.basics.graphical_model import GraphicalModel\n'), ((1139, 1176), 'gmid2.basics.uai_files.read_vo', 'read_vo', (["(file_name + '.vo')", 'file_info'], {}), "(file_name + '.vo', file_info)\n", (1146, 1176), False, 'from gmid2.basics.uai_files import read_mmap, read_vo, read_sum, read_mpe\n'), ((1280, 1325), 'gmid2.inference.bucket.bucket_tree_decomposition', 'bucket_tree_decomposition', (['gm', 'vid_elim_order'], {}), '(gm, vid_elim_order)\n', (1305, 1325), False, 'from gmid2.inference.bucket import bucket_tree_decomposition, mini_bucket_tree_decomposition, join_graph_decomposition\n'), ((1341, 1367), 'gmid2.inference.pgm_bte.PgmBTE', 'PgmBTE', (['gm', 'vid_elim_order'], {}), '(gm, vid_elim_order)\n', (1347, 1367), False, 'from gmid2.inference.pgm_bte import PgmBTE\n'), ((1898, 1936), 'gmid2.basics.uai_files.read_mmap', 'read_mmap', (['file_name'], {'skip_table': '(False)'}), '(file_name, skip_table=False)\n', (1907, 1936), False, 'from gmid2.basics.uai_files import read_mmap, read_vo, read_sum, read_mpe\n'), ((1950, 1966), 'gmid2.basics.graphical_model.GraphicalModel', 'GraphicalModel', ([], {}), '()\n', (1964, 1966), False, 'from gmid2.basics.graphical_model import GraphicalModel\n'), ((2032, 2069), 'gmid2.basics.uai_files.read_vo', 'read_vo', (["(file_name + '.vo')", 'file_info'], {}), "(file_name + '.vo', file_info)\n", (2039, 2069), False, 'from gmid2.basics.uai_files import read_mmap, read_vo, read_sum, read_mpe\n'), ((2162, 2222), 
'gmid2.inference.bucket.mini_bucket_tree_decomposition', 'mini_bucket_tree_decomposition', (['gm', 'vid_elim_order'], {'ibound': '(2)'}), '(gm, vid_elim_order, ibound=2)\n', (2192, 2222), False, 'from gmid2.inference.bucket import bucket_tree_decomposition, mini_bucket_tree_decomposition, join_graph_decomposition\n'), ((2238, 2264), 'gmid2.inference.pgm_bte.PgmBTE', 'PgmBTE', (['gm', 'vid_elim_order'], {}), '(gm, vid_elim_order)\n', (2244, 2264), False, 'from gmid2.inference.pgm_bte import PgmBTE\n'), ((2891, 2929), 'gmid2.basics.uai_files.read_mmap', 'read_mmap', (['file_name'], {'skip_table': '(False)'}), '(file_name, skip_table=False)\n', (2900, 2929), False, 'from gmid2.basics.uai_files import read_mmap, read_vo, read_sum, read_mpe\n'), ((2943, 2959), 'gmid2.basics.graphical_model.GraphicalModel', 'GraphicalModel', ([], {}), '()\n', (2957, 2959), False, 'from gmid2.basics.graphical_model import GraphicalModel\n'), ((3025, 3062), 'gmid2.basics.uai_files.read_vo', 'read_vo', (["(file_name + '.vo')", 'file_info'], {}), "(file_name + '.vo', file_info)\n", (3032, 3062), False, 'from gmid2.basics.uai_files import read_mmap, read_vo, read_sum, read_mpe\n'), ((3195, 3255), 'gmid2.inference.bucket.mini_bucket_tree_decomposition', 'mini_bucket_tree_decomposition', (['gm', 'vid_elim_order'], {'ibound': '(2)'}), '(gm, vid_elim_order, ibound=2)\n', (3225, 3255), False, 'from gmid2.inference.bucket import bucket_tree_decomposition, mini_bucket_tree_decomposition, join_graph_decomposition\n'), ((3271, 3299), 'gmid2.inference.pgm_wmbmm.PgmWMBMM', 'PgmWMBMM', (['gm', 'vid_elim_order'], {}), '(gm, vid_elim_order)\n', (3279, 3299), False, 'from gmid2.inference.pgm_wmbmm import PgmWMBMM\n'), ((1601, 1650), 'numpy.testing.assert_almost_equal', 'nptest.assert_almost_equal', (['(-6.72879777427)', 'bound'], {}), '(-6.72879777427, bound)\n', (1627, 1650), True, 'import numpy.testing as nptest\n'), ((1677, 1725), 'numpy.testing.assert_almost_equal', 'nptest.assert_almost_equal', 
(['(0.00119596992)', 'bound'], {}), '(0.00119596992, bound)\n', (1703, 1725), True, 'import numpy.testing as nptest\n'), ((937, 948), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (946, 948), False, 'import os\n'), ((1830, 1841), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1839, 1841), False, 'import os\n'), ((2823, 2834), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2832, 2834), False, 'import os\n')] |
import numpy as np
import cv2
import glob
import itertools
def getImageArr(path, width, height, imgNorm="sub_mean", odering='channels_first'):
    """Load an image, resize it to (width, height), and normalise it.

    ``imgNorm`` selects the normalisation scheme; ``odering`` [sic] chooses
    whether the channel axis is moved to the front. On any failure the error
    is printed and a black image of the requested size is returned.
    """
    try:
        img = cv2.imread(path, 1)
        if imgNorm == "sub_and_divide":
            # Scale pixel values into [-1, 1].
            img = np.float32(cv2.resize(img, (width, height))) / 127.5 - 1
        elif imgNorm == "sub_mean":
            # Subtract the per-channel BGR means (VGG/ImageNet convention).
            img = cv2.resize(img, (width, height)).astype(np.float32)
            img[:, :, 0] -= 103.939
            img[:, :, 1] -= 116.779
            img[:, :, 2] -= 123.68
        elif imgNorm == "divide":
            # Scale pixel values into [0, 1].
            img = cv2.resize(img, (width, height)).astype(np.float32) / 255.0
        if odering == 'channels_first':
            img = np.rollaxis(img, 2, 0)  # HWC -> CHW
        return img
    except Exception as e:
        print(path, e)
        fallback = np.zeros((height, width, 3))
        if odering == 'channels_first':
            fallback = np.rollaxis(fallback, 2, 0)
        return fallback
def getSegmentationArr(path, nClasses, width, height):
    """Load a label image and convert it into one-hot class masks.

    Returns an array of shape (width*height, nClasses); if reading fails the
    error is printed and the all-zero label array is returned instead.
    """
    seg_labels = np.zeros((height, width, nClasses))
    try:
        # Class ids are stored in the first channel of the label image.
        labels = cv2.resize(cv2.imread(path, 1), (width, height))[:, :, 0]
        for c in range(nClasses):
            # Channel c is 1 wherever the label image holds class id c.
            seg_labels[:, :, c] = (labels == c).astype(int)
    except Exception as e:
        print(e)
    seg_labels = np.reshape(seg_labels, (width * height, nClasses))
    return seg_labels
def imageSegmentationGenerator(images_path, segs_path, batch_size, n_classes, input_height, input_width, output_height, output_width):
    """Yield endless (images, one-hot masks) batches for training.

    Both directories must end with '/' and contain files whose basenames
    match pairwise after sorting.
    """
    assert images_path[-1] == '/'
    assert segs_path[-1] == '/'

    def _collect(root):
        # Gather every jpg/png/jpeg under root, in sorted order.
        found = []
        for pattern in ("*.jpg", "*.png", "*.jpeg"):
            found += glob.glob(root + pattern)
        found.sort()
        return found

    images = _collect(images_path)
    segmentations = _collect(segs_path)
    assert len(images) == len(segmentations)
    for im, seg in zip(images, segmentations):
        # Pairing is positional, so corresponding basenames have to agree.
        assert im.split('/')[-1].split(".")[0] == seg.split('/')[-1].split(".")[0]

    zipped = itertools.cycle(zip(images, segmentations))
    while True:
        X, Y = [], []
        for _ in range(batch_size):
            im, seg = next(zipped)
            X.append(getImageArr(im, input_width, input_height))
            Y.append(getSegmentationArr(seg, n_classes, output_width, output_height))
        yield np.array(X), np.array(Y)
# import Models , LoadBatches
# G = LoadBatches.imageSegmentationGenerator( "data/clothes_seg/prepped/images_prepped_train/" , "data/clothes_seg/prepped/annotations_prepped_train/" , 1, 10 , 800 , 550 , 400 , 272 )
# G2 = LoadBatches.imageSegmentationGenerator( "data/clothes_seg/prepped/images_prepped_test/" , "data/clothes_seg/prepped/annotations_prepped_test/" , 1, 10 , 800 , 550 , 400 , 272 )
# m = Models.VGGSegnet.VGGSegnet( 10 , use_vgg_weights=True , optimizer='adadelta' , input_image_size=( 800 , 550 ) )
# m.fit_generator( G , 512 , nb_epoch=10 )
| [
"numpy.rollaxis",
"numpy.zeros",
"cv2.imread",
"numpy.array",
"numpy.reshape",
"glob.glob",
"cv2.resize"
] | [((1066, 1101), 'numpy.zeros', 'np.zeros', (['(height, width, nClasses)'], {}), '((height, width, nClasses))\n', (1074, 1101), True, 'import numpy as np\n'), ((1375, 1425), 'numpy.reshape', 'np.reshape', (['seg_labels', '(width * height, nClasses)'], {}), '(seg_labels, (width * height, nClasses))\n', (1385, 1425), True, 'import numpy as np\n'), ((170, 189), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (180, 189), False, 'import cv2\n'), ((1125, 1144), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (1135, 1144), False, 'import cv2\n'), ((1159, 1191), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (1169, 1191), False, 'import cv2\n'), ((1794, 1827), 'glob.glob', 'glob.glob', (["(images_path + '*.jpeg')"], {}), "(images_path + '*.jpeg')\n", (1803, 1827), False, 'import glob\n'), ((1941, 1972), 'glob.glob', 'glob.glob', (["(segs_path + '*.jpeg')"], {}), "(segs_path + '*.jpeg')\n", (1950, 1972), False, 'import glob\n'), ((754, 776), 'numpy.rollaxis', 'np.rollaxis', (['img', '(2)', '(0)'], {}), '(img, 2, 0)\n', (765, 776), True, 'import numpy as np\n'), ((861, 889), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {}), '((height, width, 3))\n', (869, 889), True, 'import numpy as np\n'), ((1666, 1698), 'glob.glob', 'glob.glob', (["(images_path + '*.jpg')"], {}), "(images_path + '*.jpg')\n", (1675, 1698), False, 'import glob\n'), ((1701, 1733), 'glob.glob', 'glob.glob', (["(images_path + '*.png')"], {}), "(images_path + '*.png')\n", (1710, 1733), False, 'import glob\n'), ((1866, 1896), 'glob.glob', 'glob.glob', (["(segs_path + '*.jpg')"], {}), "(segs_path + '*.jpg')\n", (1875, 1896), False, 'import glob\n'), ((1908, 1938), 'glob.glob', 'glob.glob', (["(segs_path + '*.png')"], {}), "(segs_path + '*.png')\n", (1917, 1938), False, 'import glob\n'), ((360, 392), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (370, 392), False, 'import cv2\n'), 
((948, 970), 'numpy.rollaxis', 'np.rollaxis', (['img', '(2)', '(0)'], {}), '(img, 2, 0)\n', (959, 970), True, 'import numpy as np\n'), ((2549, 2560), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2557, 2560), True, 'import numpy as np\n'), ((2562, 2573), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (2570, 2573), True, 'import numpy as np\n'), ((593, 625), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (603, 625), False, 'import cv2\n'), ((260, 292), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (270, 292), False, 'import cv2\n')] |
"""
This is yeoms attribute inference attack model and is not used!
"""
import numpy as np
from tensorflow.keras.models import load_model
from ..MIA.experiments.train_wb import get_scores
def get_test_indices(path):
    """Load the pickled index dictionary at *path* and return its 'train' split."""
    # The .npy file holds a pickled dict with 'train'/'test' index arrays.
    index_dict = np.load(path, allow_pickle=True).item()
    # Only the training split is used (train+test concat stays disabled).
    return index_dict["train"]
def arreq_in_list(myarr, list_arrays):
    """Return True when *myarr* is element-wise equal to any array in *list_arrays*."""
    return any(np.array_equal(candidate, myarr) for candidate in list_arrays)
def variate_dataset(data, attribut_index, test_indices):
    """Duplicate the selected rows with the attribute at *attribut_index* toggled.

    For texas/purchases each attribute is boolean, so |pi(x)| == 2: the first
    half of the returned X holds the original rows and the second half the same
    rows with the bit flipped; labels are repeated accordingly.
    """
    archive = np.load(data, allow_pickle=True)
    X_original = archive['x'][test_indices]
    Y_original = archive['y'][test_indices]
    # Flip the attribute bit in a copy of every selected row (values must be 0/1).
    X_flipped = np.copy(X_original)
    X_flipped[:, attribut_index] ^= 1
    Y_flipped = np.copy(Y_original)
    X = np.concatenate((X_original, X_flipped))
    Y = np.concatenate((Y_original, Y_flipped))
    print(len(X_original), len(Y_original), len(X_flipped), len(Y_flipped))
    return X, Y
# --- Attribute inference experiment (yeoms-style), script body ---------------
# Load the single trained target model for the texas100 experiment.
models = ["./data/experiments/global/texas100_s_42/target/2e6b7d3509284da39d2389532b9bace4_10_local_model.h5",
          ]
models = list(map(lambda x: load_model(x, compile=True), models))
# loss = SparseCategoricalCrossentropy(from_logits=False, reduction=tf.compat.v1.losses.Reduction.NONE)
# models[0].compile(optimizer=Adam(0.001), loss=loss, metrics=['accuracy'])
num_classes = 100
# Index of the boolean attribute whose value the attack tries to infer.
index = 2
path = "./data/global/texas100_s_42/target/2e6b7d3509284da39d2389532b9bace4_indices.npy"
data = f"./models/shokri_texas_{num_classes}_classes.npz"
# X holds each selected record twice: once original, once with the attribute flipped.
X, Y = variate_dataset(data, index, get_test_indices(path))
conf = []
size = len(X) // 2
# Per-record model loss; evaluate() returns [loss, ...] so [0] is the loss value.
for i in range(len(X)):
    conf.append(models[0].evaluate(X[i:i + 1], Y[i:i + 1], batch_size=1)[0])
pred_y = []
scores = []
true_y = []
for i in range(size):
    print(X[i][index], X[i + size][index])
    true_y.append(X[i][index])
    # Predict the attribute value of whichever variant the model is more
    # confident on (lower loss); record i and record i+size differ only in
    # the attribute bit, so this picks one of the two candidate values.
    if conf[i] < conf[i + size]:
        pred_y.append(X[i][index])
    else:
        pred_y.append(X[i + size][index])
# true_y = X[:size, index]
scores.append(get_scores(true_y, pred_y))
print(list(true_y))
print("bla")
print(pred_y)
print(scores)
| [
"numpy.load",
"tensorflow.keras.models.load_model",
"numpy.copy",
"numpy.array_equal",
"numpy.concatenate"
] | [((761, 793), 'numpy.load', 'np.load', (['data'], {'allow_pickle': '(True)'}), '(data, allow_pickle=True)\n', (768, 793), True, 'import numpy as np\n'), ((889, 908), 'numpy.copy', 'np.copy', (['X_original'], {}), '(X_original)\n', (896, 908), True, 'import numpy as np\n'), ((922, 941), 'numpy.copy', 'np.copy', (['Y_original'], {}), '(Y_original)\n', (929, 941), True, 'import numpy as np\n'), ((1023, 1059), 'numpy.concatenate', 'np.concatenate', (['(X_original, X_fake)'], {}), '((X_original, X_fake))\n', (1037, 1059), True, 'import numpy as np\n'), ((1068, 1104), 'numpy.concatenate', 'np.concatenate', (['(Y_original, Y_fake)'], {}), '((Y_original, Y_fake))\n', (1082, 1104), True, 'import numpy as np\n'), ((278, 310), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (285, 310), True, 'import numpy as np\n'), ((1344, 1371), 'tensorflow.keras.models.load_model', 'load_model', (['x'], {'compile': '(True)'}), '(x, compile=True)\n', (1354, 1371), False, 'from tensorflow.keras.models import load_model\n'), ((503, 530), 'numpy.array_equal', 'np.array_equal', (['elem', 'myarr'], {}), '(elem, myarr)\n', (517, 530), True, 'import numpy as np\n')] |
import numpy as np
from collections import deque
from functools import partials
class WollMonteCarlo:
    """Wolff cluster Monte Carlo simulator for the 2D Ising model.

    Spins are stored as 0/1 on an L x L lattice with periodic boundaries.
    ``method`` selects the cluster-growth traversal: "BFS" pops from the
    front of the queue; anything else (including the default None) uses
    depth-first traversal.
    """

    def __init__(self, L, T, method=None):
        self._L = L
        self._T = T
        # Bond-activation coupling: aligned neighbours bond with prob 1 - exp(-K).
        self._K = 2. / T
        self._method = method
        # Random initial spin configuration, values 0 or 1.
        self._state = np.random.randint(0, 2, size=[L, L])

    @property
    def L(self):
        return self._L

    @property
    def T(self):
        return self._T

    @property
    def K(self):
        return self._K

    @property
    def state(self):
        return self._state

    def probability_add_bonds(self, x1, y1, x2, y2, state):
        """Probability of bonding sites (x1,y1) and (x2,y2).

        Aligned neighbours bond with probability 1 - exp(-K); anti-aligned
        neighbours never bond (E = 0 gives probability 0).
        """
        E = 1.0 if state[x1, y1] == state[x2, y2] else 0.0
        return 1.0 - np.exp(-self.K * E)

    def wolff_iterative(self, state):
        """Grow one Wolff cluster iteratively; return the set of sites to flip.

        Uses a deque, which provides O(1) operations at both ends, so the same
        container serves breadth-first (popleft) and depth-first (pop) growth.
        """
        # Neighbour tables with periodic boundaries: left[x] == (x - 1) % L and
        # right[x] == (x + 1) % L, precomputed to avoid the add+modulo per step.
        left = [self.L - 1] + list(range(self.L - 1))
        right = list(range(1, self.L)) + [0]

        # Book-keeping containers.
        sites_to_consider = deque()
        sites_to_flip = set()
        bonds_considered = set()

        # Random seed site. The Wolff cluster always contains its seed, so it
        # is flipped even when no bond gets accepted.
        # BUGFIX: the original only added sites once a bond was accepted, so a
        # single-site cluster silently flipped nothing.
        seed = (np.random.randint(0, self.L), np.random.randint(0, self.L))
        sites_to_consider.append(seed)
        sites_to_flip.add(seed)

        while sites_to_consider:
            # BFS pops from the front; everything else falls back to DFS.
            # BUGFIX: the original only handled "BFS"/"DFS", so the default
            # method=None left (x1, y1) unbound and raised NameError.
            if self._method == "BFS":
                x1, y1 = sites_to_consider.popleft()
            else:
                x1, y1 = sites_to_consider.pop()

            # Visit the four lattice neighbours of (x1, y1).
            for x2, y2 in zip([left[x1], right[x1], x1, x1],
                              [y1, y1, left[y1], right[y1]]):
                if (x1, y1, x2, y2) in bonds_considered:
                    continue
                # Record both orientations so each bond is examined only once.
                bonds_considered.add((x1, y1, x2, y2))
                bonds_considered.add((x2, y2, x1, y1))
                # BUGFIX: the method is named probability_add_bonds; the
                # original called the non-existent probability_add_bond.
                if np.random.rand() < self.probability_add_bonds(x1, y1, x2, y2, state):
                    sites_to_consider.append((x2, y2))
                    sites_to_flip.add((x1, y1))
                    sites_to_flip.add((x2, y2))
        return sites_to_flip

    def step(self):
        """Grow one Wolff cluster, flip it in place, and return the flipped sites."""
        to_flip = self.wolff_iterative(self._state)
        for (x, y) in to_flip:
            # 0 <-> 1 spin flip.
            self._state[x, y] = 1 - self._state[x, y]
        return to_flip
"numpy.random.rand",
"numpy.random.randint",
"numpy.exp",
"collections.deque"
] | [((362, 398), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '[L, L]'}), '(0, 2, size=[L, L])\n', (379, 398), True, 'import numpy as np\n'), ((1553, 1560), 'collections.deque', 'deque', ([], {}), '()\n', (1558, 1560), False, 'from collections import deque\n'), ((825, 844), 'numpy.exp', 'np.exp', (['(-self.K * E)'], {}), '(-self.K * E)\n', (831, 844), True, 'import numpy as np\n'), ((1761, 1789), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.L'], {}), '(0, self.L)\n', (1778, 1789), True, 'import numpy as np\n'), ((1803, 1831), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.L'], {}), '(0, self.L)\n', (1820, 1831), True, 'import numpy as np\n'), ((2741, 2757), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2755, 2757), True, 'import numpy as np\n')] |
import numpy as np
import random
def sample(softmax, temperature):
    """Draw an index from *softmax* after temperature sharpening.

    Low temperatures concentrate probability mass on the arg-max; a tiny
    epsilon keeps zero entries out of the logarithm.
    """
    eps = 1e-15
    scaled = (np.asarray(softmax) + eps).astype('float64')
    # Equivalent to renormalising p_i ** (1 / temperature).
    weights = np.exp(np.log(scaled) / temperature)
    weights = weights / np.sum(weights)
    return np.random.choice(range(len(weights)), p=weights)
#TODO: saving/loading
#TODO: regularizers
class ThoughtHelper():
    """Replay-buffer helper around a character-level RNN.

    Keeps a per-trajectory hidden state, stores (char, state, next_char)
    transitions in a bounded buffer, and trains the wrapped ``rnn`` on
    random mini-batches drawn from that buffer.
    """
    def __init__(self, rnn, buffer_size=10000):
        # Maps trajectory id -> current RNN hidden-state vector.
        self.trajectories = {}
        self.buffer_size = buffer_size
        self.buffer = []
        self.rnn = rnn

    def generate(self, trajectory, max_len=1000, temp_state=None,
                 temperature=0.1):
        """predict from current trajectory onwards

        Decodes up to ``max_len`` bytes, sampling with ``temperature``
        (greedy argmax when temperature is None); byte 0 terminates.
        """
        state = self.get_trajectory(trajectory) if temp_state is None else temp_state
        #generate some text, one byte at a time
        gen = []
        for i in range(max_len):
            char = self.rnn.decode(state, return_probs=True)[0]
            char = sample(char, temperature) if temperature is not None else np.argmax(char)
            state = self.rnn.encode(char, state)[0]
            if char == 0:
                # null byte acts as the end-of-sequence marker
                break
            gen.append(char)
        gen = bytes(gen).decode("utf-8", "ignore")
        return gen

    def remember(self, char, state, next_char):
        """Append one (char, state, next_char) transition, evicting the oldest."""
        self.buffer.append((char, state, next_char))
        while len(self.buffer) > self.buffer_size:
            self.buffer.pop(0)

    def update(self, trajectory, text, add_to_buffer=True,
               add_null_terminator=True, zero_injection_rate=0.001):
        """updates state with text

        Feeds ``text`` through the RNN, optionally recording every transition
        in the replay buffer; with probability ``zero_injection_rate`` a
        transition is also recorded against a zero state so generation can
        resume after a hidden state is lost.
        """
        if add_null_terminator:
            text = text + "\0"
        #convert to list of bytes
        #need to add \0 to the front to learn the transition between null and first char
        #this assumes that the last character seen was actually \0
        text = list(bytes("\0"+text, "utf-8"))
        #grab trajectory state
        state = self.get_trajectory(trajectory)
        #for every char
        for i in range(len(text) - 1):
            char = text[i]
            next_char = text[(i + 1) % len(text)]
            if add_to_buffer:
                self.remember(char, state, next_char)
            #inject zero states (for resuming when states are lost)
            if np.random.random() < zero_injection_rate:
                self.remember(char, np.zeros([self.rnn.state_size]), next_char)
            #encode that char into the trajectory state
            state = self.rnn.encode(char, state)[0]
        #TODO: update state with \0?
        #yes.
        state = self.rnn.encode(next_char, state)[0]
        #update trajectory state
        self.reset_trajectory(trajectory, state)

    def reset_trajectory(self, trajectory, state=None):
        """Set a trajectory's hidden state (zero state when ``state`` is None)."""
        state = np.zeros([self.rnn.state_size]) if state is None else state
        #print(state)
        self.trajectories[trajectory] = state

    def get_trajectory(self, trajectory):
        """Return the stored hidden state for ``trajectory``."""
        return self.trajectories[trajectory]

    def get_batch(self, size):
        """Sample ``size`` transitions (with replacement) from the buffer."""
        batch = [random.choice(self.buffer) for _ in range(size)]
        return zip(*batch)

    def train(self, batch_size=64, num_batches=1):
        """Run ``num_batches`` training steps on random batches; return the last loss."""
        for i in range(num_batches):
            chars, states, next_chars = self.get_batch(batch_size)
            loss = self.rnn.train_on_batch(chars, states, next_chars)
        return loss
| [
"numpy.sum",
"numpy.log",
"numpy.argmax",
"numpy.zeros",
"random.choice",
"numpy.random.random",
"numpy.array",
"numpy.exp"
] | [((192, 205), 'numpy.exp', 'np.exp', (['probs'], {}), '(probs)\n', (198, 205), True, 'import numpy as np\n'), ((154, 167), 'numpy.log', 'np.log', (['probs'], {}), '(probs)\n', (160, 167), True, 'import numpy as np\n'), ((224, 237), 'numpy.sum', 'np.sum', (['probs'], {}), '(probs)\n', (230, 237), True, 'import numpy as np\n'), ((2483, 2514), 'numpy.zeros', 'np.zeros', (['[self.rnn.state_size]'], {}), '([self.rnn.state_size])\n', (2491, 2514), True, 'import numpy as np\n'), ((2728, 2754), 'random.choice', 'random.choice', (['self.buffer'], {}), '(self.buffer)\n', (2741, 2754), False, 'import random\n'), ((97, 114), 'numpy.array', 'np.array', (['softmax'], {}), '(softmax)\n', (105, 114), True, 'import numpy as np\n'), ((921, 936), 'numpy.argmax', 'np.argmax', (['char'], {}), '(char)\n', (930, 936), True, 'import numpy as np\n'), ((2035, 2053), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2051, 2053), True, 'import numpy as np\n'), ((2107, 2138), 'numpy.zeros', 'np.zeros', (['[self.rnn.state_size]'], {}), '([self.rnn.state_size])\n', (2115, 2138), True, 'import numpy as np\n')] |
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test matmul replacement.
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
import pytest
from ..core.multiarray import matmul, GE1P10
def test_import():
    """The package-level numpy shim must re-export exactly this matmul object."""
    from ... import numpy as anp
    # Identity, not equality: the shim must expose the same function object.
    assert matmul is anp.matmul
def test_test_function():
    """Test the version check GE1P10.

    The possibly patched numpy shim must always report True. Plain numpy
    either provides matmul (in which case the shim reuses it) or lacks the
    attribute entirely (in which case the patched version is used).
    """
    from ... import numpy as anp
    assert GE1P10(module=anp) is True
    if not GE1P10(module=np):
        assert not hasattr(np, 'matmul')
    else:
        assert matmul is np.matmul
def test_matmul():
    """Exercise the matmul replacement on matrices, stacks, vectors, and scalars."""
    a = np.arange(18).reshape(2, 3, 3)

    # Multiplying by the identity is a no-op.
    identity = np.identity(3)
    assert np.all(matmul(a, identity) == a)

    # 1 - I mixes columns in a predictable way.
    flipped = 1. - identity
    expected = np.array([[3, 2, 1],
                         [9, 8, 7],
                         [15, 14, 13]])
    assert np.all(matmul(a[0], flipped) == expected)

    # Broadcasting against a stack of matrices, writing into an output array.
    stacked = np.ones((4, 1, 3, 2))
    out = np.zeros((4, 2, 3, 2))
    assert matmul(a, stacked, out=out) is out
    with pytest.raises(ValueError):  # shapes do not align this way round
        matmul(stacked, a)
    bad_out = np.zeros((4, 1, 3, 2))
    with pytest.raises(ValueError):
        matmul(a, stacked, out=bad_out)

    # Matrix @ vector and vector @ matrix.
    vec = np.ones((3,))
    assert np.all(matmul(a, vec) == a.sum(-1))
    out = np.zeros((a.shape[0], a.shape[2]))
    assert matmul(vec, a, out=out) is out
    assert np.all(out == a.sum(-2))

    # Scalar operands are rejected.
    with pytest.raises(ValueError):
        matmul(a, 1.)
    with pytest.raises(ValueError):
        matmul(1., a)
    with pytest.raises(ValueError):
        matmul(a, 1.)
| [
"numpy.zeros",
"numpy.ones",
"numpy.identity",
"pytest.raises",
"numpy.arange",
"numpy.array"
] | [((1000, 1014), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (1011, 1014), True, 'import numpy as np\n'), ((1261, 1282), 'numpy.ones', 'np.ones', (['(4, 1, 3, 2)'], {}), '((4, 1, 3, 2))\n', (1268, 1282), True, 'import numpy as np\n'), ((1293, 1315), 'numpy.zeros', 'np.zeros', (['(4, 2, 3, 2)'], {}), '((4, 2, 3, 2))\n', (1301, 1315), True, 'import numpy as np\n'), ((1455, 1477), 'numpy.zeros', 'np.zeros', (['(4, 1, 3, 2)'], {}), '((4, 1, 3, 2))\n', (1463, 1477), True, 'import numpy as np\n'), ((1576, 1589), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (1583, 1589), True, 'import numpy as np\n'), ((1646, 1680), 'numpy.zeros', 'np.zeros', (['(a.shape[0], a.shape[2])'], {}), '((a.shape[0], a.shape[2]))\n', (1654, 1680), True, 'import numpy as np\n'), ((1380, 1405), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1393, 1405), False, 'import pytest\n'), ((1487, 1512), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1500, 1512), False, 'import pytest\n'), ((1782, 1807), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1795, 1807), False, 'import pytest\n'), ((1840, 1865), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1853, 1865), False, 'import pytest\n'), ((1898, 1923), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1911, 1923), False, 'import pytest\n'), ((934, 947), 'numpy.arange', 'np.arange', (['(18)'], {}), '(18)\n', (943, 947), True, 'import numpy as np\n'), ((1108, 1154), 'numpy.array', 'np.array', (['[[3, 2, 1], [9, 8, 7], [15, 14, 13]]'], {}), '([[3, 2, 1], [9, 8, 7], [15, 14, 13]])\n', (1116, 1154), True, 'import numpy as np\n')] |
import sys
from . import scigridnetwork
from . import armafitloader
from . import globals
import numpy as np
import logging
from tqdm import tqdm
import json
from pathlib import Path
# Local development checkout of PyPSA.
pypsapath = "C:/dev/py/PyPSA/"
# Prepend the checkout so it shadows any installed PyPSA release.
if sys.path[0] != pypsapath:
    sys.path.insert(0, pypsapath)
import pypsa

# Global output/plotting switches.
pretty = True
highres = True
#rcParams["figure.dpi"]=300
def make_export_renewable_covariance():
    """Export monthly ARMA covariance matrices plus wind/solar capacity arrays."""
    logging.info("Loading SciGRID network")
    sgn = scigridnetwork.SciGRID_network()

    logging.info("Loading monthly ARMA fits and exporting covariance matrices")
    for month in tqdm(range(1)):
        fits = armafitloader.ARMAfit_loader(sgn, month)
        fits.compute_covariances(save_csv=True, save_npy=True)

    logging.info("Exporting wind & solar capacity")
    processed = globals.data_path / "processed"
    # Each capacity array is written both as CSV and as a raw .npy dump.
    for stem, capacity in (("wind_capacity", sgn.wind_capacity),
                           ("solar_capacity", sgn.solar_capacity)):
        np.savetxt(processed / (stem + ".csv"), capacity, delimiter=",")
        np.save(processed / (stem + ".npy"), capacity, allow_pickle=False)
np.save(globals.data_path/"processed"/"solar_capacity.npy", sgn.solar_capacity, allow_pickle=False)
def make_export_flow_matrix():
    """Write the flow matrix (JSON) and per-node properties (JS assignments)."""
    logging.info("Loading SciGRID network")
    sgn = scigridnetwork.SciGRID_network()

    logging.info("Writing flow matrix")
    with open(globals.data_path / "processed" / "flow.json", 'w') as f:
        f.write(json.dumps(sgn.F.tolist()))

    logging.info("Writing node properties")
    lines = sgn.network.lines
    buses = sgn.network.buses.loc[sgn.new_nodes]
    data_to_export = {'x': list(buses.x),
                      'y': list(buses.y),
                      'bus0': list(lines.bus0.apply(sgn.node_index)),
                      'bus1': list(lines.bus1.apply(sgn.node_index)),
                      'capacity': list(sgn.line_capacity)}
    # One "name = <json>" assignment per property so the file loads as plain JS.
    js_lines = [name + " = " + json.dumps(values) for name, values in data_to_export.items()]
    with open(globals.data_path / "processed" / "nodeproperties.js", 'w') as f:
        f.write("\n".join(js_lines))
def make_list():
    # Print the names of all available make_* targets (defined below).
    print(makeable)
# Every module function named make_<target> becomes a runnable CLI target.
makeable = [s[5:] for s in dir() if s.startswith("make_")]
if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    # Run each target named on the command line, printing a banner around it.
    for funcname in sys.argv[1:]:
        if funcname in makeable:
            print(" == " + funcname + " == ")
            eval("make_" + funcname)()
            print(" ===" + "="*len(funcname) + "=== ")
| [
"numpy.save",
"numpy.savetxt",
"sys.path.insert",
"json.dumps",
"logging.info",
"logging.getLogger"
] | [((248, 277), 'sys.path.insert', 'sys.path.insert', (['(0)', 'pypsapath'], {}), '(0, pypsapath)\n', (263, 277), False, 'import sys\n'), ((397, 436), 'logging.info', 'logging.info', (['"""Loading SciGRID network"""'], {}), "('Loading SciGRID network')\n", (409, 436), False, 'import logging\n'), ((485, 560), 'logging.info', 'logging.info', (['"""Loading monthly ARMA fits and exporting covariance matrices"""'], {}), "('Loading monthly ARMA fits and exporting covariance matrices')\n", (497, 560), False, 'import logging\n'), ((720, 767), 'logging.info', 'logging.info', (['"""Exporting wind & solar capacity"""'], {}), "('Exporting wind & solar capacity')\n", (732, 767), False, 'import logging\n'), ((772, 876), 'numpy.savetxt', 'np.savetxt', (["(globals.data_path / 'processed' / 'wind_capacity.csv')", 'sgn.wind_capacity'], {'delimiter': '""","""'}), "(globals.data_path / 'processed' / 'wind_capacity.csv', sgn.\n wind_capacity, delimiter=',')\n", (782, 876), True, 'import numpy as np\n'), ((872, 978), 'numpy.savetxt', 'np.savetxt', (["(globals.data_path / 'processed' / 'solar_capacity.csv')", 'sgn.solar_capacity'], {'delimiter': '""","""'}), "(globals.data_path / 'processed' / 'solar_capacity.csv', sgn.\n solar_capacity, delimiter=',')\n", (882, 978), True, 'import numpy as np\n'), ((974, 1080), 'numpy.save', 'np.save', (["(globals.data_path / 'processed' / 'wind_capacity.npy')", 'sgn.wind_capacity'], {'allow_pickle': '(False)'}), "(globals.data_path / 'processed' / 'wind_capacity.npy', sgn.\n wind_capacity, allow_pickle=False)\n", (981, 1080), True, 'import numpy as np\n'), ((1076, 1184), 'numpy.save', 'np.save', (["(globals.data_path / 'processed' / 'solar_capacity.npy')", 'sgn.solar_capacity'], {'allow_pickle': '(False)'}), "(globals.data_path / 'processed' / 'solar_capacity.npy', sgn.\n solar_capacity, allow_pickle=False)\n", (1083, 1184), True, 'import numpy as np\n'), ((1213, 1252), 'logging.info', 'logging.info', (['"""Loading SciGRID network"""'], {}), 
"('Loading SciGRID network')\n", (1225, 1252), False, 'import logging\n'), ((1301, 1336), 'logging.info', 'logging.info', (['"""Writing flow matrix"""'], {}), "('Writing flow matrix')\n", (1313, 1336), False, 'import logging\n'), ((1458, 1497), 'logging.info', 'logging.info', (['"""Writing node properties"""'], {}), "('Writing node properties')\n", (1470, 1497), False, 'import logging\n'), ((2249, 2268), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2266, 2268), False, 'import logging\n'), ((1987, 2000), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (1997, 2000), False, 'import json\n')] |
"""This module contains the Explorer class, which is an abstraction
for batched, Bayesian optimization."""
from collections import deque
import csv
import heapq
import json
from operator import itemgetter
from pathlib import Path
import pickle
import tempfile
from typing import Dict, List, Optional, Tuple, TypeVar, Union
import numpy as np
from molpal import acquirer, featurizer, models, objectives, pools
# Generic type of an input identifier used throughout this module.
T = TypeVar('T')
class Explorer:
"""An Explorer explores a pool of inputs using Bayesian optimization
Attributes
----------
name : str
the name this explorer will use for all outputs
pool : MoleculePool
the pool of inputs to explore
featurizer : Featurizer
the featurizer this explorer will use convert molecules from SMILES
strings into feature representations
acquirer : Acquirer
an acquirer which selects molecules to explore next using a prior
distribution over the inputs
objective : Objective
an objective calculates the objective function of a set of inputs
model : Model
a model that generates a posterior distribution over the inputs using
observed data
retrain_from_scratch : bool
whether the model will be retrained from scratch at each iteration.
If False, train the model online.
NOTE: The definition of 'online' is model-specific.
epoch : int
the current epoch of exploration
scores : Dict[T, float]
a dictionary mapping an input's identifier to its corresponding
objective function value
failed : Dict[T, None]
a dictionary containing the inputs for which the objective function
failed to evaluate
new_scores : Dict[T, float]
a dictionary mapping an input's identifier to its corresponding
objective function value for the most recent batch of labeled inputs
updated_model : bool
whether the predictions are currently out-of-date with the model
top_k_avg : float
the average of the top-k explored inputs
y_preds : List[float]
a list parallel to the pool containing the mean predicted score
for an input
y_vars : List[float]
a list parallel to the pool containing the variance in the predicted
score for an input. Will be empty if model does not provide variance
recent_avgs : Deque[float]
a queue containing the <window_size> most recent averages
delta : float
the minimum acceptable fractional difference between the current
average and the moving average in order to continue exploration
max_epochs : int
the maximum number of batches to explore
root : str
the directory under which to organize all outputs
write_final : bool
whether the list of explored inputs and their scores should be written
to a file at the end of exploration
write_intermediate : bool
whether the list of explored inputs and their scores should be written
to a file after each round of exploration
scores_csvs : List[str]
a list containing the filepath of each score file that was written
in the order in which they were written. Used only when saving the
intermediate state to initialize another explorer
save_preds : bool
whether the predictions should be written after each exploration batch
verbose : int
the level of output the Explorer prints
Parameters
----------
name : str
k : Union[int, float] (Default = 0.01)
window_size : int (Default = 3)
the number of top-k averages from which to calculate a moving average
delta : float (Default = 0.01)
max_epochs : int (Default = 50)
max_explore : Union[int, float] (Default = 1.)
root : str (Default = '.')
write_final : bool (Default = True)
write_intermediate : bool (Default = False)
save_preds : bool (Default = False)
retrain_from_scratch : bool (Default = False)
previous_scores : Optional[str] (Default = None)
the filepath of a CSV file containing previous scoring data which will
be treated as the initialization batch (instead of randomly selecting
from the bool.)
scores_csvs : Union[str, List[str], None] (Default = None)
a list of filepaths containing CSVs with previous scoring data or a
pickle file containing this list. These
CSVs will be read in and the model trained on the data in the order
in which the CSVs are provide. This is useful for mimicking the
intermediate state of a previous Explorer instance
verbose : int (Default = 0)
**kwargs
keyword arguments to initialize an Encoder, MoleculePool, Acquirer,
Model, and Objective classes
Raises
------
ValueError
if k is less than 0
if max_explore is less than 0
"""
    def __init__(self, name: str = 'molpal',
                 k: Union[int, float] = 0.01, window_size: int = 3,
                 delta: float = 0.01, max_epochs: int = 50,
                 max_explore: Union[int, float] = 1., root: str = '.',
                 write_final: bool = True, write_intermediate: bool = False,
                 save_preds: bool = False, retrain_from_scratch: bool = False,
                 previous_scores: Optional[str] = None,
                 scores_csvs: Union[str, List[str], None] = None,
                 verbose: int = 0, tmp_dir: str = tempfile.gettempdir(),
                 **kwargs):
        """Initialize an Explorer. See the class docstring for full
        parameter documentation."""
        # NOTE(review): tmp_dir's default is evaluated once at class
        # definition time (standard default-argument semantics);
        # tempfile.gettempdir() is stable within a process, so this is
        # equivalent to evaluating it per call.
        self.name = name; kwargs['name'] = name
        self.verbose = verbose; kwargs['verbose'] = verbose
        self.root = root
        self.tmp = tmp_dir
        # the featurizer is shared by the pool (for precomputing feature
        # representations) and sizes the model's input layer below
        self.featurizer = featurizer.Featurizer(
            fingerprint=kwargs['fingerprint'],
            radius=kwargs['radius'], length=kwargs['length']
        )
        self.pool = pools.pool(featurizer=self.featurizer,
                               path=self.tmp, **kwargs)
        self.acquirer = acquirer.Acquirer(size=len(self.pool), **kwargs)
        if self.acquirer.metric == 'thompson':
            # Thompson sampling acquires from a single posterior draw
            kwargs['dropout_size'] = 1
        self.model = models.model(input_size=len(self.featurizer), **kwargs)
        self.acquirer.stochastic_preds = 'stochastic' in self.model.provides
        self.objective = objectives.objective(**kwargs)
        self._validate_acquirer()
        self.retrain_from_scratch = retrain_from_scratch
        self.k = k
        self.delta = delta
        self.max_explore = max_explore
        self.max_epochs = max_epochs
        self.write_final = write_final
        self.write_intermediate = write_intermediate
        self.save_preds = save_preds
        # stateful attributes (not including model)
        self.epoch = 0
        self.scores = {}
        self.failures = {}
        self.new_scores = {}
        self.updated_model = None
        self.recent_avgs = deque(maxlen=window_size)
        self.top_k_avg = None
        self.y_preds = None
        self.y_vars = None
        # a string argument is interpreted as a pickle file containing the
        # list of score-CSV paths written by a previous run
        if isinstance(scores_csvs, str):
            self.scores_csvs = pickle.load(open(scores_csvs, 'rb'))
        elif isinstance(scores_csvs, list):
            self.scores_csvs = scores_csvs
        else:
            self.scores_csvs = []
        if previous_scores:
            # treat the previous data as the initialization batch
            self.load_scores(previous_scores)
        elif scores_csvs:
            # replay the previous run's intermediate state
            self.load()
@property
def k(self) -> int:
"""the number of top-scoring inputs from which to calculate an average.
"""
k = self.__k
if isinstance(k, float):
k = int(k * len(self.pool))
return min(k, len(self.pool))
@k.setter
def k(self, k: Union[int, float]):
"""Set k either as an integer or as a fraction of the pool.
NOTE: Specifying either a fraction greater than 1 or or a number
larger than the pool size will default to using the full pool.
"""
if k <= 0:
raise ValueError(f'k(={k}) must be greater than 0!')
self.__k = k
@property
def max_explore(self) -> int:
"""the maximum number of inputs to explore"""
max_explore = self.__max_explore
if isinstance(max_explore, float):
max_explore = int(max_explore * len(self.pool))
return max_explore
@max_explore.setter
def max_explore(self, max_explore: Union[int, float]):
"""Set max_explore either as an integer or as a fraction of the pool.
NOTE: Specifying either a fraction greater than 1 or or a number
larger than the pool size will default to using the full pool.
"""
if max_explore <= 0.:
raise ValueError(
f'max_explore(={max_explore}) must be greater than 0!')
self.__max_explore = max_explore
@property
def completed(self) -> bool:
"""whether the explorer fulfilled one of its stopping conditions
Stopping Conditions
-------------------
a. explored the entire pool
(not implemented right now due to complications with 'transfer
learning')
b. explored for at least <max_epochs> epochs
c. explored at least <max_explore> inputs
d. the current top-k average is within a fraction <delta> of the moving
top-k average. This requires two sub-conditions to be met:
1. the explorer has successfully explored at least k inputs
2. the explorer has completed at least <window_size> epochs after
sub-condition (1) has been met
Returns
-------
bool
whether a stopping condition has been met
"""
if self.epoch > self.max_epochs:
return True
if len(self.scores) >= self.max_explore:
return True
if len(self.recent_avgs) < self.recent_avgs.maxlen:
return False
moving_avg = sum(self.recent_avgs) / len(self.recent_avgs)
return (self.top_k_avg - moving_avg) / moving_avg <= self.delta
def explore(self):
self.run()
    def run(self):
        """Explore the MoleculePool until the stopping condition is met"""
        if self.epoch == 0:
            print('Starting Exploration ...')
            self.explore_initial()
        else:
            # a nonzero epoch means previous state was loaded
            print(f'Resuming Exploration at epoch {self.epoch}...')
            self.explore_batch()
        while not self.completed:
            if self.verbose > 0:
                print(f'Current average of top {self.k}: {self.top_k_avg:0.3f}',
                      'Continuing exploration ...', flush=True)
            self.explore_batch()
        print('Finished exploring!')
        print(f'Explored a total of {len(self)} molecules',
              f'over {self.epoch} iterations')
        print(f'Final average of top {self.k}: {self.top_k_avg:0.3f}')
        print(f'Final averages')
        print(f'--------------')
        # report averages at several fixed top-fractions of the pool
        for k in [0.0001, 0.0005, 0.001, 0.005]:
            print(f'top {k*100:0.2f}%: {self.avg(k):0.3f}')
        if self.write_final:
            self.write_scores(final=True)
def __len__(self) -> int:
"""The number of inputs that have been explored"""
return len(self.scores) + len(self.failures)
    def explore_initial(self) -> float:
        """Perform an initial round of exploration
        Must be called before explore_batch()
        Returns
        -------
        avg : float
            the average score of the batch
        """
        inputs = self.acquirer.acquire_initial(
            xs=self.pool.smis(),
            cluster_ids=self.pool.cluster_ids(),
            cluster_sizes=self.pool.cluster_sizes,
        )
        # a None value marks a failed objective evaluation
        new_scores = self.objective.calc(inputs)
        self._clean_and_update_scores(new_scores)
        self.top_k_avg = self.avg()
        # only track the moving average once k inputs have been scored
        if len(self.scores) >= self.k:
            self.recent_avgs.append(self.top_k_avg)
        if self.write_intermediate:
            self.write_scores(include_failed=True)
        self.epoch += 1
        # NOTE(review): raises ZeroDivisionError if every evaluation in the
        # batch failed (all values None) -- confirm upstream guarantees
        valid_scores = [y for y in new_scores.values() if y is not None]
        return sum(valid_scores) / len(valid_scores)
    def explore_batch(self) -> float:
        """Perform a round of exploration
        Returns
        -------
        avg : float
            the average score of the batch
        Raises
        ------
        InvalidExplorationError
            if called before explore_initial or load_scores
        """
        if self.epoch == 0:
            raise InvalidExplorationError(
                'Cannot explore a batch before initialization!'
            )
        if len(self.scores) >= len(self.pool):
            # this needs to be reconsidered for warm-start type approach
            # nothing left to acquire: count the epoch and return unchanged
            self.epoch += 1
            return self.top_k_avg
        self._update_model()
        self._update_predictions()
        inputs = self.acquirer.acquire_batch(
            xs=self.pool.smis(), y_means=self.y_preds, y_vars=self.y_vars,
            explored={**self.scores, **self.failures},
            cluster_ids=self.pool.cluster_ids(),
            cluster_sizes=self.pool.cluster_sizes, epoch=self.epoch,
        )
        # a None value marks a failed objective evaluation
        new_scores = self.objective.calc(inputs)
        self._clean_and_update_scores(new_scores)
        self.top_k_avg = self.avg()
        # only track the moving average once k inputs have been scored
        if len(self.scores) >= self.k:
            self.recent_avgs.append(self.top_k_avg)
        if self.write_intermediate:
            self.write_scores(include_failed=True)
        self.epoch += 1
        # NOTE(review): raises ZeroDivisionError if every evaluation in the
        # batch failed (all values None) -- confirm upstream guarantees
        valid_scores = [y for y in new_scores.values() if y is not None]
        return sum(valid_scores)/len(valid_scores)
def avg(self, k: Union[int, float, None] = None) -> float:
"""Calculate the average of the top k molecules
Parameter
---------
k : Union[int, float, None] (Default = None)
the number of molecules to consider when calculating the
average, expressed either as a specific number or as a
fraction of the pool. If the value specified is greater than the
number of successfully evaluated inputs, return the average of all
succesfully evaluated inputs. If None, use self.k
Returns
-------
float
the top-k average
"""
k = k or self.k
if isinstance(k, float):
k = int(k * len(self.pool))
k = min(k, len(self.scores))
if k == len(self.pool):
return sum(score for score in self.scores.items()) / k
return sum(score for smi, score in self.top_explored(k)) / k
def top_explored(self, k: Union[int, float, None] = None) -> List[Tuple]:
"""Get the top-k explored molecules
Parameter
---------
k : Union[int, float, None] (Default = None)
the number of top-scoring molecules to get, expressed either as a
specific number or as a fraction of the pool. If the value
specified is greater than the number of successfully evaluated
inputs, return all explored inputs. If None, use self.k
Returns
-------
top_explored : List[Tuple[str, float]]
a list of tuples containing the identifier and score of the
top-k inputs, sorted by their score
"""
k = k or self.k
if isinstance(k, float):
k = int(k * len(self.pool))
k = min(k, len(self.scores))
if k / len(self.scores) < 0.8:
return heapq.nlargest(k, self.scores.items(), key=itemgetter(1))
return sorted(self.scores.items(), key=itemgetter(1), reverse=True)
def top_preds(self, k: Union[int, float, None] = None) -> List[Tuple]:
"""Get the current top predicted molecules and their scores
Parameter
---------
k : Union[int, float, None] (Default = None)
see documentation for avg()
Returns
-------
top_preds : List[Tuple[str, float]]
a list of tuples containing the identifier and predicted score of
the top-k predicted inputs, sorted by their predicted score
"""
k = k or self.k
if isinstance(k, float):
k = int(k * len(self.pool))
k = min(k, len(self.scores))
selected = []
for x, y in zip(self.pool.smis(), self.y_preds):
if len(selected) < k:
heapq.heappush(selected, (y, x))
else:
heapq.heappushpop(selected, (y, x))
return [(x, y) for y, x in selected]
    def write_scores(self, m: Union[int, float] = 1.,
                     final: bool = False,
                     include_failed: bool = False) -> None:
        """Write the top M scores to a CSV file
        Writes a CSV file of the top-k explored inputs with the input ID and
        the respective objective function value.
        Parameters
        ----------
        m : Union[int, float] (Default = 1.)
            The number of top-scoring inputs to write, expressed either as an
            integer or as a float representing the fraction of explored inputs.
            By default, writes all inputs
        final : bool (Default = False)
            Whether the explorer has finished. If true, write all explored
            inputs (both successful and failed) and name the output CSV file
            "all_explored_final.csv"
        include_failed : bool (Default = False)
            Whether to include the inputs for which objective function
            evaluation failed
        """
        if isinstance(m, float):
            # interpret a float as a fraction of all explored inputs
            m = int(m * len(self))
        m = min(m, len(self))
        p_data = Path(f'{self.root}/{self.name}/data')
        p_data.mkdir(parents=True, exist_ok=True)
        if final:
            m = len(self)
            p_scores = p_data / f'all_explored_final.csv'
            include_failed = True
        else:
            p_scores = p_data / f'top_{m}_explored_iter_{self.epoch}.csv'
            # remember intermediate files so a later run can replay them
            self.scores_csvs.append(str(p_scores))
        top_m = self.top_explored(m)
        with open(p_scores, 'w') as fid:
            writer = csv.writer(fid)
            writer.writerow(['smiles', 'score'])
            writer.writerows(top_m)
            if include_failed:
                # failed inputs are written with an empty score column
                writer.writerows(self.failures.items())
        if self.verbose > 0:
            print(f'Results were written to "{p_scores}"')
    def load_scores(self, previous_scores: str) -> None:
        """Load the scores CSV located at saved_scores.
        If this is being called during initialization, treat the data as the
        initialization batch.
        Parameter
        ---------
        previous_scores : str
            the filepath of a CSV file containing previous scoring information.
            The 0th column of this CSV must contain the input identifier and
            the 1st column must contain a float corresponding to its score.
            A failure to parse the 1st column as a float will treat that input
            as a failure.
        """
        if self.verbose > 0:
            print(f'Loading scores from "{previous_scores}" ... ', end='')
        scores, failures = self._read_scores(previous_scores)
        self.scores.update(scores)
        self.failures.update(failures)
        if self.epoch == 0:
            # the loaded data counts as the initialization batch
            self.epoch = 1
        if self.verbose > 0:
            print('Done!')
def save(self) -> str:
p_states = Path(f'{self.root}/{self.name}/states')
p_states.mkdir(parents=True, exist_ok=True)
p_state = p_states / f'epoch_{self.epoch}.pkl'
with open(p_state, 'wb') as fid:
pickle.dump(self.scores_csvs, fid)
return str(p_state)
def save_checkpoint(self) -> str:
p_chkpt = Path(
f'{self.root}/{self.name}/checkpoints/epoch_{self.epoch}'
)
p_chkpt.mkdir(parents=True, exist_ok=True)
state = {
'epoch': self.epoch,
'scores': self.scores,
'failures': self.failures,
'new_scores': self.new_scores,
'updated_model': self.updated_model,
'recent_avgs': self.recent_avgs,
'top_k_avg': self.top_k_avg,
'y_preds': self.write_preds(),
'model': self.model.save(p_chkpt / 'model')
}
p_state = p_chkpt / 'state.json'
json.dump(state, open(p_state))
return str(p_state)
    def load(self) -> None:
        """Mimic the intermediate state of a previous explorer run by loading
        the data from the list of output files"""
        if self.verbose > 0:
            print(f'Loading in previous state ... ', end='')
        for scores_csv in self.scores_csvs:
            # NOTE(review): failures are *replaced* (not merged) on each
            # iteration, so only the last CSV's failures survive -- confirm
            # the intermediate CSVs are cumulative
            scores, self.failures = self._read_scores(scores_csv)
            self.new_scores = {smi: score for smi, score in scores.items()
                               if smi not in self.scores}
            if not self.retrain_from_scratch:
                # train the model online on only the newly acquired data
                self._update_model()
            self.scores = scores
            self.epoch += 1
        self.top_k_avg = self.avg()
        if len(self.scores) >= self.k:
            self.recent_avgs.append(self.top_k_avg)
        if self.verbose > 0:
            print('Done!')
def write_preds(self) -> str:
path = Path(f'{self.root}/{self.name}/preds')
path.mkdir(parents=True, exist_ok=True)
if self.y_vars:
Y_pred = np.column_stack((self.y_preds, self.y_vars))
else:
Y_pred = np.array(self.y_preds)
preds_file = f'{path}/preds_iter_{self.epoch}.npy'
np.save(preds_file, Y_pred)
return preds_file
def _clean_and_update_scores(self, new_scores: Dict[T, Optional[float]]):
"""Remove the None entries from new_scores and update the attributes
new_scores, scores, and failed accordingly
Parameter
---------
new_scores : Dict[T, Optional[float]]
a dictionary containing the corresponding values of the objective
function for a batch of inputs
Side effects
------------
(mutates) self.scores : Dict[T, float]
updates self.scores with the non-None entries from new_scores
(mutates) self.new_scores : Dict[T, float]
updates self.new_scores with the non-None entries from new_scores
(mutates) self.failures : Dict[T, None]
a dictionary storing the inputs for which scoring failed
"""
for x, y in new_scores.items():
if y is None:
self.failures[x] = y
else:
self.scores[x] = y
self.new_scores[x] = y
    def _update_model(self) -> None:
        """Update the prior distribution to generate a posterior distribution
        Side effects
        ------------
        (mutates) self.model : Type[Model]
            updates the model with new data, if there are any
        (sets) self.new_scores : Dict[str, Optional[float]]
            reinitializes self.new_scores to an empty dictionary
        (sets) self.updated_model : bool
            sets self.updated_model to True, indicating that the predictions
            must be updated as well
        """
        if len(self.new_scores) == 0:
            # only update model if there are new data
            self.updated_model = False
            return
        if self.retrain_from_scratch:
            # retrain on every datum acquired so far
            xs, ys = zip(*self.scores.items())
        else:
            # train online on only the newly acquired data
            xs, ys = zip(*self.new_scores.items())
        self.model.train(
            xs, ys, retrain=self.retrain_from_scratch,
            featurizer=self.featurizer,
            # featurize=self.encoder.encode_and_uncompress
        )
        self.new_scores = {}
        self.updated_model = True
    def _update_predictions(self) -> None:
        """Update the predictions over the pool with the new model
        Side effects
        ------------
        (sets) self.y_preds : List[float]
            a list of floats parallel to the pool inputs containing the mean
            predicted score for each input
        (sets) self.y_vars : List[float]
            a list of floats parallel to the pool inputs containing the
            predicted variance for each input
        (sets) self.updated_model : bool
            sets self.updated_model to False, indicating that the predictions
            are now up-to-date with the current model
        """
        if not self.updated_model and self.y_preds:
            # don't update predictions if the model has not been updated
            # and the predictions are already set
            return
        self.y_preds, self.y_vars = self.model.apply(
            x_ids=self.pool.smis(), x_feats=self.pool.fps(),
            batched_size=None, size=len(self.pool),
            # skip variance estimation when the acquirer won't use it
            mean_only='vars' not in self.acquirer.needs
        )
        self.updated_model = False
        if self.save_preds:
            self.write_preds()
def _validate_acquirer(self):
"""Ensure that the model provides values the Acquirer needs"""
if self.acquirer.needs > self.model.provides:
raise IncompatibilityError(
f'{self.acquirer.metric} metric needs: '
+ f'{self.acquirer.needs} '
+ f'but {self.model.type_} only provides: '
+ f'{self.model.provides}')
def _read_scores(self, scores_csv: str) -> Dict:
"""read the scores contained in the file located at scores_csv"""
scores = {}
failures = {}
with open(scores_csv) as fid:
reader = csv.reader(fid)
next(reader)
for row in reader:
try:
scores[row[0]] = float(row[1])
except:
failures[row[0]] = None
return scores, failures
class InvalidExplorationError(Exception):
    """Raised when exploration steps are invoked in an invalid order
    (e.g., explore_batch() before the initialization batch)."""
    pass
class IncompatibilityError(Exception):
    """Raised when the acquisition metric requires values the surrogate
    model does not provide."""
    pass
| [
"pickle.dump",
"numpy.save",
"csv.reader",
"molpal.featurizer.Featurizer",
"csv.writer",
"molpal.pools.pool",
"heapq.heappush",
"heapq.heappushpop",
"tempfile.gettempdir",
"molpal.objectives.objective",
"pathlib.Path",
"numpy.array",
"numpy.column_stack",
"typing.TypeVar",
"operator.item... | [((416, 428), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (423, 428), False, 'from typing import Dict, List, Optional, Tuple, TypeVar, Union\n'), ((5483, 5504), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (5502, 5504), False, 'import tempfile\n'), ((5721, 5832), 'molpal.featurizer.Featurizer', 'featurizer.Featurizer', ([], {'fingerprint': "kwargs['fingerprint']", 'radius': "kwargs['radius']", 'length': "kwargs['length']"}), "(fingerprint=kwargs['fingerprint'], radius=kwargs[\n 'radius'], length=kwargs['length'])\n", (5742, 5832), False, 'from molpal import acquirer, featurizer, models, objectives, pools\n'), ((5882, 5945), 'molpal.pools.pool', 'pools.pool', ([], {'featurizer': 'self.featurizer', 'path': 'self.tmp'}), '(featurizer=self.featurizer, path=self.tmp, **kwargs)\n', (5892, 5945), False, 'from molpal import acquirer, featurizer, models, objectives, pools\n'), ((6318, 6348), 'molpal.objectives.objective', 'objectives.objective', ([], {}), '(**kwargs)\n', (6338, 6348), False, 'from molpal import acquirer, featurizer, models, objectives, pools\n'), ((6921, 6946), 'collections.deque', 'deque', ([], {'maxlen': 'window_size'}), '(maxlen=window_size)\n', (6926, 6946), False, 'from collections import deque\n'), ((17845, 17882), 'pathlib.Path', 'Path', (['f"""{self.root}/{self.name}/data"""'], {}), "(f'{self.root}/{self.name}/data')\n", (17849, 17882), False, 'from pathlib import Path\n'), ((19655, 19694), 'pathlib.Path', 'Path', (['f"""{self.root}/{self.name}/states"""'], {}), "(f'{self.root}/{self.name}/states')\n", (19659, 19694), False, 'from pathlib import Path\n'), ((19985, 20048), 'pathlib.Path', 'Path', (['f"""{self.root}/{self.name}/checkpoints/epoch_{self.epoch}"""'], {}), "(f'{self.root}/{self.name}/checkpoints/epoch_{self.epoch}')\n", (19989, 20048), False, 'from pathlib import Path\n'), ((21540, 21578), 'pathlib.Path', 'Path', (['f"""{self.root}/{self.name}/preds"""'], {}), 
"(f'{self.root}/{self.name}/preds')\n", (21544, 21578), False, 'from pathlib import Path\n'), ((21852, 21879), 'numpy.save', 'np.save', (['preds_file', 'Y_pred'], {}), '(preds_file, Y_pred)\n', (21859, 21879), True, 'import numpy as np\n'), ((18306, 18321), 'csv.writer', 'csv.writer', (['fid'], {}), '(fid)\n', (18316, 18321), False, 'import csv\n'), ((19864, 19898), 'pickle.dump', 'pickle.dump', (['self.scores_csvs', 'fid'], {}), '(self.scores_csvs, fid)\n', (19875, 19898), False, 'import pickle\n'), ((21673, 21717), 'numpy.column_stack', 'np.column_stack', (['(self.y_preds, self.y_vars)'], {}), '((self.y_preds, self.y_vars))\n', (21688, 21717), True, 'import numpy as np\n'), ((21753, 21775), 'numpy.array', 'np.array', (['self.y_preds'], {}), '(self.y_preds)\n', (21761, 21775), True, 'import numpy as np\n'), ((25880, 25895), 'csv.reader', 'csv.reader', (['fid'], {}), '(fid)\n', (25890, 25895), False, 'import csv\n'), ((15751, 15764), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (15761, 15764), False, 'from operator import itemgetter\n'), ((16574, 16606), 'heapq.heappush', 'heapq.heappush', (['selected', '(y, x)'], {}), '(selected, (y, x))\n', (16588, 16606), False, 'import heapq\n'), ((16641, 16676), 'heapq.heappushpop', 'heapq.heappushpop', (['selected', '(y, x)'], {}), '(selected, (y, x))\n', (16658, 16676), False, 'import heapq\n'), ((15680, 15693), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (15690, 15693), False, 'from operator import itemgetter\n')] |
# Copyright 2021 The Kubric Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
from numpy.lib.function_base import append
import kubric as kb
from kubric.assets import asset_source
from kubric.renderer.blender import Blender as KubricBlender
from kubric.simulator.pybullet import PyBullet as KubricSimulator
import sys
import imageio
import bpy
import pdb
import random
from scipy.spatial import transform
# --- experiment configuration
logging.basicConfig(level="INFO") # < CRITICAL, ERROR, WARNING, INFO, DEBUG
ROT_CAM = True # whether the camera orbits the scene over the clip
ROT_RANGE = np.pi / 4 # 2 * np.pi # total azimuthal sweep of the camera (rad)
OBJNAME = 'shapenet-less-rot' # name used for every output directory/file
POSITION = (0,0,1) #(0,0,0.2) # initial object position
VELOCITY = (0.5,0,-1) # (4,-4,0) # initial object velocity
OBJ_TYPE = 'shapenet' # one of: 'cube', 'torus', 'shapenet'
TEXTURE = False # if True, apply an image texture to the object
random.seed(0) # deterministic material sampling below
# --- create scene and attach a renderer and simulator
scene = kb.Scene(resolution=(256, 256))
scene.frame_end = 30   # < numbers of frames to render
scene.frame_rate = 24  # < rendering framerate
scene.step_rate = 240  # < simulation framerate
renderer = KubricBlender(scene)
simulator = KubricSimulator(scene)
# --- populate the scene with objects, lights, cameras
scene += kb.Cube(name="floor", scale=(5, 5, 0.1), position=(0, 0, -0.1), static=True, background=True, segmentation_id=1)
scene += kb.DirectionalLight(name="sun", position=(-1, -0.5, 3), look_at=(0, 0, 0), intensity=1.5)
scene.camera = kb.PerspectiveCamera(name="camera", position=(2, -2, 4), look_at=(0, 0, 0))
# color = kb.random_hue_color()
color = kb.Color(r=1, g=0.1, b=0.1, a=1.0)
# quaternion = [0.871342, 0.401984, -0.177436, 0.218378]
material = kb.PrincipledBSDFMaterial(color=color)
# --- construct the dynamic object according to OBJ_TYPE
if OBJ_TYPE == 'cube':
    obj = kb.Cube(name='cube', scale=0.3, velocity=VELOCITY, angular_velocity=[0,0,0], position=POSITION, mass=0.2, restitution=1, material=material, friction=1, segmentation_id=2)
    objname = 'cube'
    # segmentation id doesn't seem to be working -- the segmentation mask still uses object id
elif OBJ_TYPE == 'torus':
    # set up assets
    # NOTE(review): this local `asset_source` shadows the module imported
    # from kubric.assets above
    asset_source = kb.AssetSource("examples/KuBasic")
    obj = asset_source.create(name="torus",
                              asset_id='Torus', scale=0.5)
    objname = 'torus'
    obj.material = material # kb.PrincipledBSDFMaterial(color=kb.Color(r=1, g=0.030765511645494348, b=0.0, a=1.0), metallic=0., ior=1.25, roughness=0.7, specular=0.33)
    obj.position = POSITION
    obj.velocity = VELOCITY
elif OBJ_TYPE == 'shapenet':
    asset_source = kb.AssetSource('gs://tensorflow-graphics/public/60c9de9c410be30098c297ac/ShapeNetCore.v2')
    # 02691156 appears to be a ShapeNet synset id (airplane) -- TODO confirm
    ids = list(asset_source.db.loc[asset_source.db['id'].str.startswith('02691156')]['id'])
    rng = np.random.RandomState(0)
    asset_id = rng.choice(ids) #< e.g. 02691156_10155655850468db78d106ce0a280f87
    obj = asset_source.create(asset_id=asset_id)
    obj.position = POSITION
    obj.velocity = VELOCITY
    obj.metadata = {
        "asset_id": obj.asset_id,
        "category": asset_source.db[
            asset_source.db["id"] == obj.asset_id].iloc[0]["category_name"],
    }
    obj.scale = 2
    objname = obj.name
else:
    raise NotImplementedError
if TEXTURE:
    # texture the object through Blender's node tree directly
    bpy_scene = bpy.context.scene
    obj.material = kb.PrincipledBSDFMaterial(name="material")
    obj.material.metallic = random.random()
    obj.material.roughness = random.random()**0.2
    scene += obj
    mat = bpy_scene.objects[objname].active_material
    tree = mat.node_tree
    mat_node = tree.nodes["Principled BSDF"]
    texImage = mat.node_tree.nodes.new('ShaderNodeTexImage')
    texImage.image = bpy.data.images.load('examples/tex/tex.jpg')
    tree.links.new(mat_node.inputs['Base Color'], texImage.outputs['Color'])
else:
    scene += obj
def _pack_cam_param(focal_length, position, quat):
    """Pack one frame's camera parameters into a (1, 8) array laid out as
    [focal, x, y, qw, qx, qy, qz, z] -- presumably the layout the LASR
    consumer expects (TODO confirm against the downstream reader)."""
    x, y, z = position
    param = np.zeros([1, 8])
    param[0, 0] = focal_length
    param[0, 1] = x
    param[0, 2] = y
    param[0, 3] = quat[3]
    param[0, 4:7] = quat[:3]
    param[0, 7] = z
    return param

# per-frame camera parameters, one (1, 8) array per rendered frame
cam_params = []
if ROT_CAM:
    # Render cameras at the same general distance from the origin, but at
    # different positions, parameterized in spherical coordinates:
    #   x = r * cos(theta) * sin(phi)
    #   y = r * sin(theta) * sin(phi)
    #   z = r * cos(phi)
    original_camera_position = scene.camera.position
    r = np.sqrt(sum(a * a for a in original_camera_position))
    phi = np.arccos(original_camera_position[2] / r)  # (180 - elevation)
    theta = np.arccos(original_camera_position[0] / (r * np.sin(phi)))  # azimuth
    num_phi_values_per_theta = 1
    # spread ROT_RANGE evenly over the clip's frames
    theta_change = ROT_RANGE / (
        (scene.frame_end - scene.frame_start) / num_phi_values_per_theta)
    for frame in range(scene.frame_start, scene.frame_end + 1):
        i = frame - scene.frame_start
        theta_new = (i // num_phi_values_per_theta) * theta_change + theta
        # these (x, y, z) lie on the same sphere as the original camera
        x = r * np.cos(theta_new) * np.sin(phi)
        y = r * np.sin(theta_new) * np.sin(phi)
        z = r * np.cos(phi)
        scene.camera.position = (x, y, z)
        scene.camera.look_at((0, 0, 0))
        scene.camera.keyframe_insert("position", frame)
        scene.camera.keyframe_insert("quaternion", frame)
        cam_params.append(_pack_cam_param(
            scene.camera.focal_length, (x, y, z), scene.camera.quaternion))
else:
    # static camera: replicate one parameter block for every frame.
    # NOTE(review): this branch emits frame_end entries while the rotating
    # branch emits frame_end - frame_start + 1 -- confirm the off-by-one is
    # intended.
    cam_param = _pack_cam_param(scene.camera.focal_length,
                                scene.camera.position,
                                scene.camera.quaternion)
    for _ in range(scene.frame_end):
        cam_params.append(cam_param)
# --- executes the simulation (and store keyframes)
simulator.run()
# --- renders the output
kb.as_path("output").mkdir(exist_ok=True)
renderer.save_state(f"output/{OBJNAME}/{OBJNAME}.blend")
frames_dict = renderer.render()
# del frames_dict["uv"]
# del frames_dict["forward_flow"]
# del frames_dict["backward_flow"]
# del frames_dict["depth"]
# del frames_dict["normal"]
import pickle
# persist all rendered passes for later reprocessing
with open(f'output/{OBJNAME}/frames.dict', 'wb') as file:
    pickle.dump(frames_dict, file)
# kb.write_image_dict(frames_dict, f"output/{OBJNAME}")
# convert segmentation mask to LASR style
# palette index 0/1 -> black (background); the rest -> grey (foreground)
palette = [[0,0,0],[0,0,0],[128,128,128],[128,128,128],[128,128,128],[128,128,128]]
kb.file_io.multi_write_image(frames_dict['segmentation'], str(kb.as_path(f"output/{OBJNAME}/LASR/Annotations/Full-Resolution/{OBJNAME}") / "{:05d}.png"), write_fn=kb.write_palette_png,
                             max_write_threads=16, palette=palette)
# kb.file_io.write_rgba_batch(frames_dict['rgba'], str(kb.as_path("output/rigid/rgba") / "{:05d}.png"))
kb.file_io.multi_write_image(frames_dict['rgba'], str(kb.as_path(f"output/{OBJNAME}/LASR/JPEGImages/Full-Resolution/{OBJNAME}") / "{:05d}.png"), write_fn=kb.write_png,
                             max_write_threads=16)
def write_pfm(path, image, scale=1):
    """Save a float32 array to *path* in PFM (Portable Float Map) format.

    Args:
        path (str): destination file path.
        image (np.ndarray): float32 data of shape HxW, HxWx1 (greyscale)
            or HxWx3 (color).
        scale (int, optional): header scale factor; its sign encodes the
            byte order (negative means little-endian). Defaults to 1.

    Raises:
        Exception: if the dtype is not float32 or the shape is unsupported.
    """
    with open(path, "wb") as out:
        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32.")
        # PFM stores scanlines bottom-to-top, so flip vertically first.
        flipped = np.flipud(image)
        ndim = len(flipped.shape)
        if ndim == 3 and flipped.shape[2] == 3:  # color image
            is_color = True
        elif ndim == 2 or (ndim == 3 and flipped.shape[2] == 1):  # greyscale
            is_color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
        out.write(b"PF\n" if is_color else b"Pf\n")
        out.write(("%d %d\n" % (flipped.shape[1], flipped.shape[0])).encode())
        # A negative scale in the header flags little-endian sample data.
        endian = flipped.dtype.byteorder
        if endian == "<" or (endian == "=" and sys.byteorder == "little"):
            scale = -scale
        out.write(("%f\n" % scale).encode())
        flipped.tofile(out)
# Drop the last forward / first backward flow frame (no successor/predecessor).
# The *256 scaling presumably converts normalized flow to pixel units at the
# 256-px render resolution -- TODO confirm against the renderer settings.
fw = frames_dict['forward_flow'][:-1,...] * 256
bw = frames_dict['backward_flow'][1:,...] * 256
imgs = frames_dict['rgba']
M, N = imgs.shape[1:3]
# Occlusion maps default to all-ones; the forward-backward consistency check
# below is disabled (commented out).
occs = np.ones(fw.shape[:-1]).astype('float32')
# for img_i in range(len(imgs)-1):
# img1, img2 = imgs[img_i,...], imgs[img_i+1,...]
# f = fw[img_i,...] # corresponding forward floAw image
# b = bw[img_i,...] # corresponding backward flow image
# # loop through all pixel to check occlusion
# for i in range(M):
# for j in range(N):
# # flow forward
# fi, fj = np.round(np.array([i,j]) + f[i,j]).astype(int)
# # ignore the pixel if it goes out of range
# if not (0<=fi<M and 0<=fj<N):
# continue
# # flow backward
# bi, bj = np.round(np.array([fi,fj]) - b[fi,fj]).astype(int)
# THRESHOLD = 1
# # occlusion detected
# if np.abs(i - bi) + np.abs(j - bj) > THRESHOLD:
# occs[img_i,i,j] = 1
# # pdb.set_trace()
import os
# LASR expects a DAVIS-style directory layout; the r{OBJNAME} folders are a
# duplicate set written alongside the primary ones.
os.makedirs(f'output/{OBJNAME}/LASR/FlowFW/Full-Resolution/{OBJNAME}',exist_ok=True)
os.makedirs(f'output/{OBJNAME}/LASR/FlowBW/Full-Resolution/{OBJNAME}',exist_ok=True)
os.makedirs(f'output/{OBJNAME}/LASR/FlowFW/Full-Resolution/r{OBJNAME}',exist_ok=True)
os.makedirs(f'output/{OBJNAME}/LASR/FlowBW/Full-Resolution/r{OBJNAME}',exist_ok=True)
os.makedirs(f'output/{OBJNAME}/LASR/Camera/Full-Resolution/{OBJNAME}',exist_ok=True)
os.makedirs(f'output/{OBJNAME}/LASR/Camera/Full-Resolution/r{OBJNAME}',exist_ok=True)
# write flows into pfm
# Kubric optical forward flow format:
# fw[i,j,k] = [dy, dx] for pixel [j,k] from img[i] to img[i+1]
# Kubric optical backward flow format:
# bw[i,j,k] = [-dy, -dx] for pixel [j,k] from img[i] to img[i-1]
# VCN optical forward flow format:
# fw[i,j,k] = [dx, dy] for pixel [256-j,k] from img[i] to img[i+1]
# VCN optical backward flow format:
# bw[i,j,k] = [dx, dy] for pixel [256-j,k] from img[i] to img[i-1]
# Convert each Kubric flow frame to the VCN convention (see format notes
# above): swap (dy,dx)->(dx,dy), append a ones channel, flip vertically.
for i in range(len(fw)):
    f = fw[i,...]
    ones = np.ones_like(f[...,:1])
    f = np.concatenate([f[...,1:], f[...,:1], ones],-1)
    # Backward flow is stored negated by Kubric; negate to get (dx,dy).
    b = np.concatenate([-bw[i,...,1:],-bw[i,...,:1], ones],-1)
    f = np.flip(f,0)
    b = np.flip(b,0)
    write_pfm(f'output/{OBJNAME}/LASR/FlowFW/Full-Resolution/{OBJNAME}/flo-{i:05d}.pfm',f)
    write_pfm(f'output/{OBJNAME}/LASR/FlowBW/Full-Resolution/{OBJNAME}/flo-{i+1:05d}.pfm',b)
    # Occlusion maps are written as all-ones (occlusion reasoning disabled).
    write_pfm(f'output/{OBJNAME}/LASR/FlowFW/Full-Resolution/{OBJNAME}/occ-{i:05d}.pfm',np.ones_like(occs[i,...]))
    write_pfm(f'output/{OBJNAME}/LASR/FlowBW/Full-Resolution/{OBJNAME}/occ-{i+1:05d}.pfm',np.ones_like(occs[i,...]))
    write_pfm(f'output/{OBJNAME}/LASR/FlowFW/Full-Resolution/r{OBJNAME}/flo-{i:05d}.pfm',f)
    write_pfm(f'output/{OBJNAME}/LASR/FlowBW/Full-Resolution/r{OBJNAME}/flo-{i+1:05d}.pfm',b)
    write_pfm(f'output/{OBJNAME}/LASR/FlowFW/Full-Resolution/r{OBJNAME}/occ-{i:05d}.pfm',np.ones_like(occs[i,...]))
    write_pfm(f'output/{OBJNAME}/LASR/FlowBW/Full-Resolution/r{OBJNAME}/occ-{i+1:05d}.pfm',np.ones_like(occs[i,...]))
for i in range(len(cam_params)):
    # save camera parameters
    np.savetxt(f'output/{OBJNAME}/LASR/Camera/Full-Resolution/{OBJNAME}/{i:05d}.txt',cam_params[i].T)
    np.savetxt(f'output/{OBJNAME}/LASR/Camera/Full-Resolution/r{OBJNAME}/{i:05d}.txt',cam_params[i].T)
# write gif
imageio.mimsave(str(kb.as_path(f"output/{OBJNAME}/") / f"{OBJNAME}.gif"),frames_dict['rgba'])
# Also write PNG flow visualizations via Kubric's own writer.
kb.file_io.write_flow_batch(frames_dict['forward_flow'], directory= f"output/{OBJNAME}/FlowFW", file_template="{:05d}.png", name="forward_flow",
                            max_write_threads=16)
kb.file_io.write_flow_batch(frames_dict['backward_flow'], directory= f"output/{OBJNAME}/FlowBW", file_template="{:05d}.png", name="backward_flow",
                            max_write_threads=16)
# cp -r output/moving_cube/LASR/*s/ ../lasr/database/DAVIS/ | [
"pickle.dump",
"kubric.DirectionalLight",
"numpy.ones",
"numpy.sin",
"kubric.Cube",
"numpy.savetxt",
"kubric.AssetSource",
"numpy.random.RandomState",
"random.seed",
"kubric.PrincipledBSDFMaterial",
"numpy.arccos",
"kubric.PerspectiveCamera",
"kubric.file_io.write_flow_batch",
"numpy.ones_... | [((945, 978), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""INFO"""'}), "(level='INFO')\n", (964, 978), False, 'import logging\n'), ((1208, 1222), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (1219, 1222), False, 'import random\n'), ((1287, 1318), 'kubric.Scene', 'kb.Scene', ([], {'resolution': '(256, 256)'}), '(resolution=(256, 256))\n', (1295, 1318), True, 'import kubric as kb\n'), ((1480, 1500), 'kubric.renderer.blender.Blender', 'KubricBlender', (['scene'], {}), '(scene)\n', (1493, 1500), True, 'from kubric.renderer.blender import Blender as KubricBlender\n'), ((1513, 1535), 'kubric.simulator.pybullet.PyBullet', 'KubricSimulator', (['scene'], {}), '(scene)\n', (1528, 1535), True, 'from kubric.simulator.pybullet import PyBullet as KubricSimulator\n'), ((1601, 1717), 'kubric.Cube', 'kb.Cube', ([], {'name': '"""floor"""', 'scale': '(5, 5, 0.1)', 'position': '(0, 0, -0.1)', 'static': '(True)', 'background': '(True)', 'segmentation_id': '(1)'}), "(name='floor', scale=(5, 5, 0.1), position=(0, 0, -0.1), static=True,\n background=True, segmentation_id=1)\n", (1608, 1717), True, 'import kubric as kb\n'), ((1723, 1816), 'kubric.DirectionalLight', 'kb.DirectionalLight', ([], {'name': '"""sun"""', 'position': '(-1, -0.5, 3)', 'look_at': '(0, 0, 0)', 'intensity': '(1.5)'}), "(name='sun', position=(-1, -0.5, 3), look_at=(0, 0, 0),\n intensity=1.5)\n", (1742, 1816), True, 'import kubric as kb\n'), ((1828, 1903), 'kubric.PerspectiveCamera', 'kb.PerspectiveCamera', ([], {'name': '"""camera"""', 'position': '(2, -2, 4)', 'look_at': '(0, 0, 0)'}), "(name='camera', position=(2, -2, 4), look_at=(0, 0, 0))\n", (1848, 1903), True, 'import kubric as kb\n'), ((1946, 1980), 'kubric.Color', 'kb.Color', ([], {'r': '(1)', 'g': '(0.1)', 'b': '(0.1)', 'a': '(1.0)'}), '(r=1, g=0.1, b=0.1, a=1.0)\n', (1954, 1980), True, 'import kubric as kb\n'), ((2049, 2087), 'kubric.PrincipledBSDFMaterial', 'kb.PrincipledBSDFMaterial', ([], {'color': 'color'}), 
'(color=color)\n', (2074, 2087), True, 'import kubric as kb\n'), ((9978, 10067), 'os.makedirs', 'os.makedirs', (['f"""output/{OBJNAME}/LASR/FlowFW/Full-Resolution/{OBJNAME}"""'], {'exist_ok': '(True)'}), "(f'output/{OBJNAME}/LASR/FlowFW/Full-Resolution/{OBJNAME}',\n exist_ok=True)\n", (9989, 10067), False, 'import os\n'), ((10063, 10152), 'os.makedirs', 'os.makedirs', (['f"""output/{OBJNAME}/LASR/FlowBW/Full-Resolution/{OBJNAME}"""'], {'exist_ok': '(True)'}), "(f'output/{OBJNAME}/LASR/FlowBW/Full-Resolution/{OBJNAME}',\n exist_ok=True)\n", (10074, 10152), False, 'import os\n'), ((10148, 10238), 'os.makedirs', 'os.makedirs', (['f"""output/{OBJNAME}/LASR/FlowFW/Full-Resolution/r{OBJNAME}"""'], {'exist_ok': '(True)'}), "(f'output/{OBJNAME}/LASR/FlowFW/Full-Resolution/r{OBJNAME}',\n exist_ok=True)\n", (10159, 10238), False, 'import os\n'), ((10234, 10324), 'os.makedirs', 'os.makedirs', (['f"""output/{OBJNAME}/LASR/FlowBW/Full-Resolution/r{OBJNAME}"""'], {'exist_ok': '(True)'}), "(f'output/{OBJNAME}/LASR/FlowBW/Full-Resolution/r{OBJNAME}',\n exist_ok=True)\n", (10245, 10324), False, 'import os\n'), ((10320, 10409), 'os.makedirs', 'os.makedirs', (['f"""output/{OBJNAME}/LASR/Camera/Full-Resolution/{OBJNAME}"""'], {'exist_ok': '(True)'}), "(f'output/{OBJNAME}/LASR/Camera/Full-Resolution/{OBJNAME}',\n exist_ok=True)\n", (10331, 10409), False, 'import os\n'), ((10405, 10495), 'os.makedirs', 'os.makedirs', (['f"""output/{OBJNAME}/LASR/Camera/Full-Resolution/r{OBJNAME}"""'], {'exist_ok': '(True)'}), "(f'output/{OBJNAME}/LASR/Camera/Full-Resolution/r{OBJNAME}',\n exist_ok=True)\n", (10416, 10495), False, 'import os\n'), ((12388, 12563), 'kubric.file_io.write_flow_batch', 'kb.file_io.write_flow_batch', (["frames_dict['forward_flow']"], {'directory': 'f"""output/{OBJNAME}/FlowFW"""', 'file_template': '"""{:05d}.png"""', 'name': '"""forward_flow"""', 'max_write_threads': '(16)'}), "(frames_dict['forward_flow'], directory=\n f'output/{OBJNAME}/FlowFW', file_template='{:05d}.png', 
name=\n 'forward_flow', max_write_threads=16)\n", (12415, 12563), True, 'import kubric as kb\n'), ((12575, 12752), 'kubric.file_io.write_flow_batch', 'kb.file_io.write_flow_batch', (["frames_dict['backward_flow']"], {'directory': 'f"""output/{OBJNAME}/FlowBW"""', 'file_template': '"""{:05d}.png"""', 'name': '"""backward_flow"""', 'max_write_threads': '(16)'}), "(frames_dict['backward_flow'], directory=\n f'output/{OBJNAME}/FlowBW', file_template='{:05d}.png', name=\n 'backward_flow', max_write_threads=16)\n", (12602, 12752), True, 'import kubric as kb\n'), ((2120, 2301), 'kubric.Cube', 'kb.Cube', ([], {'name': '"""cube"""', 'scale': '(0.3)', 'velocity': 'VELOCITY', 'angular_velocity': '[0, 0, 0]', 'position': 'POSITION', 'mass': '(0.2)', 'restitution': '(1)', 'material': 'material', 'friction': '(1)', 'segmentation_id': '(2)'}), "(name='cube', scale=0.3, velocity=VELOCITY, angular_velocity=[0, 0, \n 0], position=POSITION, mass=0.2, restitution=1, material=material,\n friction=1, segmentation_id=2)\n", (2127, 2301), True, 'import kubric as kb\n'), ((3567, 3609), 'kubric.PrincipledBSDFMaterial', 'kb.PrincipledBSDFMaterial', ([], {'name': '"""material"""'}), "(name='material')\n", (3592, 3609), True, 'import kubric as kb\n'), ((3636, 3651), 'random.random', 'random.random', ([], {}), '()\n', (3649, 3651), False, 'import random\n'), ((3913, 3957), 'bpy.data.images.load', 'bpy.data.images.load', (['"""examples/tex/tex.jpg"""'], {}), "('examples/tex/tex.jpg')\n", (3933, 3957), False, 'import bpy\n'), ((4466, 4508), 'numpy.arccos', 'np.arccos', (['(original_camera_position[2] / r)'], {}), '(original_camera_position[2] / r)\n', (4475, 4508), True, 'import numpy as np\n'), ((5945, 5961), 'numpy.zeros', 'np.zeros', (['[1, 8]'], {}), '([1, 8])\n', (5953, 5961), True, 'import numpy as np\n'), ((6006, 6040), 'scipy.spatial.transform.Rotation.from_quat', 'transform.Rotation.from_quat', (['quat'], {}), '(quat)\n', (6034, 6040), False, 'from scipy.spatial import transform\n'), 
((6983, 7013), 'pickle.dump', 'pickle.dump', (['frames_dict', 'file'], {}), '(frames_dict, file)\n', (6994, 7013), False, 'import pickle\n'), ((10983, 11007), 'numpy.ones_like', 'np.ones_like', (['f[..., :1]'], {}), '(f[..., :1])\n', (10995, 11007), True, 'import numpy as np\n'), ((11017, 11067), 'numpy.concatenate', 'np.concatenate', (['[f[..., 1:], f[..., :1], ones]', '(-1)'], {}), '([f[..., 1:], f[..., :1], ones], -1)\n', (11031, 11067), True, 'import numpy as np\n'), ((11073, 11133), 'numpy.concatenate', 'np.concatenate', (['[-bw[i, ..., 1:], -bw[i, ..., :1], ones]', '(-1)'], {}), '([-bw[i, ..., 1:], -bw[i, ..., :1], ones], -1)\n', (11087, 11133), True, 'import numpy as np\n'), ((11137, 11150), 'numpy.flip', 'np.flip', (['f', '(0)'], {}), '(f, 0)\n', (11144, 11150), True, 'import numpy as np\n'), ((11158, 11171), 'numpy.flip', 'np.flip', (['b', '(0)'], {}), '(b, 0)\n', (11165, 11171), True, 'import numpy as np\n'), ((12080, 12187), 'numpy.savetxt', 'np.savetxt', (['f"""output/{OBJNAME}/LASR/Camera/Full-Resolution/{OBJNAME}/{i:05d}.txt"""', 'cam_params[i].T'], {}), "(\n f'output/{OBJNAME}/LASR/Camera/Full-Resolution/{OBJNAME}/{i:05d}.txt',\n cam_params[i].T)\n", (12090, 12187), True, 'import numpy as np\n'), ((12182, 12290), 'numpy.savetxt', 'np.savetxt', (['f"""output/{OBJNAME}/LASR/Camera/Full-Resolution/r{OBJNAME}/{i:05d}.txt"""', 'cam_params[i].T'], {}), "(\n f'output/{OBJNAME}/LASR/Camera/Full-Resolution/r{OBJNAME}/{i:05d}.txt',\n cam_params[i].T)\n", (12192, 12290), True, 'import numpy as np\n'), ((2465, 2499), 'kubric.AssetSource', 'kb.AssetSource', (['"""examples/KuBasic"""'], {}), "('examples/KuBasic')\n", (2479, 2499), True, 'import kubric as kb\n'), ((2509, 2571), 'kubric.assets.asset_source.create', 'asset_source.create', ([], {'name': '"""torus"""', 'asset_id': '"""Torus"""', 'scale': '(0.5)'}), "(name='torus', asset_id='Torus', scale=0.5)\n", (2528, 2571), False, 'from kubric.assets import asset_source\n'), ((3679, 3694), 'random.random', 
'random.random', ([], {}), '()\n', (3692, 3694), False, 'import random\n'), ((5321, 5337), 'numpy.zeros', 'np.zeros', (['[1, 8]'], {}), '([1, 8])\n', (5329, 5337), True, 'import numpy as np\n'), ((5382, 5416), 'scipy.spatial.transform.Rotation.from_quat', 'transform.Rotation.from_quat', (['quat'], {}), '(quat)\n', (5410, 5416), False, 'from scipy.spatial import transform\n'), ((6626, 6646), 'kubric.as_path', 'kb.as_path', (['"""output"""'], {}), "('output')\n", (6636, 6646), True, 'import kubric as kb\n'), ((8182, 8198), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (8191, 8198), True, 'import numpy as np\n'), ((9061, 9083), 'numpy.ones', 'np.ones', (['fw.shape[:-1]'], {}), '(fw.shape[:-1])\n', (9068, 9083), True, 'import numpy as np\n'), ((11448, 11474), 'numpy.ones_like', 'np.ones_like', (['occs[i, ...]'], {}), '(occs[i, ...])\n', (11460, 11474), True, 'import numpy as np\n'), ((11565, 11591), 'numpy.ones_like', 'np.ones_like', (['occs[i, ...]'], {}), '(occs[i, ...])\n', (11577, 11591), True, 'import numpy as np\n'), ((11868, 11894), 'numpy.ones_like', 'np.ones_like', (['occs[i, ...]'], {}), '(occs[i, ...])\n', (11880, 11894), True, 'import numpy as np\n'), ((11986, 12012), 'numpy.ones_like', 'np.ones_like', (['occs[i, ...]'], {}), '(occs[i, ...])\n', (11998, 12012), True, 'import numpy as np\n'), ((2885, 2980), 'kubric.AssetSource', 'kb.AssetSource', (['"""gs://tensorflow-graphics/public/60c9de9c410be30098c297ac/ShapeNetCore.v2"""'], {}), "(\n 'gs://tensorflow-graphics/public/60c9de9c410be30098c297ac/ShapeNetCore.v2')\n", (2899, 2980), True, 'import kubric as kb\n'), ((3074, 3098), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (3095, 3098), True, 'import numpy as np\n'), ((3186, 3224), 'kubric.assets.asset_source.create', 'asset_source.create', ([], {'asset_id': 'asset_id'}), '(asset_id=asset_id)\n', (3205, 3224), False, 'from kubric.assets import asset_source\n'), ((5043, 5054), 'numpy.sin', 'np.sin', (['phi'], {}), 
'(phi)\n', (5049, 5054), True, 'import numpy as np\n'), ((5087, 5098), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (5093, 5098), True, 'import numpy as np\n'), ((5111, 5122), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (5117, 5122), True, 'import numpy as np\n'), ((7261, 7335), 'kubric.as_path', 'kb.as_path', (['f"""output/{OBJNAME}/LASR/Annotations/Full-Resolution/{OBJNAME}"""'], {}), "(f'output/{OBJNAME}/LASR/Annotations/Full-Resolution/{OBJNAME}')\n", (7271, 7335), True, 'import kubric as kb\n'), ((7601, 7674), 'kubric.as_path', 'kb.as_path', (['f"""output/{OBJNAME}/LASR/JPEGImages/Full-Resolution/{OBJNAME}"""'], {}), "(f'output/{OBJNAME}/LASR/JPEGImages/Full-Resolution/{OBJNAME}')\n", (7611, 7674), True, 'import kubric as kb\n'), ((12314, 12346), 'kubric.as_path', 'kb.as_path', (['f"""output/{OBJNAME}/"""'], {}), "(f'output/{OBJNAME}/')\n", (12324, 12346), True, 'import kubric as kb\n'), ((4584, 4595), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4590, 4595), True, 'import numpy as np\n'), ((5023, 5040), 'numpy.cos', 'np.cos', (['theta_new'], {}), '(theta_new)\n', (5029, 5040), True, 'import numpy as np\n'), ((5067, 5084), 'numpy.sin', 'np.sin', (['theta_new'], {}), '(theta_new)\n', (5073, 5084), True, 'import numpy as np\n')] |
import RPi.GPIO as GPIO
from time import sleep
import numpy as np
import Encoder
import threading
import signal
import sys
import pybullet as p
import argparse
import time
# import keyboard
# For pybullet loading urdf to calculate inverse dynamics / Motor Params
def SetUp():
    """Build the pybullet arm model, parse the CLI and return initial state.

    Side effects: sets module globals `bodyId` (pybullet body unique id) and
    `ee_mass` (mass of link 3, the end effector, from the URDF).

    Returns:
        tuple: (targetORN, destORN, prev_pos, prev_error, cum_e,
                load, picked, placed, offset, worm)
    """
    global ee_mass, bodyId
    # Headless physics client with gravity along -z.
    client = p.connect(p.DIRECT)
    p.setGravity(0, 0, -9.81, physicsClientId=client)
    flags = p.URDF_USE_SELF_COLLISION
    bodyId = p.loadURDF("./data/Arm_Final_Planet_hook/urdf/Arm_Final_Planet_hook.urdf",
                       basePosition=[0, 0, 0], useFixedBase=True, flags=flags)
    # Disable the default velocity motors (force=0) so the joints can be
    # driven by externally computed torques instead.
    for joint in range(3):
        p.setJointMotorControl2(bodyId, joint, controlMode=p.VELOCITY_CONTROL, force=0)
    # End-effector link mass as modelled in the URDF.
    ee_mass = p.getDynamicsInfo(bodyId, 3)[0]
    parser = argparse.ArgumentParser()
    parser.add_argument('a0', type=float,
                        help='target end effector joint angle 0')
    parser.add_argument('a1', type=float,
                        help='target end effector joint angle 1')
    parser.add_argument('a2', type=float,
                        help='target end effector joint angle 2')
    parser.add_argument('--load', type=float,
                        help='weight to lift')
    parser.add_argument('--worm', type=int,
                        help='set if worm gear used or not,0: planetary 1: worm gear')
    args = parser.parse_args()
    # Pick-up target (rad); the drop-off target is the same pose rotated
    # +90 degrees about the base joint.
    targetORN = [args.a0 * np.pi / 180, args.a1 * np.pi / 180, args.a2 * np.pi / 180]
    destORN = [args.a0 * np.pi / 180 + np.pi / 2, args.a1 * np.pi / 180, args.a2 * np.pi / 180]
    prev_pos = [0, -85 * np.pi / 180, 0]
    prev_error = [0, 0, 0]
    cum_e = [0, 0, 0]
    load = args.load
    # Any value other than 0 (including an omitted --worm, i.e. None) selects
    # worm-gear behaviour -- identical to the original if/else.
    worm = args.worm != 0
    picked, placed = False, False
    offset = False
    return targetORN, destORN, prev_pos, prev_error, cum_e, load, picked, placed, offset, worm
def checkPoint(error, vel, status):
    """Latch `status` to True once the arm has settled at its target.

    The flag flips to True when both the position-error vector and the
    velocity vector have Euclidean norm below 0.1; once True it stays True.

    Args:
        error: per-joint position errors (rad).
        vel: per-joint velocities (rad/s).
        status: current latched flag.

    Returns:
        Updated flag.
    """
    TOLERANCE = 0.1
    if status:
        return status
    err_mag = np.linalg.norm(np.asarray(error), axis=0)
    vel_mag = np.linalg.norm(np.asarray(vel), axis=0)
    if err_mag < TOLERANCE and vel_mag < TOLERANCE:
        return True
    return status
def GetVoltage(torque, vel):
    """Convert per-joint torques and velocities into motor drive voltages.

    Models a brushed DC motor behind an N:1 gearbox:
        V = (R / Kt) * (tau_joint / N) + Ke * (omega_joint * N)
    i.e. the resistive drop needed to push the required current plus the
    back-EMF at the motor shaft speed.

    Motor constants (12 V brushed gearmotor): stall torque 23.5 mNm, stall
    current 1.8 A, winding resistance 8.4 ohm, no-load 7000 rpm at 70 mA,
    gear ratio 270:1.

    Args:
        torque: joint torques (Nm), one entry per joint.
        vel: joint angular velocities (rad/s), same length as `torque`.

    Returns:
        list[float]: required terminal voltage for each joint's motor.
    """
    Ts = 23.5 / 1000                       # Nm, stall torque
    Is = 1.8                               # A, stall current
    R = 8.4                                # ohm, winding resistance
    V = 12                                 # V, rated voltage
    noLoadCurr = 70 / 1000                 # A
    noLoadSpeed = 7000 * 2 * np.pi / 60    # rad/s
    N = 270                                # gearbox reduction
    Kt = Ts / Is                           # torque constant (Nm/A)
    Ke = (V - R * noLoadCurr) / noLoadSpeed  # back-EMF constant (V*s/rad)
    # Generalized from a fixed 3-joint unrolled computation to any number of
    # joints; for 3-element inputs the result is unchanged.
    return [R / Kt * t / N + Ke * w * N for t, w in zip(torque, vel)]
def PID_torque(e, de, cum_e, load):
    """Per-joint PID torque with load-dependent gain scheduling.

    Args:
        e: position error per joint (rad).
        de: error difference per joint (current minus previous).
        cum_e: accumulated (integral) error per joint.
        load: payload mass; 0 selects the unloaded gain set, otherwise each
            gain is boosted proportionally to the load.

    Returns:
        list[float]: commanded torques for joints 0..2.
    """
    # Base (kp, ki, kd) per joint.
    base_gains = ((9e-2, 1e-8, 9e-2),
                  (6.5, 1e-3, 7.0),
                  (10e-1, 1e-3, 10e-1))
    # Per-joint boost factors, applied as gain *= (1 + factor * load).
    load_factors = ((10.5, 5**5, 15),
                    (1000.6, 5**6, 805),
                    (7.025, 7.5, 7.025))
    torques = []
    for joint in range(3):
        kp, ki, kd = base_gains[joint]
        if load != 0:
            fp, fi, fd = load_factors[joint]
            kp *= (1 + fp * load)
            ki *= (1 + fi * load)
            kd *= (1 + fd * load)
        torques.append(kp * e[joint] + kd * de[joint] + ki * cum_e[joint])
    return torques
# For GPIO clean exit
def signal_handler(sig, frame):
    """SIGINT (Ctrl-C) handler: release GPIO resources, then exit.

    Args:
        sig: signal number (unused).
        frame: interrupted stack frame (unused).
    """
    print('Cleaning GPIO and Exiting the program...')
    exitRoutine()  # GPIO.cleanup()
    sys.exit(0)
# Register the Ctrl-C handler so GPIO is always released on exit.
signal.signal(signal.SIGINT, signal_handler)
# Motor--------------------------------------
# PWM carrier frequency [Hz] and encoder counts per output-shaft rotation.
pwm_frequency = 1000
encoder_count_per_rotation = 810
# Motor supply voltage [V]; used by rotateCW/rotateCCW to derive duty cycles.
V = 12
# GPIOs--------------------------------------
# NOTE(review): GPIO.setmode(GPIO.BCM) is used below, so each assigned value
# is itself a BCM GPIO number. Several inline "# GPIO n" labels below do not
# match the assigned values (motor 1 and motor 3 labels look swapped) --
# verify against the actual wiring before trusting the comments.
# First Motor related
motor_driver_1_reverse_enable_pin = 6 # GPIO 4
motor_driver_1_forward_enable_pin = 13 # GPIO 17
motor_driver_1_reverse_pwm_pin = 19 # GPIO 27
motor_driver_1_forward_pwm_pin = 26 # GPIO 22
motor_1_Encoder_A_pin = 12 # GPIO 18
motor_1_Encoder_B_pin = 16 # GPIO 23
# Second Motor related
motor_driver_2_reverse_enable_pin = 10 # GPIO 10
motor_driver_2_forward_enable_pin = 9 # GPIO 9
motor_driver_2_reverse_pwm_pin = 11 # GPIO 11
motor_driver_2_forward_pwm_pin = 5 # GPIO 5
motor_2_Encoder_A_pin = 24 # GPIO 24
motor_2_Encoder_B_pin = 25 # GPIO 25
# Third Motor related
motor_driver_3_reverse_enable_pin = 4 # GPIO 6
motor_driver_3_forward_enable_pin = 17 # GPIO 13
motor_driver_3_reverse_pwm_pin = 27 # GPIO 19
motor_driver_3_forward_pwm_pin = 22 # GPIO 26
motor_3_Encoder_A_pin = 18 # GPIO 12
motor_3_Encoder_B_pin = 23 # GPIO 16
# GPIO initialization--------------------------------------
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# First Motor related
GPIO.setup(motor_driver_1_reverse_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_1_forward_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_1_reverse_pwm_pin, GPIO.OUT)
GPIO.setup(motor_driver_1_forward_pwm_pin, GPIO.OUT)
GPIO.setup(motor_1_Encoder_A_pin, GPIO.IN)
GPIO.setup(motor_1_Encoder_B_pin, GPIO.IN)
# Quadrature decoder plus one PWM channel per direction for each motor.
motor_1_encoder = Encoder.Encoder(motor_1_Encoder_A_pin, motor_1_Encoder_B_pin)
motor_driver_1_reverse_pwm = GPIO.PWM(motor_driver_1_reverse_pwm_pin, pwm_frequency)
motor_driver_1_forward_pwm = GPIO.PWM(motor_driver_1_forward_pwm_pin, pwm_frequency)
# Second Motor related
GPIO.setup(motor_driver_2_reverse_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_2_forward_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_2_reverse_pwm_pin, GPIO.OUT)
GPIO.setup(motor_driver_2_forward_pwm_pin, GPIO.OUT)
GPIO.setup(motor_2_Encoder_A_pin, GPIO.IN)
GPIO.setup(motor_2_Encoder_B_pin, GPIO.IN)
motor_2_encoder = Encoder.Encoder(motor_2_Encoder_A_pin, motor_2_Encoder_B_pin)
motor_driver_2_reverse_pwm = GPIO.PWM(motor_driver_2_reverse_pwm_pin, pwm_frequency)
motor_driver_2_forward_pwm = GPIO.PWM(motor_driver_2_forward_pwm_pin, pwm_frequency)
# Third Motor related
GPIO.setup(motor_driver_3_reverse_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_3_forward_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_3_reverse_pwm_pin, GPIO.OUT)
GPIO.setup(motor_driver_3_forward_pwm_pin, GPIO.OUT)
GPIO.setup(motor_3_Encoder_A_pin, GPIO.IN)
GPIO.setup(motor_3_Encoder_B_pin, GPIO.IN)
motor_3_encoder = Encoder.Encoder(motor_3_Encoder_A_pin, motor_3_Encoder_B_pin)
motor_driver_3_reverse_pwm = GPIO.PWM(motor_driver_3_reverse_pwm_pin, pwm_frequency)
motor_driver_3_forward_pwm = GPIO.PWM(motor_driver_3_forward_pwm_pin, pwm_frequency)
# End of initialization--------------------------------------
def rotateCCW(motor, voltage):
    """Drive the selected motor counter-clockwise via its forward PWM channel.

    Args:
        motor: motor index 0-2; any other value is silently ignored.
        voltage: requested drive voltage; clamped to the 12 V supply and
            converted to a PWM duty-cycle percentage.
    """
    duty = 100 if voltage > 12 else voltage / V * 100
    forward_channels = {
        0: motor_driver_1_forward_pwm,
        1: motor_driver_2_forward_pwm,
        2: motor_driver_3_forward_pwm,
    }
    channel = forward_channels.get(motor)
    if channel is not None:
        channel.ChangeDutyCycle(duty)
def rotateCW(motor, voltage):
    """Drive the selected motor clockwise via its reverse PWM channel.

    Args:
        motor: motor index 0-2; any other value is silently ignored.
        voltage: requested drive voltage; clamped to the 12 V supply and
            converted to a PWM duty-cycle percentage.
    """
    duty = 100 if voltage > 12 else voltage / V * 100
    reverse_channels = {
        0: motor_driver_1_reverse_pwm,
        1: motor_driver_2_reverse_pwm,
        2: motor_driver_3_reverse_pwm,
    }
    channel = reverse_channels.get(motor)
    if channel is not None:
        channel.ChangeDutyCycle(duty)
def stopRotate(motor):
    """Stop the given motor by zeroing the duty cycle in both directions."""
    rotateCW(motor, 0)
    rotateCCW(motor, 0)
def getEncoderPosition(encoder):
    """Return the cumulative shaft angle of a motor in radians.

    Args:
        encoder: encoder index 0-2; any other value yields None.

    Returns:
        Angle in radians, or None for an unknown index.
    """
    readers = {0: motor_1_encoder, 1: motor_2_encoder, 2: motor_3_encoder}
    reader = readers.get(encoder)
    if reader is None:
        return None
    # read()/10: raw count is scaled by 10 -- presumably a decoder/gearing
    # factor of the Encoder class; confirm against its implementation.
    return 2 * np.pi * (reader.read() / 10) / encoder_count_per_rotation
def getEncoderVelocity(encoder_position, prev_pos, dt):
    """Finite-difference angular velocity in rad/s over the interval dt."""
    delta = encoder_position - prev_pos
    return delta / dt
def exitRoutine():
    """Release all GPIO resources (called from the SIGINT handler)."""
    GPIO.cleanup()
# Control-loop period [s]; encoder velocities are finite differences over dt.
dt = 0.05 #50ms
# Placeholder only -- immediately overwritten by the list SetUp() returns.
prev_pos = 0
# Enable both half-bridges of every motor driver.
GPIO.output(motor_driver_1_reverse_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_1_forward_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_2_reverse_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_2_forward_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_3_reverse_enable_pin, GPIO.HIGH)
GPIO.output(motor_driver_3_forward_enable_pin, GPIO.HIGH)
# Start every PWM channel at 0% duty (motors stopped).
motor_driver_1_forward_pwm.start(0)
motor_driver_1_reverse_pwm.start(0)
motor_driver_2_forward_pwm.start(0)
motor_driver_2_reverse_pwm.start(0)
motor_driver_3_forward_pwm.start(0)
motor_driver_3_reverse_pwm.start(0)
# rotateCW(0, 12)
# pause = 0
# Build the pybullet model, parse CLI args and initialise controller state.
targetORN, destORN, prev_pos, prev_error, cum_e, load, picked, placed, offset, worm = SetUp()
def main():
    """Run one control cycle and reschedule itself every `dt` seconds.

    Each cycle: sample the three joint encoders, compute PID torques toward
    the current target pose plus a model-based feed-forward term, convert
    them to motor voltages and update the PWM outputs. Once the pick-up pose
    is reached, the payload mass is attached in the simulation model and the
    target switches to the drop-off pose.
    """
    global targetORN, destORN, prev_pos, prev_error, cum_e, load, picked, placed, offset, worm
    # Joint angles (rad) and finite-difference velocities (rad/s).
    pos = [getEncoderPosition(0), getEncoderPosition(1), getEncoderPosition(2)]
    vel = [getEncoderVelocity(pos[0], prev_pos[0], dt),
           getEncoderVelocity(pos[1], prev_pos[1], dt),
           getEncoderVelocity(pos[2], prev_pos[2], dt)]
    error = [targetORN[0] - pos[0], targetORN[1] - pos[1], targetORN[2] - pos[2]]
    de = [error[0] - prev_error[0], error[1] - prev_error[1], error[2] - prev_error[2]]
    # BUGFIX: `cum_e += error` concatenated the two lists, so elements 0..2
    # (the ones PID_torque reads) never changed and the integral term was
    # always zero. Accumulate element-wise instead.
    cum_e = [c + e for c, e in zip(cum_e, error)]
    if picked == False:
        # Approach phase: unloaded gain set.
        pidTorques = PID_torque(error, de, cum_e, 0)
        picked = checkPoint(error, vel, picked)
        if picked == True:
            # Pick-up reached: attach the payload mass to the end effector in
            # the dynamics model and retarget to the drop-off pose.
            p.changeDynamics(bodyId, 3, mass=ee_mass + load)
            targetORN = destORN
    if picked == True:
        # Carry phase: load-scheduled gains.
        pidTorques = PID_torque(error, de, cum_e, load)
        placed = checkPoint(error, vel, placed)
        if placed == True:
            print("Reached goal destination.")
    # Feed-forward torques from the simulated arm model (zero desired
    # acceleration).
    tau0, tau1, tau2 = p.calculateInverseDynamics(bodyId,
                                                  [pos[0], pos[1], pos[2]],
                                                  [vel[0], vel[1], vel[2]],
                                                  [0, 0, 0])
    # BUGFIX: `pidTorques + [tau0,tau1,tau2]` concatenated the lists, so
    # GetVoltage only ever saw the PID part and the feed-forward torques were
    # silently discarded. Add the terms element-wise.
    torque = [pid + ff for pid, ff in zip(pidTorques, (tau0, tau1, tau2))]
    volt = GetVoltage(torque, vel)
    print("volt = ", volt)
    if volt[0] > 0: rotateCW(0, abs(volt[0]))
    else: rotateCCW(0, abs(volt[0]))
    if volt[1] > 0: rotateCW(1, abs(volt[1]))
    else: rotateCCW(1, abs(volt[1]))
    if picked == True and worm == True:
        # A worm gear is self-locking: no holding torque needed while loaded.
        stopRotate(2)
    elif volt[2] > 0:
        rotateCW(2, abs(volt[2]))
    else:
        rotateCCW(2, abs(volt[2]))
    print("position 0: " + str(pos[0]) + ". velocity 0: " + str(vel[0]) + ".")
    print("position 1: " + str(pos[1]) + ". velocity 1: " + str(vel[1]) + ".")
    print("position 2: " + str(pos[2]) + ". velocity 2: " + str(vel[2]) + ".")
    print("-----------------------------------------------------------------")
    prev_pos = pos
    prev_error = error
    # Schedule the next control cycle.
    threading.Timer(dt, main).start()
main()
| [
"threading.Timer",
"argparse.ArgumentParser",
"pybullet.setJointMotorControl2",
"pybullet.connect",
"RPi.GPIO.output",
"Encoder.Encoder",
"pybullet.calculateInverseDynamics",
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"pybullet.setGravity",
"RPi.GPIO.setmode",
"pybullet.changeDynamics",
"numpy.as... | [((3827, 3871), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (3840, 3871), False, 'import signal\n'), ((5130, 5152), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (5142, 5152), True, 'import RPi.GPIO as GPIO\n'), ((5153, 5176), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (5169, 5176), True, 'import RPi.GPIO as GPIO\n'), ((5199, 5254), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_1_reverse_enable_pin', 'GPIO.OUT'], {}), '(motor_driver_1_reverse_enable_pin, GPIO.OUT)\n', (5209, 5254), True, 'import RPi.GPIO as GPIO\n'), ((5255, 5310), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_1_forward_enable_pin', 'GPIO.OUT'], {}), '(motor_driver_1_forward_enable_pin, GPIO.OUT)\n', (5265, 5310), True, 'import RPi.GPIO as GPIO\n'), ((5311, 5363), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_1_reverse_pwm_pin', 'GPIO.OUT'], {}), '(motor_driver_1_reverse_pwm_pin, GPIO.OUT)\n', (5321, 5363), True, 'import RPi.GPIO as GPIO\n'), ((5364, 5416), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_1_forward_pwm_pin', 'GPIO.OUT'], {}), '(motor_driver_1_forward_pwm_pin, GPIO.OUT)\n', (5374, 5416), True, 'import RPi.GPIO as GPIO\n'), ((5417, 5459), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_1_Encoder_A_pin', 'GPIO.IN'], {}), '(motor_1_Encoder_A_pin, GPIO.IN)\n', (5427, 5459), True, 'import RPi.GPIO as GPIO\n'), ((5460, 5502), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_1_Encoder_B_pin', 'GPIO.IN'], {}), '(motor_1_Encoder_B_pin, GPIO.IN)\n', (5470, 5502), True, 'import RPi.GPIO as GPIO\n'), ((5521, 5582), 'Encoder.Encoder', 'Encoder.Encoder', (['motor_1_Encoder_A_pin', 'motor_1_Encoder_B_pin'], {}), '(motor_1_Encoder_A_pin, motor_1_Encoder_B_pin)\n', (5536, 5582), False, 'import Encoder\n'), ((5613, 5668), 'RPi.GPIO.PWM', 'GPIO.PWM', (['motor_driver_1_reverse_pwm_pin', 'pwm_frequency'], {}), '(motor_driver_1_reverse_pwm_pin, pwm_frequency)\n', (5621, 
5668), True, 'import RPi.GPIO as GPIO\n'), ((5698, 5753), 'RPi.GPIO.PWM', 'GPIO.PWM', (['motor_driver_1_forward_pwm_pin', 'pwm_frequency'], {}), '(motor_driver_1_forward_pwm_pin, pwm_frequency)\n', (5706, 5753), True, 'import RPi.GPIO as GPIO\n'), ((5778, 5833), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_2_reverse_enable_pin', 'GPIO.OUT'], {}), '(motor_driver_2_reverse_enable_pin, GPIO.OUT)\n', (5788, 5833), True, 'import RPi.GPIO as GPIO\n'), ((5834, 5889), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_2_forward_enable_pin', 'GPIO.OUT'], {}), '(motor_driver_2_forward_enable_pin, GPIO.OUT)\n', (5844, 5889), True, 'import RPi.GPIO as GPIO\n'), ((5890, 5942), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_2_reverse_pwm_pin', 'GPIO.OUT'], {}), '(motor_driver_2_reverse_pwm_pin, GPIO.OUT)\n', (5900, 5942), True, 'import RPi.GPIO as GPIO\n'), ((5943, 5995), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_2_forward_pwm_pin', 'GPIO.OUT'], {}), '(motor_driver_2_forward_pwm_pin, GPIO.OUT)\n', (5953, 5995), True, 'import RPi.GPIO as GPIO\n'), ((5996, 6038), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_2_Encoder_A_pin', 'GPIO.IN'], {}), '(motor_2_Encoder_A_pin, GPIO.IN)\n', (6006, 6038), True, 'import RPi.GPIO as GPIO\n'), ((6039, 6081), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_2_Encoder_B_pin', 'GPIO.IN'], {}), '(motor_2_Encoder_B_pin, GPIO.IN)\n', (6049, 6081), True, 'import RPi.GPIO as GPIO\n'), ((6100, 6161), 'Encoder.Encoder', 'Encoder.Encoder', (['motor_2_Encoder_A_pin', 'motor_2_Encoder_B_pin'], {}), '(motor_2_Encoder_A_pin, motor_2_Encoder_B_pin)\n', (6115, 6161), False, 'import Encoder\n'), ((6192, 6247), 'RPi.GPIO.PWM', 'GPIO.PWM', (['motor_driver_2_reverse_pwm_pin', 'pwm_frequency'], {}), '(motor_driver_2_reverse_pwm_pin, pwm_frequency)\n', (6200, 6247), True, 'import RPi.GPIO as GPIO\n'), ((6277, 6332), 'RPi.GPIO.PWM', 'GPIO.PWM', (['motor_driver_2_forward_pwm_pin', 'pwm_frequency'], {}), '(motor_driver_2_forward_pwm_pin, pwm_frequency)\n', (6285, 6332), True, 
'import RPi.GPIO as GPIO\n'), ((6356, 6411), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_3_reverse_enable_pin', 'GPIO.OUT'], {}), '(motor_driver_3_reverse_enable_pin, GPIO.OUT)\n', (6366, 6411), True, 'import RPi.GPIO as GPIO\n'), ((6412, 6467), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_3_forward_enable_pin', 'GPIO.OUT'], {}), '(motor_driver_3_forward_enable_pin, GPIO.OUT)\n', (6422, 6467), True, 'import RPi.GPIO as GPIO\n'), ((6468, 6520), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_3_reverse_pwm_pin', 'GPIO.OUT'], {}), '(motor_driver_3_reverse_pwm_pin, GPIO.OUT)\n', (6478, 6520), True, 'import RPi.GPIO as GPIO\n'), ((6521, 6573), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_driver_3_forward_pwm_pin', 'GPIO.OUT'], {}), '(motor_driver_3_forward_pwm_pin, GPIO.OUT)\n', (6531, 6573), True, 'import RPi.GPIO as GPIO\n'), ((6574, 6616), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_3_Encoder_A_pin', 'GPIO.IN'], {}), '(motor_3_Encoder_A_pin, GPIO.IN)\n', (6584, 6616), True, 'import RPi.GPIO as GPIO\n'), ((6617, 6659), 'RPi.GPIO.setup', 'GPIO.setup', (['motor_3_Encoder_B_pin', 'GPIO.IN'], {}), '(motor_3_Encoder_B_pin, GPIO.IN)\n', (6627, 6659), True, 'import RPi.GPIO as GPIO\n'), ((6678, 6739), 'Encoder.Encoder', 'Encoder.Encoder', (['motor_3_Encoder_A_pin', 'motor_3_Encoder_B_pin'], {}), '(motor_3_Encoder_A_pin, motor_3_Encoder_B_pin)\n', (6693, 6739), False, 'import Encoder\n'), ((6770, 6825), 'RPi.GPIO.PWM', 'GPIO.PWM', (['motor_driver_3_reverse_pwm_pin', 'pwm_frequency'], {}), '(motor_driver_3_reverse_pwm_pin, pwm_frequency)\n', (6778, 6825), True, 'import RPi.GPIO as GPIO\n'), ((6855, 6910), 'RPi.GPIO.PWM', 'GPIO.PWM', (['motor_driver_3_forward_pwm_pin', 'pwm_frequency'], {}), '(motor_driver_3_forward_pwm_pin, pwm_frequency)\n', (6863, 6910), True, 'import RPi.GPIO as GPIO\n'), ((8812, 8869), 'RPi.GPIO.output', 'GPIO.output', (['motor_driver_1_reverse_enable_pin', 'GPIO.HIGH'], {}), '(motor_driver_1_reverse_enable_pin, GPIO.HIGH)\n', (8823, 8869), True, 'import 
RPi.GPIO as GPIO\n'), ((8870, 8927), 'RPi.GPIO.output', 'GPIO.output', (['motor_driver_1_forward_enable_pin', 'GPIO.HIGH'], {}), '(motor_driver_1_forward_enable_pin, GPIO.HIGH)\n', (8881, 8927), True, 'import RPi.GPIO as GPIO\n'), ((8929, 8986), 'RPi.GPIO.output', 'GPIO.output', (['motor_driver_2_reverse_enable_pin', 'GPIO.HIGH'], {}), '(motor_driver_2_reverse_enable_pin, GPIO.HIGH)\n', (8940, 8986), True, 'import RPi.GPIO as GPIO\n'), ((8987, 9044), 'RPi.GPIO.output', 'GPIO.output', (['motor_driver_2_forward_enable_pin', 'GPIO.HIGH'], {}), '(motor_driver_2_forward_enable_pin, GPIO.HIGH)\n', (8998, 9044), True, 'import RPi.GPIO as GPIO\n'), ((9046, 9103), 'RPi.GPIO.output', 'GPIO.output', (['motor_driver_3_reverse_enable_pin', 'GPIO.HIGH'], {}), '(motor_driver_3_reverse_enable_pin, GPIO.HIGH)\n', (9057, 9103), True, 'import RPi.GPIO as GPIO\n'), ((9104, 9161), 'RPi.GPIO.output', 'GPIO.output', (['motor_driver_3_forward_enable_pin', 'GPIO.HIGH'], {}), '(motor_driver_3_forward_enable_pin, GPIO.HIGH)\n', (9115, 9161), True, 'import RPi.GPIO as GPIO\n'), ((317, 336), 'pybullet.connect', 'p.connect', (['p.DIRECT'], {}), '(p.DIRECT)\n', (326, 336), True, 'import pybullet as p\n'), ((341, 390), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.81)'], {'physicsClientId': 'client'}), '(0, 0, -9.81, physicsClientId=client)\n', (353, 390), True, 'import pybullet as p\n'), ((442, 576), 'pybullet.loadURDF', 'p.loadURDF', (['"""./data/Arm_Final_Planet_hook/urdf/Arm_Final_Planet_hook.urdf"""'], {'basePosition': '[0, 0, 0]', 'useFixedBase': '(True)', 'flags': 'flags'}), "('./data/Arm_Final_Planet_hook/urdf/Arm_Final_Planet_hook.urdf',\n basePosition=[0, 0, 0], useFixedBase=True, flags=flags)\n", (452, 576), True, 'import pybullet as p\n'), ((614, 701), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', (['bodyId', '(0)'], {'controlMode': 'p.VELOCITY_CONTROL', 'force': 'maxForce'}), '(bodyId, 0, controlMode=p.VELOCITY_CONTROL, force=\n maxForce)\n', (637, 701), 
True, 'import pybullet as p\n'), ((700, 787), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', (['bodyId', '(1)'], {'controlMode': 'p.VELOCITY_CONTROL', 'force': 'maxForce'}), '(bodyId, 1, controlMode=p.VELOCITY_CONTROL, force=\n maxForce)\n', (723, 787), True, 'import pybullet as p\n'), ((786, 873), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', (['bodyId', '(2)'], {'controlMode': 'p.VELOCITY_CONTROL', 'force': 'maxForce'}), '(bodyId, 2, controlMode=p.VELOCITY_CONTROL, force=\n maxForce)\n', (809, 873), True, 'import pybullet as p\n'), ((962, 987), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (985, 987), False, 'import argparse\n'), ((3814, 3825), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3822, 3825), False, 'import sys\n'), ((8764, 8778), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (8776, 8778), True, 'import RPi.GPIO as GPIO\n'), ((10593, 10695), 'pybullet.calculateInverseDynamics', 'p.calculateInverseDynamics', (['bodyId', '[pos[0], pos[1], pos[2]]', '[vel[0], vel[1], vel[2]]', '[0, 0, 0]'], {}), '(bodyId, [pos[0], pos[1], pos[2]], [vel[0], vel[1\n ], vel[2]], [0, 0, 0])\n', (10619, 10695), True, 'import pybullet as p\n'), ((917, 945), 'pybullet.getDynamicsInfo', 'p.getDynamicsInfo', (['bodyId', '(3)'], {}), '(bodyId, 3)\n', (934, 945), True, 'import pybullet as p\n'), ((10296, 10344), 'pybullet.changeDynamics', 'p.changeDynamics', (['bodyId', '(3)'], {'mass': '(ee_mass + load)'}), '(bodyId, 3, mass=ee_mass + load)\n', (10312, 10344), True, 'import pybullet as p\n'), ((11983, 12008), 'threading.Timer', 'threading.Timer', (['dt', 'main'], {}), '(dt, main)\n', (11998, 12008), False, 'import threading\n'), ((2275, 2292), 'numpy.asarray', 'np.asarray', (['error'], {}), '(error)\n', (2285, 2292), True, 'import numpy as np\n'), ((2354, 2369), 'numpy.asarray', 'np.asarray', (['vel'], {}), '(vel)\n', (2364, 2369), True, 'import numpy as np\n')] |
import random
import numpy as np
import copy
from algs.evocnn.genetic.population import Individual
from algs.evocnn.utils import Utils
from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool
class CrossoverAndMutation(object):
    """Drives one generation of offspring creation: crossover then mutation.

    The intermediate population is persisted after each stage via ``Utils``
    so a run can be inspected or resumed.
    """

    def __init__(self, prob_crossover, prob_mutation, _log, individuals, gen_no, _params):
        """Store the GA operator settings.

        :param prob_crossover: per-pair crossover probability
        :param prob_mutation: per-individual mutation probability
        :param _log: logger shared by the operators
        :param individuals: parent population for this generation
        :param gen_no: generation index (used for naming offspring)
        :param _params: dict with 'crossover_eta', 'mutation_eta',
            'acc_mean_threshold' and 'complexity_threshold'
        """
        self.prob_crossover = prob_crossover
        self.prob_mutation = prob_mutation
        self.individuals = individuals
        self.gen_no = gen_no
        self.crossover_eta = _params['crossover_eta']
        self.mutation_eta = _params['mutation_eta']
        self.acc_mean_threshold = _params['acc_mean_threshold']
        self.complexity_threshold = _params['complexity_threshold']
        self.log = _log
        self.offspring = []

    def process(self):
        """Run crossover followed by mutation; return the offspring list.

        Offspring are renamed 'indi<gen>_<idx>' after mutation, and the
        population snapshot is saved after each stage.
        """
        xover = Crossover(self.individuals, self.prob_crossover, self.crossover_eta,
                          self.acc_mean_threshold, self.complexity_threshold, self.log)
        self.offspring = xover.do_crossover()
        Utils.save_population_after_crossover(self.individuals_to_string(), self.gen_no)
        Mutation(self.offspring, self.prob_mutation, self.mutation_eta, self.log).do_mutation()
        for idx, child in enumerate(self.offspring):
            child.id = 'indi%05d_%05d' % (self.gen_no, idx)
        Utils.save_population_after_mutation(self.individuals_to_string(), self.gen_no)
        return self.offspring

    def individuals_to_string(self):
        """Serialize the current offspring, one per block, separated by dashes."""
        pieces = []
        for child in self.offspring:
            pieces.extend((str(child), '-' * 100))
        return '\n'.join(pieces)
class Crossover(object):
    """SBX-based crossover over pairs of tournament-selected parents.

    Parents are chosen by binary tournament (mean accuracy first, model
    complexity as tie-breaker); aligned conv/pool/fc units of the two
    parents then exchange hyper-parameters via simulated binary crossover
    (SBX) with probability ``prob_`` per unit pair.
    """
    def __init__(self, individuals, prob_, eta, acc_mean_threshold, complexity_threshold, _log):
        # individuals: parent population; prob_: per-unit crossover
        # probability; eta: SBX distribution index (larger -> children closer
        # to parents); the two thresholds steer tournament selection between
        # accuracy and complexity.
        self.individuals = individuals
        self.prob = prob_
        self.eta = eta
        self.acc_mean_threshold = acc_mean_threshold
        self.complexity_threshold = complexity_threshold
        self.log = _log
    def _choose_one_parent(self):
        """Binary tournament: sample two individuals uniformly (possibly the
        same one) and return the winner.

        The one with clearly better mean accuracy (margin above
        ``acc_mean_threshold``) wins outright; otherwise the less accurate
        one can still win if it is lighter by more than
        ``complexity_threshold``.
        """
        count_ = len(self.individuals)
        idx1 = np.random.randint(0, count_)
        idx2 = np.random.randint(0, count_)
        ind1 = self.individuals[idx1]
        ind2 = self.individuals[idx2]
        if ind1.acc_mean > ind2.acc_mean:
            if ind1.acc_mean - ind2.acc_mean > self.acc_mean_threshold:
                winner = ind1
            else:
                if ind2.complexity < (ind1.complexity - self.complexity_threshold):
                    winner = ind2
                else:
                    winner = ind1
        else:
            if ind2.acc_mean - ind1.acc_mean > self.acc_mean_threshold:
                winner = ind2
            else:
                if ind1.complexity < (ind2.complexity - self.complexity_threshold):
                    winner = ind1
                else:
                    winner = ind2
        return winner
    def _choose_two_parents(self):
        """Binary tournament selection of two parents.

        NOTE: the two draws are independent, so the same individual may be
        returned twice.
        """
        ind1 = self._choose_one_parent()
        ind2 = self._choose_one_parent()
        return ind1, ind2
    def do_crossover(self):
        """Produce the offspring population.

        Repeats ``len(individuals) // 2`` times: select two parents,
        deep-copy them, bucket their units by type (1=conv, 2=pool,
        else fc), and — with probability ``self.prob`` per aligned unit
        pair — recombine the units' hyper-parameters with SBX. Offspring
        have their accuracy reset and complexity recomputed.

        :return: list of 2 * (len(individuals) // 2) offspring
        """
        new_offspring_list = []
        for _ in range(len(self.individuals) // 2):
            ind1, ind2 = self._choose_two_parents()
            self.log.info('Do crossover on indi:%s and indi:%s' % (ind1.id, ind2.id))
            p1, p2 = copy.deepcopy(ind1), copy.deepcopy(ind2)
            # For each parent and unit type keep two parallel lists: the
            # unit's position in the original architecture, and the unit
            # itself. Positions are needed to write crossed units back.
            p1_conv_index_list = []
            p1_conv_layer_list = []
            p1_pool_index_list = []
            p1_pool_layer_list = []
            p1_full_index_list = []
            p1_full_layer_list = []
            p2_conv_index_list = []
            p2_conv_layer_list = []
            p2_pool_index_list = []
            p2_pool_layer_list = []
            p2_full_index_list = []
            p2_full_layer_list = []
            for i in range(len(p1.units)):
                unit = p1.units[i]
                if unit.type == 1:
                    p1_conv_index_list.append(i)
                    p1_conv_layer_list.append(unit)
                elif unit.type == 2:
                    p1_pool_index_list.append(i)
                    p1_pool_layer_list.append(unit)
                else:
                    p1_full_index_list.append(i)
                    p1_full_layer_list.append(unit)
            for i in range(len(p2.units)):
                unit = p2.units[i]
                if unit.type == 1:
                    p2_conv_index_list.append(i)
                    p2_conv_layer_list.append(unit)
                elif unit.type == 2:
                    p2_pool_index_list.append(i)
                    p2_pool_layer_list.append(unit)
                else:
                    p2_full_index_list.append(i)
                    p2_full_layer_list.append(unit)
            # Crossover on conv layers: only the first min(count1, count2)
            # pairs are eligible; surplus units of the longer parent pass
            # through unchanged.
            l = min(len(p1_conv_layer_list), len(p2_conv_layer_list))
            for i in range(l):
                unit_p1 = p1_conv_layer_list[i]
                unit_p2 = p2_conv_layer_list[i]
                _p = np.random.random()
                if _p < self.prob:
                    # filter size (square, so one SBX draw per pair)
                    filter_size_range = StatusUpdateTool.get_conv_filter_size_limit()
                    w1 = unit_p1.filter_size[0]
                    w2 = unit_p2.filter_size[0]
                    n_w1, n_w2 = self.sbx(w1, w2, filter_size_range[0], filter_size_range[1], self.eta)
                    unit_p1.filter_size = int(n_w1), int(n_w1)
                    unit_p2.filter_size = int(n_w2), int(n_w2)
                    # out channel size
                    out_channel_size_range = StatusUpdateTool.get_channel_limit()
                    s1 = unit_p1.out_channel
                    s2 = unit_p2.out_channel
                    n_s1, n_s2 = self.sbx(s1, s2, out_channel_size_range[0], out_channel_size_range[1], self.eta)
                    unit_p1.out_channel = int(n_s1)
                    unit_p2.out_channel = int(n_s2)
                    # weight-init mean
                    mean_range = StatusUpdateTool.get_mean_limit()
                    m1 = unit_p1.mean
                    m2 = unit_p2.mean
                    n_m1, n_m2 = self.sbx(m1, m2, mean_range[0], mean_range[1], self.eta)
                    unit_p1.mean = n_m1
                    unit_p2.mean = n_m2
                    # weight-init std
                    std_range = StatusUpdateTool.get_std_limit()
                    std1 = unit_p1.std
                    std2 = unit_p2.std
                    n_std1, n_std2 = self.sbx(std1, std2, std_range[0], std_range[1], self.eta)
                    unit_p1.std = n_std1
                    unit_p2.std = n_std2
                p1_conv_layer_list[i] = unit_p1
                p2_conv_layer_list[i] = unit_p2
            # Crossover on pool layers.
            l = min(len(p1_pool_layer_list), len(p2_pool_layer_list))
            for i in range(l):
                unit_p1 = p1_pool_layer_list[i]
                unit_p2 = p2_pool_layer_list[i]
                _p = np.random.random()
                if _p < self.prob:
                    # kernel size is crossed in log2 space, then mapped back
                    # via 2**x so results stay powers of two.
                    pool_kernel_size_range = StatusUpdateTool.get_pool_kernel_size_list()
                    k1 = np.log2(unit_p1.kernel_size[0])
                    k2 = np.log2(unit_p2.kernel_size[0])
                    n_k1, n_k2 = self.sbx(k1, k2, pool_kernel_size_range[0], pool_kernel_size_range[-1], self.eta)
                    n_k1 = int(np.power(2, n_k1))
                    n_k2 = int(np.power(2, n_k2))
                    unit_p1.kernel_size = n_k1, n_k1
                    unit_p2.kernel_size = n_k2, n_k2
                    # pool type (max vs avg encoded as a real in [0, 1])
                    t1 = unit_p1.max_or_avg
                    t2 = unit_p2.max_or_avg
                    n_t1, n_t2 = self.sbx(t1, t2, 0, 1, self.eta)
                    unit_p1.max_or_avg = n_t1
                    unit_p2.max_or_avg = n_t2
                p1_pool_layer_list[i] = unit_p1
                p2_pool_layer_list[i] = unit_p2
            # Crossover on fc layers; the last fc (output) layer is excluded
            # from this loop and treated separately below.
            l = min(len(p1_full_layer_list), len(p2_full_layer_list))
            for i in range(l - 1):
                unit_p1 = p1_full_layer_list[i]
                unit_p2 = p2_full_layer_list[i]
                _p = np.random.random()
                if _p < self.prob:
                    # output hidden neurons number
                    hidden_neurons_range = StatusUpdateTool.get_hidden_neurons_limit()
                    n1 = unit_p1.output_neurons_number
                    n2 = unit_p2.output_neurons_number
                    n_n1, n_n2 = self.sbx(n1, n2, hidden_neurons_range[0], hidden_neurons_range[1], self.eta)
                    unit_p1.output_neurons_number = int(n_n1)
                    unit_p2.output_neurons_number = int(n_n2)
                    # weight-init mean
                    mean_range = StatusUpdateTool.get_mean_limit()
                    m1 = unit_p1.mean
                    m2 = unit_p2.mean
                    n_m1, n_m2 = self.sbx(m1, m2, mean_range[0], mean_range[1], self.eta)
                    unit_p1.mean = n_m1
                    unit_p2.mean = n_m2
                    # weight-init std
                    std_range = StatusUpdateTool.get_std_limit()
                    std1 = unit_p1.std
                    std2 = unit_p2.std
                    n_std1, n_std2 = self.sbx(std1, std2, std_range[0], std_range[1], self.eta)
                    unit_p1.std = n_std1
                    unit_p2.std = n_std2
                p1_full_layer_list[i] = unit_p1
                p2_full_layer_list[i] = unit_p2
            # The last fc layer's neuron count is fixed by the number of
            # classes, so only mean and std are crossed.
            unit_p1 = p1_full_layer_list[-1]
            unit_p2 = p2_full_layer_list[-1]
            _p = np.random.random()
            if _p < self.prob:
                # weight-init mean
                mean_range = StatusUpdateTool.get_mean_limit()
                m1 = unit_p1.mean
                m2 = unit_p2.mean
                n_m1, n_m2 = self.sbx(m1, m2, mean_range[0], mean_range[1], self.eta)
                unit_p1.mean = n_m1
                unit_p2.mean = n_m2
                # weight-init std
                # NOTE(review): uses std_range[-1] here vs std_range[1]
                # elsewhere — identical for a 2-element limit; confirm the
                # limit really has only two entries.
                std_range = StatusUpdateTool.get_std_limit()
                std1 = unit_p1.std
                std2 = unit_p2.std
                n_std1, n_std2 = self.sbx(std1, std2, std_range[0], std_range[-1], self.eta)
                unit_p1.std = n_std1
                unit_p2.std = n_std2
            p1_full_layer_list[-1] = unit_p1
            p2_full_layer_list[-1] = unit_p2
            # Write the crossed units back into each parent's architecture at
            # their original positions.
            unit_list1 = p1.units
            for i in range(len(p1_conv_index_list)):
                unit_list1[p1_conv_index_list[i]] = p1_conv_layer_list[i]
            for i in range(len(p1_pool_index_list)):
                unit_list1[p1_pool_index_list[i]] = p1_pool_layer_list[i]
            for i in range(len(p1_full_index_list)):
                unit_list1[p1_full_index_list[i]] = p1_full_layer_list[i]
            unit_list2 = p2.units
            for i in range(len(p2_conv_index_list)):
                unit_list2[p2_conv_index_list[i]] = p2_conv_layer_list[i]
            for i in range(len(p2_pool_index_list)):
                unit_list2[p2_pool_index_list[i]] = p2_pool_layer_list[i]
            for i in range(len(p2_full_index_list)):
                unit_list2[p2_full_index_list[i]] = p2_full_layer_list[i]
            # Re-derive in_channel values so consecutive units stay consistent.
            unit_list1 = Individual.update_all_channels(unit_list1, 0, self.log)
            unit_list2 = Individual.update_all_channels(unit_list2, 0, self.log)
            p1.units = unit_list1
            p2.units = unit_list2
            offspring1, offspring2 = p1, p2
            offspring1.reset_acc()
            offspring2.reset_acc()
            offspring1.complexity = Individual.calculate_complexity(unit_list1)
            offspring2.complexity = Individual.calculate_complexity(unit_list2)
            new_offspring_list.append(offspring1)
            new_offspring_list.append(offspring2)
        self.log.info('CROSSOVER-%d offspring are generated.' % (len(new_offspring_list)))
        return new_offspring_list
    def sbx(self, p1, p2, xl, xu, eta):
        '''Simulated binary crossover (SBX) for a single real variable.

        :param p1: parent value 1
        :param p2: parent value 2
        :param xl: lower bound of the variable
        :param xu: upper bound of the variable
        :param eta: SBX distribution index (larger -> children closer to parents)
        :return: two child values, clamped to [xl, xu]
        '''
        # par1 is the greater parent
        if p1 > p2:
            par1 = p1
            par2 = p2
        else:
            par1 = p2
            par2 = p1
        yl = xl
        yu = xu
        rand = np.random.random()
        if rand <= 0.5:
            betaq = (2 * rand) ** (1 / (eta + 1))
        else:
            betaq = (1 / (2 - 2 * rand)) ** (1 / (eta + 1))
        child1 = 0.5 * ((par1 + par2) - betaq * (par1 - par2))
        child2 = 0.5 * ((par1 + par2) + betaq * (par1 - par2))
        if child1 < yl:
            child1 = yl
        if child1 > yu:
            child1 = yu
        if child2 < yl:
            child2 = yl
        if child2 > yu:
            child2 = yu
        return child1, child2
class Mutation(object):
    """Applies random structural/parametric mutation to a population.

    Each individual mutates with probability ``prob_``. Within a mutating
    individual, every unit except the last fc (output) layer is, with
    probability 0.5, subjected to one of three operations picked by roulette
    wheel: ADD a new layer before it, ALTER its hyper-parameters via
    polynomial mutation, or REMOVE it. Individuals are modified in place.
    """

    def __init__(self, individuals, prob_, eta, _log):
        """Store the mutation settings.

        :param individuals: offspring produced by crossover (mutated in place)
        :param prob_: per-individual mutation probability
        :param eta: polynomial-mutation distribution index
        :param _log: logger for progress/statistics messages
        """
        self.individuals = individuals
        self.prob = prob_
        self.eta = eta
        self.log = _log

    def do_mutation(self):
        """Mutate ``self.individuals`` in place and log summary statistics."""
        _stat_param = {'offspring_new': 0, 'offspring_from_parent': 0, 'ADD': 0, 'REMOVE': 0, 'ALTER': 0}
        mutation_list = StatusUpdateTool.get_mutation_probs_for_each()
        for indi in self.individuals:
            p_ = random.random()
            if p_ < self.prob:
                units_list = []
                is_new = False
                # The last unit (output fc layer) is never mutated here; it
                # is re-appended after the loop.
                for i in range(len(indi.units) - 1):
                    cur_unit = indi.units[i]
                    p_unit = np.random.random()
                    if p_unit < 0.5:
                        is_new = True
                        max_length = 6  # cap on total conv+pool units when adding
                        mutation_type = self.select_mutation_type(mutation_list)
                        if mutation_type == 0:
                            # ADD: insert a new layer before the current one
                            # unless the conv+pool budget is exhausted, in
                            # which case fall back to ALTER.
                            current_conv_and_pool_length = indi.get_conv_number() + indi.get_pool_number()
                            if current_conv_and_pool_length < max_length:
                                _stat_param['ADD'] += 1
                                units_list.append(self.generate_a_new_layer(indi, cur_unit.type, len(indi.units)))
                                units_list.append(cur_unit)
                            else:
                                _stat_param['ALTER'] += 1
                                updated_unit = self.alter_a_unit(indi, cur_unit, self.eta)
                                units_list.append(updated_unit)
                        elif mutation_type == 1:
                            # ALTER: polynomial mutation of the unit's hyper-parameters.
                            _stat_param['ALTER'] += 1
                            updated_unit = self.alter_a_unit(indi, cur_unit, self.eta)
                            units_list.append(updated_unit)
                        elif mutation_type == 2:
                            # REMOVE: simply do not carry the unit over.
                            _stat_param['REMOVE'] += 1
                        else:
                            raise TypeError('Error mutation type :%d, validate range:0-2' % (mutation_type))
                    else:
                        units_list.append(cur_unit)
                # Guard against every unit having been removed: seed with a
                # fresh conv + pool pair.
                if len(units_list) == 0:
                    units_list.append(Individual.init_a_conv(indi))
                    units_list.append(Individual.init_a_pool(indi))
                units_list.append(indi.units[-1])
                # A network must start with a conv unit.
                if units_list[0].type != 1:
                    units_list.insert(0, Individual.init_a_conv(indi))
                if is_new:
                    _stat_param['offspring_new'] += 1
                    units_list = Individual.update_all_channels(units_list, 1, self.log)
                    indi.units = units_list
                    indi.complexity = Individual.calculate_complexity(units_list)
                else:
                    _stat_param['offspring_from_parent'] += 1
            else:
                _stat_param['offspring_from_parent'] += 1
        self.log.info('MUTATION-mutated individuals:%d[ADD:%d,REMOVE:%d,ALTER:%d, no_changes:%d]' % (
            _stat_param['offspring_new'], _stat_param['ADD'], _stat_param['REMOVE'], _stat_param['ALTER'],
            _stat_param['offspring_from_parent']))

    def generate_a_new_layer(self, indi, current_unit_type, unit_length):
        """Create a new unit to insert before a unit of ``current_unit_type``.

        Before an fc unit a new fc unit is produced, except when the network
        has a single unit, where a conv or pool unit (50/50) is created
        instead. Before conv/pool units: conv or pool, 50/50.
        """
        if current_unit_type == 3:
            # A one-unit network gets a conv or pool layer instead of
            # stacking another fc in front of the output.
            if unit_length == 1:
                if random.random() < 0.5:
                    return Individual.init_a_conv(indi)
                else:
                    return Individual.init_a_pool(indi)
            else:
                return Individual.init_a_fc(indi)
        else:
            r = random.random()
            if r < 0.5:
                return Individual.init_a_conv(indi)
            else:
                return Individual.init_a_pool(indi)

    def alter_a_unit(self, indi, unit, eta):
        """Dispatch polynomial mutation by unit type (1=conv, 2=pool, else fc)."""
        if unit.type == 1:
            # mutate a conv layer
            return self.alter_conv_unit(indi, unit, eta)
        elif unit.type == 2:
            # mutate a pool layer
            return self.alter_pool_unit(indi, unit, eta)
        else:
            # mutate a full layer
            return self.alter_full_layer(indi, unit, eta)

    def alter_conv_unit(self, indi, unit, eta):
        """Polynomial-mutate a conv unit's filter size, channel count, mean and std."""
        fms = unit.filter_size[0]
        fmn = unit.out_channel
        mean = unit.mean
        std = unit.std
        new_fms = int(self.pm(indi.min_conv_filter_size, indi.max_conv_filter_size, fms, eta))
        new_fmn = int(self.pm(indi.min_channel, indi.max_channel, fmn, eta))
        new_mean = self.pm(indi.min_mean, indi.max_mean, mean, eta)
        new_std = self.pm(indi.min_std, indi.max_std, std, eta)
        conv_unit = Individual.init_a_conv(indi, _filter_height=new_fms, _filter_width=new_fms, _out_channel=new_fmn,
                                       _mean=new_mean, _std=new_std)
        return conv_unit

    def alter_pool_unit(self, indi, unit, eta):
        """Polynomial-mutate a pool unit's kernel size (in log2 space) and pool type."""
        # Kernel size is mutated as a log2 exponent and mapped back with
        # 2**x so the result remains a power of two.
        ksize = np.log2(unit.kernel_size[0])
        pool_type = unit.max_or_avg
        new_ksize = self.pm(indi.pool_kernel_size_list[0], indi.pool_kernel_size_list[-1], ksize, eta)
        new_ksize = int(np.power(2, new_ksize))
        new_pool_type = self.pm(0, 1, pool_type, eta)
        pool_unit = Individual.init_a_pool(indi, _kernel_width=new_ksize, _kernel_height=new_ksize,
                                       _max_or_avg=new_pool_type)
        return pool_unit

    def alter_full_layer(self, indi, unit, eta):
        """Polynomial-mutate an fc unit's neuron count, mean and std."""
        n_hidden = unit.output_neurons_number
        mean = unit.mean
        std = unit.std
        new_n_hidden = int(self.pm(indi.min_hidden_neurons, indi.max_hidden_neurons, n_hidden, eta))
        new_mean = self.pm(indi.min_mean, indi.max_mean, mean, eta)
        new_std = self.pm(indi.min_std, indi.max_std, std, eta)
        fc_unit = Individual.init_a_fc(indi, _output_neurons_number=new_n_hidden, _mean=new_mean, _std=new_std)
        return fc_unit

    def select_mutation_type(self, _a):
        """Roulette-wheel draw over the (unnormalized) weights in ``_a``.

        :param _a: sequence of non-negative weights, one per mutation type
        :return: index of the selected entry
        """
        a = np.asarray(_a)
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24,
        # so .astype(np.float) raises AttributeError on modern NumPy; a plain
        # float() conversion yields the same scalar.
        sum_a = float(np.sum(a))
        rand = np.random.random() * sum_a
        acc = 0  # renamed from `sum`, which shadowed the builtin
        mutation_type = -1
        for i in range(len(a)):
            acc += a[i]
            if acc > rand:
                mutation_type = i
                break
        assert mutation_type != -1
        return mutation_type

    def pm(self, xl, xu, x, eta):
        """Polynomial mutation (Deb) of scalar ``x`` within bounds [xl, xu].

        ``eta`` is the distribution index: larger values keep the mutated
        value closer to the original. The result is clamped to [xl, xu].
        """
        delta_1 = (x - xl) / (xu - xl)
        delta_2 = (xu - x) / (xu - xl)
        rand = np.random.random()
        mut_pow = 1.0 / (eta + 1.)
        if rand < 0.5:
            xy = 1.0 - delta_1
            val = 2.0 * rand + (1.0 - 2.0 * rand) * xy ** (eta + 1)
            delta_q = val ** mut_pow - 1.0
        else:
            xy = 1.0 - delta_2
            val = 2.0 * (1.0 - rand) + 2.0 * (rand - 0.5) * xy ** (eta + 1)
            delta_q = 1.0 - val ** mut_pow
        x = x + delta_q * (xu - xl)
        x = min(max(x, xl), xu)
        return x
| [
"numpy.sum",
"numpy.random.randint",
"algs.evocnn.genetic.population.Individual.init_a_conv",
"algs.evocnn.genetic.population.Individual.calculate_complexity",
"numpy.power",
"algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_hidden_neurons_limit",
"algs.evocnn.genetic.statusupdatetool.StatusUpd... | [((2160, 2188), 'numpy.random.randint', 'np.random.randint', (['(0)', 'count_'], {}), '(0, count_)\n', (2177, 2188), True, 'import numpy as np\n'), ((2204, 2232), 'numpy.random.randint', 'np.random.randint', (['(0)', 'count_'], {}), '(0, count_)\n', (2221, 2232), True, 'import numpy as np\n'), ((12940, 12958), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (12956, 12958), True, 'import numpy as np\n'), ((13807, 13853), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_mutation_probs_for_each', 'StatusUpdateTool.get_mutation_probs_for_each', ([], {}), '()\n', (13851, 13853), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), ((18448, 18579), 'algs.evocnn.genetic.population.Individual.init_a_conv', 'Individual.init_a_conv', (['indi'], {'_filter_height': 'new_fms', '_filter_width': 'new_fms', '_out_channel': 'new_fmn', '_mean': 'new_mean', '_std': 'new_std'}), '(indi, _filter_height=new_fms, _filter_width=new_fms,\n _out_channel=new_fmn, _mean=new_mean, _std=new_std)\n', (18470, 18579), False, 'from algs.evocnn.genetic.population import Individual\n'), ((18742, 18770), 'numpy.log2', 'np.log2', (['unit.kernel_size[0]'], {}), '(unit.kernel_size[0])\n', (18749, 18770), True, 'import numpy as np\n'), ((19033, 19144), 'algs.evocnn.genetic.population.Individual.init_a_pool', 'Individual.init_a_pool', (['indi'], {'_kernel_width': 'new_ksize', '_kernel_height': 'new_ksize', '_max_or_avg': 'new_pool_type'}), '(indi, _kernel_width=new_ksize, _kernel_height=\n new_ksize, _max_or_avg=new_pool_type)\n', (19055, 19144), False, 'from algs.evocnn.genetic.population import Individual\n'), ((19647, 19745), 'algs.evocnn.genetic.population.Individual.init_a_fc', 'Individual.init_a_fc', (['indi'], {'_output_neurons_number': 'new_n_hidden', '_mean': 'new_mean', '_std': 'new_std'}), '(indi, _output_neurons_number=new_n_hidden, _mean=\n new_mean, _std=new_std)\n', (19667, 19745), False, 
'from algs.evocnn.genetic.population import Individual\n'), ((19817, 19831), 'numpy.asarray', 'np.asarray', (['_a'], {}), '(_a)\n', (19827, 19831), True, 'import numpy as np\n'), ((20291, 20309), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (20307, 20309), True, 'import numpy as np\n'), ((9996, 10014), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10012, 10014), True, 'import numpy as np\n'), ((11766, 11821), 'algs.evocnn.genetic.population.Individual.update_all_channels', 'Individual.update_all_channels', (['unit_list1', '(0)', 'self.log'], {}), '(unit_list1, 0, self.log)\n', (11796, 11821), False, 'from algs.evocnn.genetic.population import Individual\n'), ((11847, 11902), 'algs.evocnn.genetic.population.Individual.update_all_channels', 'Individual.update_all_channels', (['unit_list2', '(0)', 'self.log'], {}), '(unit_list2, 0, self.log)\n', (11877, 11902), False, 'from algs.evocnn.genetic.population import Individual\n'), ((12122, 12165), 'algs.evocnn.genetic.population.Individual.calculate_complexity', 'Individual.calculate_complexity', (['unit_list1'], {}), '(unit_list1)\n', (12153, 12165), False, 'from algs.evocnn.genetic.population import Individual\n'), ((12202, 12245), 'algs.evocnn.genetic.population.Individual.calculate_complexity', 'Individual.calculate_complexity', (['unit_list2'], {}), '(unit_list2)\n', (12233, 12245), False, 'from algs.evocnn.genetic.population import Individual\n'), ((13909, 13924), 'random.random', 'random.random', ([], {}), '()\n', (13922, 13924), False, 'import random\n'), ((17352, 17367), 'random.random', 'random.random', ([], {}), '()\n', (17365, 17367), False, 'import random\n'), ((18935, 18957), 'numpy.power', 'np.power', (['(2)', 'new_ksize'], {}), '(2, new_ksize)\n', (18943, 18957), True, 'import numpy as np\n'), ((19890, 19908), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (19906, 19908), True, 'import numpy as np\n'), ((3487, 3506), 'copy.deepcopy', 'copy.deepcopy', (['ind1'], 
{}), '(ind1)\n', (3500, 3506), False, 'import copy\n'), ((3508, 3527), 'copy.deepcopy', 'copy.deepcopy', (['ind2'], {}), '(ind2)\n', (3521, 3527), False, 'import copy\n'), ((5282, 5300), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5298, 5300), True, 'import numpy as np\n'), ((7258, 7276), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7274, 7276), True, 'import numpy as np\n'), ((8511, 8529), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8527, 8529), True, 'import numpy as np\n'), ((10098, 10131), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_mean_limit', 'StatusUpdateTool.get_mean_limit', ([], {}), '()\n', (10129, 10131), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), ((10408, 10440), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_std_limit', 'StatusUpdateTool.get_std_limit', ([], {}), '()\n', (10438, 10440), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), ((17295, 17321), 'algs.evocnn.genetic.population.Individual.init_a_fc', 'Individual.init_a_fc', (['indi'], {}), '(indi)\n', (17315, 17321), False, 'from algs.evocnn.genetic.population import Individual\n'), ((17415, 17443), 'algs.evocnn.genetic.population.Individual.init_a_conv', 'Individual.init_a_conv', (['indi'], {}), '(indi)\n', (17437, 17443), False, 'from algs.evocnn.genetic.population import Individual\n'), ((17485, 17513), 'algs.evocnn.genetic.population.Individual.init_a_pool', 'Individual.init_a_pool', (['indi'], {}), '(indi)\n', (17507, 17513), False, 'from algs.evocnn.genetic.population import Individual\n'), ((19848, 19857), 'numpy.sum', 'np.sum', (['a'], {}), '(a)\n', (19854, 19857), True, 'import numpy as np\n'), ((5410, 5455), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_conv_filter_size_limit', 'StatusUpdateTool.get_conv_filter_size_limit', ([], {}), '()\n', (5453, 5455), False, 'from algs.evocnn.genetic.statusupdatetool import 
StatusUpdateTool\n'), ((5866, 5902), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_channel_limit', 'StatusUpdateTool.get_channel_limit', ([], {}), '()\n', (5900, 5902), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), ((6271, 6304), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_mean_limit', 'StatusUpdateTool.get_mean_limit', ([], {}), '()\n', (6302, 6304), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), ((6609, 6641), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_std_limit', 'StatusUpdateTool.get_std_limit', ([], {}), '()\n', (6639, 6641), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), ((7391, 7435), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_pool_kernel_size_list', 'StatusUpdateTool.get_pool_kernel_size_list', ([], {}), '()\n', (7433, 7435), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), ((7461, 7492), 'numpy.log2', 'np.log2', (['unit_p1.kernel_size[0]'], {}), '(unit_p1.kernel_size[0])\n', (7468, 7492), True, 'import numpy as np\n'), ((7518, 7549), 'numpy.log2', 'np.log2', (['unit_p2.kernel_size[0]'], {}), '(unit_p2.kernel_size[0])\n', (7525, 7549), True, 'import numpy as np\n'), ((8659, 8702), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_hidden_neurons_limit', 'StatusUpdateTool.get_hidden_neurons_limit', ([], {}), '()\n', (8700, 8702), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), ((9107, 9140), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_mean_limit', 'StatusUpdateTool.get_mean_limit', ([], {}), '()\n', (9138, 9140), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), ((9445, 9477), 'algs.evocnn.genetic.statusupdatetool.StatusUpdateTool.get_std_limit', 'StatusUpdateTool.get_std_limit', ([], {}), '()\n', (9475, 9477), False, 'from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n'), 
((14142, 14160), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (14158, 14160), True, 'import numpy as np\n'), ((16273, 16328), 'algs.evocnn.genetic.population.Individual.update_all_channels', 'Individual.update_all_channels', (['units_list', '(1)', 'self.log'], {}), '(units_list, 1, self.log)\n', (16303, 16328), False, 'from algs.evocnn.genetic.population import Individual\n'), ((16411, 16454), 'algs.evocnn.genetic.population.Individual.calculate_complexity', 'Individual.calculate_complexity', (['units_list'], {}), '(units_list)\n', (16442, 16454), False, 'from algs.evocnn.genetic.population import Individual\n'), ((17097, 17112), 'random.random', 'random.random', ([], {}), '()\n', (17110, 17112), False, 'import random\n'), ((17147, 17175), 'algs.evocnn.genetic.population.Individual.init_a_conv', 'Individual.init_a_conv', (['indi'], {}), '(indi)\n', (17169, 17175), False, 'from algs.evocnn.genetic.population import Individual\n'), ((17225, 17253), 'algs.evocnn.genetic.population.Individual.init_a_pool', 'Individual.init_a_pool', (['indi'], {}), '(indi)\n', (17247, 17253), False, 'from algs.evocnn.genetic.population import Individual\n'), ((7696, 7713), 'numpy.power', 'np.power', (['(2)', 'n_k1'], {}), '(2, n_k1)\n', (7704, 7713), True, 'import numpy as np\n'), ((7746, 7763), 'numpy.power', 'np.power', (['(2)', 'n_k2'], {}), '(2, n_k2)\n', (7754, 7763), True, 'import numpy as np\n'), ((15836, 15864), 'algs.evocnn.genetic.population.Individual.init_a_conv', 'Individual.init_a_conv', (['indi'], {}), '(indi)\n', (15858, 15864), False, 'from algs.evocnn.genetic.population import Individual\n'), ((15904, 15932), 'algs.evocnn.genetic.population.Individual.init_a_pool', 'Individual.init_a_pool', (['indi'], {}), '(indi)\n', (15926, 15932), False, 'from algs.evocnn.genetic.population import Individual\n'), ((16128, 16156), 'algs.evocnn.genetic.population.Individual.init_a_conv', 'Individual.init_a_conv', (['indi'], {}), '(indi)\n', (16150, 16156), False, 'from 
algs.evocnn.genetic.population import Individual\n')] |
import numpy as np

# Broadcasting demonstration: multiplying a (3, 3) matrix elementwise by a
# (3, 1) column vector broadcasts the column across all three columns, so
# the product keeps the (3, 3) shape.
a = np.random.randn(3, 3)
b = np.random.randn(3, 1)
c = np.multiply(a, b)  # identical to the `a * b` operator form
print(c.shape)
"numpy.random.randn"
] | [((26, 47), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (41, 47), True, 'import numpy as np\n'), ((53, 74), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (68, 74), True, 'import numpy as np\n')] |
import numpy as np

from nurbsvisualizer.nurbsgeometry import NurbsCurve
from nurbsvisualizer.visualizer import CurveVisualizer

#################
# 2D NURBS CIRCLE
#################

# Full-circle knot vector: interior knots at multiples of pi/2 are doubled
# so the circle is stitched from four rational quadratic arcs.
knot_vector = [0, 0, 0,
               np.pi / 2, np.pi / 2,
               np.pi, np.pi,
               3 * np.pi / 2, 3 * np.pi / 2,
               2 * np.pi, 2 * np.pi, 2 * np.pi]

# Nine control points (x-row then y-row) around the unit square's edge
# midpoints and corners, closing back at the starting point (-1, 0).
control_points = [[-1, -1, 0, 1, 1, 1, 0, -1, -1],
                  [0, 1, 1, 1, 0, -1, -1, -1, 0]]

# Corner control points carry weight 1/sqrt(2) so each quadratic segment is
# an exact circular arc.
corner_weight = 1 / np.sqrt(2)
weights = [1, corner_weight, 1, corner_weight, 1,
           corner_weight, 1, corner_weight, 1]

# Build the rational B-spline curve and display it with its basis functions.
nurbs_circle = NurbsCurve(control_points, weights, knot_vector)
CurveVisualizer(nurbs_circle)
| [
"numpy.sqrt",
"nurbsvisualizer.nurbsgeometry.NurbsCurve",
"nurbsvisualizer.visualizer.CurveVisualizer"
] | [((596, 644), 'nurbsvisualizer.nurbsgeometry.NurbsCurve', 'NurbsCurve', (['control_points', 'weights', 'knot_vector'], {}), '(control_points, weights, knot_vector)\n', (606, 644), False, 'from nurbsvisualizer.nurbsgeometry import NurbsCurve\n'), ((678, 707), 'nurbsvisualizer.visualizer.CurveVisualizer', 'CurveVisualizer', (['nurbs_circle'], {}), '(nurbs_circle)\n', (693, 707), False, 'from nurbsvisualizer.visualizer import CurveVisualizer\n'), ((484, 494), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (491, 494), True, 'import numpy as np\n'), ((501, 511), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (508, 511), True, 'import numpy as np\n'), ((518, 528), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (525, 528), True, 'import numpy as np\n'), ((535, 545), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (542, 545), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
@author: <NAME>
@contact: <EMAIL>
@description: centrality-related method tests.
'''
# DEPENDENCIES =================================================================
import numpy as np
import pandas as pd
from gpseqc import centrality as c
# PARAMS =======================================================================
# Mock bin-level GPSeq statistics used by the tests below. Each DataFrame
# models a single genomic bin with one row per sequencing condition.
# Columns (assigned after construction): chrom, start, end, sum, mean, std,
# count, cond_nreads, cond.
df1 = pd.DataFrame([
    ['chr1', 0, 249221236, 75860, 4.3781381658683, 46.5264380581169, 17327, 876501, 1],
    ['chr1', 0, 249221236, 102923, 5.97694541231127, 165.119679583602, 17220, 1045062, 2],
    ['chr1', 0, 249221236, 580975, 29.1551663572038, 414.534732953575, 19927, 6625898, 3],
    ['chr1', 0, 249221236, 502659, 25.1820550072642, 434.349758213242, 19961, 5706581, 4],
    ['chr1', 0, 249221236, 564971, 28.6830989490785, 395.57305703688, 19697, 6380685, 5],
    ['chr1', 0, 249221236, 639962, 32.2903274635451, 317.043533799097, 19819, 7348941, 6]
])
df2 = pd.DataFrame([
    ['chr10', 0, 135503768, 37874, 3.77306236302052, 2.68100540922241, 10038, 876501, 1],
    ['chr10', 0, 135503768, 45549, 4.52818371607516, 3.48548407771986, 10059, 1045062, 2],
    ['chr10', 0, 135503768, 293384, 25.1723723723724, 18.2363780110097, 11655, 6625898, 3],
    ['chr10', 0, 135503768, 246839, 21.0829347454732, 14.8824240340175, 11708, 5706581, 4],
    ['chr10', 0, 135503768, 285805, 24.7022471910112, 20.041079768043, 11570, 6380685, 5],
    ['chr10', 0, 135503768, 332791, 28.681461690942, 22.5135593268717, 11603, 7348941, 6]
])
# df3 carries a NaN std in its first row to exercise missing-value handling.
df3 = pd.DataFrame([
    ['chr11', 0, 35503768, 37874, 3.77306236302052, np.nan, 10038, 876501, 1],
    ['chr11', 0, 35503768, 45549, 4.52818371607516, 3.48548407771986, 10059, 1045062, 2],
    ['chr11', 0, 35503768, 293384, 25.1723723723724, 18.2363780110097, 11655, 6625898, 3],
    ['chr11', 0, 35503768, 246839, 21.0829347454732, 14.8824240340175, 11708, 5706581, 4],
    ['chr11', 0, 35503768, 285805, 24.7022471910112, 20.041079768043, 11570, 6380685, 5],
    ['chr11', 0, 35503768, 332791, 28.681461690942, 22.5135593268717, 11603, 7348941, 6]
])
# All rows of one DataFrame share a single index value: the bin's identifier.
df1.index = [0 for i in range(df1.shape[0])]
df2.index = [1 for i in range(df2.shape[0])]
df3.index = [2 for i in range(df3.shape[0])]
df1.columns = ['chrom', 'start', 'end', 'sum', 'mean', 'std', 'count', 'cond_nreads', 'cond']
df2.columns = ['chrom', 'start', 'end', 'sum', 'mean', 'std', 'count', 'cond_nreads', 'cond']
df3.columns = ['chrom', 'start', 'end', 'sum', 'mean', 'std', 'count', 'cond_nreads', 'cond']
# FUNCTIONS ====================================================================
def test_calcP():
    """calc_p should return sum / (cond_nreads * count) for each condition row."""
    expected = [
        75860 / (876501 * 17327),
        102923 / (1045062 * 17220),
        580975 / (6625898 * 19927),
    ]
    for row_idx, value in enumerate(expected):
        assert c.calc_p(df1, row_idx) == value
def test_calcPC():
    """calc_pc should return the cumulative probability up to each row."""
    increments = [
        75860 / (876501 * 17327),
        102923 / (1045062 * 17220),
        580975 / (6625898 * 19927),
    ]
    cumulative = 0
    for row_idx, increment in enumerate(increments):
        cumulative += increment
        assert c.calc_pc(df1, row_idx) == cumulative
def test_calcPR():
    """calc_pr should return the ratio of cumulated sums to cumulated weighted reads."""
    sums = [75860, 102923, 580975]
    reads = [876501 * 17327, 1045062 * 17220, 6625898 * 19927]
    for row_idx in range(3):
        expected = sum(sums[: row_idx + 1]) / sum(reads[: row_idx + 1])
        assert c.calc_pr(df1, row_idx) == expected
def test_calcVar():
    """calc_var should return the squared std of the requested row."""
    for row_idx, std in ((0, 46.5264380581169), (1, 165.119679583602)):
        assert c.calc_var(df1, row_idx) == np.power(std, 2)
def test_calcFF():
    """calc_ff should return variance / mean (Fano factor) per row."""
    cases = (
        (0, 46.5264380581169, 4.3781381658683),
        (1, 165.119679583602, 5.97694541231127),
    )
    for row_idx, std, mean in cases:
        assert c.calc_ff(df1, row_idx) == np.power(std, 2) / mean
def test_calcCV():
    """calc_cv should return std / mean (coefficient of variation) per row."""
    cases = (
        (0, 46.5264380581169, 4.3781381658683),
        (1, 165.119679583602, 5.97694541231127),
    )
    for row_idx, std, mean in cases:
        assert c.calc_cv(df1, row_idx) == std / mean
def test_est2p():
    """est_2p should apply the operator to the last and first row statistics."""
    ratio = lambda x, y: x / y
    expected = ratio(c.calc_p(df1, -1), c.calc_p(df1, 0))
    assert c.est_2p(df1, c.calc_p, ratio) == expected
def test_estF():
    """est_f should accumulate each row's ratio against the first row."""
    baseline = c.calc_p(df1, 0)
    expected = 0
    for i in range(1, df1.shape[0]):
        expected += c.calc_p(df1, i) / baseline
    assert c.est_f(df1, c.calc_p, lambda x, y: x / y) == expected
def test_estG():
    """est_g should accumulate each row's ratio against its predecessor."""
    expected = 0
    for i in range(1, df1.shape[0]):
        expected += c.calc_p(df1, i) / c.calc_p(df1, i - 1)
    assert c.est_g(df1, c.calc_p, lambda x, y: x / y) == expected
def test_binEstimate():
    """bin_estimate should emit one hand-computable value per metric for df1's bin."""
    est = c.bin_estimate(df1, ["prob_2p", "var_f", "roc_g"], False)
    # prob_2p
    # Ratio of the last condition's probability to the first condition's.
    p2p = (639962 / (7348941 * 19819)) / (75860 / (876501 * 17327))
    assert p2p == est["prob_2p"].values[0]
    # var_f
    # Sum of log variance ratios of each later condition against the first.
    vf = np.log(np.power(165.119679583602, 2) / np.power(46.5264380581169, 2))
    vf += np.log(np.power(414.534732953575, 2) / np.power(46.5264380581169, 2))
    vf += np.log(np.power(434.349758213242, 2) / np.power(46.5264380581169, 2))
    vf += np.log(np.power(395.57305703688, 2) / np.power(46.5264380581169, 2))
    vf += np.log(np.power(317.043533799097, 2) / np.power(46.5264380581169, 2))
    assert vf == est["var_f"].values[0]
    # roc_g
    # rgK is the cumulative ratio over the first K+1 conditions:
    # sum of reads / sum of (count * condition reads).
    rg0 = df1['sum'].values[:1].sum() / (
        df1['count'].values[:1] * df1['cond_nreads'].values[:1]).sum()
    rg1 = df1['sum'].values[:2].sum() / (
        df1['count'].values[:2] * df1['cond_nreads'].values[:2]).sum()
    rg2 = df1['sum'].values[:3].sum() / (
        df1['count'].values[:3] * df1['cond_nreads'].values[:3]).sum()
    rg3 = df1['sum'].values[:4].sum() / (
        df1['count'].values[:4] * df1['cond_nreads'].values[:4]).sum()
    rg4 = df1['sum'].values[:5].sum() / (
        df1['count'].values[:5] * df1['cond_nreads'].values[:5]).sum()
    rg5 = df1['sum'].values[:6].sum() / (
        df1['count'].values[:6] * df1['cond_nreads'].values[:6]).sum()
    # roc_g is the sum of consecutive cumulative-ratio quotients.
    rg = rg1 / rg0 + rg2 / rg1 + rg3 / rg2 + rg4 / rg3 + rg5 / rg4
    assert rg == est["roc_g"].values[0]
def test_rank():
    """rank should order bins per metric, keeping NaN estimates at the end."""
    # Two bins: chr1 (df1) and chr10 (df2), both with complete statistics.
    est = c.bin_estimate(pd.concat([df1, df2]),
        ["prob_2p", "var_f", "roc_g"], False)
    rank = c.rank(est, ["prob_2p", "var_f", "roc_g"], False)
    erank = ['chr1:0-249221236', 'chr10:0-135503768']
    assert all(erank == rank['prob_2p'].values)
    # var_f and roc_g order the two bins the opposite way to prob_2p.
    assert all(erank[::-1] == rank['var_f'].values)
    assert all(erank[::-1] == rank['roc_g'].values)
    # Adding chr11 (df3), whose std contains a NaN, should push it to the
    # end of the var_f ranking while the other metrics stay well-defined.
    est = c.bin_estimate(pd.concat([df1, df2, df3]),
        ["prob_2p", "var_f", "roc_g"], False)
    rank = c.rank(est, ["prob_2p", "var_f", "roc_g"], False)
    erank = ['chr1:0-249221236', 'chr10:0-135503768', 'chr11:0-35503768']
    assert all(erank == rank['prob_2p'].values)
    # NaN != NaN, so compare via string representation to include the NaN slot.
    assert str([erank[1], erank[0], np.nan])==str(rank['var_f'].values.tolist())
    assert all([erank[1], erank[2], erank[0]] == rank['roc_g'].values)
assert all([erank[1], erank[2], erank[0]] == rank['roc_g'].values)
# END ==========================================================================
################################################################################
| [
"pandas.DataFrame",
"gpseqc.centrality.rank",
"gpseqc.centrality.calc_var",
"gpseqc.centrality.est_2p",
"gpseqc.centrality.calc_ff",
"numpy.power",
"gpseqc.centrality.calc_pr",
"gpseqc.centrality.est_f",
"gpseqc.centrality.calc_pc",
"gpseqc.centrality.calc_p",
"gpseqc.centrality.calc_cv",
"gps... | [((361, 928), 'pandas.DataFrame', 'pd.DataFrame', (["[['chr1', 0, 249221236, 75860, 4.3781381658683, 46.5264380581169, 17327, \n 876501, 1], ['chr1', 0, 249221236, 102923, 5.97694541231127, \n 165.119679583602, 17220, 1045062, 2], ['chr1', 0, 249221236, 580975, \n 29.1551663572038, 414.534732953575, 19927, 6625898, 3], ['chr1', 0, \n 249221236, 502659, 25.1820550072642, 434.349758213242, 19961, 5706581, \n 4], ['chr1', 0, 249221236, 564971, 28.6830989490785, 395.57305703688, \n 19697, 6380685, 5], ['chr1', 0, 249221236, 639962, 32.2903274635451, \n 317.043533799097, 19819, 7348941, 6]]"], {}), "([['chr1', 0, 249221236, 75860, 4.3781381658683, \n 46.5264380581169, 17327, 876501, 1], ['chr1', 0, 249221236, 102923, \n 5.97694541231127, 165.119679583602, 17220, 1045062, 2], ['chr1', 0, \n 249221236, 580975, 29.1551663572038, 414.534732953575, 19927, 6625898, \n 3], ['chr1', 0, 249221236, 502659, 25.1820550072642, 434.349758213242, \n 19961, 5706581, 4], ['chr1', 0, 249221236, 564971, 28.6830989490785, \n 395.57305703688, 19697, 6380685, 5], ['chr1', 0, 249221236, 639962, \n 32.2903274635451, 317.043533799097, 19819, 7348941, 6]])\n", (373, 928), True, 'import pandas as pd\n'), ((926, 1497), 'pandas.DataFrame', 'pd.DataFrame', (["[['chr10', 0, 135503768, 37874, 3.77306236302052, 2.68100540922241, 10038, \n 876501, 1], ['chr10', 0, 135503768, 45549, 4.52818371607516, \n 3.48548407771986, 10059, 1045062, 2], ['chr10', 0, 135503768, 293384, \n 25.1723723723724, 18.2363780110097, 11655, 6625898, 3], ['chr10', 0, \n 135503768, 246839, 21.0829347454732, 14.8824240340175, 11708, 5706581, \n 4], ['chr10', 0, 135503768, 285805, 24.7022471910112, 20.041079768043, \n 11570, 6380685, 5], ['chr10', 0, 135503768, 332791, 28.681461690942, \n 22.5135593268717, 11603, 7348941, 6]]"], {}), "([['chr10', 0, 135503768, 37874, 3.77306236302052, \n 2.68100540922241, 10038, 876501, 1], ['chr10', 0, 135503768, 45549, \n 4.52818371607516, 3.48548407771986, 10059, 1045062, 2], ['chr10', 
0, \n 135503768, 293384, 25.1723723723724, 18.2363780110097, 11655, 6625898, \n 3], ['chr10', 0, 135503768, 246839, 21.0829347454732, 14.8824240340175,\n 11708, 5706581, 4], ['chr10', 0, 135503768, 285805, 24.7022471910112, \n 20.041079768043, 11570, 6380685, 5], ['chr10', 0, 135503768, 332791, \n 28.681461690942, 22.5135593268717, 11603, 7348941, 6]])\n", (938, 1497), True, 'import pandas as pd\n'), ((1496, 2051), 'pandas.DataFrame', 'pd.DataFrame', (["[['chr11', 0, 35503768, 37874, 3.77306236302052, np.nan, 10038, 876501, 1],\n ['chr11', 0, 35503768, 45549, 4.52818371607516, 3.48548407771986, 10059,\n 1045062, 2], ['chr11', 0, 35503768, 293384, 25.1723723723724, \n 18.2363780110097, 11655, 6625898, 3], ['chr11', 0, 35503768, 246839, \n 21.0829347454732, 14.8824240340175, 11708, 5706581, 4], ['chr11', 0, \n 35503768, 285805, 24.7022471910112, 20.041079768043, 11570, 6380685, 5],\n ['chr11', 0, 35503768, 332791, 28.681461690942, 22.5135593268717, 11603,\n 7348941, 6]]"], {}), "([['chr11', 0, 35503768, 37874, 3.77306236302052, np.nan, 10038,\n 876501, 1], ['chr11', 0, 35503768, 45549, 4.52818371607516, \n 3.48548407771986, 10059, 1045062, 2], ['chr11', 0, 35503768, 293384, \n 25.1723723723724, 18.2363780110097, 11655, 6625898, 3], ['chr11', 0, \n 35503768, 246839, 21.0829347454732, 14.8824240340175, 11708, 5706581, 4\n ], ['chr11', 0, 35503768, 285805, 24.7022471910112, 20.041079768043, \n 11570, 6380685, 5], ['chr11', 0, 35503768, 332791, 28.681461690942, \n 22.5135593268717, 11603, 7348941, 6]])\n", (1508, 2051), True, 'import pandas as pd\n'), ((3325, 3354), 'numpy.power', 'np.power', (['(46.5264380581169)', '(2)'], {}), '(46.5264380581169, 2)\n', (3333, 3354), True, 'import numpy as np\n'), ((3400, 3429), 'numpy.power', 'np.power', (['(165.119679583602)', '(2)'], {}), '(165.119679583602, 2)\n', (3408, 3429), True, 'import numpy as np\n'), ((3877, 3920), 'gpseqc.centrality.est_2p', 'c.est_2p', (['df1', 'c.calc_p', '(lambda x, y: x / y)'], {}), '(df1, c.calc_p, 
lambda x, y: x / y)\n', (3885, 3920), True, 'from gpseqc import centrality as c\n'), ((4349, 4406), 'gpseqc.centrality.bin_estimate', 'c.bin_estimate', (['df1', "['prob_2p', 'var_f', 'roc_g']", '(False)'], {}), "(df1, ['prob_2p', 'var_f', 'roc_g'], False)\n", (4363, 4406), True, 'from gpseqc import centrality as c\n'), ((5906, 5955), 'gpseqc.centrality.rank', 'c.rank', (['est', "['prob_2p', 'var_f', 'roc_g']", '(False)'], {}), "(est, ['prob_2p', 'var_f', 'roc_g'], False)\n", (5912, 5955), True, 'from gpseqc import centrality as c\n'), ((6269, 6318), 'gpseqc.centrality.rank', 'c.rank', (['est', "['prob_2p', 'var_f', 'roc_g']", '(False)'], {}), "(est, ['prob_2p', 'var_f', 'roc_g'], False)\n", (6275, 6318), True, 'from gpseqc import centrality as c\n'), ((2573, 2589), 'gpseqc.centrality.calc_p', 'c.calc_p', (['df1', '(0)'], {}), '(df1, 0)\n', (2581, 2589), True, 'from gpseqc import centrality as c\n'), ((2629, 2645), 'gpseqc.centrality.calc_p', 'c.calc_p', (['df1', '(1)'], {}), '(df1, 1)\n', (2637, 2645), True, 'from gpseqc import centrality as c\n'), ((2687, 2703), 'gpseqc.centrality.calc_p', 'c.calc_p', (['df1', '(2)'], {}), '(df1, 2)\n', (2695, 2703), True, 'from gpseqc import centrality as c\n'), ((2799, 2816), 'gpseqc.centrality.calc_pc', 'c.calc_pc', (['df1', '(0)'], {}), '(df1, 0)\n', (2808, 2816), True, 'from gpseqc import centrality as c\n'), ((2875, 2892), 'gpseqc.centrality.calc_pc', 'c.calc_pc', (['df1', '(1)'], {}), '(df1, 1)\n', (2884, 2892), True, 'from gpseqc import centrality as c\n'), ((2951, 2968), 'gpseqc.centrality.calc_pc', 'c.calc_pc', (['df1', '(2)'], {}), '(df1, 2)\n', (2960, 2968), True, 'from gpseqc import centrality as c\n'), ((3040, 3057), 'gpseqc.centrality.calc_pr', 'c.calc_pr', (['df1', '(0)'], {}), '(df1, 0)\n', (3049, 3057), True, 'from gpseqc import centrality as c\n'), ((3138, 3155), 'gpseqc.centrality.calc_pr', 'c.calc_pr', (['df1', '(1)'], {}), '(df1, 1)\n', (3147, 3155), True, 'from gpseqc import centrality as c\n'), ((3271, 
3288), 'gpseqc.centrality.calc_pr', 'c.calc_pr', (['df1', '(2)'], {}), '(df1, 2)\n', (3280, 3288), True, 'from gpseqc import centrality as c\n'), ((3366, 3384), 'gpseqc.centrality.calc_var', 'c.calc_var', (['df1', '(0)'], {}), '(df1, 0)\n', (3376, 3384), True, 'from gpseqc import centrality as c\n'), ((3441, 3459), 'gpseqc.centrality.calc_var', 'c.calc_var', (['df1', '(1)'], {}), '(df1, 1)\n', (3451, 3459), True, 'from gpseqc import centrality as c\n'), ((3495, 3524), 'numpy.power', 'np.power', (['(46.5264380581169)', '(2)'], {}), '(46.5264380581169, 2)\n', (3503, 3524), True, 'import numpy as np\n'), ((3554, 3571), 'gpseqc.centrality.calc_ff', 'c.calc_ff', (['df1', '(0)'], {}), '(df1, 0)\n', (3563, 3571), True, 'from gpseqc import centrality as c\n'), ((3587, 3616), 'numpy.power', 'np.power', (['(165.119679583602)', '(2)'], {}), '(165.119679583602, 2)\n', (3595, 3616), True, 'import numpy as np\n'), ((3647, 3664), 'gpseqc.centrality.calc_ff', 'c.calc_ff', (['df1', '(1)'], {}), '(df1, 1)\n', (3656, 3664), True, 'from gpseqc import centrality as c\n'), ((3746, 3763), 'gpseqc.centrality.calc_cv', 'c.calc_cv', (['df1', '(0)'], {}), '(df1, 0)\n', (3755, 3763), True, 'from gpseqc import centrality as c\n'), ((3826, 3843), 'gpseqc.centrality.calc_cv', 'c.calc_cv', (['df1', '(1)'], {}), '(df1, 1)\n', (3835, 3843), True, 'from gpseqc import centrality as c\n'), ((4094, 4136), 'gpseqc.centrality.est_f', 'c.est_f', (['df1', 'c.calc_p', '(lambda x, y: x / y)'], {}), '(df1, c.calc_p, lambda x, y: x / y)\n', (4101, 4136), True, 'from gpseqc import centrality as c\n'), ((4266, 4308), 'gpseqc.centrality.est_g', 'c.est_g', (['df1', 'c.calc_p', '(lambda x, y: x / y)'], {}), '(df1, c.calc_p, lambda x, y: x / y)\n', (4273, 4308), True, 'from gpseqc import centrality as c\n'), ((5826, 5847), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {}), '([df1, df2])\n', (5835, 5847), True, 'import pandas as pd\n'), ((6188, 6214), 'pandas.concat', 'pd.concat', (['[df1, df2, df3]'], {}), '([df1, 
df2, df3])\n', (6197, 6214), True, 'import pandas as pd\n'), ((3937, 3954), 'gpseqc.centrality.calc_p', 'c.calc_p', (['df1', '(-1)'], {}), '(df1, -1)\n', (3945, 3954), True, 'from gpseqc import centrality as c\n'), ((3957, 3973), 'gpseqc.centrality.calc_p', 'c.calc_p', (['df1', '(0)'], {}), '(df1, 0)\n', (3965, 3973), True, 'from gpseqc import centrality as c\n'), ((4563, 4592), 'numpy.power', 'np.power', (['(165.119679583602)', '(2)'], {}), '(165.119679583602, 2)\n', (4571, 4592), True, 'import numpy as np\n'), ((4595, 4624), 'numpy.power', 'np.power', (['(46.5264380581169)', '(2)'], {}), '(46.5264380581169, 2)\n', (4603, 4624), True, 'import numpy as np\n'), ((4643, 4672), 'numpy.power', 'np.power', (['(414.534732953575)', '(2)'], {}), '(414.534732953575, 2)\n', (4651, 4672), True, 'import numpy as np\n'), ((4675, 4704), 'numpy.power', 'np.power', (['(46.5264380581169)', '(2)'], {}), '(46.5264380581169, 2)\n', (4683, 4704), True, 'import numpy as np\n'), ((4723, 4752), 'numpy.power', 'np.power', (['(434.349758213242)', '(2)'], {}), '(434.349758213242, 2)\n', (4731, 4752), True, 'import numpy as np\n'), ((4755, 4784), 'numpy.power', 'np.power', (['(46.5264380581169)', '(2)'], {}), '(46.5264380581169, 2)\n', (4763, 4784), True, 'import numpy as np\n'), ((4803, 4831), 'numpy.power', 'np.power', (['(395.57305703688)', '(2)'], {}), '(395.57305703688, 2)\n', (4811, 4831), True, 'import numpy as np\n'), ((4834, 4863), 'numpy.power', 'np.power', (['(46.5264380581169)', '(2)'], {}), '(46.5264380581169, 2)\n', (4842, 4863), True, 'import numpy as np\n'), ((4882, 4911), 'numpy.power', 'np.power', (['(317.043533799097)', '(2)'], {}), '(317.043533799097, 2)\n', (4890, 4911), True, 'import numpy as np\n'), ((4914, 4943), 'numpy.power', 'np.power', (['(46.5264380581169)', '(2)'], {}), '(46.5264380581169, 2)\n', (4922, 4943), True, 'import numpy as np\n'), ((4005, 4021), 'gpseqc.centrality.calc_p', 'c.calc_p', (['df1', 'i'], {}), '(df1, i)\n', (4013, 4021), True, 'from gpseqc 
import centrality as c\n'), ((4024, 4040), 'gpseqc.centrality.calc_p', 'c.calc_p', (['df1', '(0)'], {}), '(df1, 0)\n', (4032, 4040), True, 'from gpseqc import centrality as c\n'), ((4173, 4189), 'gpseqc.centrality.calc_p', 'c.calc_p', (['df1', 'i'], {}), '(df1, i)\n', (4181, 4189), True, 'from gpseqc import centrality as c\n'), ((4192, 4212), 'gpseqc.centrality.calc_p', 'c.calc_p', (['df1', '(i - 1)'], {}), '(df1, i - 1)\n', (4200, 4212), True, 'from gpseqc import centrality as c\n')] |
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import pandas as pd
import pytest
from causalnex.evaluation import classification_report, roc_auc
from causalnex.inference import InferenceEngine
from causalnex.network import BayesianNetwork
from causalnex.structure import StructureModel
from causalnex.structure.notears import from_pandas
from causalnex.utils.network_utils import get_markov_blanket
from .estimator.test_em import naive_bayes_plus_parents
class TestFitNodeStates:
    """Test behaviour of fit node states method"""
    @pytest.mark.parametrize(
        "weighted_edges, data",
        [
            ([("a", "b", 1)], pd.DataFrame([[1, 1]], columns=["a", "b"])),
            (
                [("a", "b", 1)],
                pd.DataFrame([[1, 1, 1, 1]], columns=["a", "b", "c", "d"]),
            ),
            # c and d are isolated nodes in the data
        ],
    )
    def test_all_nodes_included(self, weighted_edges, data):
        """No errors if all the nodes can be found in the columns of training data"""
        cg = StructureModel()
        cg.add_weighted_edges_from(weighted_edges)
        # Extra data columns not present in the structure must be tolerated.
        bn = BayesianNetwork(cg).fit_node_states(data)
        assert all(node in data.columns for node in bn.node_states.keys())
    def test_all_states_included(self):
        """All states in a node should be included"""
        cg = StructureModel()
        cg.add_weighted_edges_from([("a", "b", 1)])
        # Ten distinct values per column -> ten states per node.
        bn = BayesianNetwork(cg).fit_node_states(
            pd.DataFrame([[i, i] for i in range(10)], columns=["a", "b"])
        )
        assert all(v in bn.node_states["a"] for v in range(10))
    def test_fit_with_null_states_raises_error(self):
        """An error should be raised if fit is called with null data"""
        cg = StructureModel()
        cg.add_weighted_edges_from([("a", "b", 1)])
        with pytest.raises(ValueError, match="node '.*' contains None state"):
            BayesianNetwork(cg).fit_node_states(
                pd.DataFrame([[None, 1]], columns=["a", "b"])
            )
    def test_fit_with_missing_feature_in_data(self):
        """An error should be raised if fit is called with missing feature in data"""
        cg = StructureModel()
        # Node "e" exists in the structure but not in the training columns.
        cg.add_weighted_edges_from([("a", "e", 1)])
        with pytest.raises(
            KeyError,
            match="The data does not cover all the features found in the Bayesian Network. "
            "Please check the following features: {'e'}",
        ):
            BayesianNetwork(cg).fit_node_states(
                pd.DataFrame([[1, 1, 1, 1]], columns=["a", "b", "c", "d"])
            )
class TestFitCPDSErrors:
    """Validate that fit_cpds rejects unknown configuration values."""
    def test_invalid_method(self, bn, train_data_discrete):
        """Passing an unknown estimation method must raise a ValueError."""
        with pytest.raises(ValueError, match=r"unrecognised method.*"):
            bn.fit_cpds(train_data_discrete, method="INVALID")
    def test_invalid_prior(self, bn, train_data_discrete):
        """Passing an unknown Bayesian prior must raise a ValueError."""
        with pytest.raises(ValueError, match=r"unrecognised bayes_prior.*"):
            bn.fit_cpds(
                train_data_discrete,
                method="BayesianEstimator",
                bayes_prior="INVALID",
            )
class TestFitCPDsMaximumLikelihoodEstimator:
    """Test behaviour of fit_cpds using MLE"""
    @staticmethod
    def _assert_cpds_match(cpds, expected, nodes_and_sizes, tolerance):
        """Assert each fitted CPD deviates from its target by less than tolerance on average."""
        for node, size in nodes_and_sizes:
            fitted = cpds[node].values.reshape(size)
            target = expected[node].reshape(size)
            assert np.mean(np.abs(fitted - target)) < tolerance
    def test_cause_only_node(self, bn, train_data_discrete, train_data_discrete_cpds):
        """Probabilities of nodes without parents should be fit correctly."""
        bn.fit_cpds(train_data_discrete)
        self._assert_cpds_match(
            bn.cpds, train_data_discrete_cpds, [("d", 2), ("e", 2)], 1e-7
        )
    def test_dependent_node(self, bn, train_data_discrete, train_data_discrete_cpds):
        """Probabilities of nodes caused by other nodes should be fit correctly."""
        bn.fit_cpds(train_data_discrete)
        self._assert_cpds_match(
            bn.cpds,
            train_data_discrete_cpds,
            [("a", 24), ("b", 12), ("c", 60)],
            1e-7,
        )
    def test_fit_missing_states(self):
        """test issues/15: should be possible to fit with missing states"""
        sm = StructureModel([("a", "b"), ("c", "b")])
        bn = BayesianNetwork(sm)
        train = pd.DataFrame(
            data=[[0, 0, 1], [1, 0, 1], [1, 1, 1]], columns=["a", "b", "c"]
        )
        test = pd.DataFrame(
            data=[[0, 0, 1], [1, 0, 1], [1, 1, 2]], columns=["a", "b", "c"]
        )
        # State c=2 appears only in the test split; node states are learnt
        # from all data, while CPDs are fit from the training split alone.
        bn.fit_node_states(pd.concat([train, test]))
        bn.fit_cpds(train)
        assert bn.cpds["c"].loc[1][0] == 1
        assert bn.cpds["c"].loc[2][0] == 0
class TestFitBayesianEstimator:
    """Test behaviour of fit_cpds using BE"""
    @staticmethod
    def _assert_cpds_close(cpds, expected, nodes_and_sizes, tolerance):
        """Assert each fitted CPD deviates from its target by less than tolerance on average."""
        for node, size in nodes_and_sizes:
            fitted = cpds[node].values.reshape(size)
            target = expected[node].reshape(size)
            assert np.mean(np.abs(fitted - target)) < tolerance
    def test_cause_only_node_bdeu(
        self, bn, train_data_discrete, train_data_discrete_cpds
    ):
        """Probabilities of nodes without parents should be fit correctly (BDeu prior)."""
        bn.fit_cpds(
            train_data_discrete,
            method="BayesianEstimator",
            bayes_prior="BDeu",
            equivalent_sample_size=5,
        )
        self._assert_cpds_close(
            bn.cpds, train_data_discrete_cpds, [("d", 2), ("e", 2)], 0.02
        )
    def test_cause_only_node_k2(
        self, bn, train_data_discrete, train_data_discrete_cpds
    ):
        """Probabilities of nodes without parents should be fit correctly (K2 prior)."""
        bn.fit_cpds(train_data_discrete, method="BayesianEstimator", bayes_prior="K2")
        self._assert_cpds_close(
            bn.cpds, train_data_discrete_cpds, [("d", 2), ("e", 2)], 0.02
        )
    def test_dependent_node_bdeu(
        self, bn, train_data_discrete, train_data_discrete_cpds
    ):
        """Probabilities of nodes with parents should be fit correctly (BDeu prior)."""
        bn.fit_cpds(
            train_data_discrete,
            method="BayesianEstimator",
            bayes_prior="BDeu",
            equivalent_sample_size=1,
        )
        self._assert_cpds_close(
            bn.cpds,
            train_data_discrete_cpds,
            [("a", 24), ("b", 12), ("c", 60)],
            0.02,
        )
    def test_dependent_node_k2(
        self, bn, train_data_discrete, train_data_discrete_cpds_k2
    ):
        """Probabilities of nodes with parents should be fit correctly (K2 prior)."""
        bn.fit_cpds(train_data_discrete, method="BayesianEstimator", bayes_prior="K2")
        self._assert_cpds_close(
            bn.cpds,
            train_data_discrete_cpds_k2,
            [("a", 24), ("b", 12), ("c", 60)],
            1e-7,
        )
    def test_fit_missing_states(self):
        """test issues/15: should be possible to fit with missing states"""
        sm = StructureModel([("a", "b"), ("c", "b")])
        bn = BayesianNetwork(sm)
        train = pd.DataFrame(
            data=[[0, 0, 1], [1, 0, 1], [1, 1, 1]], columns=["a", "b", "c"]
        )
        test = pd.DataFrame(
            data=[[0, 0, 1], [1, 0, 1], [1, 1, 2]], columns=["a", "b", "c"]
        )
        # State c=2 appears only in the test split; the K2 prior smooths its
        # probability away from zero (hence 0.8/0.2 rather than 1/0).
        bn.fit_node_states(pd.concat([train, test]))
        bn.fit_cpds(train, method="BayesianEstimator", bayes_prior="K2")
        assert bn.cpds["c"].loc[1][0] == 0.8
        assert bn.cpds["c"].loc[2][0] == 0.2
class TestPredictMaximumLikelihoodEstimator:
    """Test behaviour of predict using MLE"""
    @staticmethod
    def _flat(predictions):
        """Return the predictions column as a flat one-dimensional array."""
        return predictions.values.reshape(len(predictions.values))
    def test_predictions_are_based_on_probabilities(
        self, bn, train_data_discrete, test_data_c_discrete
    ):
        """Predictions made using the model should be based on the probabilities that are in the model"""
        bn.fit_cpds(train_data_discrete)
        predictions = bn.predict(test_data_c_discrete, "c")
        assert np.all(self._flat(predictions) == test_data_c_discrete["c"].values)
    def test_prediction_node_suffixed_as_prediction(
        self, bn, train_data_discrete, test_data_c_discrete
    ):
        """The column that contains the values of the predicted node should be named node_prediction"""
        bn.fit_cpds(train_data_discrete)
        predictions = bn.predict(test_data_c_discrete, "c")
        assert "c_prediction" in predictions.columns
    def test_only_predicted_column_returned(
        self, bn, train_data_discrete, test_data_c_discrete
    ):
        """The returned df should not contain any of the input data columns"""
        bn.fit_cpds(train_data_discrete)
        predictions = bn.predict(test_data_c_discrete, "c")
        assert len(predictions.columns) == 1
    def test_predictions_are_not_appended_to_input_df(
        self, bn, train_data_discrete, test_data_c_discrete
    ):
        """The predictions should not be appended to the input df"""
        expected_cols = test_data_c_discrete.columns
        bn.fit_cpds(train_data_discrete)
        bn.predict(test_data_c_discrete, "c")
        assert np.array_equal(test_data_c_discrete.columns, expected_cols)
    def test_missing_parent(self, bn, train_data_discrete, test_data_c_discrete):
        """Predictions made when parents are missing should still be reasonably accurate"""
        bn.fit_cpds(train_data_discrete)
        # Column "e" (a parent of "c") is withheld from the query data.
        predictions = bn.predict(test_data_c_discrete[["a", "b", "c", "d"]], "c")
        mismatches = np.count_nonzero(
            self._flat(predictions) - test_data_c_discrete["c"].values
        )
        accuracy = 1 - mismatches / len(test_data_c_discrete)
        assert accuracy > 0.9
    def test_missing_non_parent(self, bn, train_data_discrete, test_data_c_discrete):
        """It should be possible to make predictions with non-parent nodes missing"""
        bn.fit_cpds(train_data_discrete)
        predictions = bn.predict(test_data_c_discrete[["b", "c", "d", "e"]], "c")
        assert np.all(self._flat(predictions) == test_data_c_discrete["c"].values)
class TestPredictBayesianEstimator:
    """Test behaviour of predict using BE"""
    @staticmethod
    def _fit_and_predict(bn, train, test, prior):
        """Fit CPDs with the given Bayesian prior (ESS=5) and predict node "c"."""
        bn.fit_cpds(
            train,
            method="BayesianEstimator",
            bayes_prior=prior,
            equivalent_sample_size=5,
        )
        return bn.predict(test, "c")
    def test_predictions_are_based_on_probabilities_dbeu(
        self, bn, train_data_discrete, test_data_c_discrete
    ):
        """Predictions made using the model should be based on the probabilities that are in the model"""
        predictions = self._fit_and_predict(
            bn, train_data_discrete, test_data_c_discrete, "BDeu"
        )
        flat = predictions.values.reshape(len(predictions.values))
        assert np.all(flat == test_data_c_discrete["c"].values)
    def test_predictions_are_based_on_probabilities_k2(
        self, bn, train_data_discrete, test_data_c_discrete
    ):
        """Predictions made using the model should be based on the probabilities that are in the model"""
        predictions = self._fit_and_predict(
            bn, train_data_discrete, test_data_c_discrete, "K2"
        )
        flat = predictions.values.reshape(len(predictions.values))
        assert np.all(flat == test_data_c_discrete["c"].values)
class TestPredictProbabilityMaximumLikelihoodEstimator:
    """Test behaviour of predict_probability using MLE"""
    def test_expected_probabilities_are_predicted(
        self, bn, train_data_discrete, test_data_c_discrete, test_data_c_likelihood
    ):
        """Probabilities should return exactly correct on a hand computable scenario"""
        bn.fit_cpds(train_data_discrete)
        probability = bn.predict_probability(test_data_c_discrete, "c")
        observed = probability.values.flatten()
        expected = test_data_c_likelihood.values.flatten()
        assert all(np.isclose(observed, expected))
    def test_missing_parent(
        self, bn, train_data_discrete, test_data_c_discrete, test_data_c_likelihood
    ):
        """Probabilities made when parents are missing should still be reasonably accurate"""
        bn.fit_cpds(train_data_discrete)
        # Column "e" (a parent of "c") is withheld from the query data.
        probability = bn.predict_probability(
            test_data_c_discrete[["a", "b", "c", "d"]], "c"
        )
        observed = probability.values.flatten()
        expected = test_data_c_likelihood.values.flatten()
        close = [
            1 if math.isclose(a, b, abs_tol=0.15) else 0
            for a, b in zip(observed, expected)
        ]
        accuracy = np.count_nonzero(close) / len(observed)
        assert accuracy > 0.8
    def test_missing_non_parent(
        self, bn, train_data_discrete, test_data_c_discrete, test_data_c_likelihood
    ):
        """It should be possible to make predictions with non-parent nodes missing"""
        bn.fit_cpds(train_data_discrete)
        probability = bn.predict_probability(
            test_data_c_discrete[["b", "c", "d", "e"]], "c"
        )
        observed = probability.values.flatten()
        expected = test_data_c_likelihood.values.flatten()
        assert all(np.isclose(observed, expected))
class TestPredictProbabilityBayesianEstimator:
    """Test behaviour of predict_probability using BayesianEstimator"""
    def test_expected_probabilities_are_predicted(
        self, bn, train_data_discrete, test_data_c_discrete, test_data_c_likelihood
    ):
        """Probabilities should return exactly correct on a hand computable scenario"""
        bn.fit_cpds(
            train_data_discrete,
            method="BayesianEstimator",
            bayes_prior="BDeu",
            equivalent_sample_size=1,
        )
        probability = bn.predict_probability(test_data_c_discrete, "c")
        observed = probability.values.flatten()
        expected = test_data_c_likelihood.values.flatten()
        # The smoothing prior shifts probabilities slightly, hence atol=0.1.
        assert all(np.isclose(observed, expected, atol=0.1))
class TestFitNodesStatesAndCPDs:
    """Test behaviour of helper function"""
    def test_behaves_same_as_separate_calls(self, train_data_idx, train_data_discrete):
        """fit_node_states_and_cpds must match fit_node_states followed by fit_cpds."""
        baseline = BayesianNetwork(from_pandas(train_data_idx, w_threshold=0.3))
        combined = BayesianNetwork(from_pandas(train_data_idx, w_threshold=0.3))
        baseline.fit_node_states(train_data_discrete).fit_cpds(train_data_discrete)
        combined.fit_node_states_and_cpds(train_data_discrete)
        assert baseline.edges == combined.edges
        assert baseline.node_states == combined.node_states
        cpds_baseline = baseline.cpds
        cpds_combined = combined.cpds
        assert cpds_baseline.keys() == cpds_combined.keys()
        for node, table in cpds_baseline.items():
            assert table.equals(cpds_combined[node])
class TestLatentVariable:
    @staticmethod
    def mean_absolute_error(cpds_a, cpds_b):
        """Compute the absolute error among each single parameter and average them out

        Both arguments map node name -> CPD table; the mappings are assumed
        to share the same keys and per-node table shapes (TODO confirm at
        call sites).
        """
        mae = 0
        n_param = 0
        for node in cpds_a.keys():
            # Element-wise absolute difference between the two CPD tables.
            err = np.abs(cpds_a[node] - cpds_b[node]).values
            mae += np.sum(err)
            # Each table contributes rows * columns parameters to the average.
            n_param += err.shape[0] * err.shape[1]
        return mae / n_param
def test_em_algorithm(self): # pylint: disable=too-many-locals
"""
Test if `BayesianNetwork` works with EM algorithm.
We use a naive bayes + parents + an extra node not related to the latent variable.
"""
# p0 p1 p2
# \ | /
# z
# / | \
# c0 c1 c2
# |
# cc0
np.random.seed(22)
data, sm, _, true_lv_values = naive_bayes_plus_parents(
percentage_not_missing=0.1,
samples=1000,
p_z=0.7,
p_c=0.7,
)
data["cc_0"] = np.where(
np.random.random(len(data)) < 0.5, data["c_0"], (data["c_0"] + 1) % 3
)
data.drop(columns=["z"], inplace=True)
complete_data = data.copy(deep=True)
complete_data["z"] = true_lv_values
# Baseline model: the structure of the figure trained with complete data. We try to reproduce it
complete_bn = BayesianNetwork(
StructureModel(list(sm.edges) + [("c_0", "cc_0")])
)
complete_bn.fit_node_states_and_cpds(complete_data)
# BN without latent variable: All `p`s are connected to all `c`s + `c0` ->`cc0`
sm_no_lv = StructureModel(
[(f"p_{p}", f"c_{c}") for p in range(3) for c in range(3)]
+ [("c_0", "cc_0")]
)
bn = BayesianNetwork(sm_no_lv)
bn.fit_node_states(data)
bn.fit_cpds(data)
# TEST 1: cc_0 does not depend on the latent variable so:
assert np.all(bn.cpds["cc_0"] == complete_bn.cpds["cc_0"])
# BN with latent variable
# When we add the latent variable, we add the edges in the image above
# and remove the connection among `p`s and `c`s
edges_to_add = list(sm.edges)
edges_to_remove = [(f"p_{p}", f"c_{c}") for p in range(3) for c in range(3)]
bn.add_node("z", edges_to_add, edges_to_remove)
bn.fit_latent_cpds("z", [0, 1, 2], data, stopping_delta=0.001)
# TEST 2: cc_0 CPD should remain untouched by the EM algorithm
assert np.all(bn.cpds["cc_0"] == complete_bn.cpds["cc_0"])
# TEST 3: We should recover the correct CPDs quite accurately
assert bn.cpds.keys() == complete_bn.cpds.keys()
assert self.mean_absolute_error(bn.cpds, complete_bn.cpds) < 0.01
# TEST 4: Inference over recovered CPDs should be also accurate
eng = InferenceEngine(bn)
query = eng.query()
n_rows = complete_data.shape[0]
for node in query:
assert (
np.abs(query[node][0] - sum(complete_data[node] == 0) / n_rows) < 1e-2
)
assert (
np.abs(query[node][1] - sum(complete_data[node] == 1) / n_rows) < 1e-2
)
# TEST 5: Inference using predict and predict_probability functions
report = classification_report(bn, complete_data, "z")
_, auc = roc_auc(bn, complete_data, "z")
complete_report = classification_report(complete_bn, complete_data, "z")
_, complete_auc = roc_auc(complete_bn, complete_data, "z")
for category, metrics in report.items():
if isinstance(metrics, dict):
for key, val in metrics.items():
assert np.abs(val - complete_report[category][key]) < 1e-2
else:
assert np.abs(metrics - complete_report[category]) < 1e-2
assert np.abs(auc - complete_auc) < 1e-2
class TestAddNode:
    """Validation of add_node edge arguments."""

    def test_add_node_not_in_edges_to_add(self):
        """An error should be raised if the latent variable is NOT part of the edges to add."""
        with pytest.raises(
            ValueError,
            match="Should only add edges containing node 'd'",
        ):
            _, sm, _, _ = naive_bayes_plus_parents()
            network = BayesianNetwork(StructureModel(list(sm.edges)))
            network.add_node("d", [("a", "z"), ("b", "z")], [])

    def test_add_node_in_edges_to_remove(self):
        """An error should be raised if the latent variable is part of the edges to remove."""
        with pytest.raises(
            ValueError,
            match="Should only remove edges NOT containing node 'd'",
        ):
            _, sm, _, _ = naive_bayes_plus_parents()
            network = BayesianNetwork(StructureModel(list(sm.edges)))
            network.add_node("d", [], [("a", "d"), ("b", "d")])
class TestFitLatentCPDs:
    """Validation of fit_latent_cpds arguments."""

    @pytest.mark.parametrize("lv_name", [None, [], set(), {}, tuple(), 123, {}])
    def test_fit_invalid_lv_name(self, lv_name):
        """An error should be raised if the latent variable name has an invalid type."""
        with pytest.raises(
            ValueError,
            match=r"Invalid latent variable name *",
        ):
            df, sm, _, _ = naive_bayes_plus_parents()
            network = BayesianNetwork(StructureModel(list(sm.edges)))
            network.fit_latent_cpds(lv_name, [0, 1, 2], df)

    def test_fit_lv_not_added(self):
        """An error should be raised if the latent variable was never added to the network."""
        with pytest.raises(
            ValueError,
            match=r"Latent variable 'd' not added to the network",
        ):
            df, sm, _, _ = naive_bayes_plus_parents()
            network = BayesianNetwork(StructureModel(list(sm.edges)))
            network.fit_latent_cpds("d", [0, 1, 2], df)

    @pytest.mark.parametrize("lv_states", [None, [], set(), {}])
    def test_fit_invalid_lv_states(self, lv_states):
        """An error should be raised if the latent variable has no valid states."""
        with pytest.raises(
            ValueError,
            match="Latent variable 'd' contains no states",
        ):
            df, sm, _, _ = naive_bayes_plus_parents()
            network = BayesianNetwork(StructureModel(list(sm.edges)))
            network.add_node("d", [("z", "d")], [])
            network.fit_latent_cpds("d", lv_states, df)
class TestSetCPD:
    """Test behaviour of setting a self-defined CPD on a node."""

    def test_set_cpd(self, bn, good_cpd):
        """After setting, the node's CPD should equal the user-supplied table."""
        bn.set_cpd("b", good_cpd)
        assert bn.cpds["b"].values.tolist() == good_cpd.values.tolist()

    def test_set_other_cpd(self, bn, good_cpd):
        """CPDs of nodes other than the target should be left untouched."""
        before = bn.cpds["a"].values.tolist()
        bn.set_cpd("b", good_cpd)
        after = bn.cpds["a"].values.tolist()
        assert all(left == right for left, right in zip(before, after))

    def test_set_cpd_to_non_existent_node(self, bn, good_cpd):
        """Setting a CPD on a node absent from the network should raise."""
        with pytest.raises(
            ValueError,
            match=r'Non-existing node "test"',
        ):
            bn.set_cpd("test", good_cpd)

    def test_set_bad_cpd(self, bn, bad_cpd):
        """Setting a table whose probabilities do not sum to 1 should raise."""
        with pytest.raises(
            ValueError,
            match=r"Sum or integral of conditional probabilites for node b is not equal to 1.",
        ):
            bn.set_cpd("b", bad_cpd)

    def test_no_overwritten_after_setting_bad_cpd(self, bn, bad_cpd):
        """A rejected table must not overwrite the existing CPD."""
        snapshot = bn.cpds["b"].values.tolist()
        try:
            bn.set_cpd("b", bad_cpd)
        except ValueError:
            assert bn.cpds["b"].values.tolist() == snapshot

    def test_bad_node_index(self, bn, good_cpd):
        """A wrong node index name should raise."""
        broken = good_cpd
        broken.index.name = "test"
        with pytest.raises(
            IndexError,
            match=r"Wrong index values. Please check your indices",
        ):
            bn.set_cpd("b", broken)

    def test_bad_node_states_index(self, bn, good_cpd):
        """Wrong node state values in the index should raise."""
        broken = good_cpd.reindex([1, 2, 3])
        with pytest.raises(
            IndexError,
            match=r"Wrong index values. Please check your indices",
        ):
            bn.set_cpd("b", broken)

    def test_bad_parent_node_index(self, bn, good_cpd):
        """A wrong parent-node index name should raise."""
        broken = good_cpd
        broken.columns = broken.columns.rename("test", level=1)
        with pytest.raises(
            IndexError,
            match=r"Wrong index values. Please check your indices",
        ):
            bn.set_cpd("b", broken)

    def test_bad_parent_node_states_index(self, bn, good_cpd):
        """Wrong parent-node state values should raise."""
        broken = good_cpd
        broken.columns.set_levels(["test1", "test2"], level=0, inplace=True)
        with pytest.raises(
            IndexError,
            match=r"Wrong index values. Please check your indices",
        ):
            bn.set_cpd("b", broken)
class TestCPDsProperty:
    """Test behaviour of the CPDs property."""

    def test_row_index_of_state_values(self, bn):
        """Row index should list every possible state of the node, sorted."""
        expected_index = sorted(list(bn.node_states["a"]))
        assert bn.cpds["a"].index.tolist() == expected_index

    def test_col_index_of_parent_state_combinations(self, bn):
        """Columns should be a multi-index over the parent nodes."""
        assert list(bn.cpds["a"].columns.names) == ["b", "d"]
class TestInit:
    """Test behaviour when constructing a BayesianNetwork."""

    def test_cycles_in_structure(self):
        """A cyclic structure should be rejected."""
        cyclic = StructureModel([(0, 1), (1, 2), (2, 0)])
        with pytest.raises(
            ValueError,
            match=r"The given structure is not acyclic\. "
            r"Please review the following cycle\.*",
        ):
            BayesianNetwork(cyclic)

    @pytest.mark.parametrize(
        "test_input,n_components",
        [([(0, 1), (1, 2), (3, 4), (4, 6)], 2), ([(0, 1), (1, 2), (3, 4), (5, 6)], 3)],
    )
    def test_disconnected_components(self, test_input, n_components):
        """A structure with more than one graph component should be rejected."""
        expected_message = (
            r"The given structure has "
            + str(n_components)
            + r" separated graph components\. "
            r"Please make sure it has only one\."
        )
        with pytest.raises(ValueError, match=expected_message):
            BayesianNetwork(StructureModel(test_input))
class TestStructure:
    """Test behaviour of the property structure"""

    def test_get_structure(self):
        """The structure retrieved should be the same as the one passed in."""
        sm = StructureModel()
        sm.add_weighted_edges_from([(1, 2, 2.0)], origin="unknown")
        sm.add_weighted_edges_from([(1, 3, 1.0)], origin="learned")
        sm.add_weighted_edges_from([(3, 5, 0.7)], origin="expert")

        bn = BayesianNetwork(sm)
        sm_from_bn = bn.structure

        assert set(sm.edges.data("origin")) == set(sm_from_bn.edges.data("origin"))
        assert set(sm.edges.data("weight")) == set(sm_from_bn.edges.data("weight"))
        assert set(sm.nodes) == set(sm_from_bn.nodes)

    def test_set_structure(self):
        """Assigning to the structure property should raise an AttributeError."""
        sm = StructureModel()
        sm.add_weighted_edges_from([(1, 2, 2.0)], origin="unknown")
        sm.add_weighted_edges_from([(1, 3, 1.0)], origin="learned")
        sm.add_weighted_edges_from([(3, 5, 0.7)], origin="expert")
        bn = BayesianNetwork(sm)

        new_sm = StructureModel()
        # BUG FIX: these three calls previously mutated `sm` instead of the
        # freshly created `new_sm`, so the candidate replacement structure
        # being assigned below was always empty.
        new_sm.add_weighted_edges_from([(2, 5, 3.0)], origin="unknown")
        new_sm.add_weighted_edges_from([(2, 3, 2.0)], origin="learned")
        new_sm.add_weighted_edges_from([(3, 4, 1.7)], origin="expert")

        with pytest.raises(AttributeError, match=r"can't set attribute"):
            bn.structure = new_sm
class TestMarkovBlanket:
    """Test behavior of Markov Blanket extraction."""

    def test_elements(self, bn_train_model):
        """The blanket must contain parents, children, and parents of children."""
        blanket_nodes = set(get_markov_blanket(bn_train_model, "a").nodes)
        assert {"b", "d"}.issubset(blanket_nodes)  # parents of "a"
        assert {"f"}.issubset(blanket_nodes)  # children of "a"
        assert {"e"}.issubset(blanket_nodes)  # parents of the children

    def test_connection(self, bn_train_model):
        """The blanket must keep the expected edges."""
        blanket = get_markov_blanket(bn_train_model, "a")
        expected_edges = [("b", "a"), ("d", "a"), ("a", "f"), ("e", "f"), ("e", "b")]
        for parent, child in expected_edges:
            assert blanket.structure.has_edge(parent, child)

    def test_invalid_node(self, bn_train_model):
        """Requesting the blanket of an unknown node should raise."""
        with pytest.raises(
            KeyError,
            match="is not found in the network",
        ):
            get_markov_blanket(bn_train_model, "invalid")
| [
"pandas.DataFrame",
"causalnex.evaluation.roc_auc",
"causalnex.structure.notears.from_pandas",
"numpy.random.seed",
"numpy.sum",
"numpy.abs",
"causalnex.inference.InferenceEngine",
"numpy.all",
"causalnex.utils.network_utils.get_markov_blanket",
"pytest.raises",
"causalnex.evaluation.classificat... | [((29766, 29901), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input,n_components"""', '[([(0, 1), (1, 2), (3, 4), (4, 6)], 2), ([(0, 1), (1, 2), (3, 4), (5, 6)], 3)]'], {}), "('test_input,n_components', [([(0, 1), (1, 2), (3, 4\n ), (4, 6)], 2), ([(0, 1), (1, 2), (3, 4), (5, 6)], 3)])\n", (29789, 29901), False, 'import pytest\n'), ((2468, 2484), 'causalnex.structure.StructureModel', 'StructureModel', ([], {}), '()\n', (2482, 2484), False, 'from causalnex.structure import StructureModel\n'), ((2774, 2790), 'causalnex.structure.StructureModel', 'StructureModel', ([], {}), '()\n', (2788, 2790), False, 'from causalnex.structure import StructureModel\n'), ((3181, 3197), 'causalnex.structure.StructureModel', 'StructureModel', ([], {}), '()\n', (3195, 3197), False, 'from causalnex.structure import StructureModel\n'), ((3607, 3623), 'causalnex.structure.StructureModel', 'StructureModel', ([], {}), '()\n', (3621, 3623), False, 'from causalnex.structure import StructureModel\n'), ((6616, 6656), 'causalnex.structure.StructureModel', 'StructureModel', (["[('a', 'b'), ('c', 'b')]"], {}), "([('a', 'b'), ('c', 'b')])\n", (6630, 6656), False, 'from causalnex.structure import StructureModel\n'), ((6670, 6689), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['sm'], {}), '(sm)\n', (6685, 6689), False, 'from causalnex.network import BayesianNetwork\n'), ((6707, 6784), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0, 0, 1], [1, 0, 1], [1, 1, 1]]', 'columns': "['a', 'b', 'c']"}), "(data=[[0, 0, 1], [1, 0, 1], [1, 1, 1]], columns=['a', 'b', 'c'])\n", (6719, 6784), True, 'import pandas as pd\n'), ((6822, 6899), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0, 0, 1], [1, 0, 1], [1, 1, 2]]', 'columns': "['a', 'b', 'c']"}), "(data=[[0, 0, 1], [1, 0, 1], [1, 1, 2]], columns=['a', 'b', 'c'])\n", (6834, 6899), True, 'import pandas as pd\n'), ((6937, 6961), 'pandas.concat', 'pd.concat', (['[train, test]'], {}), 
'([train, test])\n', (6946, 6961), True, 'import pandas as pd\n'), ((11122, 11162), 'causalnex.structure.StructureModel', 'StructureModel', (["[('a', 'b'), ('c', 'b')]"], {}), "([('a', 'b'), ('c', 'b')])\n", (11136, 11162), False, 'from causalnex.structure import StructureModel\n'), ((11176, 11195), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['sm'], {}), '(sm)\n', (11191, 11195), False, 'from causalnex.network import BayesianNetwork\n'), ((11213, 11290), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0, 0, 1], [1, 0, 1], [1, 1, 1]]', 'columns': "['a', 'b', 'c']"}), "(data=[[0, 0, 1], [1, 0, 1], [1, 1, 1]], columns=['a', 'b', 'c'])\n", (11225, 11290), True, 'import pandas as pd\n'), ((11328, 11405), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0, 0, 1], [1, 0, 1], [1, 1, 2]]', 'columns': "['a', 'b', 'c']"}), "(data=[[0, 0, 1], [1, 0, 1], [1, 1, 2]], columns=['a', 'b', 'c'])\n", (11340, 11405), True, 'import pandas as pd\n'), ((11443, 11467), 'pandas.concat', 'pd.concat', (['[train, test]'], {}), '([train, test])\n', (11452, 11467), True, 'import pandas as pd\n'), ((13300, 13359), 'numpy.array_equal', 'np.array_equal', (['test_data_c_discrete.columns', 'expected_cols'], {}), '(test_data_c_discrete.columns, expected_cols)\n', (13314, 13359), True, 'import numpy as np\n'), ((19982, 20000), 'numpy.random.seed', 'np.random.seed', (['(22)'], {}), '(22)\n', (19996, 20000), True, 'import numpy as np\n'), ((20974, 20999), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['sm_no_lv'], {}), '(sm_no_lv)\n', (20989, 20999), False, 'from causalnex.network import BayesianNetwork\n'), ((21141, 21192), 'numpy.all', 'np.all', (["(bn.cpds['cc_0'] == complete_bn.cpds['cc_0'])"], {}), "(bn.cpds['cc_0'] == complete_bn.cpds['cc_0'])\n", (21147, 21192), True, 'import numpy as np\n'), ((21700, 21751), 'numpy.all', 'np.all', (["(bn.cpds['cc_0'] == complete_bn.cpds['cc_0'])"], {}), "(bn.cpds['cc_0'] == complete_bn.cpds['cc_0'])\n", (21706, 21751), True, 
'import numpy as np\n'), ((22041, 22060), 'causalnex.inference.InferenceEngine', 'InferenceEngine', (['bn'], {}), '(bn)\n', (22056, 22060), False, 'from causalnex.inference import InferenceEngine\n'), ((22495, 22540), 'causalnex.evaluation.classification_report', 'classification_report', (['bn', 'complete_data', '"""z"""'], {}), "(bn, complete_data, 'z')\n", (22516, 22540), False, 'from causalnex.evaluation import classification_report, roc_auc\n'), ((22558, 22589), 'causalnex.evaluation.roc_auc', 'roc_auc', (['bn', 'complete_data', '"""z"""'], {}), "(bn, complete_data, 'z')\n", (22565, 22589), False, 'from causalnex.evaluation import classification_report, roc_auc\n'), ((22616, 22670), 'causalnex.evaluation.classification_report', 'classification_report', (['complete_bn', 'complete_data', '"""z"""'], {}), "(complete_bn, complete_data, 'z')\n", (22637, 22670), False, 'from causalnex.evaluation import classification_report, roc_auc\n'), ((22697, 22737), 'causalnex.evaluation.roc_auc', 'roc_auc', (['complete_bn', 'complete_data', '"""z"""'], {}), "(complete_bn, complete_data, 'z')\n", (22704, 22737), False, 'from causalnex.evaluation import classification_report, roc_auc\n'), ((30549, 30565), 'causalnex.structure.StructureModel', 'StructureModel', ([], {}), '()\n', (30563, 30565), False, 'from causalnex.structure import StructureModel\n'), ((30783, 30802), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['sm'], {}), '(sm)\n', (30798, 30802), False, 'from causalnex.network import BayesianNetwork\n'), ((31175, 31191), 'causalnex.structure.StructureModel', 'StructureModel', ([], {}), '()\n', (31189, 31191), False, 'from causalnex.structure import StructureModel\n'), ((31409, 31428), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['sm'], {}), '(sm)\n', (31424, 31428), False, 'from causalnex.network import BayesianNetwork\n'), ((31447, 31463), 'causalnex.structure.StructureModel', 'StructureModel', ([], {}), '()\n', (31461, 31463), False, 'from 
causalnex.structure import StructureModel\n'), ((31959, 31998), 'causalnex.utils.network_utils.get_markov_blanket', 'get_markov_blanket', (['bn_train_model', '"""a"""'], {}), "(bn_train_model, 'a')\n", (31977, 31998), False, 'from causalnex.utils.network_utils import get_markov_blanket\n'), ((32378, 32417), 'causalnex.utils.network_utils.get_markov_blanket', 'get_markov_blanket', (['bn_train_model', '"""a"""'], {}), "(bn_train_model, 'a')\n", (32396, 32417), False, 'from causalnex.utils.network_utils import get_markov_blanket\n'), ((3263, 3327), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""node \'.*\' contains None state"""'}), '(ValueError, match="node \'.*\' contains None state")\n', (3276, 3327), False, 'import pytest\n'), ((3690, 3847), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""The data does not cover all the features found in the Bayesian Network. Please check the following features: {\'e\'}"""'}), '(KeyError, match=\n "The data does not cover all the features found in the Bayesian Network. 
Please check the following features: {\'e\'}"\n )\n', (3703, 3847), False, 'import pytest\n'), ((4249, 4305), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unrecognised method.*"""'}), "(ValueError, match='unrecognised method.*')\n", (4262, 4305), False, 'import pytest\n'), ((4522, 4583), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unrecognised bayes_prior.*"""'}), "(ValueError, match='unrecognised bayes_prior.*')\n", (4535, 4583), False, 'import pytest\n'), ((18656, 18700), 'causalnex.structure.notears.from_pandas', 'from_pandas', (['train_data_idx'], {'w_threshold': '(0.3)'}), '(train_data_idx, w_threshold=0.3)\n', (18667, 18700), False, 'from causalnex.structure.notears import from_pandas\n'), ((18732, 18776), 'causalnex.structure.notears.from_pandas', 'from_pandas', (['train_data_idx'], {'w_threshold': '(0.3)'}), '(train_data_idx, w_threshold=0.3)\n', (18743, 18776), False, 'from causalnex.structure.notears import from_pandas\n'), ((19511, 19522), 'numpy.sum', 'np.sum', (['err'], {}), '(err)\n', (19517, 19522), True, 'import numpy as np\n'), ((23066, 23092), 'numpy.abs', 'np.abs', (['(auc - complete_auc)'], {}), '(auc - complete_auc)\n', (23072, 23092), True, 'import numpy as np\n'), ((23279, 23355), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Should only add edges containing node \'d\'"""'}), '(ValueError, match="Should only add edges containing node \'d\'")\n', (23292, 23355), False, 'import pytest\n'), ((23510, 23529), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['sm'], {}), '(sm)\n', (23525, 23529), False, 'from causalnex.network import BayesianNetwork\n'), ((23746, 23834), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Should only remove edges NOT containing node \'d\'"""'}), '(ValueError, match=\n "Should only remove edges NOT containing node \'d\'")\n', (23759, 23834), False, 'import pytest\n'), ((23984, 24003), 'causalnex.network.BayesianNetwork', 
'BayesianNetwork', (['sm'], {}), '(sm)\n', (23999, 24003), False, 'from causalnex.network import BayesianNetwork\n'), ((24319, 24384), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Invalid latent variable name *"""'}), "(ValueError, match='Invalid latent variable name *')\n", (24332, 24384), False, 'import pytest\n'), ((24541, 24560), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['sm'], {}), '(sm)\n', (24556, 24560), False, 'from causalnex.network import BayesianNetwork\n'), ((24763, 24842), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Latent variable \'d\' not added to the network"""'}), '(ValueError, match="Latent variable \'d\' not added to the network")\n', (24776, 24842), False, 'import pytest\n'), ((24999, 25018), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['sm'], {}), '(sm)\n', (25014, 25018), False, 'from causalnex.network import BayesianNetwork\n'), ((25285, 25358), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Latent variable \'d\' contains no states"""'}), '(ValueError, match="Latent variable \'d\' contains no states")\n', (25298, 25358), False, 'import pytest\n'), ((25514, 25533), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['sm'], {}), '(sm)\n', (25529, 25533), False, 'from causalnex.network import BayesianNetwork\n'), ((26527, 26586), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Non-existing node "test\\""""'}), '(ValueError, match=\'Non-existing node "test"\')\n', (26540, 26586), False, 'import pytest\n'), ((26819, 26937), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Sum or integral of conditional probabilites for node b is not equal to 1."""'}), "(ValueError, match=\n 'Sum or integral of conditional probabilites for node b is not equal to 1.'\n )\n", (26832, 26937), False, 'import pytest\n'), ((27529, 27614), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""Wrong index values. 
Please check your indices"""'}), "(IndexError, match='Wrong index values. Please check your indices'\n )\n", (27542, 27614), False, 'import pytest\n'), ((27873, 27958), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""Wrong index values. Please check your indices"""'}), "(IndexError, match='Wrong index values. Please check your indices'\n )\n", (27886, 27958), False, 'import pytest\n'), ((28264, 28349), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""Wrong index values. Please check your indices"""'}), "(IndexError, match='Wrong index values. Please check your indices'\n )\n", (28277, 28349), False, 'import pytest\n'), ((28681, 28766), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""Wrong index values. Please check your indices"""'}), "(IndexError, match='Wrong index values. Please check your indices'\n )\n", (28694, 28766), False, 'import pytest\n'), ((29528, 29648), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""The given structure is not acyclic\\\\. Please review the following cycle\\\\.*"""'}), "(ValueError, match=\n 'The given structure is not acyclic\\\\. 
Please review the following cycle\\\\.*'\n )\n", (29541, 29648), False, 'import pytest\n'), ((31681, 31739), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""can\'t set attribute"""'}), '(AttributeError, match="can\'t set attribute")\n', (31694, 31739), False, 'import pytest\n'), ((32741, 32801), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""is not found in the network"""'}), "(KeyError, match='is not found in the network')\n", (32754, 32801), False, 'import pytest\n'), ((32850, 32895), 'causalnex.utils.network_utils.get_markov_blanket', 'get_markov_blanket', (['bn_train_model', '"""invalid"""'], {}), "(bn_train_model, 'invalid')\n", (32868, 32895), False, 'from causalnex.utils.network_utils import get_markov_blanket\n'), ((2549, 2568), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['cg'], {}), '(cg)\n', (2564, 2568), False, 'from causalnex.network import BayesianNetwork\n'), ((2055, 2097), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 1]]'], {'columns': "['a', 'b']"}), "([[1, 1]], columns=['a', 'b'])\n", (2067, 2097), True, 'import pandas as pd\n'), ((2163, 2221), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 1, 1, 1]]'], {'columns': "['a', 'b', 'c', 'd']"}), "([[1, 1, 1, 1]], columns=['a', 'b', 'c', 'd'])\n", (2175, 2221), True, 'import pandas as pd\n'), ((2856, 2875), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['cg'], {}), '(cg)\n', (2871, 2875), False, 'from causalnex.network import BayesianNetwork\n'), ((3394, 3439), 'pandas.DataFrame', 'pd.DataFrame', (['[[None, 1]]'], {'columns': "['a', 'b']"}), "([[None, 1]], columns=['a', 'b'])\n", (3406, 3439), True, 'import pandas as pd\n'), ((3954, 4012), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 1, 1, 1]]'], {'columns': "['a', 'b', 'c', 'd']"}), "([[1, 1, 1, 1]], columns=['a', 'b', 'c', 'd'])\n", (3966, 4012), True, 'import pandas as pd\n'), ((19449, 19484), 'numpy.abs', 'np.abs', (['(cpds_a[node] - cpds_b[node])'], {}), '(cpds_a[node] - cpds_b[node])\n', 
(19455, 19484), True, 'import numpy as np\n'), ((29718, 29758), 'causalnex.structure.StructureModel', 'StructureModel', (['[(0, 1), (1, 2), (2, 0)]'], {}), '([(0, 1), (1, 2), (2, 0)])\n', (29732, 29758), False, 'from causalnex.structure import StructureModel\n'), ((30341, 30367), 'causalnex.structure.StructureModel', 'StructureModel', (['test_input'], {}), '(test_input)\n', (30355, 30367), False, 'from causalnex.structure import StructureModel\n'), ((3341, 3360), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['cg'], {}), '(cg)\n', (3356, 3360), False, 'from causalnex.network import BayesianNetwork\n'), ((3901, 3920), 'causalnex.network.BayesianNetwork', 'BayesianNetwork', (['cg'], {}), '(cg)\n', (3916, 3920), False, 'from causalnex.network import BayesianNetwork\n'), ((22999, 23042), 'numpy.abs', 'np.abs', (['(metrics - complete_report[category])'], {}), '(metrics - complete_report[category])\n', (23005, 23042), True, 'import numpy as np\n'), ((16832, 16864), 'math.isclose', 'math.isclose', (['a', 'b'], {'abs_tol': '(0.15)'}), '(a, b, abs_tol=0.15)\n', (16844, 16864), False, 'import math\n'), ((22906, 22950), 'numpy.abs', 'np.abs', (['(val - complete_report[category][key])'], {}), '(val - complete_report[category][key])\n', (22912, 22950), True, 'import numpy as np\n')] |
from gurobipy import *
import math
import numpy as np
import xlrd #excel
import sys
# quadratic
import datetime
from random import sample
from sympy import *
from sklearn.cluster import KMeans
from sklearn.externals import joblib
from sklearn import cluster
import time
import matplotlib.pyplot as plt
import warnings
from sklearn import linear_model
warnings.filterwarnings("ignore")
from numpy import *
import datetime
# --- experiment configuration and data loading -------------------------------
# NOTE(review): the "..." path placeholders must be filled in before running.
readfile1=r"...\data\data.xlsx"
book1 = xlrd.open_workbook(readfile1)
sh1= book1.sheet_by_name("Sheet2")  # training sheet
book2 = xlrd.open_workbook(readfile1)  # same workbook opened a second time
sh2= book2.sheet_by_name("testing")  # testing sheet
N=500  # number of training samples
K=1
D=1  # number of features per sample
#TN=10
#A=TN-N
A=500  # number of testing samples per scenario
TW=1
TS=9  # number of testing scenarios (column groups in the "testing" sheet)
y=[]  # training targets
x=[]  # training features: N rows of length D
vay=[]
vax=[]
testy=[[0]*A for i in range(TS)]  # testing targets, one row per scenario
testx=[[0]*A for i in range(TS)]  # testing feature vectors, one row per scenario
number=0
# Read training data: columns 0..D-1 hold the features, column D the target.
while number<=N-1:
    y.append(sh1.cell_value(number, D))
    dx=[]
    for j in range(D):
        dx.append(sh1.cell_value(number, j))
    x.append(dx)
    number=number+1
# Read the TS testing scenarios: each scenario occupies D+1 consecutive
# columns (D feature columns followed by one target column).
for i in range(TS):
    number=0
    while number<=A-1:
        testy[i][number]=sh2.cell_value(number, D+i*(D+1))
        dx=[]
        for j in range(D):
            dx.append(sh2.cell_value(number, j+i*(D+1)))
        testx[i][number]=dx
        number=number+1
#totaltime1=[0]*5
#totaltime2=[0]*5
MM=sys.float_info.max  # "big-M" bound used as variable bounds in the Gurobi models
para=0.01  # regularisation weight (also scales MM1 below)
tolerance=0.01
gamma=0.001
absdimax = []  # per-feature max of |x|
dimax = []  # per-feature max of x
dimin = []  # per-feature min of x
extrax= []  # scratch list holding one feature column at a time
MM1 = math.sqrt(sum(y[i]**2 for i in range(N))/para)
# Compute per-feature ranges of the training data.
for j in range(D):
    for i in range(N):
        extrax.append(x[i][j])
    abslist=map(abs, extrax)
    absdimax.append(max(abslist))
    dimax.append(max(extrax))
    dimin.append(min(extrax))
    extrax=[]
# Output file locations (placeholder Windows paths).
filenameresultAP2=r"C:\Users\...\result(AP2 cluster).txt"
filenameCVAP2=r"C:\Users\...\CV(AP2 cluster).txt"
filenametimeAP2=r"C:\Users\...\time(AP2 cluster).txt"
filenameCVprint=r"C:\Users\...\CV loss(cluster).txt"
filenameresultprint=r"C:\Users\...\result(excel)(cluster).txt"
def optimizeothers(sigma,weight,knn):
    """Given a fixed cluster assignment `sigma`, fit per-cluster regressions.

    Solves a Gurobi QP minimising the within-cluster squared regression error
    and returns (x1, x2):
      x1 -- knn x D cluster centroids (weighted means of the assigned points)
      x2 -- knn x (D+1) regression coefficients (last entry is the intercept)

    NOTE(review): `weight` is accepted but unused in this variant; the centroid
    in x1 divides by the cluster size, which raises ZeroDivisionError for an
    empty cluster — confirm callers guarantee non-empty clusters.
    """
    x1=[[0]*D for k in range(knn)]
    x2=[]
    #x4=[]
    objective=0
    m=Model('optimizeothers')
    # One regression vector per cluster: D slopes plus one intercept (index D).
    beta = m.addVars(knn, D+1,lb=-MM, vtype=GRB.CONTINUOUS, name="beta")
    m.update()
    # Sum of squared residuals, restricted to each point's assigned cluster.
    m.setObjective(quicksum(sigma[i][k]*(y[i]-sum(beta[k,j]*x[i][j] for j in range(D))-beta[k,D])*(y[i]-sum(beta[k,j]*x[i][j] for j in range(D))-beta[k,D]) for i in range(N) for k in range(knn)), GRB.MINIMIZE)
    m.optimize()
    status = m.status
    if status == GRB.Status.UNBOUNDED:
        print('The model cannot be solved because it is unbounded')
    if status == GRB.Status.OPTIMAL:
        print('The optimal objective is %g' % m.objVal)
    if status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:
        print('Optimization was stopped with status %d' % status)
    m.write('clustering.lp')
    print('')
    if m.status == GRB.Status.OPTIMAL:
        objective=m.objVal
    # Centroid of each cluster: mean of the points assigned to it.
    for k in range(knn):
        for j in range(D):
            x1[k][j]=sum(sigma[i][k]*x[i][j] for i in range(N))/sum(sigma[i][k] for i in range(N))
    # print ('m')
    # for k in range(knn):
    #     temp2=[]
    #     for j in range(D):
    #         temp2.append(ce[k,j].x)
    #         #print ('%d th feature of cluster %d is %.4f' % (j+1,k+1,ce[k,j].x))
    #     x1.append(temp2)
    #     #print (ce[k,j])
    print ('beta')
    # Extract the solved regression coefficients.
    # NOTE(review): beta[k,j].x is read even when the solve was not optimal,
    # which raises if no solution is available — confirm this is intended.
    for k in range(knn):
        temp3=[]
        for j in range(D+1):
            temp3.append(beta[k,j].x)
            #print ('%d th regression of cluster %d is %.4f' % (j+1,k+1,beta[k,j].x))
        x2.append(temp3)
    return x1,x2
#def optimizeothers2(sigma,initialm,initialbeta,weight,knn):
# x1=[]
# x2=[]
# #print('z',z)
# #print('s',s)
# #print('sigma',sigma)
# #x4=[]
# objective=0
# objective2=0
# m=Model('optimizeothers')
# beta = m.addVars(knn, D+1,lb=-MM, vtype=GRB.CONTINUOUS, name="beta")
# #w = m.addVars(N, K, D, lb=0.0, vtype=GRB.CONTINUOUS, name="w")
# ce = m.addVars(knn, D, lb=-MM,vtype=GRB.CONTINUOUS, name="ce")
# temp1= m.addVars(knn, D+1, lb=0,vtype=GRB.CONTINUOUS, name="temp1")
# temp2= m.addVars(knn, D, lb=0,vtype=GRB.CONTINUOUS, name="temp2")
# #L = m.addVars(N, K, D,lb=-MM, ub=MM,vtype=GRB.CONTINUOUS, name='L')
# m.update()
#
#
# m.setObjective(quicksum(sigma[i][k]*(y[i]-sum(beta[k,j]*x[i][j] for j in range(D))-beta[k,D])*(y[i]-sum(beta[k,j]*x[i][j] for j in range(D))-beta[k,D]) for i in range(N) for k in range(knn))\
# +para*quicksum(beta[k,j]*beta[k,j] for j in range(D+1) for k in range(knn))\
# +weight*quicksum(sigma[i][k]*sum((x[i][j]-ce[k,j])*(x[i][j]-ce[k,j]) for j in range(D)) for i in range(N) for k in range(knn))\
# +gamma*(quicksum(temp1[k,j] for k in range(knn) for j in range(D+1))+quicksum(temp2[k,j] for k in range(knn) for j in range(D))), GRB.MINIMIZE)
# #+gamma*quicksum(sum((beta[k,j]-initialbeta[k][j])*(beta[k,j]-initialbeta[k][j]) for j in range (D+1))+ sum((ce[k,j]-initialm[k][j])*(ce[k,j]-initialm[k][j]) for j in range (D)) for k in range(knn)), GRB.MINIMIZE)
#
# m.addConstrs(
# (temp1[k,j]>=beta[k,j]-initialbeta[k][j] for k in range(knn) for j in range(D+1)),"c15")
#
# m.addConstrs(
# (temp1[k,j]>=initialbeta[k][j]-beta[k,j] for k in range(knn) for j in range(D+1)),"c15")
#
# m.addConstrs(
# (temp2[k,j]>=ce[k,j]-initialm[k][j] for j in range (D) for k in range(knn)),"c15")
#
# m.addConstrs(
# (temp2[k,j]>=initialm[k][j]-ce[k,j] for j in range (D) for k in range(knn)),"c15")
#
#
#
# m.optimize()
#
# status = m.status
# if status == GRB.Status.UNBOUNDED:
# print('The model cannot be solved because it is unbounded')
# #exit(0)
# if status == GRB.Status.OPTIMAL:
# print('The optimal objective is %g' % m.objVal)
# #exit(0)
# if status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:
# print('Optimization was stopped with status %d' % status)
# #exit(0)
#
# m.write('clustering.lp')
#
#
# if m.status == GRB.Status.OPTIMAL:
# objective=m.objVal
# print(' optimal objective1 is %g\n' % objective)
#
# print ('ce')
# for k in range(knn):
# temp1=[]
# for j in range(D):
# temp1.append(ce[k,j].x)
# print ('%d th center of cluster %d is %.4f' % (j+1,k+1,ce[k,j].x))
# x1.append(temp1)
#
# print ('beta')
# for k in range(knn):
# temp3=[]
# for j in range(D+1):
# temp3.append(beta[k,j].x)
# print ('%d th regression of cluster %d is %.4f' % (j+1,k+1,beta[k,j].x))
# x2.append(temp3)
#
# objective2=objective-gamma*sum(sum((x2[k][j]-initialbeta[k][j])*(x2[k][j]-initialbeta[k][j]) for j in range (D+1)) + sum((x1[k][j]-initialm[k][j])*(x1[k][j]-initialm[k][j]) for j in range (D)) for k in range(knn))
# return x1, x2, objective, objective2
##function 2 fix other variables and calculate sigma
#
def assignment(ce,beta,weight,var,x0,y0):
    """Cost of assigning the point (x0, y0) to one cluster.

    Combines a Gaussian negative-log-likelihood style regression term
    (log variance plus squared residual scaled by 2*var^2) with a weighted
    squared distance from the cluster center ce.
    """
    residual = y0 - sum(beta[j] * x0[j] for j in range(D)) - beta[D]
    fit_cost = math.log(var) + pow(residual, 2) / (2 * pow(var, 2))
    return fit_cost + weight * L2Distance(x0, np.mat(ce))
def assignmentCLR(beta,x0,y0):
    """Squared regression residual of (x0, y0) under the coefficient vector beta.

    beta holds D slopes followed by the intercept beta[D] (CLR variant:
    no variance or clustering term).
    """
    residual = y0 - sum(beta[j] * x0[j] for j in range(D)) - beta[D]
    return pow(residual, 2)
def optimizesigmanew(ce,beta,weight,knn,variance,XX,YY):
    """One-hot assign each of the A points (XX, YY) to its cheapest cluster.

    Cost per (point, cluster) comes from assignment(); returns an A x knn
    0/1 matrix with exactly one 1 per row.
    """
    sigma = [[0] * knn for _ in range(A)]
    distance = [[0] * knn for _ in range(A)]
    for i in range(A):
        best_k = 0
        best_cost = 100000.0
        for k in range(knn):
            distance[i][k] = assignment(ce[k], beta[k], weight, variance[k], XX[i], YY[i])
            if distance[i][k] < best_cost:
                best_cost = distance[i][k]
                best_k = k
        sigma[i][best_k] = 1
    return sigma
def optimizesigmanewCLR(beta,knn,XX,YY):
    """One-hot assign each of the A points to the cluster with the smallest
    squared residual (CLR variant of optimizesigmanew)."""
    sigma = [[0] * knn for _ in range(A)]
    distance = [[0] * knn for _ in range(A)]
    for i in range(A):
        best_k = 0
        best_cost = 100000.0
        for k in range(knn):
            distance[i][k] = assignmentCLR(beta[k], XX[i], YY[i])
            if distance[i][k] < best_cost:
                best_cost = distance[i][k]
                best_k = k
        sigma[i][best_k] = 1
    return sigma
def optimizesigmaCLR(ce,beta,weight,knn):#update clustering
    """Solve the cluster-assignment MIP for the CLR model.

    With centers ce and regressions beta fixed, choose binary sigma[i, k]
    minimizing the total squared regression residual, subject to: each of the
    N points belongs to exactly one cluster, and no cluster is empty.
    Returns the N x knn assignment matrix as a list of lists of 0/1 floats.
    Note: ce and weight are accepted but unused in this CLR objective.
    """
    m=Model('optimizex')
    sigma = m.addVars(N, knn, vtype=GRB.BINARY, name='sigma')
    # temp3= m.addVars(N, knn, lb=0,vtype=GRB.CONTINUOUS, name="temp3")
    x1=[]
    objective=0
    m.update()
    # Objective: sum of squared residuals of each point under its cluster's regression.
    m.setObjective(quicksum(sigma[i,k]*pow(y[i]-sum(beta[k][j]*x[i][j] for j in range(D))-beta[k][D],2) for i in range(N) for k in range(knn)), GRB.MINIMIZE)
    # Each point in exactly one cluster.
    m.addConstrs(
        (quicksum(sigma[i,k] for k in range(knn)) == 1 for i in range(N)),"c1")
    # Every cluster non-empty.
    m.addConstrs(
        (quicksum(sigma[i,k] for i in range(N)) >= 1 for k in range(knn)),"c15")
    #m.Params.TimeLimit = 600
    m.optimize()
    status = m.status
    if status == GRB.Status.UNBOUNDED:
        print('The model cannot be solved because it is unbounded')
        #exit(0)
    if status == GRB.Status.OPTIMAL:
        print('The optimal objective is %g' % m.objVal)
        #exit(0)
    if status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:
        print('Optimization was stopped with status %d' % status)
        #exit(0)
    m.write('optimizex.lp')
    print('')
    #if m.status == GRB.Status.OPTIMAL:
    objective=m.objVal
    print(' optimal objective2 is %g\n' % objective)
    print ('sigma')
    # Extract the solved binary assignment matrix.
    for i in range(N):
        temp2=[]
        for k in range(knn):
            temp2.append(sigma[i,k].x)
        x1.append(temp2)
    #mipgap=m.MIPGap
    return x1
def optimizesigma(ce,beta,weight,knn,initialsigma,variance):#update clustering
    """Solve the cluster-assignment MIP with centers/regressions/variances fixed.

    The objective combines: per-cluster log-variance times cluster size,
    squared regression residuals, weight-scaled squared distances to cluster
    centers, and a gamma-scaled penalty (Hamming distance) for deviating from
    the previous assignment initialsigma. Constraints: each of the N points in
    exactly one cluster; every cluster non-empty. Returns the N x knn matrix
    of solved 0/1 values.
    """
    m=Model('optimizex')
    sigma = m.addVars(N, knn, vtype=GRB.BINARY, name='sigma')
    # temp3= m.addVars(N, knn, lb=0,vtype=GRB.CONTINUOUS, name="temp3")
    x1=[]
    objective=0
    m.update()
    m.setObjective(quicksum(math.log(variance[k])*quicksum(sigma[i,k] for i in range(N)) for k in range(knn))\
    +quicksum(sigma[i,k]*pow(y[i]-sum(beta[k][j]*x[i][j] for j in range(D))-beta[k][D],2) for i in range(N) for k in range(knn))\
    +weight*quicksum(sigma[i,k]*sum(pow(x[i][j]-ce[k][j],2) for j in range(D)) for i in range(N) for k in range(knn))\
    +gamma*quicksum((1-initialsigma[i][k])*sigma[i,k]+initialsigma[i][k]*(1-sigma[i,k]) for k in range(knn) for i in range(N)), GRB.MINIMIZE)
    # Each point assigned to exactly one cluster.
    m.addConstrs(
        (quicksum(sigma[i,k] for k in range(knn)) == 1 for i in range(N)),"c1")
    # Every cluster keeps at least one point.
    m.addConstrs(
        (quicksum(sigma[i,k] for i in range(N)) >= 1 for k in range(knn)),"c15")
    #m.Params.TimeLimit = 600
    m.optimize()
    status = m.status
    if status == GRB.Status.UNBOUNDED:
        print('The model cannot be solved because it is unbounded')
        #exit(0)
    if status == GRB.Status.OPTIMAL:
        print('The optimal objective is %g' % m.objVal)
        #exit(0)
    if status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:
        print('Optimization was stopped with status %d' % status)
        #exit(0)
    m.write('optimizex.lp')
    print('')
    #if m.status == GRB.Status.OPTIMAL:
    objective=m.objVal
    print(' optimal objective2 is %g\n' % objective)
    print ('sigma')
    # Extract the solved binary assignment matrix.
    for i in range(N):
        temp2=[]
        for k in range(knn):
            temp2.append(sigma[i,k].x)
        x1.append(temp2)
    #mipgap=m.MIPGap
    return x1
def L1Distance(vector1, vector2): # L1 distance
    """Sum of absolute componentwise differences between two vectors."""
    diff = vector2 - vector1
    return sum(abs(diff))
def L2Distance(vector1, vector2):
    """Squared Euclidean distance between two vectors."""
    squared_diffs = np.square(vector1 - vector2)
    return np.sum(squared_diffs)
def initialassignment(dataSet, knn):
    """Randomly assign every row of dataSet to one of knn clusters.

    Returns (clusterAssment, not_find): clusterAssment is a (numSamples x 1)
    matrix of cluster indices in [0, knn); not_find is True when at least one
    cluster received no point, signalling the caller to redraw.
    """
    numSamples = dataSet.shape[0]
    clusterAssment = mat(zeros((numSamples, 1)))
    not_find = False
    countt = [0] * knn
    # Fix: iterate over the actual row count instead of the module-level N.
    # The original looped over N, which crashes (or silently mis-sizes) when
    # dataSet has a different number of rows; identical when N == numSamples.
    for i in range(numSamples):
        index = int(random.uniform(0, knn))
        clusterAssment[i] = index
        countt[index] = 1
    for j in range(knn):
        if countt[j] <= 0.5:  # cluster j got no point
            not_find = True
            break
    return clusterAssment, not_find
dataSet1 = mat(x)  # all observations as a numpy matrix
# Best-so-far bookkeeping across restarts, indexed by (K setting, weight setting):
# objective, averaged test error, winning restart id, and wall-clock times.
Time=[[0]*TW for k in range(K)]
minimumobj=[[10000]*TW for k in range(K)]
minimumerror=[[10000]*TW for k in range(K)]
recordcounttt=[[10000]*TW for k in range(K)]
recordtime=[[[0]*10 for i in range(TW)] for k in range(K)]
# Best model found so far: regressions, cluster centers, hard assignments.
recordbeta=[[0]*(D+1) for k in range(4)]
recordce=[[0]*D for k in range(4)]
recordsigma=[[0]*(4) for i in range(N)]
for counttt in range(1):  # restarts of the whole alternating optimization
    # Per-restart scratch: cluster-ordered scatter data and fitted responses.
    groupx=[]
    groupy=[]
    groupnewy=[]
    counting=[]
    newy=[0]*(N)
    recordloss1=[[0]*TW for k in range(K)]
    recordloss2=[[0]*TW for k in range(K)]
    File = open(filenameresultAP2, "a")
    File.write('iteration:%d\n' % (counttt+1))
    File.close()
#
# File = open(filenameCVAP2, "a")
# File.write('iteration:%d\n' % (counttt+1))
# File.close()
#
# File = open(filenametimeAP2, "a")
# File.write('iteration:%d\n' % (counttt+1))
# File.close()
#
# File = open(filenameCVprint, "a")
# File.write('iteration:%d\n' % (counttt+1))
# File.close()
#
# File = open(filenameresultprint, "a")
# File.write('iteration:%d\n' % (counttt+1))
# File.close()
    for countk in range(K):#run the algorithm for Runtime times
        knn=3
        f1=True
        # Redraw random initial assignments until all knn+1 clusters are non-empty.
        while f1:
            (clusterAssment ,f1) = initialassignment(dataSet1, knn+1)
        for ww in range(TW):
            weight=0.05
            # temp_sigma1=[[0]*(knn+1) for i in range(N)]#1st dimension=parts of cv
            #temp_sigma2=[[0]*(N) for i in range(N)]
            # temp_sigma2[i][k] == 1 iff sample i currently belongs to cluster k.
            temp_sigma2=[[0]*(knn+1) for i in range(N)]
            for i in range(N):
                temp_sigma2[i][int(clusterAssment[i])]=1
            temp_variance=[0]*(knn+1)
            start2 = datetime.datetime.now()
            itr = 1
            loss2=[]
            # x_ce2=[]
            # x_beta2=[]
            loss2.append(MM)  # large sentinel so the first improvement test passes
            actualobj=0
            obj3=0
            # Alternate until relative improvement falls below tolerance:
            # (1) centers/regressions given assignments, (2) closed-form
            # per-cluster variances, (3) assignments via the MIP.
            while 1:
                # if itr<=1:
                (temp_ce2,temp_beta2)=optimizeothers(temp_sigma2,weight,knn+1)
                # else:
                #     (temp_ce2,temp_beta2,obj2,obj3)=optimizeothers2(temp_sigma2,x_ce2,x_beta2,weight,knn+1)
                # Per-cluster residual std, floored at 1 to avoid degenerate variances.
                for k in range(knn+1):
                    temp_variance[k]=max(1,np.sqrt(sum(temp_sigma2[i][k]*pow(y[i]-sum(temp_beta2[k][j]*x[i][j] for j in range(D))-temp_beta2[k][D],2) for i in range(N))/sum(temp_sigma2[i][k] for i in range(N))))
                # Negative-log-likelihood style objective: log-variance term +
                # scaled residuals + weighted distance-to-center term + constant.
                obj2=sum(math.log(temp_variance[k])*sum(temp_sigma2[i][k] for i in range(N)) for k in range(knn+1))\
                +sum(temp_sigma2[i][k]*pow(y[i]-sum(temp_beta2[k][j]*x[i][j] for j in range(D))-temp_beta2[k][D],2)/(2*pow(temp_variance[k],2)) for i in range(N) for k in range(knn+1))\
                +weight*sum(temp_sigma2[i][k]*sum(pow(x[i][j]-temp_ce2[k][j],2) for j in range(D)) for k in range(knn+1) for i in range(N))+(N/2)*math.log(2*math.pi)
                if (loss2[itr-1]-obj2)/obj2>=tolerance:
                    x_sigma2=temp_sigma2
                    loss2.append(obj2)
                    x_ce2=temp_ce2
                    x_beta2=temp_beta2
                    x_variance=temp_variance
                    (temp_sigma2)=optimizesigma(x_ce2,x_beta2,weight,knn+1,x_sigma2,x_variance)#obj given other variables
                else:
                    break
                itr=itr+1
            end2 = datetime.datetime.now()
            ftime= (end2 - start2).total_seconds()
            # Training error: sum of squared residuals under the hard assignment
            # (the original inline comment said "L1 distance", but this is squared error).
            perror=sum((y[i]-sum(x_sigma2[i][k]*(sum(x_beta2[k][j]*x[i][j] for j in range(D))+x_beta2[k][D]) for k in range(knn+1)))\
            *(y[i]-sum(x_sigma2[i][k]*(sum(x_beta2[k][j]*x[i][j] for j in range(D))+x_beta2[k][D]) for k in range(knn+1))) for i in range(N))
            recordtime[countk][ww][counttt]=ftime
            # Keep the best run (lowest final objective) across restarts.
            if loss2[-1]<=minimumobj[countk][ww]:
                minimumobj[countk][ww]=loss2[-1]
                minimumerror[countk][ww]=perror/A
                recordcounttt[countk][ww]=counttt
                recordbeta=x_beta2
                recordce=x_ce2
                recordsigma=x_sigma2
                recordvariance=x_variance
# for i in range(N):
# for k in range(knn+1):
# if x_sigma2[i][k]>=0.9:
# for j in range(D):
# newy[i]+=x[i][j]*x_beta2[k][j]
# newy[i]=newy[i]+x_beta2[k][D]
# #recordtime2[countk][ww]=end2-start2
# #totaltime2[counttt]+=end2-start2
# for k in range(knn+1):
# number=0
# for i in range(N):
# if x_sigma2[i][k]>=0.9:
# number+=1
# groupx.append(x[i][0])
# groupy.append(y[i])
# groupnewy.append(newy[i])
# counting.append(number)
# f1 = plt.figure(1)
# p1 = plt.scatter(groupx[:counting[0]], groupy[:counting[0]], marker = 'o', color='r', label='1', s = 15)
# p2 = plt.scatter(groupx[counting[0]:counting[0]+counting[1]], groupy[counting[0]:counting[0]+counting[1]], marker = 'o', color='#808080', label='2', s = 15)
# p3 = plt.scatter(groupx[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], groupy[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], marker = 'o', color='b', label='3', s = 15)
# p4 = plt.scatter(groupx[counting[0]+counting[1]+counting[2]:500], groupy[counting[0]+counting[1]+counting[2]:500], marker = 'o', color='#008000', label='4', s = 15)
# plt.legend(loc = 'upper right')
# plt.savefig(r'C:\Users\...\original'+str(weight)+'_'+str(counttt+1)+'.png')
# plt.show()
#
# f2 = plt.figure(2)
# p1 = plt.scatter(groupx[:counting[0]], groupnewy[:counting[0]], marker = 'o', color='r', label='1', s = 15)
# p2 = plt.scatter(groupx[counting[0]:counting[0]+counting[1]], groupnewy[counting[0]:counting[0]+counting[1]], marker = 'o', color='#808080', label='2', s = 15)
# p3 = plt.scatter(groupx[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], groupnewy[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], marker = 'o', color='b', label='3', s = 15)
# p4 = plt.scatter(groupx[counting[0]+counting[1]+counting[2]:500], groupnewy[counting[0]+counting[1]+counting[2]:500], marker = 'o', color='#008000', label='4', s = 15)
# plt.legend(loc = 'upper right')
# plt.savefig(r'C:\Users\...\predict'+str(weight)+'_'+str(counttt+1)+'.png')
# plt.show()
#
# File = open(filenameresultprint, "a")
# File.write('AP2: K=%d,weight=%f,total error=%s\n' % (knn+1,weight,loss2[-1]))
# File.write('AP2: K=%d,weight=%f,regression error=%s\n' % (knn+1,weight,perror))
# File.close()
#
#
# File = open(filenametimeAP2, "a")
# File.write('iteration:%d, computational time when k=%d,weight=%f: %f\n' % (counttt+1,knn+1,weight,ftime))
# File.close()
#
# File = open(filenameresultAP2, "a")
# File.write('AP2: K=%d,weight=%f\n' % (knn+1,weight))
# File.write('obj=%g\n'% loss2[-1])
# File.write('sigma\n')
# for i in range(N):
# for k in range(knn+1):
# if x_sigma2[i][k]>=0.9:
# File.write('cluster:%d contain: %d\n' % (k+1,i+1))
#
# File.write('m\n')
# for k in range(knn+1):
# for j in range(D):
# File.write('%d th center of cluster %d is %.4f\n' % (j+1,k+1,x_ce2[k][j]))
#
#
# File.write('beta\n')
# for k in range(knn+1):
# #if x_z2[t]>=0.9:
# for j in range(D+1):
# File.write('%d th regression of cluster %d is %.4f\n' % (j+1,k+1,x_beta2[k][j]))
#
# File.close()
# Append the best objective / regression error per (K, weight) cell to the summary file.
File = open(filenameresultprint, "a")
for countk in range(K):
    for ww in range(TW):
        File.write('K=%d,weight=%f,minimum error=%s\n' % (knn+1,weight,minimumobj[countk][ww]))
        File.write('K=%d,weight=%f,minimum regression error=%s\n' % (knn+1,weight,minimumerror[countk][ww]))
File.close()
# Fitted response for every sample under the best recorded model (hard assignments).
for i in range(N):
    for k in range(knn+1):
        if recordsigma[i][k]>=0.9:
            for j in range(D):
                newy[i]+=x[i][j]*recordbeta[k][j]
            newy[i]=newy[i]+recordbeta[k][D]
# Reorder samples by cluster for plotting; counting[k] is cluster k's size.
for k in range(knn+1):
    number=0
    for i in range(N):
        if recordsigma[i][k]>=0.9:
            number+=1
            groupx.append(x[i][0])
            groupy.append(y[i])
            groupnewy.append(newy[i])
    counting.append(number)
f1 = plt.figure(1)
#p1 = plt.scatter(groupx[:counting[0]], groupy[:counting[0]], marker = 'o', color='r', label='1', s = 15)
#p2 = plt.scatter(groupx[counting[0]:counting[0]+counting[1]], groupy[counting[0]:counting[0]+counting[1]], marker = 'o', color='#808080', label='2', s = 15)
#p3 = plt.scatter(groupx[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], groupy[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], marker = 'o', color='b', label='3', s = 15)
#p4 = plt.scatter(groupx[counting[0]+counting[1]+counting[2]:500], groupy[counting[0]+counting[1]+counting[2]:500], marker = 'o', color='#008000', label='4', s = 15)
# Observed responses, one marker style per cluster (clusters laid out
# contiguously in groupx/groupy via the counting[] offsets).
p1 = plt.scatter(groupx[:counting[0]], groupy[:counting[0]], marker = 'o', color='r', label='Cluster 1', s = 15)
p2 = plt.scatter(groupx[counting[0]:counting[0]+counting[1]], groupy[counting[0]:counting[0]+counting[1]], marker = 'x', color='#808080', label='Cluster 2', s = 20)
p3 = plt.scatter(groupx[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], groupy[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], marker = '|', color='b', label='Cluster 3', s = 20)
p4 = plt.scatter(groupx[counting[0]+counting[1]+counting[2]:500], groupy[counting[0]+counting[1]+counting[2]:500], marker = '_', color='#008000', label='Cluster 4', s = 20)
plt.legend(loc = 'upper left')
plt.savefig(r'C:\Users\...\original'+'_'+str(weight)+'.png')
plt.show()
f2 = plt.figure(2)
#p1 = plt.scatter(groupx[:counting[0]], groupnewy[:counting[0]], marker = 'o', color='r', label='1', s = 15)
#p2 = plt.scatter(groupx[counting[0]:counting[0]+counting[1]], groupnewy[counting[0]:counting[0]+counting[1]], marker = 'o', color='#808080', label='2', s = 15)
#p3 = plt.scatter(groupx[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], groupnewy[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], marker = 'o', color='b', label='3', s = 15)
#p4 = plt.scatter(groupx[counting[0]+counting[1]+counting[2]:500], groupnewy[counting[0]+counting[1]+counting[2]:500], marker = 'o', color='#008000', label='4', s = 15)
# Fitted responses with the same cluster markers as the observed-data figure.
p1 = plt.scatter(groupx[:counting[0]], groupnewy[:counting[0]], marker = 'o', color='r', label='Cluster 1', s = 15)
p2 = plt.scatter(groupx[counting[0]:counting[0]+counting[1]], groupnewy[counting[0]:counting[0]+counting[1]], marker = 'x', color='#808080', label='Cluster 2', s = 20)
p3 = plt.scatter(groupx[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], groupnewy[counting[0]+counting[1]:counting[0]+counting[1]+counting[2]], marker = '|', color='b', label='Cluster 3', s = 20)
p4 = plt.scatter(groupx[counting[0]+counting[1]+counting[2]:500], groupnewy[counting[0]+counting[1]+counting[2]:500], marker = '_', color='#008000', label='Cluster 4', s = 20)
plt.legend(loc = 'upper left')
plt.savefig(r'C:\Users\...\predict'+'_'+str(weight)+'.png')
plt.show()
# Out-of-sample validation: for each test set, assign test points to the
# recorded clusters by solving a small MIP, then report the test error.
for ts in range(TS):
    vam = Model("validation")
    perror2=0
    vasigma=[]
    assign=vam.addVars(A, knn+1, vtype=GRB.BINARY, name='assign')
    vam.update()
    # Same objective shape as training: log-variance + squared residuals +
    # weighted distance to the recorded cluster centers.
    vam.setObjective(sum(math.log(recordvariance[k])*sum(assign[i,k] for i in range(A)) for k in range(knn+1))\
    +sum((testy[ts][i]*assign[i,k]-assign[i,k]*(sum(recordbeta[k][j]*testx[ts][i][j] for j in range(D))+recordbeta[k][D]))*(testy[ts][i]*assign[i,k]-assign[i,k]*(sum(recordbeta[k][j]*testx[ts][i][j] for j in range(D))+recordbeta[k][D])) for k in range(knn+1) for i in range(A))\
    +weight*sum(assign[i,k]*sum((testx[ts][i][j]-recordce[k][j])*(testx[ts][i][j]-recordce[k][j]) for j in range(D)) for i in range(A) for k in range(knn+1)), GRB.MINIMIZE)
    # Every test point is placed in exactly one cluster.
    vam.addConstrs(
        (quicksum(assign[i,k] for k in range(knn+1)) == 1 for i in range(A)),"c21")
    vam.optimize()
    status = vam.status
    if status == GRB.Status.UNBOUNDED:
        print('The model cannot be solved because it is unbounded')
        #exit(0)
    if status == GRB.Status.OPTIMAL:
        print('The optimal objective is %g' % vam.objVal)
        #exit(0)
    if status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:
        print('Optimization was stopped with status %d' % status)
        #exit(0)
    vam.write('validation.lp')
    if vam.status == GRB.Status.OPTIMAL:
        print ('assign')
        for i in range(A):
            temp=[]
            for k in range(knn+1):
                temp.append(assign[i,k].x)
            vasigma.append(temp)
    # Test error: sum of squared residuals under the solved assignment
    # (the original trailing comment said "L1 distance"; this is squared error).
    perror2=sum((testy[ts][i]-sum(vasigma[i][k]*(sum(recordbeta[k][j]*testx[ts][i][j] for j in range(D))+recordbeta[k][D]) for k in range(knn+1)))\
    *(testy[ts][i]-sum(vasigma[i][k]*(sum(recordbeta[k][j]*testx[ts][i][j] for j in range(D))+recordbeta[k][D]) for k in range(knn+1))) for i in range(A))
    print(perror2)
    #recordloss2[knn][ww]=perror2/A
    recordloss2[0][0]=perror2/A
    if vam.status == GRB.Status.OPTIMAL:
        File = open(filenameCVAP2, "a")
        File.write('***testing set=%d, K=%d, weight=%f,obj=%g***\n'% (ts+1, knn+1,weight,vam.objVal))
        File.write('total error=%s\n' % str(perror2/A))
        File.write('assign\n')
        for i in range(A):
            for k in range(knn+1):
                if assign[i,k].x>=0.9:
                    File.write('data point:%d belong to cluster %d\n' % (i+1,k+1))
        File.close()
    File = open(filenameCVprint, "a")
    File.write('testing set=%d,K=%d,weight=%f,total error=%s\n' % (ts+1,knn+1,weight,str(perror2/A)))
    File.close()
| [
"matplotlib.pyplot.show",
"warnings.filterwarnings",
"matplotlib.pyplot.scatter",
"xlrd.open_workbook",
"matplotlib.pyplot.legend",
"numpy.square",
"matplotlib.pyplot.figure",
"numpy.mat",
"math.log",
"datetime.datetime.now"
] | [((353, 386), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (376, 386), False, 'import warnings\n'), ((464, 493), 'xlrd.open_workbook', 'xlrd.open_workbook', (['readfile1'], {}), '(readfile1)\n', (482, 493), False, 'import xlrd\n'), ((539, 568), 'xlrd.open_workbook', 'xlrd.open_workbook', (['readfile1'], {}), '(readfile1)\n', (557, 568), False, 'import xlrd\n'), ((21581, 21594), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (21591, 21594), True, 'import matplotlib.pyplot as plt\n'), ((22230, 22338), 'matplotlib.pyplot.scatter', 'plt.scatter', (['groupx[:counting[0]]', 'groupy[:counting[0]]'], {'marker': '"""o"""', 'color': '"""r"""', 'label': '"""Cluster 1"""', 's': '(15)'}), "(groupx[:counting[0]], groupy[:counting[0]], marker='o', color=\n 'r', label='Cluster 1', s=15)\n", (22241, 22338), True, 'import matplotlib.pyplot as plt\n'), ((22343, 22512), 'matplotlib.pyplot.scatter', 'plt.scatter', (['groupx[counting[0]:counting[0] + counting[1]]', 'groupy[counting[0]:counting[0] + counting[1]]'], {'marker': '"""x"""', 'color': '"""#808080"""', 'label': '"""Cluster 2"""', 's': '(20)'}), "(groupx[counting[0]:counting[0] + counting[1]], groupy[counting[\n 0]:counting[0] + counting[1]], marker='x', color='#808080', label=\n 'Cluster 2', s=20)\n", (22354, 22512), True, 'import matplotlib.pyplot as plt\n'), ((22508, 22726), 'matplotlib.pyplot.scatter', 'plt.scatter', (['groupx[counting[0] + counting[1]:counting[0] + counting[1] + counting[2]]', 'groupy[counting[0] + counting[1]:counting[0] + counting[1] + counting[2]]'], {'marker': '"""|"""', 'color': '"""b"""', 'label': '"""Cluster 3"""', 's': '(20)'}), "(groupx[counting[0] + counting[1]:counting[0] + counting[1] +\n counting[2]], groupy[counting[0] + counting[1]:counting[0] + counting[1\n ] + counting[2]], marker='|', color='b', label='Cluster 3', s=20)\n", (22519, 22726), True, 'import matplotlib.pyplot as plt\n'), ((22715, 22896), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['groupx[counting[0] + counting[1] + counting[2]:500]', 'groupy[counting[0] + counting[1] + counting[2]:500]'], {'marker': '"""_"""', 'color': '"""#008000"""', 'label': '"""Cluster 4"""', 's': '(20)'}), "(groupx[counting[0] + counting[1] + counting[2]:500], groupy[\n counting[0] + counting[1] + counting[2]:500], marker='_', color=\n '#008000', label='Cluster 4', s=20)\n", (22726, 22896), True, 'import matplotlib.pyplot as plt\n'), ((22883, 22911), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (22893, 22911), True, 'import matplotlib.pyplot as plt\n'), ((22975, 22985), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22983, 22985), True, 'import matplotlib.pyplot as plt\n'), ((22993, 23006), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (23003, 23006), True, 'import matplotlib.pyplot as plt\n'), ((23654, 23764), 'matplotlib.pyplot.scatter', 'plt.scatter', (['groupx[:counting[0]]', 'groupnewy[:counting[0]]'], {'marker': '"""o"""', 'color': '"""r"""', 'label': '"""Cluster 1"""', 's': '(15)'}), "(groupx[:counting[0]], groupnewy[:counting[0]], marker='o',\n color='r', label='Cluster 1', s=15)\n", (23665, 23764), True, 'import matplotlib.pyplot as plt\n'), ((23770, 23941), 'matplotlib.pyplot.scatter', 'plt.scatter', (['groupx[counting[0]:counting[0] + counting[1]]', 'groupnewy[counting[0]:counting[0] + counting[1]]'], {'marker': '"""x"""', 'color': '"""#808080"""', 'label': '"""Cluster 2"""', 's': '(20)'}), "(groupx[counting[0]:counting[0] + counting[1]], groupnewy[\n counting[0]:counting[0] + counting[1]], marker='x', color='#808080',\n label='Cluster 2', s=20)\n", (23781, 23941), True, 'import matplotlib.pyplot as plt\n'), ((23938, 24158), 'matplotlib.pyplot.scatter', 'plt.scatter', (['groupx[counting[0] + counting[1]:counting[0] + counting[1] + counting[2]]', 'groupnewy[counting[0] + counting[1]:counting[0] + counting[1] + counting[2]]'], 
{'marker': '"""|"""', 'color': '"""b"""', 'label': '"""Cluster 3"""', 's': '(20)'}), "(groupx[counting[0] + counting[1]:counting[0] + counting[1] +\n counting[2]], groupnewy[counting[0] + counting[1]:counting[0] +\n counting[1] + counting[2]], marker='|', color='b', label='Cluster 3', s=20)\n", (23949, 24158), True, 'import matplotlib.pyplot as plt\n'), ((24148, 24332), 'matplotlib.pyplot.scatter', 'plt.scatter', (['groupx[counting[0] + counting[1] + counting[2]:500]', 'groupnewy[counting[0] + counting[1] + counting[2]:500]'], {'marker': '"""_"""', 'color': '"""#008000"""', 'label': '"""Cluster 4"""', 's': '(20)'}), "(groupx[counting[0] + counting[1] + counting[2]:500], groupnewy[\n counting[0] + counting[1] + counting[2]:500], marker='_', color=\n '#008000', label='Cluster 4', s=20)\n", (24159, 24332), True, 'import matplotlib.pyplot as plt\n'), ((24319, 24347), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (24329, 24347), True, 'import matplotlib.pyplot as plt\n'), ((24410, 24420), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24418, 24420), True, 'import matplotlib.pyplot as plt\n'), ((12097, 12125), 'numpy.square', 'np.square', (['(vector1 - vector2)'], {}), '(vector1 - vector2)\n', (12106, 12125), True, 'import numpy as np\n'), ((7290, 7303), 'math.log', 'math.log', (['var'], {}), '(var)\n', (7298, 7303), False, 'import math\n'), ((14326, 14349), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14347, 14349), False, 'import datetime\n'), ((16099, 16122), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16120, 16122), False, 'import datetime\n'), ((7396, 7406), 'numpy.mat', 'np.mat', (['ce'], {}), '(ce)\n', (7402, 7406), True, 'import numpy as np\n'), ((15503, 15524), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (15511, 15524), False, 'import math\n'), ((24649, 24676), 'math.log', 'math.log', (['recordvariance[k]'], {}), 
'(recordvariance[k])\n', (24657, 24676), False, 'import math\n'), ((10355, 10376), 'math.log', 'math.log', (['variance[k]'], {}), '(variance[k])\n', (10363, 10376), False, 'import math\n'), ((15073, 15099), 'math.log', 'math.log', (['temp_variance[k]'], {}), '(temp_variance[k])\n', (15081, 15099), False, 'import math\n')] |
from os import path
import MDAnalysis
import numpy as np
from enmspring.graphs import Stack
from enmspring.miscell import check_dir_exist_and_make
# Absolute project roots on the author's machine: enmspring code tree and the
# folder holding the all-atom inputs for every simulated system.
enmspring_folder = '/home/yizaochen/codes/dna_rna/enmspring'
all_folder = '/home/yizaochen/codes/dna_rna/all_systems'
class BaseStackImportanceAgent:
type_na = 'bdna+bdna'
    def __init__(self, host, rootfolder, pic_out_folder):
        """Resolve all folder/file paths for one host and build its stack graph.

        host: system name used to locate the all-atom inputs.
        rootfolder: root folder passed through to the Stack graph agent.
        pic_out_folder: destination folder for rendered pictures.
        """
        self.host = host
        self.rootfolder = rootfolder
        self.tcl_folder = path.join(enmspring_folder, 'tclscripts')
        self.pic_out_folder = pic_out_folder
        self.mol_stru_folder = path.join(self.pic_out_folder, 'mol_structure')
        self.allatom_folder = path.join(all_folder, host, self.type_na, 'input', 'allatoms')
        # NOTE(review): 'perferct_gro' is a typo for 'perfect_gro'; kept as-is
        # because external code may already reference this attribute name.
        self.perferct_gro = path.join(self.allatom_folder, f'{self.type_na}.perfect.gro')
        self.g_agent = self.get_g_agent_and_preprocess()
        self.check_folder()
def check_folder(self):
for folder in [self.mol_stru_folder]:
check_dir_exist_and_make(folder)
def get_g_agent_and_preprocess(self):
g_agent = Stack(self.host, self.rootfolder)
g_agent.pre_process()
return g_agent
def vmd_show_pair_example(self, atomname_i, atomname_j, sele_strandid):
lines = list()
print(f'vmd -cor {self.g_agent.npt4_crd}')
atomidpairs = self.g_agent.get_atomidpairs_atomname1_atomname2(atomname_i, atomname_j, sele_strandid)
lines = self.process_lines_for_edges_tcl(lines, atomidpairs)
tcl_out = path.join(self.tcl_folder, 'illustrate_pairimportance.tcl')
self.write_tcl_out(tcl_out, lines)
def process_lines_for_edges_tcl(self, lines, atomidpairs, radius=0.25):
u_npt4 = MDAnalysis.Universe(self.g_agent.npt4_crd, self.g_agent.npt4_crd)
for atomid1, atomid2 in atomidpairs:
line = self.get_draw_edge_line(u_npt4.atoms.positions, atomid1-1, atomid2-1, radius)
lines.append(line)
return lines
def get_draw_edge_line(self, positions, atomid1, atomid2, radius):
str_0 = 'graphics 0 cylinder {'
str_1 = f'{positions[atomid1,0]:.3f} {positions[atomid1,1]:.3f} {positions[atomid1,2]:.3f}'
str_2 = '} {'
str_3 = f'{positions[atomid2,0]:.3f} {positions[atomid2,1]:.3f} {positions[atomid2,2]:.3f}'
str_4 = '} '
str_5 = f'radius {radius:.2f}\n'
return str_0 + str_1 + str_2 + str_3 + str_4 + str_5
def vmd_show_a_tract_single_A(self):
resid = 7
bigatomlist = [['C6'], ['N1'], ['C4', 'C5'], ['C2', 'N3', 'N6', 'N7', 'C8', 'N9']]
colorid_list = [0, 0, 1, 5]
cpkradius_list = [1.2, 0.9, 1.2, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_A_single')
def vmd_show_a_tract_single_T(self):
resid = 24
bigatomlist = [['C5'], ['N1', 'C2', 'C4'], ['N3'], ['O2', 'O4', 'C6', 'C7']]
colorid_list = [0, 0, 0, 5]
cpkradius_list = [1.2, 0.9, 0.7, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_T_single')
def vmd_show_atat_single_A(self):
resid = 7
bigatomlist = [['C4', 'C5'], ['C6'], ['C2', 'N3'], ['N1', 'C6', 'N6', 'N7', 'C8', 'N9']]
colorid_list = [0, 0, 1, 5]
cpkradius_list = [1.2, 0.8, 0.8, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_A_single')
def vmd_show_atat_single_T(self):
resid = 8
bigatomlist = [['C4'], ['C5'], ['N3'], ['C2'], ['N1', 'O2', 'O4', 'C6', 'C7']]
colorid_list = [0, 0, 1, 1, 5]
cpkradius_list = [1.2, 0.7, 1.1, 0.7, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_T_single')
def vmd_show_g_tract_single_G(self):
resid = 7
bigatomlist = [['C6', 'C4'], ['N1', 'N3'], ['C2', 'N2', 'O6', 'C4', 'C5', 'N7', 'C8', 'N9']]
colorid_list = [0, 0, 5]
cpkradius_list = [1.2, 0.9, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_G_single')
def vmd_show_g_tract_single_C(self):
resid = 24
bigatomlist = [['C4'], ['N3'], ['C2'], ['N1', 'O2', 'C6', 'C5', 'N4']]
colorid_list = [0, 0, 0, 5]
cpkradius_list = [1.2, 0.9, 0.7, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_C_single')
def vmd_show_gcgc_single_G(self):
resid = 7
bigatomlist = [['C4'], ['C5'], ['N3', 'C2', 'C6', 'O6', 'N1', 'N2', 'C4', 'N7', 'C8', 'N9']]
colorid_list = [0, 0, 5]
cpkradius_list = [1.2, 0.9, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_G_single')
def vmd_show_gcgc_single_C(self):
resid = 8
bigatomlist = [['N3', 'C2'], ['C4'], ['C5', 'N1', 'O2', 'C6', 'N4']]
colorid_list = [0, 0, 5]
cpkradius_list = [1.2, 0.7, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_C_single')
def vmd_show_ctct_single_C(self):
resid = 7
bigatomlist = [['N1', 'N3', 'C2'], ['C5', 'C4', 'O2', 'C6', 'N4']]
colorid_list = [0, 5]
cpkradius_list = [1.2, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_C_single')
def vmd_show_ctct_single_T(self):
resid = 8
bigatomlist = [['C4', 'C5'], ['N3', 'C2'], ['N1', 'O2', 'O4', 'C6', 'C7']]
colorid_list = [0, 0, 5]
cpkradius_list = [1.2, 0.7, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_T_single')
def vmd_show_ctct_single_A(self):
resid = 27
bigatomlist = [['C6'], ['C5'], ['C4', 'C2', 'N3', 'N1', 'C6', 'N6', 'N7', 'C8', 'N9']]
colorid_list = [0, 1, 5]
cpkradius_list = [1.2, 1.0, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_A_single')
def vmd_show_ctct_single_G(self):
resid = 28
bigatomlist = [['C6', 'N1'], ['C4'], ['N3', 'C5', 'C2', 'O6', 'N2', 'C4', 'N7', 'C8', 'N9']]
colorid_list = [0, 1, 5]
cpkradius_list = [1.2, 1.0, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_G_single')
def vmd_show_tgtg_single_T(self):
resid = 7
bigatomlist = [['C4', 'C5'], ['N3'], ['C2'], ['N1', 'O2', 'O4', 'C6', 'C7']]
colorid_list = [0, 0, 1, 5]
cpkradius_list = [1.2, 1.2, 1.2, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_T_single')
def vmd_show_tgtg_single_G(self):
resid = 8
bigatomlist = [['C4'], ['N3', 'C2'], ['C6', 'N1', 'C5', 'O6', 'N2', 'C4', 'N7', 'C8', 'N9']]
colorid_list = [0, 0, 5]
cpkradius_list = [1.2, 0.7, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_G_single')
def vmd_show_tgtg_single_C(self):
resid = 27
bigatomlist = [['C4', 'N3', 'C2'], ['C5', 'N1', 'O2', 'C6', 'N4']]
colorid_list = [0, 5]
cpkradius_list = [1.2, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_C_single')
def vmd_show_tgtg_single_A(self):
resid = 26
bigatomlist = [['C5', 'C4'], ['C2', 'C6', 'N3', 'N1', 'C6', 'N6', 'N7', 'C8', 'N9']]
colorid_list = [0, 5]
cpkradius_list = [1.2, 0.5]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid(resid)
for atomlist, colorid, cpkradius in zip(bigatomlist, colorid_list, cpkradius_list):
lines += self.vmd_add_atomlist_vdw(atomlist, resid, colorid, cpkradius)
tcl_out = path.join(self.tcl_folder, 'show_single_nucleotide.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_A_single')
def vmd_show_a_tract_AA_pair1(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 1
resid_j = 2
atompair_list = [('N1', 'N1'), ('N1', 'C6'), ('C6', 'C6'), ('C6', 'N6')]
radius_list = [0.08, 0.15, 0.2, 0.08]
color_list = [1, 1, 1, 1]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_AA_pair1')
def vmd_show_a_tract_AA_pair2(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 1
resid_j = 2
atompair_list = [('C2', 'C5'), ('C2', 'C4'), ('N3', 'C4'), ('N3', 'C5'), ('C4', 'C4'), ('C4', 'C5'), ('C4', 'N7'), ('C5', 'C5')]
radius_list = [0.08, 0.08, 0.15, 0.15, 0.08, 0.2, 0.08, 0.15]
color_list = [7] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_AA_pair2')
def vmd_show_a_tract_TT_pair(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 22
resid_j = 23
atompair_list = [('N1', 'C5'), ('C2', 'C5'), ('N3', 'C4'), ('N3', 'C5'), ('C2', 'C4'), ('C2', 'C6'), ('C4', 'C4')]
radius_list = [0.2, 0.2, 0.15, 0.15, 0.08, 0.08, 0.08]
color_list = [1] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_TT_pair')
def vmd_show_ATAT_AT_pair1(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 1
resid_j = 2
atompair_list = [('C4', 'C4'), ('C4', 'C5'), ('C5', 'C4'), ('C5', 'C5'), ('C6', 'C4')]
radius_list = [0.1, 0.1, 0.1, 0.1, 0.1]
color_list = [1, 1, 1, 1, 1]
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_AT_pair1')
def vmd_show_ATAT_AT_pair2(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 1
resid_j = 2
atompair_list = [('N1', 'N3'), ('C2', 'C2'), ('C2', 'N3'), ('N3', 'C2')]
radius_list = [0.1, 0.1, 0.1, 0.1]
color_list = [7] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_AT_pair2')
def vmd_show_g_tract_GG_pair(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 1
resid_j = 2
atompair_list = [('N1', 'C6'), ('C6', 'C6'), ('N3', 'C4'), ('N1', 'N1'), ('C2', 'C4'), ('C4', 'C5'), ('C4', 'N7')]
radius_list = [0.2, 0.2, 0.2, 0.08, 0.08, 0.08, 0.08]
color_list = [1] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_GG_pair')
def vmd_show_g_tract_CC_pair(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 22
resid_j = 23
atompair_list = [('N3', 'C4'), ('C2', 'C4'), ('N3', 'N4'), ('N3', 'N3')]
radius_list = [0.2, 0.15, 0.08, 0.05]
color_list = [1] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_CC_pair')
def vmd_show_GCGC_GC_pair(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 1
resid_j = 2
atompair_list = [('C4', 'N3'), ('C4', 'C4'), ('N1', 'N3'), ('C2', 'N3'), ('C2', 'C2'), ('C5', 'C4'), ('N3', 'C2')]
radius_list = [0.12, 0.12, 0.08, 0.08, 0.08, 0.08, 0.08]
color_list = [1] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_GC_pair')
def vmd_show_CTCT_CT_pair(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 1
resid_j = 2
atompair_list = [('N1', 'C5'), ('C2', 'C4'), ('C2', 'C5'), ('N3', 'C4')]
radius_list = [0.12, 0.12, 0.12, 0.12]
color_list = [1] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_CT_pair')
def vmd_show_CTCT_GA_pair1(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 22
resid_j = 23
atompair_list = [('C4', 'C5')]
radius_list = [0.15]
color_list = [1] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_GA_pair1')
def vmd_show_CTCT_GA_pair2(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 22
resid_j = 23
atompair_list = [('C6', 'C6'), ('N1', 'C6')]
radius_list = [0.16, 0.08]
color_list = [0] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_GA_pair2')
def vmd_show_TGTG_GT_pair(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 4
resid_j = 5
atompair_list = [('C4', 'C5'), ('C4', 'C4'), ('C2', 'C2'), ('C2', 'N3')]
radius_list = [0.15, 0.1, 0.1, 0.1]
color_list = [1] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_GT_pair')
def vmd_show_TGTG_AC_pair(self):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
resid_i = 22
resid_j = 23
atompair_list = [('C5', 'C4'), ('C4', 'C4'), ('N1', 'N3')]
radius_list = [0.1, 0.1, 0.1]
color_list = [1] * len(atompair_list)
self.vmd_open_perfect_gro()
lines = ['mol delrep 0 0']
lines += self.vmd_add_resid_cpk_color_by_name(resid_i)
lines += self.vmd_add_resid_cpk_color_by_name(resid_j)
for atompair, radius, color in zip(atompair_list, radius_list, color_list):
positions = self.get_pair_positions_by_resid_names(u, resid_i, resid_j, atompair[0], atompair[1])
temp_lines = [f'graphics 0 color {color}',
self.get_draw_edge_line(positions, 0, 1, radius)]
lines += temp_lines
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_AC_pair')
def get_pair_positions_by_resid_names(self, u, resid_i, resid_j, atomname_i, atomname_j):
positions = np.zeros((2,3))
positions[0,:] = u.select_atoms(f'resid {resid_i} and name {atomname_i}').positions[0,:]
positions[1,:] = u.select_atoms(f'resid {resid_j} and name {atomname_j}').positions[0,:]
return positions
def vmd_add_resid(self, resid):
lines = ['mol color ColorID 2',
'mol representation Licorice 0.100000 12.000000 12.000000',
f'mol selection resid {resid} and not hydrogen and not (name C1\' C2\' O4\' C3\' C4\' C5\' P O1P O2P O5\' O3\')',
'mol material Opaque',
'mol addrep 0']
return lines
def vmd_add_resid_cpk_color_by_name(self, resid):
lines = ['mol color Name',
'mol representation CPK 1.00000 0.300000 12.000000 12.000000',
f'mol selection resid {resid} and not hydrogen and not (name C1\' C2\' O4\' C3\' C4\' C5\' P O1P O2P O5\' O3\')',
'mol material Transparent',
'mol addrep 0']
return lines
def vmd_add_atomlist_vdw(self, atomlist, resid, colorid, cpkradius):
atomnames = ' '.join(atomlist)
lines = [f'mol color ColorID {colorid}',
f'mol representation CPK {cpkradius:.3f} 0.200000 12.000000 12.000000',
f'mol selection resid {resid} and name {atomnames}',
'mol material Opaque',
'mol addrep 0']
return lines
def vmd_open_perfect_gro(self):
print(f'vmd -gro {self.perferct_gro}')
def write_tcl_out(self, tcl_out, container):
f = open(tcl_out, 'w')
for line in container:
f.write(line)
f.write('\n')
f.close()
print(f'source {tcl_out}')
def print_tga_out(self, out_name):
print(path.join(self.mol_stru_folder, out_name))
class StackWholeMolecule(BaseStackImportanceAgent):
def __init__(self, host, rootfolder, pic_out_folder):
super().__init__(host, rootfolder, pic_out_folder)
def vmd_show_whole_stack(self, df_in, radius, eigv_id, strandid):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
zipobj = zip(df_in['Strand_i'].tolist(), df_in['Resid_i'].tolist(), df_in['Atomname_i'].tolist(), df_in['Strand_j'].tolist(), df_in['Resid_j'].tolist(), df_in['Atomname_j'].tolist())
self.vmd_open_perfect_gro()
lines = self.get_initial_lines()
lines += [f'graphics 0 color 1'] # red color
for strand_i, resid_i, atomname_i, strand_j, resid_j, atomname_j in zipobj:
if (strand_i == 'STRAND2') and (strand_j == 'STRAND2'):
gro_resid_i = resid_i + 21
gro_resid_j = resid_j + 21
else:
gro_resid_i = resid_i
gro_resid_j = resid_j
positions = self.get_pair_positions_by_resid_names(u, gro_resid_i, gro_resid_j, atomname_i, atomname_j)
lines += [self.get_draw_edge_line(positions, 0, 1, radius)]
tcl_out = path.join(self.tcl_folder, 'show_basestack_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_stack_{strandid}_{eigv_id}')
def get_initial_lines(self):
return ['mol delrep 0 0',
'mol color ColorID 2',
'mol representation Licorice 0.200000 12.000000 12.000000',
'mol selection all',
'mol material Transparent',
'mol addrep 0']
def get_draw_edge_line(self, positions, atomid1, atomid2, radius):
str_0 = 'graphics 0 cylinder {'
str_1 = f'{positions[atomid1,0]:.3f} {positions[atomid1,1]:.3f} {positions[atomid1,2]:.3f}'
str_2 = '} {'
str_3 = f'{positions[atomid2,0]:.3f} {positions[atomid2,1]:.3f} {positions[atomid2,2]:.3f}'
str_4 = '} '
str_5 = f'radius {radius:.2f}\n'
return str_0 + str_1 + str_2 + str_3 + str_4 + str_5
class BackboneWholeMolecule(StackWholeMolecule):
def vmd_show_whole_backbone(self, df_in, radius, eigv_id, strandid):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
zipobj = zip(df_in['Strand_i'].tolist(), df_in['Resid_i'].tolist(), df_in['Atomname_i'].tolist(), df_in['Strand_j'].tolist(), df_in['Resid_j'].tolist(), df_in['Atomname_j'].tolist())
self.vmd_open_perfect_gro()
lines = self.get_initial_lines()
lines += [f'graphics 0 color 1'] # red color
for strand_i, resid_i, atomname_i, strand_j, resid_j, atomname_j in zipobj:
if (strand_i == 'STRAND2') and (strand_j == 'STRAND2'):
gro_resid_i = resid_i + 21
gro_resid_j = resid_j + 21
else:
gro_resid_i = resid_i
gro_resid_j = resid_j
positions = self.get_pair_positions_by_resid_names(u, gro_resid_i, gro_resid_j, atomname_i, atomname_j)
lines += [self.get_draw_edge_line(positions, 0, 1, radius)]
tcl_out = path.join(self.tcl_folder, 'show_backbone_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_backbone_{strandid}_{eigv_id}')
class HBWholeMolecule(StackWholeMolecule):
def vmd_show_whole_HB(self, df_in, radius, eigv_id):
u = MDAnalysis.Universe(self.perferct_gro, self.perferct_gro)
zipobj = zip(df_in['Strand_i'].tolist(), df_in['Resid_i'].tolist(), df_in['Atomname_i'].tolist(), df_in['Strand_j'].tolist(), df_in['Resid_j'].tolist(), df_in['Atomname_j'].tolist())
self.vmd_open_perfect_gro()
lines = self.get_initial_lines()
lines += [f'graphics 0 color 1'] # red color
for strand_i, resid_i, atomname_i, strand_j, resid_j, atomname_j in zipobj:
if strand_i == 'STRAND2':
gro_resid_i = resid_i + 21
else:
gro_resid_i = resid_i
if strand_j == 'STRAND2':
gro_resid_j = resid_j + 21
else:
gro_resid_j = resid_j
positions = self.get_pair_positions_by_resid_names(u, gro_resid_i, gro_resid_j, atomname_i, atomname_j)
lines += [self.get_draw_edge_line(positions, 0, 1, radius)]
tcl_out = path.join(self.tcl_folder, 'show_backbone_pair.tcl')
self.write_tcl_out(tcl_out, lines)
self.print_tga_out(f'{self.host}_hb_{eigv_id}')
def get_initial_lines(self):
return ['mol delrep 0 0',
'mol color ColorID 2',
'mol representation Licorice 0.100000 12.000000 12.000000',
'mol selection all',
'mol material Transparent',
'mol addrep 0']
| [
"enmspring.graphs.Stack",
"numpy.zeros",
"MDAnalysis.Universe",
"os.path.join",
"enmspring.miscell.check_dir_exist_and_make"
] | [((471, 512), 'os.path.join', 'path.join', (['enmspring_folder', '"""tclscripts"""'], {}), "(enmspring_folder, 'tclscripts')\n", (480, 512), False, 'from os import path\n'), ((589, 636), 'os.path.join', 'path.join', (['self.pic_out_folder', '"""mol_structure"""'], {}), "(self.pic_out_folder, 'mol_structure')\n", (598, 636), False, 'from os import path\n'), ((668, 730), 'os.path.join', 'path.join', (['all_folder', 'host', 'self.type_na', '"""input"""', '"""allatoms"""'], {}), "(all_folder, host, self.type_na, 'input', 'allatoms')\n", (677, 730), False, 'from os import path\n'), ((759, 820), 'os.path.join', 'path.join', (['self.allatom_folder', 'f"""{self.type_na}.perfect.gro"""'], {}), "(self.allatom_folder, f'{self.type_na}.perfect.gro')\n", (768, 820), False, 'from os import path\n'), ((1089, 1122), 'enmspring.graphs.Stack', 'Stack', (['self.host', 'self.rootfolder'], {}), '(self.host, self.rootfolder)\n', (1094, 1122), False, 'from enmspring.graphs import Stack\n'), ((1524, 1583), 'os.path.join', 'path.join', (['self.tcl_folder', '"""illustrate_pairimportance.tcl"""'], {}), "(self.tcl_folder, 'illustrate_pairimportance.tcl')\n", (1533, 1583), False, 'from os import path\n'), ((1729, 1794), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.g_agent.npt4_crd', 'self.g_agent.npt4_crd'], {}), '(self.g_agent.npt4_crd, self.g_agent.npt4_crd)\n', (1748, 1794), False, 'import MDAnalysis\n'), ((2988, 3044), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (2997, 3044), False, 'from os import path\n'), ((3676, 3732), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (3685, 3732), False, 'from os import path\n'), ((4372, 4428), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (4381, 
4428), False, 'from os import path\n'), ((5066, 5122), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (5075, 5122), False, 'from os import path\n'), ((5761, 5817), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (5770, 5817), False, 'from os import path\n'), ((6443, 6499), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (6452, 6499), False, 'from os import path\n'), ((7135, 7191), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (7144, 7191), False, 'from os import path\n'), ((7803, 7859), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (7812, 7859), False, 'from os import path\n'), ((8461, 8517), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (8470, 8517), False, 'from os import path\n'), ((9135, 9191), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (9144, 9191), False, 'from os import path\n'), ((9822, 9878), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (9831, 9878), False, 'from os import path\n'), ((10515, 10571), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (10524, 10571), False, 'from os import path\n'), ((11199, 11255), 'os.path.join', 'path.join', (['self.tcl_folder', 
'"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (11208, 11255), False, 'from os import path\n'), ((11891, 11947), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (11900, 11947), False, 'from os import path\n'), ((12550, 12606), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (12559, 12606), False, 'from os import path\n'), ((13227, 13283), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_single_nucleotide.tcl"""'], {}), "(self.tcl_folder, 'show_single_nucleotide.tcl')\n", (13236, 13283), False, 'from os import path\n'), ((13433, 13490), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (13452, 13490), False, 'import MDAnalysis\n'), ((14264, 14317), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (14273, 14317), False, 'from os import path\n'), ((14467, 14524), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (14486, 14524), False, 'import MDAnalysis\n'), ((15390, 15443), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (15399, 15443), False, 'from os import path\n'), ((15592, 15649), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (15611, 15649), False, 'import MDAnalysis\n'), ((16496, 16549), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (16505, 16549), False, 'from os import 
path\n'), ((16695, 16752), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (16714, 16752), False, 'import MDAnalysis\n'), ((17545, 17598), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (17554, 17598), False, 'from os import path\n'), ((17745, 17802), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (17764, 17802), False, 'import MDAnalysis\n'), ((18585, 18638), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (18594, 18638), False, 'from os import path\n'), ((18787, 18844), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (18806, 18844), False, 'import MDAnalysis\n'), ((19688, 19741), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (19697, 19741), False, 'from os import path\n'), ((19889, 19946), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (19908, 19946), False, 'import MDAnalysis\n'), ((20734, 20787), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (20743, 20787), False, 'from os import path\n'), ((20932, 20989), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (20951, 20989), False, 'import MDAnalysis\n'), ((21836, 21889), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 
'show_basestack_pair.tcl')\n", (21845, 21889), False, 'from os import path\n'), ((22034, 22091), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (22053, 22091), False, 'import MDAnalysis\n'), ((22878, 22931), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (22887, 22931), False, 'from os import path\n'), ((23077, 23134), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (23096, 23134), False, 'import MDAnalysis\n'), ((23863, 23916), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (23872, 23916), False, 'from os import path\n'), ((24063, 24120), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (24082, 24120), False, 'import MDAnalysis\n'), ((24869, 24922), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (24878, 24922), False, 'from os import path\n'), ((25068, 25125), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (25087, 25125), False, 'import MDAnalysis\n'), ((25909, 25962), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (25918, 25962), False, 'from os import path\n'), ((26107, 26164), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (26126, 26164), False, 'import MDAnalysis\n'), ((26930, 26983), 'os.path.join', 'path.join', (['self.tcl_folder', 
'"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (26939, 26983), False, 'from os import path\n'), ((27193, 27209), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (27201, 27209), True, 'import numpy as np\n'), ((29276, 29333), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (29295, 29333), False, 'import MDAnalysis\n'), ((30193, 30246), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_basestack_pair.tcl"""'], {}), "(self.tcl_folder, 'show_basestack_pair.tcl')\n", (30202, 30246), False, 'from os import path\n'), ((31248, 31305), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (31267, 31305), False, 'import MDAnalysis\n'), ((32165, 32217), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_backbone_pair.tcl"""'], {}), "(self.tcl_folder, 'show_backbone_pair.tcl')\n", (32174, 32217), False, 'from os import path\n'), ((32447, 32504), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['self.perferct_gro', 'self.perferct_gro'], {}), '(self.perferct_gro, self.perferct_gro)\n', (32466, 32504), False, 'import MDAnalysis\n'), ((33390, 33442), 'os.path.join', 'path.join', (['self.tcl_folder', '"""show_backbone_pair.tcl"""'], {}), "(self.tcl_folder, 'show_backbone_pair.tcl')\n", (33399, 33442), False, 'from os import path\n'), ((995, 1027), 'enmspring.miscell.check_dir_exist_and_make', 'check_dir_exist_and_make', (['folder'], {}), '(folder)\n', (1019, 1027), False, 'from enmspring.miscell import check_dir_exist_and_make\n'), ((28979, 29020), 'os.path.join', 'path.join', (['self.mol_stru_folder', 'out_name'], {}), '(self.mol_stru_folder, out_name)\n', (28988, 29020), False, 'from os import path\n')] |
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
num_iterations = 3
raws_reward = np.zeros((1000, num_iterations))
pretrained_rewards = np.zeros_like(raws_reward)
for array_index, array in enumerate([raws_reward, pretrained_rewards]):
for i in range(num_iterations):
if array_index == 0:
file_name = f"run-raw_{i+1}.csv"
else:
file_name = f"run-pretrained_{i + 1}.csv"
csv = pd.read_csv(file_name)
values = csv["Value"]
values = savgol_filter(values, 51, 3)
array[:, i] = values
sns.tsplot(raws_reward.transpose(), color="red")
sns.tsplot(pretrained_rewards.transpose())
plt.legend(["Baseline", "Pretrained"])
plt.ylabel("Episode Reward")
plt.title("Comparison on Pendulum-v0")
plt.grid()
plt.show()
| [
"matplotlib.pyplot.title",
"scipy.signal.savgol_filter",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid"
] | [((166, 198), 'numpy.zeros', 'np.zeros', (['(1000, num_iterations)'], {}), '((1000, num_iterations))\n', (174, 198), True, 'import numpy as np\n'), ((220, 246), 'numpy.zeros_like', 'np.zeros_like', (['raws_reward'], {}), '(raws_reward)\n', (233, 246), True, 'import numpy as np\n'), ((734, 772), 'matplotlib.pyplot.legend', 'plt.legend', (["['Baseline', 'Pretrained']"], {}), "(['Baseline', 'Pretrained'])\n", (744, 772), True, 'import matplotlib.pyplot as plt\n'), ((773, 801), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (783, 801), True, 'import matplotlib.pyplot as plt\n'), ((802, 840), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparison on Pendulum-v0"""'], {}), "('Comparison on Pendulum-v0')\n", (811, 840), True, 'import matplotlib.pyplot as plt\n'), ((841, 851), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (849, 851), True, 'import matplotlib.pyplot as plt\n'), ((852, 862), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (860, 862), True, 'import matplotlib.pyplot as plt\n'), ((512, 534), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {}), '(file_name)\n', (523, 534), True, 'import pandas as pd\n'), ((582, 610), 'scipy.signal.savgol_filter', 'savgol_filter', (['values', '(51)', '(3)'], {}), '(values, 51, 3)\n', (595, 610), False, 'from scipy.signal import savgol_filter\n')] |
"""
A simple Psi 4 input script to compute CISD energy from a SCF reference
Requirements:
SciPy 0.13.0+, NumPy 1.7.2+
References:
Equations from [Szabo:1996]
"""
__authors__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__copyright__ = "(c) 2014-2017, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2017-05-26"
import time
import numpy as np
np.set_printoptions(precision=5, linewidth=200, suppress=True)
import psi4
# Check energy against psi4?
compare_psi4 = True
# Memory for Psi4 in GB
# psi4.core.set_memory(int(2e9), False)
psi4.core.set_output_file('output.dat', False)
# Memory for numpy in GB
numpy_memory = 2
mol = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
psi4.set_options({'basis': 'sto-3g',
'scf_type': 'pk',
'e_convergence': 1e-8,
'd_convergence': 1e-8})
print('\nStarting SCF and integral build...')
t = time.time()
# First compute SCF energy using Psi4
scf_e, wfn = psi4.energy('SCF', return_wfn=True)
# Grab data from wavfunction class
C = wfn.Ca()
ndocc = wfn.doccpi()[0]
nmo = wfn.nmo()
nvirt = nmo - ndocc
# Compute size of Hamiltonian in GB
from scipy.special import comb
nDet_S = ndocc * nvirt * 2
nDet_D = 2 * comb(ndocc, 2) * comb(nvirt, 2) + ndocc **2 * nvirt **2
nDet = 1 + nDet_S + nDet_D
H_Size = nDet**2 * 8e-9
print('\nSize of the Hamiltonian Matrix will be %4.2f GB.' % H_Size)
if H_Size > numpy_memory:
clean()
raise Exception("Estimated memory utilization (%4.2f GB) exceeds numpy_memory \
limit of %4.2f GB." % (H_Size, numpy_memory))
# Integral generation from Psi4's MintsHelper
t = time.time()
mints = psi4.core.MintsHelper(wfn.basisset())
H = np.asarray(mints.ao_kinetic()) + np.asarray(mints.ao_potential())
print('\nTotal time taken for ERI integrals: %.3f seconds.\n' % (time.time() - t))
#Make spin-orbital MO
print('Starting AO -> spin-orbital MO transformation...')
t = time.time()
MO = np.asarray(mints.mo_spin_eri(C, C))
# Update H, transform to MO basis and tile for alpha/beta spin
H = np.einsum('uj,vi,uv', C, C, H)
H = np.repeat(H, 2, axis=0)
H = np.repeat(H, 2, axis=1)
# Make H block diagonal
spin_ind = np.arange(H.shape[0], dtype=np.int) % 2
H *= (spin_ind.reshape(-1, 1) == spin_ind)
print('..finished transformation in %.3f seconds.\n' % (time.time() - t))
from helper_CI import Determinant, HamiltonianGenerator
from itertools import combinations
print('Generating %d CISD Determinants...' % (nDet))
t = time.time()
occList = [i for i in range(ndocc)]
det_ref = Determinant(alphaObtList=occList, betaObtList=occList)
detList = det_ref.generateSingleAndDoubleExcitationsOfDet(nmo)
detList.append(det_ref)
print('..finished generating determinants in %.3f seconds.\n' % (time.time() - t))
print('Generating Hamiltonian Matrix...')
t = time.time()
Hamiltonian_generator = HamiltonianGenerator(H, MO)
Hamiltonian_matrix = Hamiltonian_generator.generateMatrix(detList)
print('..finished generating Matrix in %.3f seconds.\n' % (time.time() - t))
print('Diagonalizing Hamiltonian Matrix...')
t = time.time()
e_cisd, wavefunctions = np.linalg.eigh(Hamiltonian_matrix)
print('..finished diagonalization in %.3f seconds.\n' % (time.time() - t))
cisd_mol_e = e_cisd[0] + mol.nuclear_repulsion_energy()
print('# Determinants: % 16d' % (len(detList)))
print('SCF energy: % 16.10f' % (scf_e))
print('CISD correlation: % 16.10f' % (cisd_mol_e - scf_e))
print('Total CISD energy: % 16.10f' % (cisd_mol_e))
if compare_psi4:
psi4.driver.p4util.compare_values(psi4.energy('DETCI'), cisd_mol_e, 6, 'CISD Energy')
| [
"numpy.set_printoptions",
"psi4.set_options",
"helper_CI.HamiltonianGenerator",
"scipy.special.comb",
"numpy.einsum",
"psi4.energy",
"time.time",
"helper_CI.Determinant",
"numpy.linalg.eigh",
"numpy.arange",
"psi4.core.set_output_file",
"psi4.geometry",
"numpy.repeat"
] | [((389, 451), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(5)', 'linewidth': '(200)', 'suppress': '(True)'}), '(precision=5, linewidth=200, suppress=True)\n', (408, 451), True, 'import numpy as np\n'), ((579, 625), 'psi4.core.set_output_file', 'psi4.core.set_output_file', (['"""output.dat"""', '(False)'], {}), "('output.dat', False)\n", (604, 625), False, 'import psi4\n'), ((676, 734), 'psi4.geometry', 'psi4.geometry', (['"""\nO\nH 1 1.1\nH 1 1.1 2 104\nsymmetry c1\n"""'], {}), '("""\nO\nH 1 1.1\nH 1 1.1 2 104\nsymmetry c1\n""")\n', (689, 734), False, 'import psi4\n'), ((737, 845), 'psi4.set_options', 'psi4.set_options', (["{'basis': 'sto-3g', 'scf_type': 'pk', 'e_convergence': 1e-08,\n 'd_convergence': 1e-08}"], {}), "({'basis': 'sto-3g', 'scf_type': 'pk', 'e_convergence': \n 1e-08, 'd_convergence': 1e-08})\n", (753, 845), False, 'import psi4\n'), ((944, 955), 'time.time', 'time.time', ([], {}), '()\n', (953, 955), False, 'import time\n'), ((1008, 1043), 'psi4.energy', 'psi4.energy', (['"""SCF"""'], {'return_wfn': '(True)'}), "('SCF', return_wfn=True)\n", (1019, 1043), False, 'import psi4\n'), ((1677, 1688), 'time.time', 'time.time', ([], {}), '()\n', (1686, 1688), False, 'import time\n'), ((1974, 1985), 'time.time', 'time.time', ([], {}), '()\n', (1983, 1985), False, 'import time\n'), ((2095, 2125), 'numpy.einsum', 'np.einsum', (['"""uj,vi,uv"""', 'C', 'C', 'H'], {}), "('uj,vi,uv', C, C, H)\n", (2104, 2125), True, 'import numpy as np\n'), ((2130, 2153), 'numpy.repeat', 'np.repeat', (['H', '(2)'], {'axis': '(0)'}), '(H, 2, axis=0)\n', (2139, 2153), True, 'import numpy as np\n'), ((2158, 2181), 'numpy.repeat', 'np.repeat', (['H', '(2)'], {'axis': '(1)'}), '(H, 2, axis=1)\n', (2167, 2181), True, 'import numpy as np\n'), ((2526, 2537), 'time.time', 'time.time', ([], {}), '()\n', (2535, 2537), False, 'import time\n'), ((2585, 2639), 'helper_CI.Determinant', 'Determinant', ([], {'alphaObtList': 'occList', 'betaObtList': 'occList'}), 
'(alphaObtList=occList, betaObtList=occList)\n', (2596, 2639), False, 'from helper_CI import Determinant, HamiltonianGenerator\n'), ((2859, 2870), 'time.time', 'time.time', ([], {}), '()\n', (2868, 2870), False, 'import time\n'), ((2895, 2922), 'helper_CI.HamiltonianGenerator', 'HamiltonianGenerator', (['H', 'MO'], {}), '(H, MO)\n', (2915, 2922), False, 'from helper_CI import Determinant, HamiltonianGenerator\n'), ((3119, 3130), 'time.time', 'time.time', ([], {}), '()\n', (3128, 3130), False, 'import time\n'), ((3156, 3190), 'numpy.linalg.eigh', 'np.linalg.eigh', (['Hamiltonian_matrix'], {}), '(Hamiltonian_matrix)\n', (3170, 3190), True, 'import numpy as np\n'), ((2218, 2253), 'numpy.arange', 'np.arange', (['H.shape[0]'], {'dtype': 'np.int'}), '(H.shape[0], dtype=np.int)\n', (2227, 2253), True, 'import numpy as np\n'), ((1279, 1293), 'scipy.special.comb', 'comb', (['nvirt', '(2)'], {}), '(nvirt, 2)\n', (1283, 1293), False, 'from scipy.special import comb\n'), ((3597, 3617), 'psi4.energy', 'psi4.energy', (['"""DETCI"""'], {}), "('DETCI')\n", (3608, 3617), False, 'import psi4\n'), ((1262, 1276), 'scipy.special.comb', 'comb', (['ndocc', '(2)'], {}), '(ndocc, 2)\n', (1266, 1276), False, 'from scipy.special import comb\n'), ((1871, 1882), 'time.time', 'time.time', ([], {}), '()\n', (1880, 1882), False, 'import time\n'), ((2358, 2369), 'time.time', 'time.time', ([], {}), '()\n', (2367, 2369), False, 'import time\n'), ((2793, 2804), 'time.time', 'time.time', ([], {}), '()\n', (2802, 2804), False, 'import time\n'), ((3050, 3061), 'time.time', 'time.time', ([], {}), '()\n', (3059, 3061), False, 'import time\n'), ((3248, 3259), 'time.time', 'time.time', ([], {}), '()\n', (3257, 3259), False, 'import time\n')] |
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# Licensed under CC BY-NC-SA 4.0 (Attribution-NonCommercial-ShareAlike 4.0 International) (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
#
# The code is released for academic research use only. For commercial use, please contact Huawei Technologies Co., Ltd.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains content licensed by https://github.com/xinntao/BasicSR/blob/master/LICENSE/LICENSE
import glob
import sys
from collections import OrderedDict
from natsort import natsort
import options.options as option
from Measure import Measure, psnr
from imresize import imresize
from models import create_model
import torch
from utils.util import opt_get
import numpy as np
import pandas as pd
import os
import cv2
from utils import util
def fiFindByWildcard(wildcard):
    """Return all paths matching *wildcard* (recursive), in natural sort order."""
    matches = glob.glob(wildcard, recursive=True)
    return natsort.natsorted(matches)
def load_model(conf_path):
    """Build an SRFlow model from a YAML config and load its checkpoint.

    The checkpoint path is taken from the config's ``model_path`` entry.
    Returns the ``(model, opt)`` pair.
    """
    opt = option.parse(conf_path, is_train=False)
    opt['gpu_ids'] = None
    opt = option.dict_to_nonedict(opt)

    model = create_model(opt)
    # BUGFIX: a hard-coded debugging checkpoint path used to overwrite the
    # value read from the config right after this lookup, so `model_path`
    # from the YAML was silently ignored. Honor the config value instead.
    model_path = opt_get(opt, ['model_path'], None)
    model.load_sj(load_path=model_path)  # network=model.netG)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        network = torch.nn.DataParallel(model)
        network.to(device)
    return model, opt
def predict(model, lr):
    """Feed a low-resolution image through *model* and return the SR result."""
    model.feed_data({"LQ": t(lr)}, need_GT=False)
    model.test()
    outputs = model.get_current_visuals(need_GT=False)
    # Prefer the 'rlt' entry when present, otherwise fall back to 'SR'.
    if 'rlt' in outputs:
        return outputs['rlt']
    return outputs.get("SR")
def t(array):
    """Convert an HWC image array to a normalized 1xCxHxW float tensor in [0, 1]."""
    chw = array.transpose([2, 0, 1]).astype(np.float32)
    return torch.Tensor(chw[None]) / 255
def rgb(t):
    """Clamp a CHW (or NCHW) tensor to [0, 1] and return an HWC uint8 image."""
    img = t[0] if len(t.shape) == 4 else t
    arr = img.detach().cpu().numpy().transpose([1, 2, 0])
    return (np.clip(arr, 0, 1) * 255).astype(np.uint8)
def imread(path):
    """Read the image at *path* and return it in RGB channel order."""
    bgr = cv2.imread(path)
    # OpenCV loads BGR; flip the channel axis to RGB.
    return bgr[:, :, [2, 1, 0]]
def imwrite(path, img):
    """Write an RGB image to *path*, creating parent directories as needed."""
    parent = os.path.dirname(path)
    os.makedirs(parent, exist_ok=True)
    # OpenCV expects BGR; flip the channel axis before writing.
    cv2.imwrite(path, img[:, :, [2, 1, 0]])
def imCropCenter(img, size):
    """Crop a centered ``size`` x ``size`` patch, clipped to the image bounds."""
    h, w, _ = img.shape
    top = max(h // 2 - size // 2, 0)
    left = max(w // 2 - size // 2, 0)
    bottom = min(top + size, h)
    right = min(left + size, w)
    return img[top:bottom, left:right]
def impad(img, top=0, bottom=0, left=0, right=0, color=255):
    """Reflect-pad an HWC image by the given amounts per edge.

    NOTE(review): ``color`` is accepted but unused — padding is always
    'reflect', never a constant fill.
    """
    pad_widths = [(top, bottom), (left, right), (0, 0)]
    return np.pad(img, pad_widths, 'reflect')
def main():
    """Evaluate an SRFlow model on a CelebA test folder.

    Loads LR/HR image pairs, runs super-resolution at the configured
    sampling temperature, saves each SR image, and accumulates
    PSNR/SSIM/LPIPS measurements into a CSV that is written
    incrementally so partial runs can be resumed.
    """
    import torchvision
    from torchvision import transforms
    # conf_path = sys.argv[1]
    # conf = conf_path.split('/')[-1].replace('.yml', '')
    # model, opt = load_model(conf_path)
    from models.SRFlow_model import SRFlowModel
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to option YMAL file.')
    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = option.parse(args.opt, is_train=True)
    opt = option.dict_to_nonedict(opt)
    # conf_path = sys.argv[1]
    # NOTE(review): conf_path is hard-coded here and only used to derive the
    # results-folder name; the model options come from --opt above.
    conf_path = 'SRFlow/code/confs/SRFlow_CelebA_4X_seungjae_load_for_test.yml'
    conf = conf_path.split('/')[-1].replace('.yml', '')
    model = SRFlowModel(opt=opt, step=0)
    def sample_data(path, batch_size, image_size):
        # Infinite generator over an ImageFolder, restarting the loader
        # whenever the underlying dataset is exhausted.
        transform = transforms.Compose(
            [
                transforms.Resize(image_size),
                # transforms.CenterCrop(image_size),
                # transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
            ]
        )
        dataset = torchvision.datasets.ImageFolder(path, transform=transform)
        # print(dataset)
        # dataset = datasets.CelebA(root='./dataset', split='train', transform=transform, download=True)
        loader = torch.utils.data.DataLoader(dataset, shuffle=False, batch_size=batch_size)
        loader = iter(loader)
        while True:
            try:
                yield next(loader)
            except StopIteration:
                loader = torch.utils.data.DataLoader(
                    dataset, shuffle=False, batch_size=batch_size, num_workers=4
                )
                loader = iter(loader)
                yield next(loader)
    # Same folder, two resolutions: LR at image_size // 4 and HR at full size.
    dataset_lr = iter(sample_data('/mnt/HDD3_coursework/srdualglow/celeba_small_test', 1, 128 // 4))
    dataset_hr = iter(sample_data('/mnt/HDD3_coursework/srdualglow/celeba_small_test', 1, 128))
    dataset = torchvision.datasets.ImageFolder('/mnt/HDD3_coursework/srdualglow/celeba_small_test', transform=None)
    leng = len(dataset)
    this_dir = os.path.dirname(os.path.realpath(__file__))
    test_dir = os.path.join(this_dir, 'results', conf)
    os.makedirs(test_dir, exist_ok=True)
    print(f"Out dir: {test_dir}")
    measure = Measure(use_gpu=False)
    # Measurements CSV: written to a temp name first, renamed when complete.
    fname = f'measure_full.csv'
    fname_tmp = fname + "_"
    path_out_measures = os.path.join(test_dir, fname_tmp)
    path_out_measures_final = os.path.join(test_dir, fname)
    # Resume from an existing (final or partial) measurements file if present.
    if os.path.isfile(path_out_measures_final):
        df = pd.read_csv(path_out_measures_final)
    elif os.path.isfile(path_out_measures):
        df = pd.read_csv(path_out_measures)
    else:
        df = None
    scale = opt['scale']
    pad_factor = 2
    with torch.no_grad():
        for idx_test in range(leng):
            lr, _ = next(dataset_lr)
            print(lr.size())
            # lr = lr.cpu()
            hr, _ = next(dataset_hr)
            print(hr.size())
            # hr = hr.cpu()
            # print(lr.size(), hr.size())
            # _, _, h, w = hr.size()
            # to_pil = transforms.ToPILImage()
            # resize = transforms.Resize((128, 128))
            # resize_32 = transforms.Resize((32, 32))
            # to_ten = transforms.ToTensor()
            # lr = to_ten(resize_32(to_pil(lr[0]))).unsqueeze(0)
            # hr = to_ten(resize(to_pil(hr[0]))).unsqueeze(0)
            # print(lr.size())
            heat = opt['heat'] # 0.9 default
            # Skip images already measured in a previous (resumed) run.
            if df is not None and len(df[(df['heat'] == heat) & (df['name'] == idx_test)]) == 1:
                continue
            model.feed_data({'LQ' : lr, 'GT' : hr})
            model.test()
            visuals = model.get_current_visuals()
            '''
            for heat in model.heats:
                for i in range(model.n_sample):
                    sr_img = util.tensor2img(visuals['SR', heat, i]) # uint8 int(heat * 100), i))
                    util.save_img(sr_img, f"{test_dir}/sr_{str(idx_test).zfill(6)}_{int(heat * 100)}_{i}.png")
            '''
            # resize_rev = transforms.Resize((h, w))
            # Take the first sample at the configured temperature.
            visuals_tmp = visuals['SR', heat, 0]
            # visuals_tmp = to_ten(resize_rev(to_pil(visuals['SR', heat, 0]))).unsqueeze(0)
            sr_img = util.tensor2img(visuals_tmp)
            # sr_img = to_ten(resize_rev(to_pil(sr_img[0]))).unsqueeze(0)
            # sr_t = model.get_sr(lq=lr, heat=heat)
            path_out_sr = os.path.join(test_dir, "{:0.2f}_{:06d}.png".format(heat, idx_test))
            # "{:0.2f}".format(heat).replace('.', ''),
            # print(path_out_sr)
            # Progress report roughly every 2% of the dataset.
            if idx_test % (leng // 50) == 0:
                print(f'{idx_test} / {leng}')
            util.save_img(sr_img, path_out_sr)
            hr_img = util.tensor2img(hr)
            meas = OrderedDict(conf=conf, heat=heat, name=idx_test)
            meas['PSNR'], meas['SSIM'], meas['LPIPS'] = measure.measure(sr_img, hr_img)
            # lr_reconstruct_rgb = imresize(sr, 1 / opt['scale'])
            # meas['LRC PSNR'] = psnr(lr, lr_reconstruct_rgb)
            str_out = format_measurements(meas)
            print(str_out)
            # Append the new row and checkpoint the CSV atomically
            # (write to a temp name, then rename over the partial file).
            df = pd.DataFrame([meas]) if df is None else pd.concat([pd.DataFrame([meas]), df])
            df.to_csv(path_out_measures + "_", index=False)
            os.rename(path_out_measures + "_", path_out_measures)
    # Finalize: promote the partial CSV to its final name and print the means.
    df.to_csv(path_out_measures, index=False)
    os.rename(path_out_measures, path_out_measures_final)
    str_out = format_measurements(df.mean())
    print(f"Results in: {path_out_measures_final}")
    print('Mean: ' + str_out)
def format_measurements(meas):
    """Render a measurement mapping as comma-separated ``"key: value"`` pairs.

    Float values are formatted with two decimal places; everything else is
    rendered as-is.
    """
    parts = []
    for key, value in meas.items():
        rendered = f"{value:0.2f}" if isinstance(value, float) else value
        parts.append(f"{key}: {rendered}")
    return ", ".join(parts)
if __name__ == "__main__":
main()
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"models.create_model",
"torch.cuda.device_count",
"os.path.isfile",
"glob.glob",
"models.SRFlow_model.SRFlowModel",
"torch.no_grad",
"os.path.join",
"numpy.pad",
"pandas.DataFrame",
"torch.utils.data.DataLoader",
"cv2.imwrite",
"utils.util.opt_... | [((1366, 1405), 'options.options.parse', 'option.parse', (['conf_path'], {'is_train': '(False)'}), '(conf_path, is_train=False)\n', (1378, 1405), True, 'import options.options as option\n'), ((1442, 1470), 'options.options.dict_to_nonedict', 'option.dict_to_nonedict', (['opt'], {}), '(opt)\n', (1465, 1470), True, 'import options.options as option\n'), ((1483, 1500), 'models.create_model', 'create_model', (['opt'], {}), '(opt)\n', (1495, 1500), False, 'from models import create_model\n'), ((1519, 1553), 'utils.util.opt_get', 'opt_get', (['opt', "['model_path']", 'None'], {}), "(opt, ['model_path'], None)\n", (1526, 1553), False, 'from utils.util import opt_get\n'), ((2627, 2666), 'cv2.imwrite', 'cv2.imwrite', (['path', 'img[:, :, [2, 1, 0]]'], {}), '(path, img[:, :, [2, 1, 0]])\n', (2638, 2666), False, 'import cv2\n'), ((2996, 3058), 'numpy.pad', 'np.pad', (['img', '[(top, bottom), (left, right), (0, 0)]', '"""reflect"""'], {}), "(img, [(top, bottom), (left, right), (0, 0)], 'reflect')\n", (3002, 3058), True, 'import numpy as np\n'), ((3347, 3372), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3370, 3372), False, 'import argparse\n'), ((3679, 3716), 'options.options.parse', 'option.parse', (['args.opt'], {'is_train': '(True)'}), '(args.opt, is_train=True)\n', (3691, 3716), True, 'import options.options as option\n'), ((3727, 3755), 'options.options.dict_to_nonedict', 'option.dict_to_nonedict', (['opt'], {}), '(opt)\n', (3750, 3755), True, 'import options.options as option\n'), ((3936, 3964), 'models.SRFlow_model.SRFlowModel', 'SRFlowModel', ([], {'opt': 'opt', 'step': '(0)'}), '(opt=opt, step=0)\n', (3947, 3964), False, 'from models.SRFlow_model import SRFlowModel\n'), ((5165, 5271), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', (['"""/mnt/HDD3_coursework/srdualglow/celeba_small_test"""'], {'transform': 'None'}), "(\n '/mnt/HDD3_coursework/srdualglow/celeba_small_test', transform=None)\n", 
(5197, 5271), False, 'import torchvision\n'), ((5366, 5405), 'os.path.join', 'os.path.join', (['this_dir', '"""results"""', 'conf'], {}), "(this_dir, 'results', conf)\n", (5378, 5405), False, 'import os\n'), ((5410, 5446), 'os.makedirs', 'os.makedirs', (['test_dir'], {'exist_ok': '(True)'}), '(test_dir, exist_ok=True)\n', (5421, 5446), False, 'import os\n'), ((5496, 5518), 'Measure.Measure', 'Measure', ([], {'use_gpu': '(False)'}), '(use_gpu=False)\n', (5503, 5518), False, 'from Measure import Measure, psnr\n'), ((5604, 5637), 'os.path.join', 'os.path.join', (['test_dir', 'fname_tmp'], {}), '(test_dir, fname_tmp)\n', (5616, 5637), False, 'import os\n'), ((5668, 5697), 'os.path.join', 'os.path.join', (['test_dir', 'fname'], {}), '(test_dir, fname)\n', (5680, 5697), False, 'import os\n'), ((5706, 5745), 'os.path.isfile', 'os.path.isfile', (['path_out_measures_final'], {}), '(path_out_measures_final)\n', (5720, 5745), False, 'import os\n'), ((1290, 1325), 'glob.glob', 'glob.glob', (['wildcard'], {'recursive': '(True)'}), '(wildcard, recursive=True)\n', (1299, 1325), False, 'import glob\n'), ((1827, 1852), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1850, 1852), False, 'import torch\n'), ((1939, 1967), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (1960, 1967), False, 'import torch\n'), ((2509, 2525), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (2519, 2525), False, 'import cv2\n'), ((2585, 2606), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2600, 2606), False, 'import os\n'), ((4306, 4365), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', (['path'], {'transform': 'transform'}), '(path, transform=transform)\n', (4338, 4365), False, 'import torchvision\n'), ((4513, 4587), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(False)', 'batch_size': 'batch_size'}), '(dataset, shuffle=False, batch_size=batch_size)\n', 
(4540, 4587), False, 'import torch\n'), ((5323, 5349), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5339, 5349), False, 'import os\n'), ((5760, 5796), 'pandas.read_csv', 'pd.read_csv', (['path_out_measures_final'], {}), '(path_out_measures_final)\n', (5771, 5796), True, 'import pandas as pd\n'), ((5806, 5839), 'os.path.isfile', 'os.path.isfile', (['path_out_measures'], {}), '(path_out_measures)\n', (5820, 5839), False, 'import os\n'), ((5968, 5983), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5981, 5983), False, 'import torch\n'), ((8778, 8831), 'os.rename', 'os.rename', (['path_out_measures', 'path_out_measures_final'], {}), '(path_out_measures, path_out_measures_final)\n', (8787, 8831), False, 'import os\n'), ((1782, 1807), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1805, 1807), False, 'import torch\n'), ((1885, 1910), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1908, 1910), False, 'import torch\n'), ((5854, 5884), 'pandas.read_csv', 'pd.read_csv', (['path_out_measures'], {}), '(path_out_measures)\n', (5865, 5884), True, 'import pandas as pd\n'), ((7575, 7603), 'utils.util.tensor2img', 'util.tensor2img', (['visuals_tmp'], {}), '(visuals_tmp)\n', (7590, 7603), False, 'from utils import util\n'), ((8033, 8067), 'utils.util.save_img', 'util.save_img', (['sr_img', 'path_out_sr'], {}), '(sr_img, path_out_sr)\n', (8046, 8067), False, 'from utils import util\n'), ((8102, 8121), 'utils.util.tensor2img', 'util.tensor2img', (['hr'], {}), '(hr)\n', (8117, 8121), False, 'from utils import util\n'), ((8142, 8190), 'collections.OrderedDict', 'OrderedDict', ([], {'conf': 'conf', 'heat': 'heat', 'name': 'idx_test'}), '(conf=conf, heat=heat, name=idx_test)\n', (8153, 8190), False, 'from collections import OrderedDict\n'), ((8652, 8705), 'os.rename', 'os.rename', (["(path_out_measures + '_')", 'path_out_measures'], {}), "(path_out_measures + '_', path_out_measures)\n", (8661, 8705), 
False, 'import os\n'), ((4087, 4116), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_size'], {}), '(image_size)\n', (4104, 4116), False, 'from torchvision import transforms\n'), ((4240, 4261), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4259, 4261), False, 'from torchvision import transforms\n'), ((8501, 8521), 'pandas.DataFrame', 'pd.DataFrame', (['[meas]'], {}), '([meas])\n', (8513, 8521), True, 'import pandas as pd\n'), ((4751, 4844), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(False)', 'batch_size': 'batch_size', 'num_workers': '(4)'}), '(dataset, shuffle=False, batch_size=batch_size,\n num_workers=4)\n', (4778, 4844), False, 'import torch\n'), ((8552, 8572), 'pandas.DataFrame', 'pd.DataFrame', (['[meas]'], {}), '([meas])\n', (8564, 8572), True, 'import pandas as pd\n')] |
import functools
from pathlib import Path
from typing import List, Optional, Sequence, Tuple
import dask
import numpy
import scipy
from arbol.arbol import aprint, asection
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from dexp.datasets import BaseDataset
from dexp.optics.psf.standard_psfs import nikon16x08na, olympus20x10na
from dexp.processing.deconvolution import (
admm_deconvolution,
lucy_richardson_deconvolution,
)
from dexp.processing.filters.fft_convolve import fft_convolve
from dexp.processing.utils.scatter_gather_i2i import scatter_gather_i2i
from dexp.utils.backends import Backend, BestBackend
from dexp.utils.slicing import slice_from_shape
def dataset_deconv(
    dataset: BaseDataset,
    dest_path: str,
    channels: Sequence[str],
    slicing,
    store: str = "dir",
    compression: str = "zstd",
    compression_level: int = 3,
    overwrite: bool = False,
    tilesize: Optional[Tuple[int]] = None,
    method: str = "lr",
    num_iterations: int = 16,
    max_correction: int = 16,
    power: float = 1,
    blind_spot: int = 0,
    back_projection: Optional[str] = None,
    wb_order: int = 5,
    psf_objective: str = "nikon16x08na",
    psf_na: float = 0.8,
    psf_dxy: float = 0.485,
    psf_dz: float = 2,
    psf_xy_size: int = 17,
    psf_z_size: int = 17,
    psf_show: bool = False,
    scaling: Optional[Tuple[float]] = None,
    workers: int = 1,
    workersbackend: str = "",
    devices: Optional[List[int]] = None,
    check: bool = True,
    stop_at_exception: bool = True,
):
    """Deconvolve selected channels of a dataset and write results to a new ZDataset.

    For each channel and each selected time point, the stack is optionally
    rescaled, deconvolved tile-by-tile (Lucy-Richardson for ``method='lr'``,
    ADMM for ``method='admm'``) with a PSF chosen by ``psf_objective``, and
    written to ``dest_path``. Time points are processed as dask-delayed tasks
    on a local CUDA cluster spread over ``devices``.

    Raises:
        RuntimeError: if ``psf_objective`` is neither a known objective name
            nor a path to a saved PSF array.
        ValueError: if ``method`` is not 'lr' or 'admm'.
    """
    from dexp.datasets import ZDataset

    mode = "w" + ("" if overwrite else "-")
    dest_dataset = ZDataset(dest_path, mode, store, parent=dataset)

    # Default tile size:
    if tilesize is None:
        tilesize = 320  # very conservative

    # Scaling default value:
    if scaling is None:
        scaling = (1, 1, 1)
    sz, sy, sx = scaling
    aprint(f"Input images will be scaled by: (sz,sy,sx)={scaling}")

    # CUDA DASK cluster
    cluster = LocalCUDACluster(CUDA_VISIBLE_DEVICES=devices)
    client = Client(cluster)
    aprint("Dask Client", client)

    lazy_computation = []

    for channel in dataset._selected_channels(channels):
        array = dataset.get_array(channel)

        aprint(f"Slicing with: {slicing}")
        out_shape, volume_slicing, time_points = slice_from_shape(array.shape, slicing)
        out_shape = tuple(int(round(u * v)) for u, v in zip(out_shape, (1,) + scaling))
        dtype = numpy.float16 if method == "admm" else array.dtype

        # Adds destination array channel to dataset
        dest_array = dest_dataset.add_channel(
            name=channel, shape=out_shape, dtype=dtype, codec=compression, clevel=compression_level
        )

        # This is not ideal but difficult to avoid right now:
        sxy = (sx + sy) / 2

        # PSF paraneters:
        psf_kwargs = {
            "dxy": psf_dxy / sxy,
            "dz": psf_dz / sz,
            "xy_size": int(round(psf_xy_size * sxy)),
            "z_size": int(round(psf_z_size * sz)),
        }
        aprint(f"psf_kwargs: {psf_kwargs}")

        # NA override:
        if psf_na is not None:
            aprint(f"Numerical aperture overridden to a value of: {psf_na}")
            psf_kwargs["NA"] = psf_na

        # choose psf from detection optics:
        if psf_objective == "nikon16x08na":
            psf_kernel = nikon16x08na(**psf_kwargs)
        elif psf_objective == "olympus20x10na":
            psf_kernel = olympus20x10na(**psf_kwargs)
        elif Path(psf_objective).exists():
            psf_kernel = numpy.load(psf_objective)
            if sz != 1.0 or sy != 1.0 or sx != 1.0:
                psf_kernel = scipy.ndimage.zoom(psf_kernel, zoom=(sz, sy, sx), order=1)
            psf_z_size = psf_kernel.shape[0] + 10
            psf_xy_size = max(psf_kernel.shape[1:]) + 10
        else:
            raise RuntimeError(f"Object/path {psf_objective} not found.")

        # usefull for debugging:
        if psf_show:
            import napari

            viewer = napari.Viewer(title="DEXP | viewing PSF with napari", ndisplay=3)
            viewer.add_image(psf_kernel)
            napari.run()

        margins = max(psf_xy_size, psf_z_size)

        if method == "lr":
            normalize = False
            convolve = functools.partial(fft_convolve, in_place=False, mode="reflect", internal_dtype=numpy.float32)

            def deconv(image):
                min_value = image.min()
                max_value = image.max()

                return lucy_richardson_deconvolution(
                    image=image,
                    psf=psf_kernel,
                    num_iterations=num_iterations,
                    max_correction=max_correction,
                    normalise_minmax=(min_value, max_value),
                    power=power,
                    blind_spot=blind_spot,
                    blind_spot_mode="median+uniform",
                    blind_spot_axis_exclusion=(0,),
                    wb_order=wb_order,
                    back_projection=back_projection,
                    convolve_method=convolve,
                )

        elif method == "admm":
            normalize = True

            def deconv(image):
                out = admm_deconvolution(
                    image,
                    psf=psf_kernel,
                    iterations=num_iterations,
                    derivative=2,
                )
                return out

        else:
            raise ValueError(f"Unknown deconvolution mode: {method}")

        @dask.delayed
        def process(i):
            tp = time_points[i]
            try:
                with asection(f"Deconvolving time point for time point {i}/{len(time_points)}"):
                    with asection(f"Loading channel: {channel}"):
                        tp_array = numpy.asarray(array[tp][volume_slicing])

                    with BestBackend(exclusive=True, enable_unified_memory=True):

                        if sz != 1.0 or sy != 1.0 or sx != 1.0:
                            with asection(f"Applying scaling {(sz, sy, sx)} to image."):
                                sp = Backend.get_sp_module()
                                tp_array = Backend.to_backend(tp_array)
                                tp_array = sp.ndimage.zoom(tp_array, zoom=(sz, sy, sx), order=1)
                                tp_array = Backend.to_numpy(tp_array)

                        with asection(
                            f"Deconvolving image of shape: {tp_array.shape}, with tile size: {tilesize}, "
                            # BUGFIX: this fragment was a plain string, so the
                            # literal text "{margins}" was printed instead of
                            # the value; it needs the f prefix.
                            + f"margins: {margins} "
                        ):
                            aprint(f"Number of iterations: {num_iterations}, back_projection:{back_projection}, ")
                            tp_array = scatter_gather_i2i(
                                deconv,
                                tp_array,
                                tiles=tilesize,
                                margins=margins,
                                normalise=normalize,
                                internal_dtype=dtype,
                            )

                        with asection("Moving array from backend to numpy."):
                            tp_array = Backend.to_numpy(tp_array, dtype=dest_array.dtype, force_copy=False)

                    with asection(
                        f"Saving deconvolved stack for time point {i}, shape:{tp_array.shape}, dtype:{array.dtype}"
                    ):
                        dest_dataset.write_stack(channel=channel, time_point=i, stack_array=tp_array)

                    aprint(f"Done processing time point: {i}/{len(time_points)} .")

            except Exception as error:
                aprint(error)
                aprint(f"Error occurred while processing time point {i} !")
                import traceback

                traceback.print_exc()

                if stop_at_exception:
                    raise error

        for i in range(len(time_points)):
            lazy_computation.append(process(i))

    dask.compute(*lazy_computation)

    # Dataset info:
    aprint(dest_dataset.info())

    # Check dataset integrity:
    if check:
        dest_dataset.check_integrity()

    # close destination dataset:
    dest_dataset.close()
    client.close()
| [
"numpy.load",
"dask_cuda.LocalCUDACluster",
"pathlib.Path",
"dexp.datasets.ZDataset",
"dexp.utils.backends.Backend.to_backend",
"napari.Viewer",
"dexp.processing.deconvolution.admm_deconvolution",
"dask.distributed.Client",
"traceback.print_exc",
"scipy.ndimage.zoom",
"dexp.processing.utils.scat... | [((1666, 1714), 'dexp.datasets.ZDataset', 'ZDataset', (['dest_path', 'mode', 'store'], {'parent': 'dataset'}), '(dest_path, mode, store, parent=dataset)\n', (1674, 1714), False, 'from dexp.datasets import ZDataset\n'), ((1921, 1984), 'arbol.arbol.aprint', 'aprint', (['f"""Input images will be scaled by: (sz,sy,sx)={scaling}"""'], {}), "(f'Input images will be scaled by: (sz,sy,sx)={scaling}')\n", (1927, 1984), False, 'from arbol.arbol import aprint, asection\n'), ((2024, 2070), 'dask_cuda.LocalCUDACluster', 'LocalCUDACluster', ([], {'CUDA_VISIBLE_DEVICES': 'devices'}), '(CUDA_VISIBLE_DEVICES=devices)\n', (2040, 2070), False, 'from dask_cuda import LocalCUDACluster\n'), ((2084, 2099), 'dask.distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (2090, 2099), False, 'from dask.distributed import Client\n'), ((2104, 2133), 'arbol.arbol.aprint', 'aprint', (['"""Dask Client"""', 'client'], {}), "('Dask Client', client)\n", (2110, 2133), False, 'from arbol.arbol import aprint, asection\n'), ((8094, 8125), 'dask.compute', 'dask.compute', (['*lazy_computation'], {}), '(*lazy_computation)\n', (8106, 8125), False, 'import dask\n'), ((2271, 2305), 'arbol.arbol.aprint', 'aprint', (['f"""Slicing with: {slicing}"""'], {}), "(f'Slicing with: {slicing}')\n", (2277, 2305), False, 'from arbol.arbol import aprint, asection\n'), ((2355, 2393), 'dexp.utils.slicing.slice_from_shape', 'slice_from_shape', (['array.shape', 'slicing'], {}), '(array.shape, slicing)\n', (2371, 2393), False, 'from dexp.utils.slicing import slice_from_shape\n'), ((3090, 3125), 'arbol.arbol.aprint', 'aprint', (['f"""psf_kwargs: {psf_kwargs}"""'], {}), "(f'psf_kwargs: {psf_kwargs}')\n", (3096, 3125), False, 'from arbol.arbol import aprint, asection\n'), ((3193, 3257), 'arbol.arbol.aprint', 'aprint', (['f"""Numerical aperture overridden to a value of: {psf_na}"""'], {}), "(f'Numerical aperture overridden to a value of: {psf_na}')\n", (3199, 3257), False, 'from arbol.arbol 
import aprint, asection\n'), ((3410, 3436), 'dexp.optics.psf.standard_psfs.nikon16x08na', 'nikon16x08na', ([], {}), '(**psf_kwargs)\n', (3422, 3436), False, 'from dexp.optics.psf.standard_psfs import nikon16x08na, olympus20x10na\n'), ((4071, 4136), 'napari.Viewer', 'napari.Viewer', ([], {'title': '"""DEXP | viewing PSF with napari"""', 'ndisplay': '(3)'}), "(title='DEXP | viewing PSF with napari', ndisplay=3)\n", (4084, 4136), False, 'import napari\n'), ((4190, 4202), 'napari.run', 'napari.run', ([], {}), '()\n', (4200, 4202), False, 'import napari\n'), ((4332, 4429), 'functools.partial', 'functools.partial', (['fft_convolve'], {'in_place': '(False)', 'mode': '"""reflect"""', 'internal_dtype': 'numpy.float32'}), "(fft_convolve, in_place=False, mode='reflect',\n internal_dtype=numpy.float32)\n", (4349, 4429), False, 'import functools\n'), ((3510, 3538), 'dexp.optics.psf.standard_psfs.olympus20x10na', 'olympus20x10na', ([], {}), '(**psf_kwargs)\n', (3524, 3538), False, 'from dexp.optics.psf.standard_psfs import nikon16x08na, olympus20x10na\n'), ((4562, 4926), 'dexp.processing.deconvolution.lucy_richardson_deconvolution', 'lucy_richardson_deconvolution', ([], {'image': 'image', 'psf': 'psf_kernel', 'num_iterations': 'num_iterations', 'max_correction': 'max_correction', 'normalise_minmax': '(min_value, max_value)', 'power': 'power', 'blind_spot': 'blind_spot', 'blind_spot_mode': '"""median+uniform"""', 'blind_spot_axis_exclusion': '(0,)', 'wb_order': 'wb_order', 'back_projection': 'back_projection', 'convolve_method': 'convolve'}), "(image=image, psf=psf_kernel, num_iterations=\n num_iterations, max_correction=max_correction, normalise_minmax=(\n min_value, max_value), power=power, blind_spot=blind_spot,\n blind_spot_mode='median+uniform', blind_spot_axis_exclusion=(0,),\n wb_order=wb_order, back_projection=back_projection, convolve_method=\n convolve)\n", (4591, 4926), False, 'from dexp.processing.deconvolution import admm_deconvolution, 
lucy_richardson_deconvolution\n'), ((3607, 3632), 'numpy.load', 'numpy.load', (['psf_objective'], {}), '(psf_objective)\n', (3617, 3632), False, 'import numpy\n'), ((5278, 5364), 'dexp.processing.deconvolution.admm_deconvolution', 'admm_deconvolution', (['image'], {'psf': 'psf_kernel', 'iterations': 'num_iterations', 'derivative': '(2)'}), '(image, psf=psf_kernel, iterations=num_iterations,\n derivative=2)\n', (5296, 5364), False, 'from dexp.processing.deconvolution import admm_deconvolution, lucy_richardson_deconvolution\n'), ((7765, 7778), 'arbol.arbol.aprint', 'aprint', (['error'], {}), '(error)\n', (7771, 7778), False, 'from arbol.arbol import aprint, asection\n'), ((7795, 7854), 'arbol.arbol.aprint', 'aprint', (['f"""Error occurred while processing time point {i} !"""'], {}), "(f'Error occurred while processing time point {i} !')\n", (7801, 7854), False, 'from arbol.arbol import aprint, asection\n'), ((7905, 7926), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7924, 7926), False, 'import traceback\n'), ((3552, 3571), 'pathlib.Path', 'Path', (['psf_objective'], {}), '(psf_objective)\n', (3556, 3571), False, 'from pathlib import Path\n'), ((3714, 3772), 'scipy.ndimage.zoom', 'scipy.ndimage.zoom', (['psf_kernel'], {'zoom': '(sz, sy, sx)', 'order': '(1)'}), '(psf_kernel, zoom=(sz, sy, sx), order=1)\n', (3732, 3772), False, 'import scipy\n'), ((5790, 5829), 'arbol.arbol.asection', 'asection', (['f"""Loading channel: {channel}"""'], {}), "(f'Loading channel: {channel}')\n", (5798, 5829), False, 'from arbol.arbol import aprint, asection\n'), ((5866, 5906), 'numpy.asarray', 'numpy.asarray', (['array[tp][volume_slicing]'], {}), '(array[tp][volume_slicing])\n', (5879, 5906), False, 'import numpy\n'), ((5933, 5988), 'dexp.utils.backends.BestBackend', 'BestBackend', ([], {'exclusive': '(True)', 'enable_unified_memory': '(True)'}), '(exclusive=True, enable_unified_memory=True)\n', (5944, 5988), False, 'from dexp.utils.backends import Backend, 
BestBackend\n'), ((7373, 7484), 'arbol.arbol.asection', 'asection', (['f"""Saving deconvolved stack for time point {i}, shape:{tp_array.shape}, dtype:{array.dtype}"""'], {}), "(\n f'Saving deconvolved stack for time point {i}, shape:{tp_array.shape}, dtype:{array.dtype}'\n )\n", (7381, 7484), False, 'from arbol.arbol import aprint, asection\n'), ((6474, 6596), 'arbol.arbol.asection', 'asection', (["(f'Deconvolving image of shape: {tp_array.shape}, with tile size: {tilesize}, '\n + 'margins: {margins} ')"], {}), "(\n f'Deconvolving image of shape: {tp_array.shape}, with tile size: {tilesize}, '\n + 'margins: {margins} ')\n", (6482, 6596), False, 'from arbol.arbol import aprint, asection\n'), ((6698, 6794), 'arbol.arbol.aprint', 'aprint', (['f"""Number of iterations: {num_iterations}, back_projection:{back_projection}, """'], {}), "(\n f'Number of iterations: {num_iterations}, back_projection:{back_projection}, '\n )\n", (6704, 6794), False, 'from arbol.arbol import aprint, asection\n'), ((6824, 6940), 'dexp.processing.utils.scatter_gather_i2i.scatter_gather_i2i', 'scatter_gather_i2i', (['deconv', 'tp_array'], {'tiles': 'tilesize', 'margins': 'margins', 'normalise': 'normalize', 'internal_dtype': 'dtype'}), '(deconv, tp_array, tiles=tilesize, margins=margins,\n normalise=normalize, internal_dtype=dtype)\n', (6842, 6940), False, 'from dexp.processing.utils.scatter_gather_i2i import scatter_gather_i2i\n'), ((7190, 7237), 'arbol.arbol.asection', 'asection', (['"""Moving array from backend to numpy."""'], {}), "('Moving array from backend to numpy.')\n", (7198, 7237), False, 'from arbol.arbol import aprint, asection\n'), ((7278, 7346), 'dexp.utils.backends.Backend.to_numpy', 'Backend.to_numpy', (['tp_array'], {'dtype': 'dest_array.dtype', 'force_copy': '(False)'}), '(tp_array, dtype=dest_array.dtype, force_copy=False)\n', (7294, 7346), False, 'from dexp.utils.backends import Backend, BestBackend\n'), ((6088, 6140), 'arbol.arbol.asection', 'asection', (['f"""Applying 
scaling {sz, sy, sx} to image."""'], {}), "(f'Applying scaling {sz, sy, sx} to image.')\n", (6096, 6140), False, 'from arbol.arbol import aprint, asection\n'), ((6181, 6204), 'dexp.utils.backends.Backend.get_sp_module', 'Backend.get_sp_module', ([], {}), '()\n', (6202, 6204), False, 'from dexp.utils.backends import Backend, BestBackend\n'), ((6248, 6276), 'dexp.utils.backends.Backend.to_backend', 'Backend.to_backend', (['tp_array'], {}), '(tp_array)\n', (6266, 6276), False, 'from dexp.utils.backends import Backend, BestBackend\n'), ((6417, 6443), 'dexp.utils.backends.Backend.to_numpy', 'Backend.to_numpy', (['tp_array'], {}), '(tp_array)\n', (6433, 6443), False, 'from dexp.utils.backends import Backend, BestBackend\n')] |
import os
import xarray as xr
server = "ftp.star.nesdis.noaa.gov"
base_dir = "/pub/smcd/jhuang/npp.viirs.aerosol.data/edraot550/"
def open_dataset(date, resolution="high", datapath="."):
    """Download, unzip and read one day of VIIRS EDR AOD 550 nm data.

    Parameters
    ----------
    date : datetime-like
        Day to retrieve.
    resolution : str
        "high"/"h" selects the 0.1 degree grid; any other value the
        0.25 degree grid.
    datapath : str
        Directory the file is downloaded into.  Note: the process working
        directory is changed to this path and intentionally left there so
        the relative file name stays valid for reading.
    """
    _ = change_dir(datapath)  # noqa: F841
    if resolution in {"high", "h"}:
        grid = (1800, 3600)        # 0.1 degree grid
        res_arg = "high"
    else:
        grid = (720, 1440)         # 0.25 degree grid
        res_arg = 0.25
    lon, lat = _get_latlons(*grid)
    fname, date = download_data(date, resolution=res_arg)
    unzipped = _unzip_file(fname)
    return read_data(unzipped, lat, lon, date)
def open_mfdataset(dates, resolution="high", datapath="."):
    """Open several dates of VIIRS EDR AOD data concatenated along "time".

    Parameters
    ----------
    dates : iterable
        Datetime-like values, each passed to :func:`open_dataset`.
    resolution, datapath
        Forwarded unchanged to :func:`open_dataset`.
    """
    # Comprehension instead of the original append loop (same order, same result).
    das = [open_dataset(d, resolution=resolution, datapath=datapath) for d in dates]
    return xr.concat(das, dim="time")
def read_data(fname, lat, lon, date):
    """Read one raw flat-binary AOD file into a named xarray.DataArray.

    Parameters
    ----------
    fname : str
        Path to the uncompressed binary file (little-endian float32).
    lat, lon : ndarray
        2-D latitude/longitude grids; their shape defines the grid size.
    date : datetime-like
        Timestamp attached as the single "time" coordinate.
    """
    from numpy import float32, fromfile, nan
    from pandas import to_datetime

    nlat, nlon = lon.shape
    raw = fromfile(fname, dtype=float32)
    # The file holds two stacked grids; only the first one is used.
    aot = raw.reshape(2, nlat, nlon)[0, :, :].reshape(1, nlat, nlon)
    aot[aot < -999] = nan  # mask fill values
    da = xr.DataArray(
        aot,
        coords=[to_datetime([date]), range(nlat), range(nlon)],
        dims=["time", "y", "x"],
    )
    da["latitude"] = (("y", "x"), lat)
    da["longitude"] = (("y", "x"), lon)
    da.attrs["units"] = ""
    da.name = "VIIRS EDR AOD"
    da.attrs["long_name"] = "Aerosol Optical Depth"
    da.attrs[
        "source"
    ] = "ftp://ftp.star.nesdis.noaa.gov/pub/smcd/jhuang/npp.viirs.aerosol.data/edraot550"
    return da
def _unzip_file(fname):
import subprocess
subprocess.run(["gunzip", "-f", fname])
return fname[:-3]
def change_dir(to_path):
    """Change the working directory to *to_path*; return the previous one."""
    previous = os.getcwd()
    os.chdir(to_path)
    return previous
def download_data(date, resolution="high"):
    """Fetch one day's gridded AOD file from the NOAA STAR FTP server.

    Parameters
    ----------
    date : datetime or str
        Day to download; non-datetime values are coerced with
        ``pandas.Timestamp``.
    resolution : str
        "high" -> 0.10 degree file name, anything else -> 0.25 degree.

    Returns
    -------
    (str, datetime-like)
        The local file name (written to the current working directory)
        and the possibly-coerced date.
    """
    import ftplib
    from datetime import datetime

    if not isinstance(date, datetime):
        from pandas import Timestamp

        date = Timestamp(date)
    year = date.strftime("%Y")
    yyyymmdd = date.strftime("%Y%m%d")
    if resolution == "high":
        file = f"npp_aot550_edr_gridded_0.10_{yyyymmdd}.high.bin.gz"
    else:
        # NOTE(review): the 0.25 degree name still carries the ".high" tag;
        # confirm this matches the server's actual naming scheme.
        file = f"npp_aot550_edr_gridded_0.25_{yyyymmdd}.high.bin.gz"
    # Context managers close both the FTP session and the local file even if
    # the transfer fails (the original leaked both handles).
    with ftplib.FTP(server) as ftp, open(file, "wb") as fh:
        ftp.login()
        ftp.cwd(base_dir + year)
        ftp.retrbinary("RETR " + file, fh.write)
    return file, date
def _get_latlons(nlat, nlon):
from numpy import linspace, meshgrid
lon_min = -179.875
lon_max = -1 * lon_min
lat_min = -89.875
lat_max = -1.0 * lat_min
lons = linspace(lon_min, lon_max, nlon)
lats = linspace(lat_min, lat_max, nlat)
lon, lat = meshgrid(lons, lats)
return lon, lat
| [
"subprocess.run",
"numpy.meshgrid",
"pandas.Timestamp",
"numpy.fromfile",
"os.getcwd",
"xarray.concat",
"pandas.to_datetime",
"numpy.linspace",
"ftplib.FTP",
"os.chdir"
] | [((984, 1010), 'xarray.concat', 'xr.concat', (['das'], {'dim': '"""time"""'}), "(das, dim='time')\n", (993, 1010), True, 'import xarray as xr\n'), ((1154, 1184), 'numpy.fromfile', 'fromfile', (['fname'], {'dtype': 'float32'}), '(fname, dtype=float32)\n', (1162, 1184), False, 'from numpy import float32, fromfile, nan\n'), ((1319, 1338), 'pandas.to_datetime', 'to_datetime', (['[date]'], {}), '([date])\n', (1330, 1338), False, 'from pandas import to_datetime\n'), ((1811, 1850), 'subprocess.run', 'subprocess.run', (["['gunzip', '-f', fname]"], {}), "(['gunzip', '-f', fname])\n", (1825, 1850), False, 'import subprocess\n'), ((1914, 1925), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1923, 1925), False, 'import os\n'), ((1930, 1947), 'os.chdir', 'os.chdir', (['to_path'], {}), '(to_path)\n', (1938, 1947), False, 'import os\n'), ((2523, 2541), 'ftplib.FTP', 'ftplib.FTP', (['server'], {}), '(server)\n', (2533, 2541), False, 'import ftplib\n'), ((2941, 2973), 'numpy.linspace', 'linspace', (['lon_min', 'lon_max', 'nlon'], {}), '(lon_min, lon_max, nlon)\n', (2949, 2973), False, 'from numpy import linspace, meshgrid\n'), ((2985, 3017), 'numpy.linspace', 'linspace', (['lat_min', 'lat_max', 'nlat'], {}), '(lat_min, lat_max, nlat)\n', (2993, 3017), False, 'from numpy import linspace, meshgrid\n'), ((3033, 3053), 'numpy.meshgrid', 'meshgrid', (['lons', 'lats'], {}), '(lons, lats)\n', (3041, 3053), False, 'from numpy import linspace, meshgrid\n'), ((2242, 2257), 'pandas.Timestamp', 'Timestamp', (['date'], {}), '(date)\n', (2251, 2257), False, 'from pandas import Timestamp\n')] |
"""
A test suite to check whether lbann.onnx can convert typical LBANN networks to ONNX networks correctly.
For each test case, this script
1. converts a given LBANN network into an ONNX network, and
2. adds dummy (zero) parameters to the converted network (if ADD_DUMMY_PARAMS is set),
3. saves the network to DUMP_DIR (if LBANN_ONNX_DUMP_MODELS is set),
4. checks shapes of prepared hidden tensors are correct.
The LBANN networks are borrowed from the LBANN model zoo.
"""
import re
import unittest
import os
import onnx
from onnx import numpy_helper
import numpy as np
import lbann.onnx.l2o
import lbann.onnx.util
import lbann.onnx.l2o.util
from lbann.onnx.util import parseBoolEnvVar, getLbannRoot
from lbann.onnx.tests.util import isModelDumpEnabled, createAndGetDumpedModelsDir
# Root of the LBANN model-zoo prototext files used as conversion fixtures.
LBANN_MODEL_ROOT = "{}/model_zoo/models".format(getLbannRoot())
# Whether converted ONNX models are written to disk (environment-controlled).
SAVE_ONNX = isModelDumpEnabled()
DUMP_DIR = createAndGetDumpedModelsDir()
# When set, zero-valued initializers are attached for every missing parameter.
ADD_DUMMY_PARAMS = parseBoolEnvVar("LBANN_ONNX_ADD_DUMMY_PARAMS", False)
# Placeholder in expected shapes, replaced by the parsed mini-batch size.
MB_PLACEHOLDER = "MB"
class TestLbann2Onnx(unittest.TestCase):
    """Converts LBANN model-zoo networks to ONNX and checks selected tensor shapes."""

    # NOTE(review): the mutable default for `testedOutputs` is only iterated,
    # never mutated, so sharing one list across calls is harmless here.
    def _test(self, model, inputShapes, testedOutputs=[]):
        """Convert *model* (a prototext path) to ONNX and verify that each
        (nodeName, expectedShape) pair in *testedOutputs* matches the inferred
        shape; MB_PLACEHOLDER entries are replaced by the parsed mini-batch size."""
        # Model name = file stem of the prototext path (used for the dump file name).
        modelName = re.compile("^.*?/?([^/.]+).prototext$").search(model).group(1)
        o, miniBatchSize = lbann.onnx.l2o.parseLbannModelPB(model, inputShapes)
        if ADD_DUMMY_PARAMS:
            # Attach zero initializers for every graph input that is neither a
            # data input ("<name>_0") nor already covered by an initializer.
            dummyInits = []
            for i in o.graph.input:
                if i.name in list(map(lambda x: "{}_0".format(x), inputShapes.keys())) or \
                   i.name in list(map(lambda x: x.name, o.graph.initializer)):
                    continue
                shape = lbann.onnx.util.getDimFromValueInfo(i)
                dummyInits.append(numpy_helper.from_array(np.zeros(shape, dtype=lbann.onnx.ELEM_TYPE_NP),
                                                          name=i.name))
            # Rebuild the graph/model with the dummy initializers appended.
            g = onnx.helper.make_graph(o.graph.node,
                                       o.graph.name,
                                       o.graph.input,
                                       o.graph.output,
                                       list(o.graph.initializer) + dummyInits,
                                       value_info=o.graph.value_info)
            o = onnx.helper.make_model(g)
        if SAVE_ONNX:
            onnx.save(o, os.path.join(DUMP_DIR, "{}.onnx".format(modelName)))
        for nodeName, outputShape in testedOutputs:
            # Exactly one node must match; its first output's value_info holds the shape.
            node, = list(filter(lambda x: x.name == nodeName, o.graph.node))
            outputVI, = list(filter(lambda x: x.name == node.output[0], o.graph.value_info))
            outputShapeActual = lbann.onnx.util.getDimFromValueInfo(outputVI)
            outputShapeWithMB = list(map(lambda x: miniBatchSize if x == MB_PLACEHOLDER else x, outputShape))
            assert outputShapeWithMB == outputShapeActual, (outputShapeWithMB, outputShapeActual)

    def test_l2o_mnist(self):
        width = 28
        classes = 10
        self._test("{}/simple_mnist/model_mnist_simple_1.prototext".format(LBANN_MODEL_ROOT),
                   {"image": [width*width], "label": [classes]},
                   [("relu1", [MB_PLACEHOLDER, 500]),
                    ("prob", [MB_PLACEHOLDER, classes])])

    def test_l2o_lenet_mnist(self):
        width = 28
        classes = 10
        self._test("{}/lenet_mnist/model_lenet_mnist.prototext".format(LBANN_MODEL_ROOT),
                   {"images": [1, width, width], "labels": [classes]},
                   [("pool2", [MB_PLACEHOLDER, 50, 4, 4]),
                    ("prob", [MB_PLACEHOLDER, classes])])

    def test_l2o_autoencoder_mnist(self):
        width = 28
        self._test("{}/autoencoder_mnist/model_autoencoder_mnist.prototext".format(LBANN_MODEL_ROOT),
                   {"image": [width*width]},
                   [("reconstruction", [MB_PLACEHOLDER, width*width])])

    @unittest.skip("Skipped since some tensor shapes cannot be inferred.")
    def test_l2o_vae_mnist(self):
        width = 28
        self._test("{}/autoencoder_mnist/vae_mnist.prototext".format(LBANN_MODEL_ROOT),
                   {"image": [width*width]},
                   [("reconstruction", [MB_PLACEHOLDER, width*width])])

    def test_l2o_alexnet(self):
        width = 224
        classes = 1000
        self._test("{}/alexnet/model_alexnet.prototext".format(LBANN_MODEL_ROOT),
                   {"image": [3, width, width], "label": [classes]},
                   [("relu4", [MB_PLACEHOLDER, 384, 12, 12]),
                    ("prob", [MB_PLACEHOLDER, classes])])

    def test_l2o_resnet50(self):
        width = 224
        classes = 1000
        self._test("{}/resnet50/model_resnet50.prototext".format(LBANN_MODEL_ROOT),
                   {"images": [3, width, width], "labels": [classes]},
                   [("res4a", [MB_PLACEHOLDER, 1024, 14, 14]),
                    ("prob", [MB_PLACEHOLDER, classes])])

    def test_l2o_cosmoflow(self):
        width = 128
        secrets = 3
        self._test("{}/cosmoflow/model_cosmoflow.prototext".format(LBANN_MODEL_ROOT),
                   {"DARK_MATTER": [1, width, width, width], "SECRETS_OF_THE_UNIVERSE": [secrets]},
                   [("act5", [MB_PLACEHOLDER, 256, 4, 4, 4]),
                    ("drop3", [MB_PLACEHOLDER, secrets])])

    @unittest.skip("This model contains a 'not' layer, which is not implemented yet.")
    def test_l2o_gan_mnist_adversarial(self):
        width = 28
        classes = 2
        self._test("{}/gan/mnist/adversarial_model.prototext".format(LBANN_MODEL_ROOT),
                   {"data": [width, width], "label": [classes]},
                   [("fc4_tanh", [1, width*width]),
                    ("prob", [MB_PLACEHOLDER, 2])])

    @unittest.skip("This model contains a 'not' layer, which is not implemented yet.")
    def test_l2o_gan_mnist_discriminator(self):
        width = 28
        classes = 2
        self._test("{}/gan/mnist/discriminator_model.prototext".format(LBANN_MODEL_ROOT),
                   {"data": [width, width], "label": [classes]},
                   [("fc4_tanh", [1, width*width]),
                    ("prob", [MB_PLACEHOLDER, 2])])
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"lbann.onnx.util.getLbannRoot",
"onnx.helper.make_model",
"numpy.zeros",
"unittest.skip",
"lbann.onnx.tests.util.isModelDumpEnabled",
"lbann.onnx.util.parseBoolEnvVar",
"lbann.onnx.tests.util.createAndGetDumpedModelsDir",
"re.compile"
] | [((866, 886), 'lbann.onnx.tests.util.isModelDumpEnabled', 'isModelDumpEnabled', ([], {}), '()\n', (884, 886), False, 'from lbann.onnx.tests.util import isModelDumpEnabled, createAndGetDumpedModelsDir\n'), ((898, 927), 'lbann.onnx.tests.util.createAndGetDumpedModelsDir', 'createAndGetDumpedModelsDir', ([], {}), '()\n', (925, 927), False, 'from lbann.onnx.tests.util import isModelDumpEnabled, createAndGetDumpedModelsDir\n'), ((947, 1000), 'lbann.onnx.util.parseBoolEnvVar', 'parseBoolEnvVar', (['"""LBANN_ONNX_ADD_DUMMY_PARAMS"""', '(False)'], {}), "('LBANN_ONNX_ADD_DUMMY_PARAMS', False)\n", (962, 1000), False, 'from lbann.onnx.util import parseBoolEnvVar, getLbannRoot\n'), ((838, 852), 'lbann.onnx.util.getLbannRoot', 'getLbannRoot', ([], {}), '()\n', (850, 852), False, 'from lbann.onnx.util import parseBoolEnvVar, getLbannRoot\n'), ((3825, 3894), 'unittest.skip', 'unittest.skip', (['"""Skipped since some tensor shapes cannot be inferred."""'], {}), "('Skipped since some tensor shapes cannot be inferred.')\n", (3838, 3894), False, 'import unittest\n'), ((5241, 5327), 'unittest.skip', 'unittest.skip', (['"""This model contains a \'not\' layer, which is not implemented yet."""'], {}), '(\n "This model contains a \'not\' layer, which is not implemented yet.")\n', (5254, 5327), False, 'import unittest\n'), ((5671, 5757), 'unittest.skip', 'unittest.skip', (['"""This model contains a \'not\' layer, which is not implemented yet."""'], {}), '(\n "This model contains a \'not\' layer, which is not implemented yet.")\n', (5684, 5757), False, 'import unittest\n'), ((6131, 6146), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6144, 6146), False, 'import unittest\n'), ((2205, 2230), 'onnx.helper.make_model', 'onnx.helper.make_model', (['g'], {}), '(g)\n', (2227, 2230), False, 'import onnx\n'), ((1145, 1184), 're.compile', 're.compile', (['"""^.*?/?([^/.]+).prototext$"""'], {}), "('^.*?/?([^/.]+).prototext$')\n", (1155, 1184), False, 'import re\n'), ((1704, 1750), 
'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'lbann.onnx.ELEM_TYPE_NP'}), '(shape, dtype=lbann.onnx.ELEM_TYPE_NP)\n', (1712, 1750), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from trainer import Trainer
from config import get_config
from data_loader import get_loader
from utils import prepare_dirs_and_logger, save_config
def main(config):
    """Build data loaders and a Trainer from *config*, then train or evaluate.

    In training mode the model is trained and the config is saved; otherwise
    a pretrained model from ``config.load_path`` is used for D-space
    interpolation.

    Raises
    ------
    Exception
        If evaluation mode is requested without ``config.load_path``.
    """
    prepare_dirs_and_logger(config)

    # Seed the RNGs for reproducibility.  The original code created an
    # unused np.random.RandomState, which left numpy's global RNG unseeded.
    np.random.seed(config.random_seed)
    tf.set_random_seed(config.random_seed)

    if config.is_train:
        data_path = config.data_path
    else:
        # Evaluation uses a fixed batch size and, if provided, a dedicated
        # test data path.
        setattr(config, 'batch_size', 64)
        if config.test_data_path is None:
            data_path = config.data_path
        else:
            data_path = config.test_data_path

    data_loader = get_loader(
        data_path, config.batch_size, config.input_scale_size,
        config.data_format, config.split, is_square=config.is_square)
    one_data_loader = get_loader(
        data_path, 1, config.input_scale_size,
        config.data_format, config.split, is_square=config.is_square)
    trainer = Trainer(config, data_loader)
    # Kept for the single-sample interpolation paths below (toggle by
    # swapping the commented calls); construction may build graph state.
    one_trainer = Trainer(config, one_data_loader)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception("[!] You should specify `load_path` to load a pretrained model")
        print("MEEE load_path check error: " + config.load_path)
        # trainer.test()
        # trainer.generate_interpolation_G()
        # trainer.interpolate_one_G()
        # trainer.interpolate_many_G(30)
        # one_trainer.random_interpolate_D()
        trainer.random_interpolate_D()
if __name__ == "__main__":
config, unparsed = get_config()
main(config)
| [
"utils.prepare_dirs_and_logger",
"numpy.random.RandomState",
"tensorflow.set_random_seed",
"utils.save_config",
"config.get_config",
"data_loader.get_loader",
"trainer.Trainer"
] | [((215, 246), 'utils.prepare_dirs_and_logger', 'prepare_dirs_and_logger', (['config'], {}), '(config)\n', (238, 246), False, 'from utils import prepare_dirs_and_logger, save_config\n'), ((258, 299), 'numpy.random.RandomState', 'np.random.RandomState', (['config.random_seed'], {}), '(config.random_seed)\n', (279, 299), True, 'import numpy as np\n'), ((304, 342), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['config.random_seed'], {}), '(config.random_seed)\n', (322, 342), True, 'import tensorflow as tf\n'), ((756, 888), 'data_loader.get_loader', 'get_loader', (['data_path', 'config.batch_size', 'config.input_scale_size', 'config.data_format', 'config.split'], {'is_square': 'config.is_square'}), '(data_path, config.batch_size, config.input_scale_size, config.\n data_format, config.split, is_square=config.is_square)\n', (766, 888), False, 'from data_loader import get_loader\n'), ((932, 1047), 'data_loader.get_loader', 'get_loader', (['data_path', '(1)', 'config.input_scale_size', 'config.data_format', 'config.split'], {'is_square': 'config.is_square'}), '(data_path, 1, config.input_scale_size, config.data_format,\n config.split, is_square=config.is_square)\n', (942, 1047), False, 'from data_loader import get_loader\n'), ((1084, 1112), 'trainer.Trainer', 'Trainer', (['config', 'data_loader'], {}), '(config, data_loader)\n', (1091, 1112), False, 'from trainer import Trainer\n'), ((1131, 1163), 'trainer.Trainer', 'Trainer', (['config', 'one_data_loader'], {}), '(config, one_data_loader)\n', (1138, 1163), False, 'from trainer import Trainer\n'), ((1726, 1738), 'config.get_config', 'get_config', ([], {}), '()\n', (1736, 1738), False, 'from config import get_config\n'), ((1197, 1216), 'utils.save_config', 'save_config', (['config'], {}), '(config)\n', (1208, 1216), False, 'from utils import prepare_dirs_and_logger, save_config\n')] |
import os, sys, inspect, time, json, yaml
import redis
import numpy as np
import tensorflow as tf
from keras.applications import imagenet_utils
# sys.path.insert(1, os.path.join(sys.path[0], '../..')) # insert mlpipe root to path
mlpipe_root = os.path.abspath("../..")
sys.path.insert(0, mlpipe_root)
from config.clistyle import bcolor
from keras.models import model_from_json
from servers.helpers.helperfunctions import base64_decoding, NumpyEncoder
# Load the pipeline settings once at import time.
with open(mlpipe_root + "/config/settings.yaml", 'r') as stream:
    try:
        # NOTE(review): yaml.load without an explicit Loader executes
        # arbitrary YAML tags and is disallowed in newer PyYAML releases;
        # yaml.safe_load would be the safe choice for a config file.
        settings = yaml.load(stream)
    except yaml.YAMLError as exc:
        # NOTE(review): on a parse error this only prints; `settings` then
        # stays undefined and the Redis setup below raises NameError.
        print(exc)

# Shared Redis connection used as the inference job queue / result store.
rdb = redis.StrictRedis(
    host=settings['redis']['host'],
    port=settings['redis']['port'],
    db=settings['redis']['db']
)
def get_paths(root_path):
    """Assemble model graph/weights file paths from *root_path* and settings.

    Returns
    -------
    tuple
        (graph_path, graph_file, weights_path, weights_file) where the
        *_path entries are full paths and the *_file entries bare names.
    """
    model_cfg = settings['model']
    graph_file = model_cfg['graph_file']
    weights_file = model_cfg['weights_file']
    model_dir = root_path + model_cfg['pathdir']
    return model_dir + graph_file, graph_file, model_dir + weights_file, weights_file
def load_model(model_file_path, weights_file_path, graph_file, weights_file):
    """Load a Keras model architecture from JSON and attach its weights.

    Also stores the active TensorFlow graph in the module-level ``graph``
    variable so the prediction loop can re-enter it later.

    Parameters
    ----------
    model_file_path : str
        Path to the model-architecture JSON file.
    weights_file_path : str
        Path to the weights file.
    graph_file, weights_file : str
        Bare file names, used only for the log message.

    Returns
    -------
    The loaded Keras model.
    """
    # The original declared `global model` but never assigned it inside this
    # function; the dead statement has been removed.
    with open("{}".format(model_file_path), 'r') as model_json_file:
        loaded_model_json = model_json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights("{}".format(weights_file_path))
    global graph
    graph = tf.get_default_graph()
    print(bcolor.BOLD + "Loaded model '{}' from disk and inserted weights from '{}'.".format(graph_file, weights_file) + bcolor.END)
    return loaded_model
def classify_process():
    """Blocking worker loop: pop batches from the Redis queue, run the model,
    and write JSON-serialized predictions back to Redis keyed by data ID."""
    graph_path, graph_file, weights_path, weights_file = get_paths(mlpipe_root)
    model = load_model(graph_path, weights_path, graph_file, weights_file)
    while True:
        # Peek at up to batch_size queued items (they are trimmed only after
        # successful processing below).
        queue = rdb.lrange(
            settings['data_stream']['data_queue'], 0, settings['data_stream']['batch_size'] - 1)
        dataIDs = []
        batch = None
        for q in queue:
            # Queue entries are single-quoted dict strings; normalize to JSON.
            q = q.decode("utf-8").replace("\'", "\"")
            q = json.loads(q)
            data = base64_decoding(q["data"], q["dtype"], q["shape"])
            print("QSHAPE: ", q['shape'], q["filetype"])
            if batch is None:
                batch = data
            else:
                batch = np.vstack([batch, data]) # if already data in queue add a new layer
            dataIDs.append(q["id"])
        # Check if it fits in batch and processing is needed
        if len(dataIDs) > 0:
            print("Batch size: {}".format(batch.shape))
            # Re-enter the graph captured by load_model (TF1-style threading).
            with graph.as_default():
                predictions = model.predict(batch)
            # print("PREDICITONS: ", predictions, type(predictions))
            # This if statement possibly move out of model server, since imagenet specific
            # if (q["filetype"] in ['jpg', 'jpeg', 'png']):
            #     predictions = imagenet_utils.decode_predictions(predictions)
            # else:
            #     pass
            # print("PREDICTIONS: ", predictions)
            for (dataID, prediction) in zip(dataIDs, predictions):
                output = []
                # for prediction in predictionSet:
                # print("PREDICTION: ", prediction, type(predictions))
                r = {"result": prediction} # float() modify prediction as non-array so it can be stored to redis db
                output.append(r)
                # NOTE(review): `q` here is whatever the *last* queue item was,
                # so filename/filetype/dtype are taken from the final entry for
                # every prediction in the batch — likely a bug.
                output.append({
                    "input": {
                        "uid": dataID,
                        "filename": q["filename"],
                        "filetype": q["filetype"],
                        "dtype": q["dtype"],
                        "shape": batch.shape
                    }
                })
                rdb.set(dataID, json.dumps(output, cls=NumpyEncoder))
            # Drop the processed items from the front of the queue.
            rdb.ltrim(settings['data_stream']['data_queue'], len(dataIDs), -1)
        time.sleep(settings['data_stream']['server_sleep'])
if __name__ == "__main__":
classify_process()
| [
"os.path.abspath",
"yaml.load",
"json.loads",
"sys.path.insert",
"json.dumps",
"time.sleep",
"keras.models.model_from_json",
"servers.helpers.helperfunctions.base64_decoding",
"redis.StrictRedis",
"tensorflow.get_default_graph",
"numpy.vstack"
] | [((245, 269), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (260, 269), False, 'import os, sys, inspect, time, json, yaml\n'), ((270, 301), 'sys.path.insert', 'sys.path.insert', (['(0)', 'mlpipe_root'], {}), '(0, mlpipe_root)\n', (285, 301), False, 'import os, sys, inspect, time, json, yaml\n'), ((626, 740), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': "settings['redis']['host']", 'port': "settings['redis']['port']", 'db': "settings['redis']['db']"}), "(host=settings['redis']['host'], port=settings['redis'][\n 'port'], db=settings['redis']['db'])\n", (643, 740), False, 'import redis\n'), ((1305, 1339), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (1320, 1339), False, 'from keras.models import model_from_json\n'), ((1432, 1454), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1452, 1454), True, 'import tensorflow as tf\n'), ((547, 564), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (556, 564), False, 'import os, sys, inspect, time, json, yaml\n'), ((2086, 2099), 'json.loads', 'json.loads', (['q'], {}), '(q)\n', (2096, 2099), False, 'import os, sys, inspect, time, json, yaml\n'), ((2119, 2169), 'servers.helpers.helperfunctions.base64_decoding', 'base64_decoding', (["q['data']", "q['dtype']", "q['shape']"], {}), "(q['data'], q['dtype'], q['shape'])\n", (2134, 2169), False, 'from servers.helpers.helperfunctions import base64_decoding, NumpyEncoder\n'), ((4078, 4129), 'time.sleep', 'time.sleep', (["settings['data_stream']['server_sleep']"], {}), "(settings['data_stream']['server_sleep'])\n", (4088, 4129), False, 'import os, sys, inspect, time, json, yaml\n'), ((2341, 2365), 'numpy.vstack', 'np.vstack', (['[batch, data]'], {}), '([batch, data])\n', (2350, 2365), True, 'import numpy as np\n'), ((3945, 3981), 'json.dumps', 'json.dumps', (['output'], {'cls': 'NumpyEncoder'}), '(output, cls=NumpyEncoder)\n', (3955, 3981), False, 
'import os, sys, inspect, time, json, yaml\n')] |
import argparse
import numpy as np
import scipy.sparse as sp
import torch
import random
import networkx as nx
import dgl
from dgl import DGLGraph
from dgl.data import *
def preprocess_data(dataset, train_ratio):
if dataset in ['cora', 'citeseer', 'pubmed']:
edge = np.loadtxt('../low_freq/{}.edge'.format(dataset), dtype=int).tolist()
feat = np.loadtxt('../low_freq/{}.feature'.format(dataset))
labels = np.loadtxt('../low_freq/{}.label'.format(dataset), dtype=int)
train = np.loadtxt('../low_freq/{}.train'.format(dataset), dtype=int)
val = np.loadtxt('../low_freq/{}.val'.format(dataset), dtype=int)
test = np.loadtxt('../low_freq/{}.test'.format(dataset), dtype=int)
nclass = len(set(labels.tolist()))
print(dataset, nclass)
U = [e[0] for e in edge]
V = [e[1] for e in edge]
g = dgl.graph((U, V))
g = dgl.to_simple(g)
g = dgl.remove_self_loop(g)
g = dgl.to_bidirected(g)
feat = normalize_features(feat)
feat = torch.FloatTensor(feat)
labels = torch.LongTensor(labels)
train = torch.LongTensor(train)
val = torch.LongTensor(val)
test = torch.LongTensor(test)
return g, nclass, feat, labels, train, val, test
elif 'syn' in dataset:
edge = np.loadtxt('../syn/{}.edge'.format(dataset), dtype=int).tolist()
labels = np.loadtxt('../syn/{}.lab'.format(dataset), dtype=int)
features = np.loadtxt('../syn/{}.feat'.format(dataset), dtype=float)
n = labels.shape[0]
idx = [i for i in range(n)]
random.shuffle(idx)
idx_train = np.array(idx[:100])
idx_test = np.array(idx[100:])
U = [e[0] for e in edge]
V = [e[1] for e in edge]
g = dgl.graph((U, V))
c1 = 0
c2 = 0
lab = labels.tolist()
for e in edge:
if lab[e[0]] == lab[e[1]]:
c1 += 1
else:
c2 += 1
print(c1/len(edge), c2/len(edge))
#normalization will make features degenerated
#features = normalize_features(features)
features = torch.FloatTensor(features)
nclass = 2
labels = torch.LongTensor(labels)
train = torch.LongTensor(idx_train)
test = torch.LongTensor(idx_test)
print(dataset, nclass)
return g, nclass, features, labels, train, train, test
elif dataset in ['film']:
graph_adjacency_list_file_path = '../high_freq/{}/out1_graph_edges.txt'.format(dataset)
graph_node_features_and_labels_file_path = '../high_freq/{}/out1_node_feature_label.txt'.format(dataset)
G = nx.DiGraph()
graph_node_features_dict = {}
graph_labels_dict = {}
if dataset == 'film':
with open(graph_node_features_and_labels_file_path) as graph_node_features_and_labels_file:
graph_node_features_and_labels_file.readline()
for line in graph_node_features_and_labels_file:
line = line.rstrip().split('\t')
assert (len(line) == 3)
assert (int(line[0]) not in graph_node_features_dict and int(line[0]) not in graph_labels_dict)
feature_blank = np.zeros(932, dtype=np.uint16)
feature_blank[np.array(line[1].split(','), dtype=np.uint16)] = 1
graph_node_features_dict[int(line[0])] = feature_blank
graph_labels_dict[int(line[0])] = int(line[2])
else:
with open(graph_node_features_and_labels_file_path) as graph_node_features_and_labels_file:
graph_node_features_and_labels_file.readline()
for line in graph_node_features_and_labels_file:
line = line.rstrip().split('\t')
assert (len(line) == 3)
assert (int(line[0]) not in graph_node_features_dict and int(line[0]) not in graph_labels_dict)
graph_node_features_dict[int(line[0])] = np.array(line[1].split(','), dtype=np.uint8)
graph_labels_dict[int(line[0])] = int(line[2])
with open(graph_adjacency_list_file_path) as graph_adjacency_list_file:
graph_adjacency_list_file.readline()
for line in graph_adjacency_list_file:
line = line.rstrip().split('\t')
assert (len(line) == 2)
if int(line[0]) not in G:
G.add_node(int(line[0]), features=graph_node_features_dict[int(line[0])],
label=graph_labels_dict[int(line[0])])
if int(line[1]) not in G:
G.add_node(int(line[1]), features=graph_node_features_dict[int(line[1])],
label=graph_labels_dict[int(line[1])])
G.add_edge(int(line[0]), int(line[1]))
adj = nx.adjacency_matrix(G, sorted(G.nodes()))
row, col = np.where(adj.todense() > 0)
U = row.tolist()
V = col.tolist()
g = dgl.graph((U, V))
g = dgl.to_simple(g)
g = dgl.to_bidirected(g)
g = dgl.remove_self_loop(g)
features = np.array([features for _, features in sorted(G.nodes(data='features'), key=lambda x: x[0])], dtype=float)
labels = np.array([label for _, label in sorted(G.nodes(data='label'), key=lambda x: x[0])], dtype=int)
n = labels.shape[0]
idx = [i for i in range(n)]
#random.shuffle(idx)
r0 = int(n * train_ratio)
r1 = int(n * 0.6)
r2 = int(n * 0.8)
idx_train = np.array(idx[:r0])
idx_val = np.array(idx[r1:r2])
idx_test = np.array(idx[r2:])
features = normalize_features(features)
features = torch.FloatTensor(features)
nclass = 5
labels = torch.LongTensor(labels)
train = torch.LongTensor(idx_train)
val = torch.LongTensor(idx_val)
test = torch.LongTensor(idx_test)
print(dataset, nclass)
return g, nclass, features, labels, train, val, test
# datasets in Geom-GCN
elif dataset in ['cornell', 'texas', 'wisconsin', 'chameleon', 'squirrel']:
graph_adjacency_list_file_path = '../high_freq/{}/out1_graph_edges.txt'.format(dataset)
graph_node_features_and_labels_file_path = '../high_freq/{}/out1_node_feature_label.txt'.format(dataset)
G = nx.DiGraph()
graph_node_features_dict = {}
graph_labels_dict = {}
with open(graph_node_features_and_labels_file_path) as graph_node_features_and_labels_file:
graph_node_features_and_labels_file.readline()
for line in graph_node_features_and_labels_file:
line = line.rstrip().split('\t')
assert (len(line) == 3)
assert (int(line[0]) not in graph_node_features_dict and int(line[0]) not in graph_labels_dict)
graph_node_features_dict[int(line[0])] = np.array(line[1].split(','), dtype=np.uint8)
graph_labels_dict[int(line[0])] = int(line[2])
with open(graph_adjacency_list_file_path) as graph_adjacency_list_file:
graph_adjacency_list_file.readline()
for line in graph_adjacency_list_file:
line = line.rstrip().split('\t')
assert (len(line) == 2)
if int(line[0]) not in G:
G.add_node(int(line[0]), features=graph_node_features_dict[int(line[0])],
label=graph_labels_dict[int(line[0])])
if int(line[1]) not in G:
G.add_node(int(line[1]), features=graph_node_features_dict[int(line[1])],
label=graph_labels_dict[int(line[1])])
G.add_edge(int(line[0]), int(line[1]))
adj = nx.adjacency_matrix(G, sorted(G.nodes()))
features = np.array([features for _, features in sorted(G.nodes(data='features'), key=lambda x: x[0])])
labels = np.array([label for _, label in sorted(G.nodes(data='label'), key=lambda x: x[0])])
features = normalize_features(features)
g = DGLGraph(adj)
g = dgl.to_simple(g)
g = dgl.to_bidirected(g)
g = dgl.remove_self_loop(g)
n = len(labels.tolist())
idx = [i for i in range(n)]
#random.shuffle(idx)
r0 = int(n * train_ratio)
r1 = int(n * 0.6)
r2 = int(n * 0.8)
train = np.array(idx[:r0])
val = np.array(idx[r1:r2])
test = np.array(idx[r2:])
nclass = len(set(labels.tolist()))
features = torch.FloatTensor(features)
labels = torch.LongTensor(labels)
train = torch.LongTensor(train)
val = torch.LongTensor(val)
test = torch.LongTensor(test)
print(dataset, nclass)
return g, nclass, features, labels, train, val, test
# datasets in FAGCN
elif dataset in ['new_chameleon', 'new_squirrel']:
edge = np.loadtxt('../high_freq/{}/edges.txt'.format(dataset), dtype=int)
labels = np.loadtxt('../high_freq/{}/labels.txt'.format(dataset), dtype=int).tolist()
features = np.loadtxt('../high_freq/{}/features.txt'.format(dataset), dtype=float)
U = [e[0] for e in edge]
V = [e[1] for e in edge]
g = dgl.graph((U, V))
g = dgl.to_simple(g)
g = dgl.to_bidirected(g)
g = dgl.remove_self_loop(g)
n = len(labels)
idx = [i for i in range(n)]
#random.shuffle(idx)
r0 = int(n * train_ratio)
r1 = int(n * 0.6)
r2 = int(n * 0.8)
train = np.array(idx[:r0])
val = np.array(idx[r1:r2])
test = np.array(idx[r2:])
features = normalize_features(features)
features = torch.FloatTensor(features)
nclass = 3
labels = torch.LongTensor(labels)
train = torch.LongTensor(train)
val = torch.LongTensor(val)
test = torch.LongTensor(test)
print(dataset, nclass)
return g, nclass, features, labels, train, val, test
def normalize_features(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def accuracy(logits, labels):
_, indices = torch.max(logits, dim=1)
correct = torch.sum(indices == labels)
return correct.item() * 1.0 / len(labels)
| [
"dgl.graph",
"scipy.sparse.diags",
"torch.sum",
"torch.LongTensor",
"random.shuffle",
"numpy.power",
"dgl.to_bidirected",
"torch.FloatTensor",
"numpy.isinf",
"dgl.DGLGraph",
"numpy.zeros",
"torch.max",
"numpy.array",
"networkx.DiGraph",
"dgl.to_simple",
"dgl.remove_self_loop"
] | [((10310, 10325), 'scipy.sparse.diags', 'sp.diags', (['r_inv'], {}), '(r_inv)\n', (10318, 10325), True, 'import scipy.sparse as sp\n'), ((10416, 10440), 'torch.max', 'torch.max', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (10425, 10440), False, 'import torch\n'), ((10455, 10483), 'torch.sum', 'torch.sum', (['(indices == labels)'], {}), '(indices == labels)\n', (10464, 10483), False, 'import torch\n'), ((879, 896), 'dgl.graph', 'dgl.graph', (['(U, V)'], {}), '((U, V))\n', (888, 896), False, 'import dgl\n'), ((909, 925), 'dgl.to_simple', 'dgl.to_simple', (['g'], {}), '(g)\n', (922, 925), False, 'import dgl\n'), ((938, 961), 'dgl.remove_self_loop', 'dgl.remove_self_loop', (['g'], {}), '(g)\n', (958, 961), False, 'import dgl\n'), ((974, 994), 'dgl.to_bidirected', 'dgl.to_bidirected', (['g'], {}), '(g)\n', (991, 994), False, 'import dgl\n'), ((1051, 1074), 'torch.FloatTensor', 'torch.FloatTensor', (['feat'], {}), '(feat)\n', (1068, 1074), False, 'import torch\n'), ((1092, 1116), 'torch.LongTensor', 'torch.LongTensor', (['labels'], {}), '(labels)\n', (1108, 1116), False, 'import torch\n'), ((1133, 1156), 'torch.LongTensor', 'torch.LongTensor', (['train'], {}), '(train)\n', (1149, 1156), False, 'import torch\n'), ((1171, 1192), 'torch.LongTensor', 'torch.LongTensor', (['val'], {}), '(val)\n', (1187, 1192), False, 'import torch\n'), ((1208, 1230), 'torch.LongTensor', 'torch.LongTensor', (['test'], {}), '(test)\n', (1224, 1230), False, 'import torch\n'), ((10272, 10287), 'numpy.isinf', 'np.isinf', (['r_inv'], {}), '(r_inv)\n', (10280, 10287), True, 'import numpy as np\n'), ((1620, 1639), 'random.shuffle', 'random.shuffle', (['idx'], {}), '(idx)\n', (1634, 1639), False, 'import random\n'), ((1660, 1679), 'numpy.array', 'np.array', (['idx[:100]'], {}), '(idx[:100])\n', (1668, 1679), True, 'import numpy as np\n'), ((1699, 1718), 'numpy.array', 'np.array', (['idx[100:]'], {}), '(idx[100:])\n', (1707, 1718), True, 'import numpy as np\n'), ((1798, 1815), 'dgl.graph', 
'dgl.graph', (['(U, V)'], {}), '((U, V))\n', (1807, 1815), False, 'import dgl\n'), ((2170, 2197), 'torch.FloatTensor', 'torch.FloatTensor', (['features'], {}), '(features)\n', (2187, 2197), False, 'import torch\n'), ((2235, 2259), 'torch.LongTensor', 'torch.LongTensor', (['labels'], {}), '(labels)\n', (2251, 2259), False, 'import torch\n'), ((2276, 2303), 'torch.LongTensor', 'torch.LongTensor', (['idx_train'], {}), '(idx_train)\n', (2292, 2303), False, 'import torch\n'), ((2319, 2345), 'torch.LongTensor', 'torch.LongTensor', (['idx_test'], {}), '(idx_test)\n', (2335, 2345), False, 'import torch\n'), ((10231, 10251), 'numpy.power', 'np.power', (['rowsum', '(-1)'], {}), '(rowsum, -1)\n', (10239, 10251), True, 'import numpy as np\n'), ((2703, 2715), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (2713, 2715), True, 'import networkx as nx\n'), ((5091, 5108), 'dgl.graph', 'dgl.graph', (['(U, V)'], {}), '((U, V))\n', (5100, 5108), False, 'import dgl\n'), ((5121, 5137), 'dgl.to_simple', 'dgl.to_simple', (['g'], {}), '(g)\n', (5134, 5137), False, 'import dgl\n'), ((5150, 5170), 'dgl.to_bidirected', 'dgl.to_bidirected', (['g'], {}), '(g)\n', (5167, 5170), False, 'import dgl\n'), ((5183, 5206), 'dgl.remove_self_loop', 'dgl.remove_self_loop', (['g'], {}), '(g)\n', (5203, 5206), False, 'import dgl\n'), ((5646, 5664), 'numpy.array', 'np.array', (['idx[:r0]'], {}), '(idx[:r0])\n', (5654, 5664), True, 'import numpy as np\n'), ((5683, 5703), 'numpy.array', 'np.array', (['idx[r1:r2]'], {}), '(idx[r1:r2])\n', (5691, 5703), True, 'import numpy as np\n'), ((5723, 5741), 'numpy.array', 'np.array', (['idx[r2:]'], {}), '(idx[r2:])\n', (5731, 5741), True, 'import numpy as np\n'), ((5810, 5837), 'torch.FloatTensor', 'torch.FloatTensor', (['features'], {}), '(features)\n', (5827, 5837), False, 'import torch\n'), ((5875, 5899), 'torch.LongTensor', 'torch.LongTensor', (['labels'], {}), '(labels)\n', (5891, 5899), False, 'import torch\n'), ((5916, 5943), 'torch.LongTensor', 
'torch.LongTensor', (['idx_train'], {}), '(idx_train)\n', (5932, 5943), False, 'import torch\n'), ((5958, 5983), 'torch.LongTensor', 'torch.LongTensor', (['idx_val'], {}), '(idx_val)\n', (5974, 5983), False, 'import torch\n'), ((5999, 6025), 'torch.LongTensor', 'torch.LongTensor', (['idx_test'], {}), '(idx_test)\n', (6015, 6025), False, 'import torch\n'), ((6451, 6463), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (6461, 6463), True, 'import networkx as nx\n'), ((8189, 8202), 'dgl.DGLGraph', 'DGLGraph', (['adj'], {}), '(adj)\n', (8197, 8202), False, 'from dgl import DGLGraph\n'), ((8215, 8231), 'dgl.to_simple', 'dgl.to_simple', (['g'], {}), '(g)\n', (8228, 8231), False, 'import dgl\n'), ((8244, 8264), 'dgl.to_bidirected', 'dgl.to_bidirected', (['g'], {}), '(g)\n', (8261, 8264), False, 'import dgl\n'), ((8277, 8300), 'dgl.remove_self_loop', 'dgl.remove_self_loop', (['g'], {}), '(g)\n', (8297, 8300), False, 'import dgl\n'), ((8502, 8520), 'numpy.array', 'np.array', (['idx[:r0]'], {}), '(idx[:r0])\n', (8510, 8520), True, 'import numpy as np\n'), ((8535, 8555), 'numpy.array', 'np.array', (['idx[r1:r2]'], {}), '(idx[r1:r2])\n', (8543, 8555), True, 'import numpy as np\n'), ((8571, 8589), 'numpy.array', 'np.array', (['idx[r2:]'], {}), '(idx[r2:])\n', (8579, 8589), True, 'import numpy as np\n'), ((8653, 8680), 'torch.FloatTensor', 'torch.FloatTensor', (['features'], {}), '(features)\n', (8670, 8680), False, 'import torch\n'), ((8698, 8722), 'torch.LongTensor', 'torch.LongTensor', (['labels'], {}), '(labels)\n', (8714, 8722), False, 'import torch\n'), ((8739, 8762), 'torch.LongTensor', 'torch.LongTensor', (['train'], {}), '(train)\n', (8755, 8762), False, 'import torch\n'), ((8777, 8798), 'torch.LongTensor', 'torch.LongTensor', (['val'], {}), '(val)\n', (8793, 8798), False, 'import torch\n'), ((8814, 8836), 'torch.LongTensor', 'torch.LongTensor', (['test'], {}), '(test)\n', (8830, 8836), False, 'import torch\n'), ((9357, 9374), 'dgl.graph', 'dgl.graph', (['(U, V)'], 
{}), '((U, V))\n', (9366, 9374), False, 'import dgl\n'), ((9387, 9403), 'dgl.to_simple', 'dgl.to_simple', (['g'], {}), '(g)\n', (9400, 9403), False, 'import dgl\n'), ((9416, 9436), 'dgl.to_bidirected', 'dgl.to_bidirected', (['g'], {}), '(g)\n', (9433, 9436), False, 'import dgl\n'), ((9449, 9472), 'dgl.remove_self_loop', 'dgl.remove_self_loop', (['g'], {}), '(g)\n', (9469, 9472), False, 'import dgl\n'), ((9665, 9683), 'numpy.array', 'np.array', (['idx[:r0]'], {}), '(idx[:r0])\n', (9673, 9683), True, 'import numpy as np\n'), ((9698, 9718), 'numpy.array', 'np.array', (['idx[r1:r2]'], {}), '(idx[r1:r2])\n', (9706, 9718), True, 'import numpy as np\n'), ((9734, 9752), 'numpy.array', 'np.array', (['idx[r2:]'], {}), '(idx[r2:])\n', (9742, 9752), True, 'import numpy as np\n'), ((9821, 9848), 'torch.FloatTensor', 'torch.FloatTensor', (['features'], {}), '(features)\n', (9838, 9848), False, 'import torch\n'), ((9886, 9910), 'torch.LongTensor', 'torch.LongTensor', (['labels'], {}), '(labels)\n', (9902, 9910), False, 'import torch\n'), ((9927, 9950), 'torch.LongTensor', 'torch.LongTensor', (['train'], {}), '(train)\n', (9943, 9950), False, 'import torch\n'), ((9965, 9986), 'torch.LongTensor', 'torch.LongTensor', (['val'], {}), '(val)\n', (9981, 9986), False, 'import torch\n'), ((10002, 10024), 'torch.LongTensor', 'torch.LongTensor', (['test'], {}), '(test)\n', (10018, 10024), False, 'import torch\n'), ((3297, 3327), 'numpy.zeros', 'np.zeros', (['(932)'], {'dtype': 'np.uint16'}), '(932, dtype=np.uint16)\n', (3305, 3327), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
import keras.backend as K
from sklearn.metrics import balanced_accuracy_score
from multilingual_title_classifier.src.path_helpers import get_path, get_resources_path
def predict_with_uncertainty(model, x, y, num_classes, beta=0.4, n_iter=100) -> None:
"""
Predict using dropout ensemble, failed attempt to reduce variance.
Motivated by:
https://github.com/keras-team/keras/issues/9412
https://arxiv.org/pdf/1506.02142.pdf
"""
f = K.function(model.inputs + [K.learning_phase()], model.outputs)
preds = model.predict(x, verbose=1)
avg_preds = np.zeros((x.shape[0], num_classes))
for i in range(n_iter):
avg_preds += np.concatenate([f((s, 1))[0] for s in np.array_split(x, 300)])
final_preds = beta * preds + (1 - beta) * avg_preds / (i + 1)
predicted_class = np.argmax(final_preds, axis=1)
print(balanced_accuracy_score(y, predicted_class))
def get_transfer_categories(dataset: pd.DataFrame) -> pd.DataFrame:
"""
Get categories where we will probably require transfer learning between languages.
"""
category_counts = (dataset
.groupby(['category', 'language'])
.count()
.reset_index()
.pivot(index='category',
columns='language',
values='title').reset_index())
return category_counts[category_counts['portuguese'].isna() | category_counts['spanish'].isna()]
def distribution_analysis(submission_fname: str) -> pd.DataFrame:
"""
Analyze distribution mismatch between training and test set.
:param submission_fname: Filename of submission.
:return: Pandas dataframe with distribution analysis.
"""
training_distribution = pd.read_csv(get_resources_path('train.csv')).groupby('category').count()[['title']]
training_distribution = training_distribution.rename(columns={"title": "train_count"})
training_distribution['pmf_train'] = training_distribution[
'train_count'] / training_distribution.train_count.sum() * 100
submission_distribution = pd.read_csv(get_path(submission_fname, dirs=['submissions'])).groupby('category').count()
submission_distribution = submission_distribution.rename(columns={"id": "val_count"})
submission_distribution['pmf_val'] = submission_distribution[
'val_count'] / submission_distribution.val_count.sum() * 100
dist_comp = submission_distribution.join(training_distribution)
dist_comp['dif'] = dist_comp['pmf_val'] - dist_comp['pmf_train']
return dist_comp.sort_values('dif')
def ensemble_analyzer() -> None:
"""
Ensemble analyzer, useful to avoid multiple submissions where not many predictions change.
:return: Pandas dataframe with observations that have changed.
"""
test_set = pd.read_csv(get_resources_path('test.csv'))
base_ensemble = None
for filepath in sorted(list(os.walk(get_path(dirs=['submissions'])))[0][2]):
if 'ensemble' in filepath:
if base_ensemble is None:
base_ensemble = pd.read_csv(get_path(filepath, dirs=['submissions']))
else:
print('Analyzing ensemble {}'.format(filepath))
current_ensemble = pd.read_csv(get_path(filepath, dirs=['submissions']))
merged_df = pd.merge(base_ensemble, current_ensemble, suffixes=('_base', '_curr'), on='id', how='inner')
dif = merged_df.category_base != merged_df.category_curr
print('Different predictions:', np.sum(dif))
base_ensemble = current_ensemble
return pd.merge(merged_df[merged_df.category_base != merged_df.category_curr], test_set, on='id', how='inner')
| [
"numpy.sum",
"keras.backend.learning_phase",
"numpy.argmax",
"pandas.merge",
"sklearn.metrics.balanced_accuracy_score",
"numpy.zeros",
"multilingual_title_classifier.src.path_helpers.get_resources_path",
"multilingual_title_classifier.src.path_helpers.get_path",
"numpy.array_split"
] | [((629, 664), 'numpy.zeros', 'np.zeros', (['(x.shape[0], num_classes)'], {}), '((x.shape[0], num_classes))\n', (637, 664), True, 'import numpy as np\n'), ((3778, 3885), 'pandas.merge', 'pd.merge', (['merged_df[merged_df.category_base != merged_df.category_curr]', 'test_set'], {'on': '"""id"""', 'how': '"""inner"""'}), "(merged_df[merged_df.category_base != merged_df.category_curr],\n test_set, on='id', how='inner')\n", (3786, 3885), True, 'import pandas as pd\n'), ((874, 904), 'numpy.argmax', 'np.argmax', (['final_preds'], {'axis': '(1)'}), '(final_preds, axis=1)\n', (883, 904), True, 'import numpy as np\n'), ((2994, 3024), 'multilingual_title_classifier.src.path_helpers.get_resources_path', 'get_resources_path', (['"""test.csv"""'], {}), "('test.csv')\n", (3012, 3024), False, 'from multilingual_title_classifier.src.path_helpers import get_path, get_resources_path\n'), ((919, 962), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y', 'predicted_class'], {}), '(y, predicted_class)\n', (942, 962), False, 'from sklearn.metrics import balanced_accuracy_score\n'), ((537, 555), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (553, 555), True, 'import keras.backend as K\n'), ((3490, 3587), 'pandas.merge', 'pd.merge', (['base_ensemble', 'current_ensemble'], {'suffixes': "('_base', '_curr')", 'on': '"""id"""', 'how': '"""inner"""'}), "(base_ensemble, current_ensemble, suffixes=('_base', '_curr'), on=\n 'id', how='inner')\n", (3498, 3587), True, 'import pandas as pd\n'), ((753, 775), 'numpy.array_split', 'np.array_split', (['x', '(300)'], {}), '(x, 300)\n', (767, 775), True, 'import numpy as np\n'), ((3249, 3289), 'multilingual_title_classifier.src.path_helpers.get_path', 'get_path', (['filepath'], {'dirs': "['submissions']"}), "(filepath, dirs=['submissions'])\n", (3257, 3289), False, 'from multilingual_title_classifier.src.path_helpers import get_path, get_resources_path\n'), ((3420, 3460), 
'multilingual_title_classifier.src.path_helpers.get_path', 'get_path', (['filepath'], {'dirs': "['submissions']"}), "(filepath, dirs=['submissions'])\n", (3428, 3460), False, 'from multilingual_title_classifier.src.path_helpers import get_path, get_resources_path\n'), ((3704, 3715), 'numpy.sum', 'np.sum', (['dif'], {}), '(dif)\n', (3710, 3715), True, 'import numpy as np\n'), ((2235, 2283), 'multilingual_title_classifier.src.path_helpers.get_path', 'get_path', (['submission_fname'], {'dirs': "['submissions']"}), "(submission_fname, dirs=['submissions'])\n", (2243, 2283), False, 'from multilingual_title_classifier.src.path_helpers import get_path, get_resources_path\n'), ((3091, 3121), 'multilingual_title_classifier.src.path_helpers.get_path', 'get_path', ([], {'dirs': "['submissions']"}), "(dirs=['submissions'])\n", (3099, 3121), False, 'from multilingual_title_classifier.src.path_helpers import get_path, get_resources_path\n'), ((1857, 1888), 'multilingual_title_classifier.src.path_helpers.get_resources_path', 'get_resources_path', (['"""train.csv"""'], {}), "('train.csv')\n", (1875, 1888), False, 'from multilingual_title_classifier.src.path_helpers import get_path, get_resources_path\n')] |
import sys
import os
sys.path.append('../')
from models.mobilenet import mbv2
from models.resnet import rf_lw_model
from models.mobilenet import rf_lw_mbv2_model
from utils.helpers import prepare_img
import cv2
import numpy as np
import torch
from PIL import Image
cmap = np.load('../utils/cmap.npy')
cmap = cmap[:21,:]
has_cuda = torch.cuda.is_available()
n_classes = 21
test_list = '/home/xiejinluo/data/PASCAL_VOC/test/VOCdevkit/VOC2012/ImageSets/Segmentation/test.txt'
#test_list = '/home/xiejinluo/data/PASCAL_VOC/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt'
imgs_dir = '/home/xiejinluo/data/PASCAL_VOC/test/VOCdevkit/VOC2012/JPEGImages/'
#imgs_dir = '/home/xiejinluo/data/PASCAL_VOC/VOCdevkit/VOC2012/JPEGImages/'
gt_dir = '/home/xiejinluo/data/PASCAL_VOC/VOCdevkit/VOC2012/SegmentationClass/'
# get one model
#model_url = '/home/xiejinluo/.torch/models/rf_lwmbv2_voc.pth.tar'
#model_url = '/home/xiejinluo/yww/light-weight-refinenet/ckpt/aug_res50_bs6_1n/checkpoint.pth.tar'
#model_url = '/home/xiejinluo/yww/light-weight-refinenet/ckpt_mbv2/aug_mbv2_bs6_1n/checkpoint306.pth.tar'
model_url = '/home/xiejinluo/yww/light-weight-refinenet/ckpt_coco_mbv2/coco_mbv2_bs6_1n/checkpoint330epochwith_coco.pth.tar'
#net = rf_lw_model(n_classes, "res50", model_url)
net = rf_lw_mbv2_model(n_classes, "mbv2", model_url)
if has_cuda:
print("has cuda")
net = torch.nn.DataParallel(net).cuda()
else:
print("has not cuda")
net = net.eval()
cnt = 0
with torch.no_grad():
for img_name in open(test_list,'r').readlines():
img_name = img_name.strip()
img_path = os.path.join(imgs_dir, img_name+'.jpg')
if not os.path.exists(img_path):
print("Can not find "+img_path)
continue
img = np.array(Image.open(img_path))
orig_size = img.shape[:2][::-1]
img_inp = torch.autograd.Variable(torch.tensor(prepare_img(img).transpose(2, 0, 1)[None])).float()
if has_cuda:
img_inp = img_inp.cuda()
segm = net(img_inp)[0, :n_classes].data.cpu().numpy().transpose(1, 2, 0)
segm = cv2.resize(segm, orig_size, interpolation=cv2.INTER_CUBIC)
segm2 = segm.argmax(axis=2).astype(np.uint8)
#segm = cmap[segm2]
#segm=cv2.cvtColor(segm, cv2.COLOR_BGR2RGB)
#print (segm.shape)
#cv2.imwrite('tests/'+img_name+'_grey.png',segm2)
cv2.imwrite('tests/'+img_name+'.png',segm2)
#os.system("cp "+img_path+" tests/")
#gt_path = os.path.join(gt_dir, img_name+'.png')
#os.system("cp "+gt_path+" tests/"+img_name+'_gt.png')
#cv2.imwrite('seg_color.png',segm2)
#tee = np.array(Image.open(gt_path))
#print(tee.shape)
#tee = cmap[tee]
#cv2.imwrite('tests/'+img_name+'gt_color.png',tee)
cnt+=1
if cnt%20==0:
print (cnt)
| [
"sys.path.append",
"numpy.load",
"cv2.imwrite",
"models.mobilenet.rf_lw_mbv2_model",
"os.path.exists",
"PIL.Image.open",
"utils.helpers.prepare_img",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.no_grad",
"os.path.join",
"cv2.resize"
] | [((21, 43), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (36, 43), False, 'import sys\n'), ((276, 304), 'numpy.load', 'np.load', (['"""../utils/cmap.npy"""'], {}), "('../utils/cmap.npy')\n", (283, 304), True, 'import numpy as np\n'), ((335, 360), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (358, 360), False, 'import torch\n'), ((1279, 1325), 'models.mobilenet.rf_lw_mbv2_model', 'rf_lw_mbv2_model', (['n_classes', '"""mbv2"""', 'model_url'], {}), "(n_classes, 'mbv2', model_url)\n", (1295, 1325), False, 'from models.mobilenet import rf_lw_mbv2_model\n'), ((1467, 1482), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1480, 1482), False, 'import torch\n'), ((1592, 1633), 'os.path.join', 'os.path.join', (['imgs_dir', "(img_name + '.jpg')"], {}), "(imgs_dir, img_name + '.jpg')\n", (1604, 1633), False, 'import os\n'), ((2095, 2153), 'cv2.resize', 'cv2.resize', (['segm', 'orig_size'], {'interpolation': 'cv2.INTER_CUBIC'}), '(segm, orig_size, interpolation=cv2.INTER_CUBIC)\n', (2105, 2153), False, 'import cv2\n'), ((2381, 2429), 'cv2.imwrite', 'cv2.imwrite', (["('tests/' + img_name + '.png')", 'segm2'], {}), "('tests/' + img_name + '.png', segm2)\n", (2392, 2429), False, 'import cv2\n'), ((1371, 1397), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (1392, 1397), False, 'import torch\n'), ((1647, 1671), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (1661, 1671), False, 'import os\n'), ((1762, 1782), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1772, 1782), False, 'from PIL import Image\n'), ((1889, 1905), 'utils.helpers.prepare_img', 'prepare_img', (['img'], {}), '(img)\n', (1900, 1905), False, 'from utils.helpers import prepare_img\n')] |
import tensorflow as tf
import numpy as np
import tf_utils
# hyper parameters
ALPHA=1.
STEP_SIZE=1e-4
LAYER1_SIZE = 400
LAYER2_SIZE = 300
class SVPG:
def __init__(self,sess,actor_nets,actor_pg_list,state_dim,action_dim,independent_flag=0):
self.alpha=ALPHA;
self.step_size=STEP_SIZE;
self.n_particles=len(actor_nets);
self.params_num=len(actor_nets[0]);
self.state_dim=state_dim;
self.action_dim=action_dim;
self.independent_flag=independent_flag;
self.sess=sess;
# make svgd
self.svgd_set(actor_nets,actor_pg_list);
def run(self):
self.sess.run(self.optimizer);
def svgd_set(self,p_list,l_list):
layer1_size=LAYER1_SIZE;
layer2_size=LAYER2_SIZE;
p_flat_list=self.make_flat(p_list);
l_flat_list=self.make_flat(l_list);
# gradients
if(self.n_particles==1):
grad=(1/self.alpha)*l_flat_list[0];
else:
kernel_mat,grad_kernel=self.kernel(p_flat_list);
# independently learning or not
if(self.independent_flag!=1):
# delta prior is assumed as 1.0
grad=(tf.matmul(kernel_mat,((1/self.alpha)*l_flat_list))-grad_kernel)/(self.n_particles);
else:
# when independently learning, each particle is just learned as topology of original DDPG
grad=l_flat_list;
# reshape gradient
if(self.n_particles>1):
grad=tf.unstack(grad,axis=0);
else:
grad=[grad];
grad_list=np.zeros((self.n_particles,self.params_num),dtype=object);
for i in range(self.n_particles):
# W1
st_idx=0;length=self.state_dim*layer1_size;
grad_list[i,0]=tf.reshape(tf.slice(grad[i],[st_idx],[length]),[self.state_dim,layer1_size]);
# b1
st_idx+=length;length=layer1_size;
grad_list[i,1]=tf.slice(grad[i],[st_idx],[length]);
# W2
st_idx+=length;length=layer1_size*layer2_size;
grad_list[i,2]=tf.reshape(tf.slice(grad[i],[st_idx],[length]),[layer1_size,layer2_size]);
# b2
st_idx+=length;length=layer2_size;
grad_list[i,3]=tf.slice(grad[i],[st_idx],[length]);
# W3
st_idx+=length;length=layer2_size*self.action_dim;
grad_list[i,4]=tf.reshape(tf.slice(grad[i],[st_idx],[length]),[layer2_size,self.action_dim]);
# b3
st_idx+=length;length=self.action_dim;
grad_list[i,5]=tf.slice(grad[i],[st_idx],[length]);
# optimizer
grad_list=list(np.reshape(grad_list,[-1]));
p_list=list(np.reshape(p_list,[-1]));
self.optimizer=tf.train.AdamOptimizer(self.step_size).apply_gradients(zip(grad_list,p_list));
def make_flat(self,p_list):
p_list2=np.zeros((len(p_list),len(p_list[0])),dtype=object);
for i in range(len(p_list)):
for j in range(len(p_list[0])):
p_list2[i,j]=tf.reshape(p_list[i][j],[-1]);
p_flat_list=[];
for i in range(len(p_list2)):
p_flat_list.append(tf.concat(list(p_list2[i]),axis=0));
return tf.stack(p_flat_list,axis=0);
def kernel(self, particle_tensor):
# kernel
h = -1
euclidean_dists = tf_utils.pdist(particle_tensor)
pairwise_dists = tf_utils.squareform(euclidean_dists) ** 2
# kernel trick
h = tf.sqrt(0.5 * tf_utils.median(pairwise_dists) / tf.log(self.n_particles + 1.))
kernel_matrix = tf.exp(-pairwise_dists / h ** 2 / 2)
kernel_sum = tf.reduce_sum(kernel_matrix, axis=1)
grad_kernel = tf.add(-tf.matmul(kernel_matrix, particle_tensor),tf.multiply(particle_tensor, tf.expand_dims(kernel_sum, axis=1))) / (h ** 2)
return kernel_matrix, grad_kernel
| [
"tf_utils.pdist",
"tensorflow.reduce_sum",
"tf_utils.median",
"tensorflow.reshape",
"numpy.zeros",
"tensorflow.stack",
"tensorflow.matmul",
"tensorflow.exp",
"numpy.reshape",
"tensorflow.log",
"tensorflow.slice",
"tensorflow.train.AdamOptimizer",
"tensorflow.unstack",
"tensorflow.expand_di... | [((1420, 1479), 'numpy.zeros', 'np.zeros', (['(self.n_particles, self.params_num)'], {'dtype': 'object'}), '((self.n_particles, self.params_num), dtype=object)\n', (1428, 1479), True, 'import numpy as np\n'), ((2894, 2923), 'tensorflow.stack', 'tf.stack', (['p_flat_list'], {'axis': '(0)'}), '(p_flat_list, axis=0)\n', (2902, 2923), True, 'import tensorflow as tf\n'), ((3008, 3039), 'tf_utils.pdist', 'tf_utils.pdist', (['particle_tensor'], {}), '(particle_tensor)\n', (3022, 3039), False, 'import tf_utils\n'), ((3229, 3265), 'tensorflow.exp', 'tf.exp', (['(-pairwise_dists / h ** 2 / 2)'], {}), '(-pairwise_dists / h ** 2 / 2)\n', (3235, 3265), True, 'import tensorflow as tf\n'), ((3283, 3319), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kernel_matrix'], {'axis': '(1)'}), '(kernel_matrix, axis=1)\n', (3296, 3319), True, 'import tensorflow as tf\n'), ((1352, 1376), 'tensorflow.unstack', 'tf.unstack', (['grad'], {'axis': '(0)'}), '(grad, axis=0)\n', (1362, 1376), True, 'import tensorflow as tf\n'), ((1750, 1787), 'tensorflow.slice', 'tf.slice', (['grad[i]', '[st_idx]', '[length]'], {}), '(grad[i], [st_idx], [length])\n', (1758, 1787), True, 'import tensorflow as tf\n'), ((2020, 2057), 'tensorflow.slice', 'tf.slice', (['grad[i]', '[st_idx]', '[length]'], {}), '(grad[i], [st_idx], [length])\n', (2028, 2057), True, 'import tensorflow as tf\n'), ((2302, 2339), 'tensorflow.slice', 'tf.slice', (['grad[i]', '[st_idx]', '[length]'], {}), '(grad[i], [st_idx], [length])\n', (2310, 2339), True, 'import tensorflow as tf\n'), ((2375, 2402), 'numpy.reshape', 'np.reshape', (['grad_list', '[-1]'], {}), '(grad_list, [-1])\n', (2385, 2402), True, 'import numpy as np\n'), ((2420, 2444), 'numpy.reshape', 'np.reshape', (['p_list', '[-1]'], {}), '(p_list, [-1])\n', (2430, 2444), True, 'import numpy as np\n'), ((3061, 3097), 'tf_utils.squareform', 'tf_utils.squareform', (['euclidean_dists'], {}), '(euclidean_dists)\n', (3080, 3097), False, 'import tf_utils\n'), 
((1610, 1647), 'tensorflow.slice', 'tf.slice', (['grad[i]', '[st_idx]', '[length]'], {}), '(grad[i], [st_idx], [length])\n', (1618, 1647), True, 'import tensorflow as tf\n'), ((1883, 1920), 'tensorflow.slice', 'tf.slice', (['grad[i]', '[st_idx]', '[length]'], {}), '(grad[i], [st_idx], [length])\n', (1891, 1920), True, 'import tensorflow as tf\n'), ((2157, 2194), 'tensorflow.slice', 'tf.slice', (['grad[i]', '[st_idx]', '[length]'], {}), '(grad[i], [st_idx], [length])\n', (2165, 2194), True, 'import tensorflow as tf\n'), ((2465, 2503), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.step_size'], {}), '(self.step_size)\n', (2487, 2503), True, 'import tensorflow as tf\n'), ((2736, 2766), 'tensorflow.reshape', 'tf.reshape', (['p_list[i][j]', '[-1]'], {}), '(p_list[i][j], [-1])\n', (2746, 2766), True, 'import tensorflow as tf\n'), ((3178, 3208), 'tensorflow.log', 'tf.log', (['(self.n_particles + 1.0)'], {}), '(self.n_particles + 1.0)\n', (3184, 3208), True, 'import tensorflow as tf\n'), ((3144, 3175), 'tf_utils.median', 'tf_utils.median', (['pairwise_dists'], {}), '(pairwise_dists)\n', (3159, 3175), False, 'import tf_utils\n'), ((3346, 3387), 'tensorflow.matmul', 'tf.matmul', (['kernel_matrix', 'particle_tensor'], {}), '(kernel_matrix, particle_tensor)\n', (3355, 3387), True, 'import tensorflow as tf\n'), ((3417, 3451), 'tensorflow.expand_dims', 'tf.expand_dims', (['kernel_sum'], {'axis': '(1)'}), '(kernel_sum, axis=1)\n', (3431, 3451), True, 'import tensorflow as tf\n'), ((1069, 1120), 'tensorflow.matmul', 'tf.matmul', (['kernel_mat', '(1 / self.alpha * l_flat_list)'], {}), '(kernel_mat, 1 / self.alpha * l_flat_list)\n', (1078, 1120), True, 'import tensorflow as tf\n')] |
"""Tests for NomialArray class"""
import unittest
import warnings as pywarnings
import numpy as np
from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial
from gpkit.constraints.set import ConstraintSet
from gpkit.exceptions import DimensionalityError
import gpkit
class TestNomialArray(unittest.TestCase):
"""TestCase for the NomialArray class.
Also tests VectorVariable, since VectorVariable returns a NomialArray
"""
def test_shape(self):
x = VectorVariable((2, 3), 'x')
self.assertEqual(x.shape, (2, 3))
self.assertIsInstance(x.str_without(), str)
self.assertIsInstance(x.latex(), str)
def test_ndim(self):
x = VectorVariable((3, 4), 'x')
self.assertEqual(x.ndim, 2)
def test_array_mult(self):
x = VectorVariable(3, 'x', label='dummy variable')
x_0 = Variable('x', idx=(0,), shape=(3,), label='dummy variable')
x_1 = Variable('x', idx=(1,), shape=(3,), label='dummy variable')
x_2 = Variable('x', idx=(2,), shape=(3,), label='dummy variable')
p = x_0**2 + x_1**2 + x_2**2
self.assertEqual(x.dot(x), p)
m = NomialArray([[x_0**2, x_0*x_1, x_0*x_2],
[x_0*x_1, x_1**2, x_1*x_2],
[x_0*x_2, x_1*x_2, x_2**2]])
self.assertEqual(x.outer(x), m)
def test_elementwise_mult(self):
m = Variable('m')
x = VectorVariable(3, 'x', label='dummy variable')
x_0 = Variable('x', idx=(0,), shape=(3,), label='dummy variable')
x_1 = Variable('x', idx=(1,), shape=(3,), label='dummy variable')
x_2 = Variable('x', idx=(2,), shape=(3,), label='dummy variable')
# multiplication with numbers
v = NomialArray([2, 2, 3]).T
p = NomialArray([2*x_0, 2*x_1, 3*x_2]).T
self.assertEqual(x*v, p)
# division with numbers
p2 = NomialArray([x_0/2, x_1/2, x_2/3]).T
self.assertEqual(x/v, p2)
# power
p3 = NomialArray([x_0**2, x_1**2, x_2**2]).T
self.assertEqual(x**2, p3)
# multiplication with monomials
p = NomialArray([m*x_0, m*x_1, m*x_2]).T
self.assertEqual(x*m, p)
# division with monomials
p2 = NomialArray([x_0/m, x_1/m, x_2/m]).T
self.assertEqual(x/m, p2)
self.assertIsInstance(v.str_without(), str)
self.assertIsInstance(v.latex(), str)
self.assertIsInstance(p.str_without(), str)
self.assertIsInstance(p.latex(), str)
def test_constraint_gen(self):
x = VectorVariable(3, 'x', label='dummy variable')
x_0 = Variable('x', idx=(0,), shape=(3,), label='dummy variable')
x_1 = Variable('x', idx=(1,), shape=(3,), label='dummy variable')
x_2 = Variable('x', idx=(2,), shape=(3,), label='dummy variable')
v = NomialArray([1, 2, 3]).T
p = [x_0, x_1/2, x_2/3]
constraint = ConstraintSet([x <= v])
self.assertEqual(list(constraint.as_hmapslt1({})), [e.hmap for e in p])
def test_substition(self): # pylint: disable=no-member
x = VectorVariable(3, 'x', label='dummy variable')
c = {x: [1, 2, 3]}
self.assertEqual(x.sub(c), [Monomial({}, e) for e in [1, 2, 3]])
p = x**2
self.assertEqual(p.sub(c), [Monomial({}, e) for e in [1, 4, 9]]) # pylint: disable=no-member
d = p.sum()
self.assertEqual(d.sub(c), Monomial({}, 14)) # pylint: disable=no-member
def test_units(self):
# inspired by gpkit issue #106
c = VectorVariable(5, "c", "m", "Local Chord")
constraints = (c == 1*gpkit.units.m)
self.assertEqual(len(constraints), 5)
# test an array with inconsistent units
with pywarnings.catch_warnings(): # skip the UnitStrippedWarning
pywarnings.simplefilter("ignore")
mismatch = NomialArray([1*gpkit.units.m, 1*gpkit.ureg.ft, 1.0])
self.assertRaises(DimensionalityError, mismatch.sum)
self.assertEqual(mismatch[:2].sum().c, 1.3048*gpkit.ureg.m) # pylint:disable=no-member
self.assertEqual(mismatch.prod().c, 1*gpkit.ureg.m*gpkit.ureg.ft) # pylint:disable=no-member
def test_sum(self):
x = VectorVariable(5, 'x')
p = x.sum()
self.assertTrue(isinstance(p, Posynomial))
self.assertEqual(p, sum(x))
x = VectorVariable((2, 3), 'x')
rowsum = x.sum(axis=1)
colsum = x.sum(axis=0)
self.assertTrue(isinstance(rowsum, NomialArray))
self.assertTrue(isinstance(colsum, NomialArray))
self.assertEqual(rowsum[0], sum(x[0]))
self.assertEqual(colsum[0], sum(x[:, 0]))
self.assertEqual(len(rowsum), 2)
self.assertEqual(len(colsum), 3)
def test_getitem(self):
x = VectorVariable((2, 4), 'x')
self.assertTrue(isinstance(x[0][0], Monomial))
self.assertTrue(isinstance(x[0, 0], Monomial))
def test_prod(self):
x = VectorVariable(3, 'x')
m = x.prod()
self.assertTrue(isinstance(m, Monomial))
self.assertEqual(m, x[0]*x[1]*x[2])
self.assertEqual(m, np.prod(x))
pows = NomialArray([x[0], x[0]**2, x[0]**3])
self.assertEqual(pows.prod(), x[0]**6)
def test_outer(self):
x = VectorVariable(3, 'x')
y = VectorVariable(3, 'y')
self.assertEqual(np.outer(x, y), x.outer(y))
self.assertEqual(np.outer(y, x), y.outer(x))
self.assertTrue(isinstance(x.outer(y), NomialArray))
def test_empty(self):
x = VectorVariable(3, 'x')
# have to create this using slicing, to get object dtype
empty_posy_array = x[:0]
self.assertRaises(ValueError, empty_posy_array.sum)
self.assertRaises(ValueError, empty_posy_array.prod)
self.assertEqual(len(empty_posy_array), 0)
self.assertEqual(empty_posy_array.ndim, 1)
TESTS = [TestNomialArray]
if __name__ == "__main__": # pragma: no cover
# pylint: disable=wrong-import-position
from gpkit.tests.helpers import run_tests
run_tests(TESTS)
| [
"gpkit.NomialArray",
"numpy.outer",
"gpkit.VectorVariable",
"warnings.simplefilter",
"gpkit.constraints.set.ConstraintSet",
"gpkit.tests.helpers.run_tests",
"gpkit.Monomial",
"warnings.catch_warnings",
"gpkit.Variable",
"numpy.prod"
] | [((6052, 6068), 'gpkit.tests.helpers.run_tests', 'run_tests', (['TESTS'], {}), '(TESTS)\n', (6061, 6068), False, 'from gpkit.tests.helpers import run_tests\n'), ((495, 522), 'gpkit.VectorVariable', 'VectorVariable', (['(2, 3)', '"""x"""'], {}), "((2, 3), 'x')\n", (509, 522), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((701, 728), 'gpkit.VectorVariable', 'VectorVariable', (['(3, 4)', '"""x"""'], {}), "((3, 4), 'x')\n", (715, 728), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((809, 855), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {'label': '"""dummy variable"""'}), "(3, 'x', label='dummy variable')\n", (823, 855), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((870, 929), 'gpkit.Variable', 'Variable', (['"""x"""'], {'idx': '(0,)', 'shape': '(3,)', 'label': '"""dummy variable"""'}), "('x', idx=(0,), shape=(3,), label='dummy variable')\n", (878, 929), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((944, 1003), 'gpkit.Variable', 'Variable', (['"""x"""'], {'idx': '(1,)', 'shape': '(3,)', 'label': '"""dummy variable"""'}), "('x', idx=(1,), shape=(3,), label='dummy variable')\n", (952, 1003), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1018, 1077), 'gpkit.Variable', 'Variable', (['"""x"""'], {'idx': '(2,)', 'shape': '(3,)', 'label': '"""dummy variable"""'}), "('x', idx=(2,), shape=(3,), label='dummy variable')\n", (1026, 1077), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1165, 1284), 'gpkit.NomialArray', 'NomialArray', (['[[x_0 ** 2, x_0 * x_1, x_0 * x_2], [x_0 * x_1, x_1 ** 2, x_1 * x_2], [x_0 *\n x_2, x_1 * x_2, x_2 ** 2]]'], {}), '([[x_0 ** 2, x_0 * x_1, x_0 * x_2], [x_0 * x_1, x_1 ** 2, x_1 *\n x_2], [x_0 * x_2, x_1 * x_2, x_2 ** 2]])\n', (1176, 1284), False, 'from 
gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1403, 1416), 'gpkit.Variable', 'Variable', (['"""m"""'], {}), "('m')\n", (1411, 1416), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1429, 1475), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {'label': '"""dummy variable"""'}), "(3, 'x', label='dummy variable')\n", (1443, 1475), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1490, 1549), 'gpkit.Variable', 'Variable', (['"""x"""'], {'idx': '(0,)', 'shape': '(3,)', 'label': '"""dummy variable"""'}), "('x', idx=(0,), shape=(3,), label='dummy variable')\n", (1498, 1549), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1564, 1623), 'gpkit.Variable', 'Variable', (['"""x"""'], {'idx': '(1,)', 'shape': '(3,)', 'label': '"""dummy variable"""'}), "('x', idx=(1,), shape=(3,), label='dummy variable')\n", (1572, 1623), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1638, 1697), 'gpkit.Variable', 'Variable', (['"""x"""'], {'idx': '(2,)', 'shape': '(3,)', 'label': '"""dummy variable"""'}), "('x', idx=(2,), shape=(3,), label='dummy variable')\n", (1646, 1697), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((2559, 2605), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {'label': '"""dummy variable"""'}), "(3, 'x', label='dummy variable')\n", (2573, 2605), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((2620, 2679), 'gpkit.Variable', 'Variable', (['"""x"""'], {'idx': '(0,)', 'shape': '(3,)', 'label': '"""dummy variable"""'}), "('x', idx=(0,), shape=(3,), label='dummy variable')\n", (2628, 2679), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((2694, 2753), 'gpkit.Variable', 'Variable', (['"""x"""'], {'idx': 
'(1,)', 'shape': '(3,)', 'label': '"""dummy variable"""'}), "('x', idx=(1,), shape=(3,), label='dummy variable')\n", (2702, 2753), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((2768, 2827), 'gpkit.Variable', 'Variable', (['"""x"""'], {'idx': '(2,)', 'shape': '(3,)', 'label': '"""dummy variable"""'}), "('x', idx=(2,), shape=(3,), label='dummy variable')\n", (2776, 2827), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((2918, 2941), 'gpkit.constraints.set.ConstraintSet', 'ConstraintSet', (['[x <= v]'], {}), '([x <= v])\n', (2931, 2941), False, 'from gpkit.constraints.set import ConstraintSet\n'), ((3095, 3141), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {'label': '"""dummy variable"""'}), "(3, 'x', label='dummy variable')\n", (3109, 3141), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((3541, 3583), 'gpkit.VectorVariable', 'VectorVariable', (['(5)', '"""c"""', '"""m"""', '"""Local Chord"""'], {}), "(5, 'c', 'm', 'Local Chord')\n", (3555, 3583), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((4215, 4237), 'gpkit.VectorVariable', 'VectorVariable', (['(5)', '"""x"""'], {}), "(5, 'x')\n", (4229, 4237), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((4358, 4385), 'gpkit.VectorVariable', 'VectorVariable', (['(2, 3)', '"""x"""'], {}), "((2, 3), 'x')\n", (4372, 4385), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((4782, 4809), 'gpkit.VectorVariable', 'VectorVariable', (['(2, 4)', '"""x"""'], {}), "((2, 4), 'x')\n", (4796, 4809), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((4958, 4980), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {}), "(3, 'x')\n", (4972, 4980), False, 'from gpkit import Variable, Posynomial, NomialArray, 
VectorVariable, Monomial\n'), ((5150, 5191), 'gpkit.NomialArray', 'NomialArray', (['[x[0], x[0] ** 2, x[0] ** 3]'], {}), '([x[0], x[0] ** 2, x[0] ** 3])\n', (5161, 5191), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((5274, 5296), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {}), "(3, 'x')\n", (5288, 5296), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((5309, 5331), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""y"""'], {}), "(3, 'y')\n", (5323, 5331), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((5538, 5560), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {}), "(3, 'x')\n", (5552, 5560), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1748, 1770), 'gpkit.NomialArray', 'NomialArray', (['[2, 2, 3]'], {}), '([2, 2, 3])\n', (1759, 1770), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1785, 1825), 'gpkit.NomialArray', 'NomialArray', (['[2 * x_0, 2 * x_1, 3 * x_2]'], {}), '([2 * x_0, 2 * x_1, 3 * x_2])\n', (1796, 1825), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((1900, 1940), 'gpkit.NomialArray', 'NomialArray', (['[x_0 / 2, x_1 / 2, x_2 / 3]'], {}), '([x_0 / 2, x_1 / 2, x_2 / 3])\n', (1911, 1940), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((2000, 2043), 'gpkit.NomialArray', 'NomialArray', (['[x_0 ** 2, x_1 ** 2, x_2 ** 2]'], {}), '([x_0 ** 2, x_1 ** 2, x_2 ** 2])\n', (2011, 2043), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((2127, 2167), 'gpkit.NomialArray', 'NomialArray', (['[m * x_0, m * x_1, m * x_2]'], {}), '([m * x_0, m * x_1, m * x_2])\n', (2138, 2167), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((2244, 
2284), 'gpkit.NomialArray', 'NomialArray', (['[x_0 / m, x_1 / m, x_2 / m]'], {}), '([x_0 / m, x_1 / m, x_2 / m])\n', (2255, 2284), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((2840, 2862), 'gpkit.NomialArray', 'NomialArray', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2851, 2862), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((3416, 3432), 'gpkit.Monomial', 'Monomial', (['{}', '(14)'], {}), '({}, 14)\n', (3424, 3432), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((3736, 3763), 'warnings.catch_warnings', 'pywarnings.catch_warnings', ([], {}), '()\n', (3761, 3763), True, 'import warnings as pywarnings\n'), ((3809, 3842), 'warnings.simplefilter', 'pywarnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3832, 3842), True, 'import warnings as pywarnings\n'), ((3866, 3922), 'gpkit.NomialArray', 'NomialArray', (['[1 * gpkit.units.m, 1 * gpkit.ureg.ft, 1.0]'], {}), '([1 * gpkit.units.m, 1 * gpkit.ureg.ft, 1.0])\n', (3877, 3922), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((5123, 5133), 'numpy.prod', 'np.prod', (['x'], {}), '(x)\n', (5130, 5133), True, 'import numpy as np\n'), ((5357, 5371), 'numpy.outer', 'np.outer', (['x', 'y'], {}), '(x, y)\n', (5365, 5371), True, 'import numpy as np\n'), ((5410, 5424), 'numpy.outer', 'np.outer', (['y', 'x'], {}), '(y, x)\n', (5418, 5424), True, 'import numpy as np\n'), ((3205, 3220), 'gpkit.Monomial', 'Monomial', (['{}', 'e'], {}), '({}, e)\n', (3213, 3220), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n'), ((3295, 3310), 'gpkit.Monomial', 'Monomial', (['{}', 'e'], {}), '({}, e)\n', (3303, 3310), False, 'from gpkit import Variable, Posynomial, NomialArray, VectorVariable, Monomial\n')] |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2021 New York University <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains the CAMeL Tools dialect identification component.
This Dialect Identification system can identify between 25 Arabic city dialects
as well as Modern Standard Arabic. It is based on the system described by
`Salameh, Bouamor and Habash <http://www.aclweb.org/anthology/C18-1113>`_.
"""
import collections
from pathlib import Path
import sys
if sys.platform == 'win32':
raise ModuleNotFoundError(
'camel_tools.dialectid is not available on Windows.')
else:
import kenlm
import numpy as np
import pandas as pd
import scipy as sp
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import normalize
from sklearn.metrics import accuracy_score, f1_score, recall_score
from sklearn.metrics import precision_score
import dill
from camel_tools.data import DataCatalogue
from camel_tools.tokenizers.word import simple_word_tokenize
from camel_tools.utils.dediac import dediac_ar
_DEFAULT_LABELS = frozenset(['ALE', 'ALG', 'ALX', 'AMM', 'ASW', 'BAG', 'BAS',
'BEI', 'BEN', 'CAI', 'DAM', 'DOH', 'FES', 'JED',
'JER', 'KHA', 'MOS', 'MSA', 'MUS', 'RAB', 'RIY',
'SAL', 'SAN', 'SFX', 'TRI', 'TUN'])
_DEFAULT_LABELS_EXTRA = frozenset(['BEI', 'CAI', 'DOH', 'MSA', 'RAB', 'TUN'])
_DEFAULT_COUNTRIES = frozenset(['Algeria', 'Egypt', 'Iraq', 'Jordan',
'Lebanon', 'Libya', 'Modern Standard Arabic',
'Morocco', 'Oman', 'Palestine', 'Qatar',
'Saudi Arabia', 'Sudan', 'Syria', 'Tunisia',
'Yemen'])
_DEFAULT_REGIONS = frozenset(['Gulf', 'Gulf of Aden', 'Levant', 'Maghreb',
'Modern Standard Arabic', 'Nile Basin'])
_LABEL_TO_CITY_MAP = {
'ALE': 'Aleppo',
'ALG': 'Algiers',
'ALX': 'Alexandria',
'AMM': 'Amman',
'ASW': 'Aswan',
'BAG': 'Baghdad',
'BAS': 'Basra',
'BEI': 'Beirut',
'BEN': 'Benghazi',
'CAI': 'Cairo',
'DAM': 'Damascus',
'DOH': 'Doha',
'FES': 'Fes',
'JED': 'Jeddha',
'JER': 'Jerusalem',
'KHA': 'Khartoum',
'MOS': 'Mosul',
'MSA': 'Modern Standard Arabic',
'MUS': 'Muscat',
'RAB': 'Rabat',
'RIY': 'Riyadh',
'SAL': 'Salt',
'SAN': 'Sana\'a',
'SFX': 'Sfax',
'TRI': 'Tripoli',
'TUN': 'Tunis'
}
_LABEL_TO_COUNTRY_MAP = {
'ALE': 'Syria',
'ALG': 'Algeria',
'ALX': 'Egypt',
'AMM': 'Jordan',
'ASW': 'Egypt',
'BAG': 'Iraq',
'BAS': 'Iraq',
'BEI': 'Lebanon',
'BEN': 'Libya',
'CAI': 'Egypt',
'DAM': 'Syria',
'DOH': 'Qatar',
'FES': 'Morocco',
'JED': 'Saudi Arabia',
'JER': 'Palestine',
'KHA': 'Sudan',
'MOS': 'Iraq',
'MSA': 'Modern Standard Arabic',
'MUS': 'Oman',
'RAB': 'Morocco',
'RIY': 'Saudi Arabia',
'SAL': 'Jordan',
'SAN': 'Yemen',
'SFX': 'Tunisia',
'TRI': 'Libya',
'TUN': 'Tunisia'
}
_LABEL_TO_REGION_MAP = {
'ALE': 'Levant',
'ALG': 'Maghreb',
'ALX': 'Nile Basin',
'AMM': 'Levant',
'ASW': 'Nile Basin',
'BAG': 'Gulf',
'BAS': 'Gulf',
'BEI': 'Levant',
'BEN': 'Maghreb',
'CAI': 'Nile Basin',
'DAM': 'Levant',
'DOH': 'Gulf',
'FES': 'Maghreb',
'JED': 'Gulf',
'JER': 'Levant',
'KHA': 'Nile Basin',
'MOS': 'Gulf',
'MSA': 'Modern Standard Arabic',
'MUS': 'Gulf',
'RAB': 'Maghreb',
'RIY': 'Gulf',
'SAL': 'Levant',
'SAN': 'Gulf of Aden',
'SFX': 'Maghreb',
'TRI': 'Maghreb',
'TUN': 'Maghreb'
}
_DATA_DIR = DataCatalogue.get_dataset_info('DialectID').path
_CHAR_LM_DIR = Path(_DATA_DIR, 'lm', 'char')
_WORD_LM_DIR = Path(_DATA_DIR, 'lm', 'word')
_TRAIN_DATA_PATH = Path(_DATA_DIR, 'corpus_26_train.tsv')
_TRAIN_DATA_EXTRA_PATH = Path(_DATA_DIR, 'corpus_6_train.tsv')
_DEV_DATA_PATH = Path(_DATA_DIR, 'corpus_26_dev.tsv')
_TEST_DATA_PATH = Path(_DATA_DIR, 'corpus_26_test.tsv')
class DIDPred(collections.namedtuple('DIDPred', ['top', 'scores'])):
"""A named tuple containing dialect ID prediction results.
Attributes:
top (:obj:`str`): The dialect label with the highest score. See
:ref:`dialectid_labels` for a list of output labels.
scores (:obj:`dict`): A dictionary mapping each dialect label to it's
computed score.
"""
class DialectIdError(Exception):
"""Base class for all CAMeL Dialect ID errors.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return str(self.msg)
class UntrainedModelError(DialectIdError):
"""Error thrown when attempting to use an untrained DialectIdentifier
instance.
"""
def __init__(self, msg):
DialectIdError.__init__(self, msg)
class InvalidDataSetError(DialectIdError, ValueError):
"""Error thrown when an invalid data set name is given to eval.
"""
def __init__(self, dataset):
msg = ('Invalid data set name {}. Valid names are "TEST" and '
'"VALIDATION"'.format(repr(dataset)))
DialectIdError.__init__(self, msg)
class PretrainedModelError(DialectIdError):
"""Error thrown when attempting to load a pretrained model provided with
camel-tools.
"""
def __init__(self, msg):
DialectIdError.__init__(self, msg)
def _normalize_lm_scores(scores):
norm_scores = np.exp(scores)
norm_scores = normalize(norm_scores)
return norm_scores
def _word_to_char(txt):
return ' '.join(list(txt.replace(' ', 'X')))
def _max_score(score_tups):
max_score = -1
max_dialect = None
for dialect, score in score_tups:
if score > max_score:
max_score = score
max_dialect = dialect
return max_dialect
def label_to_city(prediction):
"""Converts a dialect prediction using labels to use city names instead.
Args:
pred (:obj:`DIDPred`): The prediction to convert.
Returns:
:obj:`DIDPred` The converted prediction.
"""
scores = { _LABEL_TO_CITY_MAP[l]: s for l, s in prediction.scores.items() }
top = _LABEL_TO_CITY_MAP[prediction.top]
return DIDPred(top, scores)
def label_to_country(prediction):
"""Converts a dialect prediction using labels to use country names instead.
Args:
pred (:obj:`DIDPred`): The prediction to convert.
Returns:
:obj:`DIDPred` The converted prediction.
"""
scores = { i: 0.0 for i in _DEFAULT_COUNTRIES }
for label, prob in prediction.scores.items():
scores[_LABEL_TO_COUNTRY_MAP[label]] += prob
top = max(scores.items(), key=lambda x: x[1])
return DIDPred(top[0], scores)
def label_to_region(prediction):
"""Converts a dialect prediction using labels to use region names instead.
Args:
pred (:obj:`DIDPred`): The prediction to convert.
Returns:
:obj:`DIDPred` The converted prediction.
"""
scores = { i: 0.0 for i in _DEFAULT_REGIONS }
for label, prob in prediction.scores.items():
scores[_LABEL_TO_REGION_MAP[label]] += prob
top = max(scores.items(), key=lambda x: x[1])
return DIDPred(top[0], scores)
class DialectIdentifier(object):
"""A class for training, evaluating and running the dialect identification
model described by Salameh et al. After initializing an instance, you must
run the train method once before using it.
Args:
labels (:obj:`set` of :obj:`str`, optional): The set of dialect labels
used in the training data in the main model.
If None, the default labels are used.
Defaults to None.
labels_extra (:obj:`set` of :obj:`str`, optional): The set of dialect
labels used in the training data in the extra features model.
If None, the default labels are used.
Defaults to None.
char_lm_dir (:obj:`str`, optional): Path to the directory containing
the character-based language models. If None, use the language
models that come with this package. Defaults to None.
word_lm_dir (:obj:`str`, optional): Path to the directory containing
the word-based language models. If None, use the language models
that come with this package. Defaults to None.
"""
def __init__(self, labels=None,
labels_extra=None,
char_lm_dir=None,
word_lm_dir=None):
if labels is None:
labels = _DEFAULT_LABELS
if labels_extra is None:
labels_extra = _DEFAULT_LABELS_EXTRA
if char_lm_dir is None:
char_lm_dir = _CHAR_LM_DIR
if word_lm_dir is None:
word_lm_dir = _WORD_LM_DIR
self._labels = labels
self._labels_extra = labels_extra
self._labels_sorted = sorted(labels)
self._labels_extra_sorted = sorted(labels_extra)
self._char_lms = collections.defaultdict(kenlm.Model)
self._word_lms = collections.defaultdict(kenlm.Model)
self._load_lms(char_lm_dir, word_lm_dir)
self._is_trained = False
def _load_lms(self, char_lm_dir, word_lm_dir):
config = kenlm.Config()
config.show_progress = False
config.arpa_complain = kenlm.ARPALoadComplain.NONE
for label in self._labels:
char_lm_path = Path(char_lm_dir, '{}.arpa'.format(label))
word_lm_path = Path(word_lm_dir, '{}.arpa'.format(label))
self._char_lms[label] = kenlm.Model(str(char_lm_path), config)
self._word_lms[label] = kenlm.Model(str(word_lm_path), config)
def _get_char_lm_scores(self, txt):
chars = _word_to_char(txt)
return np.array([self._char_lms[label].score(chars, bos=True, eos=True)
for label in self._labels_sorted])
def _get_word_lm_scores(self, txt):
return np.array([self._word_lms[label].score(txt, bos=True, eos=True)
for label in self._labels_sorted])
def _get_lm_feats(self, txt):
word_lm_scores = self._get_word_lm_scores(txt).reshape(1, -1)
word_lm_scores = _normalize_lm_scores(word_lm_scores)
char_lm_scores = self._get_char_lm_scores(txt).reshape(1, -1)
char_lm_scores = _normalize_lm_scores(char_lm_scores)
feats = np.concatenate((word_lm_scores, char_lm_scores), axis=1)
return feats
def _get_lm_feats_multi(self, sentences):
feats_list = collections.deque()
for sentence in sentences:
feats_list.append(self._get_lm_feats(sentence))
feats_matrix = np.array(feats_list)
feats_matrix = feats_matrix.reshape((-1, 52))
return feats_matrix
def _prepare_sentences(self, sentences):
tokenized = [' '.join(simple_word_tokenize(dediac_ar(s)))
for s in sentences]
sent_array = np.array(tokenized)
x_trans = self._feat_union.transform(sent_array)
x_trans_extra = self._feat_union_extra.transform(sent_array)
x_predict_extra = self._classifier_extra.predict_proba(x_trans_extra)
x_lm_feats = self._get_lm_feats_multi(sentences)
x_final = sp.sparse.hstack((x_trans, x_lm_feats, x_predict_extra))
return x_final
def train(self, data_path=None,
data_extra_path=None,
char_ngram_range=(1, 3),
word_ngram_range=(1, 1),
n_jobs=None):
"""Trains the model on a given data set.
Args:
data_path (:obj:`str`, optional): Path to main training data.
If None, use the provided training data.
Defaults to None.
data_extra_path (:obj:`str`, optional): Path to extra features
training data. If None,cuse the provided training data.
Defaults to None.
char_ngram_range (:obj:`tuple`, optional): The n-gram ranges to
consider in the character-based language models.
Defaults to (1, 3).
word_ngram_range (:obj:`tuple`, optional): The n-gram ranges to
consider in the word-based language models.
Defaults to (1, 1).
n_jobs (:obj:`int`, optional): The number of parallel jobs to use
for computation. If None, then only 1 job is used.
If -1 then all processors are used. Defaults to None.
"""
if data_path is None:
data_path = _TRAIN_DATA_PATH
if data_extra_path is None:
data_extra_path = _TRAIN_DATA_EXTRA_PATH
# Load training data and extract
train_data = pd.read_csv(data_path, sep='\t', index_col=0)
train_data_extra = pd.read_csv(data_extra_path, sep='\t', index_col=0)
x = train_data['ar'].values
y = train_data['dialect'].values
x_extra = train_data_extra['ar'].values
y_extra = train_data_extra['dialect'].values
# Build and train extra classifier
self._label_encoder_extra = LabelEncoder()
self._label_encoder_extra.fit(y_extra)
y_trans = self._label_encoder_extra.transform(y_extra)
word_vectorizer = TfidfVectorizer(lowercase=False,
ngram_range=word_ngram_range,
analyzer='word',
tokenizer=lambda x: x.split(' '))
char_vectorizer = TfidfVectorizer(lowercase=False,
ngram_range=char_ngram_range,
analyzer='char',
tokenizer=lambda x: x.split(' '))
self._feat_union_extra = FeatureUnion([('wordgrams', word_vectorizer),
('chargrams', char_vectorizer)])
x_trans = self._feat_union_extra.fit_transform(x_extra)
self._classifier_extra = OneVsRestClassifier(MultinomialNB(),
n_jobs=n_jobs)
self._classifier_extra.fit(x_trans, y_trans)
# Build and train main classifier
self._label_encoder = LabelEncoder()
self._label_encoder.fit(y)
y_trans = self._label_encoder.transform(y)
word_vectorizer = TfidfVectorizer(lowercase=False,
ngram_range=word_ngram_range,
analyzer='word',
tokenizer=lambda x: x.split(' '))
char_vectorizer = TfidfVectorizer(lowercase=False,
ngram_range=char_ngram_range,
analyzer='char',
tokenizer=lambda x: x.split(' '))
self._feat_union = FeatureUnion([('wordgrams', word_vectorizer),
('chargrams', char_vectorizer)])
self._feat_union.fit(x)
x_prepared = self._prepare_sentences(x)
self._classifier = OneVsRestClassifier(MultinomialNB(), n_jobs=n_jobs)
self._classifier.fit(x_prepared, y_trans)
self._is_trained = True
def eval(self, data_path=None, data_set='DEV'):
"""Evaluate the trained model on a given data set.
Args:
data_path (:obj:`str`, optional): Path to an evaluation data set.
If None, use one of the provided data sets instead.
Defaults to None.
data_set (:obj:`str`, optional): Name of the provided data set to
use. This is ignored if data_path is not None. Can be either
'VALIDATION' or 'TEST'. Defaults to 'VALIDATION'.
Returns:
:obj:`dict`: A dictionary mapping an evaluation metric to its
computed value. The metrics used are accuracy, f1_micro, f1_macro,
recall_micro, recall_macro, precision_micro and precision_macro.
"""
if not self._is_trained:
raise UntrainedModelError(
'Can\'t evaluate an untrained model.')
if data_path is None:
if data_set == 'DEV':
data_path = _DEV_DATA_PATH
elif data_set == 'TEST':
data_path = _TEST_DATA_PATH
else:
raise InvalidDataSetError(data_set)
# Load eval data
eval_data = pd.read_csv(data_path, sep='\t', index_col=0)
sentences = eval_data['ar'].values
did_true_city = eval_data['dialect'].values
did_true_country = [_LABEL_TO_COUNTRY_MAP[d] for d in did_true_city]
did_true_region = [_LABEL_TO_REGION_MAP[d] for d in did_true_city]
# Generate predictions
did_pred = self.predict(sentences)
did_pred_city = [d.top for d in did_pred]
did_pred_country = [d.top for d in map(label_to_country, did_pred)]
did_pred_region = [d.top for d in map(label_to_region, did_pred)]
# Get scores
scores = {
'city': {
'accuracy': accuracy_score(did_true_city, did_pred_city),
'f1_macro': f1_score(did_true_city, did_pred_city,
average='macro'),
'recall_macro': recall_score(did_true_city, did_pred_city,
average='macro'),
'precision_macro': precision_score(did_true_city,
did_pred_city,
average='macro')
},
'country': {
'accuracy': accuracy_score(did_true_country, did_pred_country),
'f1_macro': f1_score(did_true_country, did_pred_country,
average='macro'),
'recall_macro': recall_score(did_true_country,
did_pred_country,
average='macro'),
'precision_macro': precision_score(did_true_country,
did_pred_country,
average='macro')
},
'region': {
'accuracy': accuracy_score(did_true_region, did_pred_region),
'f1_macro': f1_score(did_true_region, did_pred_region,
average='macro'),
'recall_macro': recall_score(did_true_region, did_pred_region,
average='macro'),
'precision_macro': precision_score(did_true_region,
did_pred_region,
average='macro')
},
}
return scores
def predict(self, sentences, output='label'):
"""Predict the dialect probability scores for a given list of
sentences.
Args:
sentences (:obj:`list` of :obj:`str`): The list of sentences.
output (:obj:`str`): The output label type. Possible values are
'label', 'city', 'country', or 'region'. Defaults to 'label'.
Returns:
:obj:`list` of :obj:`DIDPred`: A list of prediction results,
each corresponding to its respective sentence.
"""
if not self._is_trained:
raise UntrainedModelError(
'Can\'t predict with an untrained model.')
if output == 'label':
convert = lambda x: x
elif output == 'city':
convert = label_to_city
elif output == 'country':
convert = label_to_country
elif output == 'region':
convert = label_to_region
else:
convert = lambda x: x
x_prepared = self._prepare_sentences(sentences)
predicted_scores = self._classifier.predict_proba(x_prepared)
result = collections.deque()
for scores in predicted_scores:
score_tups = list(zip(self._labels_sorted, scores))
predicted_dialect = max(score_tups, key=lambda x: x[1])[0]
dialect_scores = dict(score_tups)
result.append(convert(DIDPred(predicted_dialect, dialect_scores)))
return list(result)
@staticmethod
def pretrained():
"""Load the default pre-trained model provided with camel-tools.
Raises:
:obj:`PretrainedModelError`: When a pre-trained model compatible
with the current Python version isn't available.
Returns:
:obj:`DialectIdentifier`: The loaded model.
"""
suffix = '{}{}'.format(sys.version_info.major, sys.version_info.minor)
model_file_name = 'did_pretrained_{}.dill'.format(suffix)
model_path = Path(_DATA_DIR, model_file_name)
if not model_path.is_file():
raise PretrainedModelError(
'No pretrained model for current Python version found.')
with model_path.open('rb') as model_fp:
model = dill.load(model_fp)
# We need to reload LMs since they were set to None when
# serialized.
model._char_lms = collections.defaultdict(kenlm.Model)
model._word_lms = collections.defaultdict(kenlm.Model)
model._load_lms(_CHAR_LM_DIR, _WORD_LM_DIR)
return model
def train_default_model():
print(_DATA_DIR)
did = DialectIdentifier()
did.train()
# We don't want to serialize kenlm models as they will utilize the
# absolute LM paths used in training. They will be reloaded when using
# DialectIdentifer.pretrained().
did._char_lms = None
did._word_lms = None
suffix = '{}{}'.format(sys.version_info.major, sys.version_info.minor)
model_file_name = 'did_pretrained_{}.dill'.format(suffix)
model_path = Path(_DATA_DIR, model_file_name)
with model_path.open('wb') as model_fp:
dill.dump(did, model_fp)
def label_city_pairs():
"""Returns the set of default label-city pairs.
Returns:
:obj:`frozenset` of :obj:`tuple`: The set of default label-dialect
pairs.
"""
return frozenset(_LABEL_TO_CITY_MAP.items())
def label_country_pairs():
"""Returns the set of default label-country pairs.
Returns:
:obj:`frozenset` of :obj:`tuple`: The set of default label-country
pairs.
"""
return frozenset(_LABEL_TO_COUNTRY_MAP.items())
def label_region_pairs():
"""Returns the set of default label-region pairs.
Returns:
:obj:`frozenset` of :obj:`tuple`: The set of default label-region
pairs.
"""
return frozenset(_LABEL_TO_REGION_MAP.items())
| [
"sklearn.pipeline.FeatureUnion",
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"collections.defaultdict",
"pathlib.Path",
"sklearn.metrics.f1_score",
"numpy.exp",
"kenlm.Config",
"collections.deque",
"sklearn.preprocessing.LabelEncoder",
"dill.load",
"sklearn.metrics.recall_score",
"s... | [((5012, 5041), 'pathlib.Path', 'Path', (['_DATA_DIR', '"""lm"""', '"""char"""'], {}), "(_DATA_DIR, 'lm', 'char')\n", (5016, 5041), False, 'from pathlib import Path\n'), ((5057, 5086), 'pathlib.Path', 'Path', (['_DATA_DIR', '"""lm"""', '"""word"""'], {}), "(_DATA_DIR, 'lm', 'word')\n", (5061, 5086), False, 'from pathlib import Path\n'), ((5106, 5144), 'pathlib.Path', 'Path', (['_DATA_DIR', '"""corpus_26_train.tsv"""'], {}), "(_DATA_DIR, 'corpus_26_train.tsv')\n", (5110, 5144), False, 'from pathlib import Path\n'), ((5170, 5207), 'pathlib.Path', 'Path', (['_DATA_DIR', '"""corpus_6_train.tsv"""'], {}), "(_DATA_DIR, 'corpus_6_train.tsv')\n", (5174, 5207), False, 'from pathlib import Path\n'), ((5225, 5261), 'pathlib.Path', 'Path', (['_DATA_DIR', '"""corpus_26_dev.tsv"""'], {}), "(_DATA_DIR, 'corpus_26_dev.tsv')\n", (5229, 5261), False, 'from pathlib import Path\n'), ((5280, 5317), 'pathlib.Path', 'Path', (['_DATA_DIR', '"""corpus_26_test.tsv"""'], {}), "(_DATA_DIR, 'corpus_26_test.tsv')\n", (5284, 5317), False, 'from pathlib import Path\n'), ((5334, 5386), 'collections.namedtuple', 'collections.namedtuple', (['"""DIDPred"""', "['top', 'scores']"], {}), "('DIDPred', ['top', 'scores'])\n", (5356, 5386), False, 'import collections\n'), ((4948, 4991), 'camel_tools.data.DataCatalogue.get_dataset_info', 'DataCatalogue.get_dataset_info', (['"""DialectID"""'], {}), "('DialectID')\n", (4978, 4991), False, 'from camel_tools.data import DataCatalogue\n'), ((6743, 6757), 'numpy.exp', 'np.exp', (['scores'], {}), '(scores)\n', (6749, 6757), True, 'import numpy as np\n'), ((6776, 6798), 'sklearn.preprocessing.normalize', 'normalize', (['norm_scores'], {}), '(norm_scores)\n', (6785, 6798), False, 'from sklearn.preprocessing import normalize\n'), ((23340, 23372), 'pathlib.Path', 'Path', (['_DATA_DIR', 'model_file_name'], {}), '(_DATA_DIR, model_file_name)\n', (23344, 23372), False, 'from pathlib import Path\n'), ((10301, 10337), 'collections.defaultdict', 
'collections.defaultdict', (['kenlm.Model'], {}), '(kenlm.Model)\n', (10324, 10337), False, 'import collections\n'), ((10363, 10399), 'collections.defaultdict', 'collections.defaultdict', (['kenlm.Model'], {}), '(kenlm.Model)\n', (10386, 10399), False, 'import collections\n'), ((10552, 10566), 'kenlm.Config', 'kenlm.Config', ([], {}), '()\n', (10564, 10566), False, 'import kenlm\n'), ((11699, 11755), 'numpy.concatenate', 'np.concatenate', (['(word_lm_scores, char_lm_scores)'], {'axis': '(1)'}), '((word_lm_scores, char_lm_scores), axis=1)\n', (11713, 11755), True, 'import numpy as np\n'), ((11845, 11864), 'collections.deque', 'collections.deque', ([], {}), '()\n', (11862, 11864), False, 'import collections\n'), ((11983, 12003), 'numpy.array', 'np.array', (['feats_list'], {}), '(feats_list)\n', (11991, 12003), True, 'import numpy as np\n'), ((12260, 12279), 'numpy.array', 'np.array', (['tokenized'], {}), '(tokenized)\n', (12268, 12279), True, 'import numpy as np\n'), ((12559, 12615), 'scipy.sparse.hstack', 'sp.sparse.hstack', (['(x_trans, x_lm_feats, x_predict_extra)'], {}), '((x_trans, x_lm_feats, x_predict_extra))\n', (12575, 12615), True, 'import scipy as sp\n'), ((14028, 14073), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(data_path, sep='\\t', index_col=0)\n", (14039, 14073), True, 'import pandas as pd\n'), ((14101, 14152), 'pandas.read_csv', 'pd.read_csv', (['data_extra_path'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(data_extra_path, sep='\\t', index_col=0)\n", (14112, 14152), True, 'import pandas as pd\n'), ((14412, 14426), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (14424, 14426), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((15103, 15181), 'sklearn.pipeline.FeatureUnion', 'FeatureUnion', (["[('wordgrams', word_vectorizer), ('chargrams', char_vectorizer)]"], {}), "([('wordgrams', word_vectorizer), ('chargrams', char_vectorizer)])\n", (15115, 15181), False, 'from 
sklearn.pipeline import FeatureUnion\n'), ((15558, 15572), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (15570, 15572), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((16219, 16297), 'sklearn.pipeline.FeatureUnion', 'FeatureUnion', (["[('wordgrams', word_vectorizer), ('chargrams', char_vectorizer)]"], {}), "([('wordgrams', word_vectorizer), ('chargrams', char_vectorizer)])\n", (16231, 16297), False, 'from sklearn.pipeline import FeatureUnion\n'), ((17804, 17849), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(data_path, sep='\\t', index_col=0)\n", (17815, 17849), True, 'import pandas as pd\n'), ((21395, 21414), 'collections.deque', 'collections.deque', ([], {}), '()\n', (21412, 21414), False, 'import collections\n'), ((22270, 22302), 'pathlib.Path', 'Path', (['_DATA_DIR', 'model_file_name'], {}), '(_DATA_DIR, model_file_name)\n', (22274, 22302), False, 'from pathlib import Path\n'), ((23426, 23450), 'dill.dump', 'dill.dump', (['did', 'model_fp'], {}), '(did, model_fp)\n', (23435, 23450), False, 'import dill\n'), ((15347, 15362), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (15360, 15362), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((16468, 16483), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (16481, 16483), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((22523, 22542), 'dill.load', 'dill.load', (['model_fp'], {}), '(model_fp)\n', (22532, 22542), False, 'import dill\n'), ((22669, 22705), 'collections.defaultdict', 'collections.defaultdict', (['kenlm.Model'], {}), '(kenlm.Model)\n', (22692, 22705), False, 'import collections\n'), ((22736, 22772), 'collections.defaultdict', 'collections.defaultdict', (['kenlm.Model'], {}), '(kenlm.Model)\n', (22759, 22772), False, 'import collections\n'), ((18463, 18507), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['did_true_city', 'did_pred_city'], {}), 
'(did_true_city, did_pred_city)\n', (18477, 18507), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score\n'), ((18537, 18592), 'sklearn.metrics.f1_score', 'f1_score', (['did_true_city', 'did_pred_city'], {'average': '"""macro"""'}), "(did_true_city, did_pred_city, average='macro')\n", (18545, 18592), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score\n'), ((18663, 18722), 'sklearn.metrics.recall_score', 'recall_score', (['did_true_city', 'did_pred_city'], {'average': '"""macro"""'}), "(did_true_city, did_pred_city, average='macro')\n", (18675, 18722), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score\n'), ((18804, 18866), 'sklearn.metrics.precision_score', 'precision_score', (['did_true_city', 'did_pred_city'], {'average': '"""macro"""'}), "(did_true_city, did_pred_city, average='macro')\n", (18819, 18866), False, 'from sklearn.metrics import precision_score\n'), ((19037, 19087), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['did_true_country', 'did_pred_country'], {}), '(did_true_country, did_pred_country)\n', (19051, 19087), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score\n'), ((19117, 19178), 'sklearn.metrics.f1_score', 'f1_score', (['did_true_country', 'did_pred_country'], {'average': '"""macro"""'}), "(did_true_country, did_pred_country, average='macro')\n", (19125, 19178), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score\n'), ((19249, 19314), 'sklearn.metrics.recall_score', 'recall_score', (['did_true_country', 'did_pred_country'], {'average': '"""macro"""'}), "(did_true_country, did_pred_country, average='macro')\n", (19261, 19314), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score\n'), ((19441, 19509), 'sklearn.metrics.precision_score', 'precision_score', (['did_true_country', 'did_pred_country'], {'average': '"""macro"""'}), "(did_true_country, did_pred_country, average='macro')\n", (19456, 19509), 
False, 'from sklearn.metrics import precision_score\n'), ((19679, 19727), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['did_true_region', 'did_pred_region'], {}), '(did_true_region, did_pred_region)\n', (19693, 19727), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score\n'), ((19757, 19816), 'sklearn.metrics.f1_score', 'f1_score', (['did_true_region', 'did_pred_region'], {'average': '"""macro"""'}), "(did_true_region, did_pred_region, average='macro')\n", (19765, 19816), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score\n'), ((19887, 19950), 'sklearn.metrics.recall_score', 'recall_score', (['did_true_region', 'did_pred_region'], {'average': '"""macro"""'}), "(did_true_region, did_pred_region, average='macro')\n", (19899, 19950), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score\n'), ((20032, 20098), 'sklearn.metrics.precision_score', 'precision_score', (['did_true_region', 'did_pred_region'], {'average': '"""macro"""'}), "(did_true_region, did_pred_region, average='macro')\n", (20047, 20098), False, 'from sklearn.metrics import precision_score\n'), ((12183, 12195), 'camel_tools.utils.dediac.dediac_ar', 'dediac_ar', (['s'], {}), '(s)\n', (12192, 12195), False, 'from camel_tools.utils.dediac import dediac_ar\n')] |
from time import sleep
import numpy as np
import os
import allogger
import logging
def main():
allogger.basic_configure(logdir='/tmp/allogger/singleprocess', default_outputs=['tensorboard'], hdf_writer_params=dict(
min_time_diff_btw_disc_writes=10, precision=2,
), debug=True, default_path_exists='ask')
allogger.utils.report_env(to_stdout=True)
logger = allogger.get_logger(scope='main')
start = 0
if os.path.exists('/tmp/allogger/singleprocess/checkpoint.npy'):
start, step_per_key = np.load('/tmp/allogger/singleprocess/checkpoint.npy', allow_pickle=True)
allogger.get_logger('root').step_per_key = allogger.get_logger('root').manager.dict(step_per_key)
print(f'Resuming from step {start}')
for step in range(start, start+10):
logger.log(step, 'value')
logger.info(f'We are in step {step}')
logger.log(np.random.rand(1, 5, 5), 'blub')
logger.log(np.random.rand(1, 5, 5), 'blub')
logger.log(np.random.rand(10), 'array', data_type='array')
logger.log(np.random.rand(10), 'array', data_type='array')
np.save(os.path.join(allogger.get_logger('root').logdir, 'checkpoint'), (step+1, dict(allogger.get_logger('root').step_per_key)))
allogger.close()
if __name__ == '__main__':
main() | [
"allogger.close",
"allogger.get_logger",
"allogger.utils.report_env",
"numpy.load",
"os.path.exists",
"numpy.random.rand"
] | [((327, 368), 'allogger.utils.report_env', 'allogger.utils.report_env', ([], {'to_stdout': '(True)'}), '(to_stdout=True)\n', (352, 368), False, 'import allogger\n'), ((382, 415), 'allogger.get_logger', 'allogger.get_logger', ([], {'scope': '"""main"""'}), "(scope='main')\n", (401, 415), False, 'import allogger\n'), ((438, 498), 'os.path.exists', 'os.path.exists', (['"""/tmp/allogger/singleprocess/checkpoint.npy"""'], {}), "('/tmp/allogger/singleprocess/checkpoint.npy')\n", (452, 498), False, 'import os\n'), ((1239, 1255), 'allogger.close', 'allogger.close', ([], {}), '()\n', (1253, 1255), False, 'import allogger\n'), ((530, 602), 'numpy.load', 'np.load', (['"""/tmp/allogger/singleprocess/checkpoint.npy"""'], {'allow_pickle': '(True)'}), "('/tmp/allogger/singleprocess/checkpoint.npy', allow_pickle=True)\n", (537, 602), True, 'import numpy as np\n'), ((891, 914), 'numpy.random.rand', 'np.random.rand', (['(1)', '(5)', '(5)'], {}), '(1, 5, 5)\n', (905, 914), True, 'import numpy as np\n'), ((939, 962), 'numpy.random.rand', 'np.random.rand', (['(1)', '(5)', '(5)'], {}), '(1, 5, 5)\n', (953, 962), True, 'import numpy as np\n'), ((988, 1006), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (1002, 1006), True, 'import numpy as np\n'), ((1051, 1069), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (1065, 1069), True, 'import numpy as np\n'), ((611, 638), 'allogger.get_logger', 'allogger.get_logger', (['"""root"""'], {}), "('root')\n", (630, 638), False, 'import allogger\n'), ((1125, 1152), 'allogger.get_logger', 'allogger.get_logger', (['"""root"""'], {}), "('root')\n", (1144, 1152), False, 'import allogger\n'), ((654, 681), 'allogger.get_logger', 'allogger.get_logger', (['"""root"""'], {}), "('root')\n", (673, 681), False, 'import allogger\n'), ((1190, 1217), 'allogger.get_logger', 'allogger.get_logger', (['"""root"""'], {}), "('root')\n", (1209, 1217), False, 'import allogger\n')] |
# -*- coding: ISO-8859-1 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2014, HFTools Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
import numpy.linalg as linalg
from numpy import pi, exp, array, zeros, sqrt
from numpy.lib.stride_tricks import broadcast_arrays, as_strided
from hftools.dataset import make_same_dims_list, hfarray,\
DimMatrix_i, DimMatrix_j
from hftools.dataset.arrayobj import _hfarray, make_same_dims
def angle(z, deg=False, branch=None):
"""Like numpy angle but you can specify the starting point for the angle.
branch = x means the angle will be in the interval [x, x+360[ when
deg=True and [x, x+pi[ when deg=False
"""
if deg:
b = -180 if branch is None else branch
B = (b - (-180))
b = B / 180. * np.pi
else:
b = -np.pi if branch is None else branch
B = (b - (-np.pi))
b = B
Z = np.asanyarray(z) / exp(1j * b)
res = np.angle(Z, deg)
res = res + B
if isinstance(z, _hfarray):
return z.__class__(res, dims=z.dims, copy=False)
return res
def make_matrix(a, b, c, d):
a, b, c, d = make_same_dims_list([a, b, c, d])
abcdshape = zip(a.shape, b.shape, c.shape, d.shape)
maxshape = (tuple(max(x) for x in abcdshape) + (2, 2))
res = zeros(maxshape, a.dtype)
res[..., 0, 0] = a
res[..., 0, 1] = b
res[..., 1, 0] = c
res[..., 1, 1] = d
dims = a.dims + (DimMatrix_i("i", 2), DimMatrix_j("j", 2),)
out = hfarray(res, dims=dims)
return out
def chop(x, threshold=1e-16):
"""Round numbers with magnitude smaller than *threshold* to zero
"""
if isinstance(x, np.ndarray):
x = x.copy()
x[abs(x) < threshold] = 0
return x
else:
if abs(x) < threshold:
return 0 * x
else:
return x
def dB(x):
"""Convert x from dB to linear (voltage).
..math:: dB(x) = 20 ln(|x|)
"""
return 20 * np.log10(abs(x))
def dBinv(x):
"""Convert x from dB to linear (voltage).
..math:: dBinv(x) = 10^{x/20}
"""
return 10**(x / 20.)
#
# Convert to complexform
#
def dB_angle_to_complex(mag, ang):
"""Convert magnitude and angle to complex value
*mag* magnitude in dB
*ang* angle in degrees
"""
return dBinv(mag) * exp(1j * pi * ang / 180.)
def mag_angle_to_complex(mag, ang):
"""Convert magnitude and angle to complex value
*mag* magnitude in linear scale
*ang* angle in degrees
"""
return mag * exp(1j * pi * ang / 180.)
def re_im_to_complex(realpart, imaginarypart):
"""Convert real and imaginary parts to complex value
*realpart* real part linear units
*imaginarypart* imaginary part linear units
"""
return realpart + 1j*imaginarypart
def continous_phase_sqrt(q):
phase = unwrap_phase(q)
Q = sqrt(abs(q)) * exp(1j * phase / 2)
return Q
def _unwrap_phase(data, deg=False):
"""Virar upp fasen, dvs forsoker ta bort eventuella fashopp.
>>> x = np.arange(0, 11, 1.5)
>>> y = np.exp(1j*x)
>>> np.angle(y)
array([ 0. , 1.5 , 3. , -1.78318531, -0.28318531,
1.21681469, 2.71681469, -2.06637061])
>>> unwrap_phase(hfarray(y))
hfarray([ 0. , 1.5, 3. , 4.5, 6. , 7.5, 9. , 10.5])
"""
result = data.real * 0
a = angle(data[0])
a0 = np.median(array(a.flat[0], copy=False))
add_pos = abs(a - a0 + 2 * pi) < 2
add_neg = abs(a - a0 - 2 * pi) < 2
result[0] = a
result[1:] = (np.add.accumulate(angle(data[1:] / data[:-1]), 0) +
angle(data[0]))
result1 = np.where(array(add_pos), array(result + 2 * pi), array(result))
result2 = np.where(array(add_neg), array(result1 - 2 * pi), array(result1))
if deg:
result2 = result2 / pi * 180
return hfarray(result2, dims=data.dims)
def unwrap_phase(data, deg=False):
a1 = _unwrap_phase(data, deg=deg)
return a1
def delay(freq, var):
r"""Berakna grupploptid :math:`\tau=-\frac{d\varphi}{d\omega}` for data
Invariabler
*freq*
frekvenser som *var* galler for
*var*
komplexa data som skall anvandas i berakningen
Resultat (f,tau)
*f*
frekvenserna som motsvarar mittpunkter i *freq*
*tau*
beraknad grupploptid med hjalp av mittpunkts approximation for
derivatan
>>> x=hfarray(np.arange(0,10.))
>>> y=hfarray(np.exp(-2j*np.pi*x/10))
>>> f,tau=delay(x,y)
>>> f
hfarray([ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5])
>>> tau
hfarray([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
freq, var = make_same_dims(freq, var)
f_mean = (freq[:-1] + freq[1:]) / 2
domega = (2 * pi * (freq[:-1] - freq[1:]))
dfi = angle(var[1:] / var[:-1])
return (f_mean, dfi / domega)
def smooth(data, aperture, axis=0):
"""Smoothar *data* med aritmetiskt medelvarde langs med *axis* [=0].
Invariabler
*data*
data som skall smoothas
*aperture*
hur manga sampel som skall medlvardesbildas for varje punkt
i smoothingen
*axis*
axel som skall smoothas langs default = 1
"""
data = np.asanyarray(data)
newdata = np.empty_like(data)
len = data.shape[axis] - 1
if aperture % 2 == 0:
odd = 0
else:
odd = 1
for i in range(data.shape[axis]):
wid = min(i, aperture // 2, abs(len - i))
if wid != 0:
newdata[i] = np.mean(data[i - wid: i + wid + odd], axis)
else:
newdata[i] = data[i]
return newdata
def poly_smooth(x, y, aperture, axis=0, N=3):
"""Smoothar *data* med hjalp av *N* te gradens polynom, langs med
*axis* [=0].
Invariabler
*x*
x-varden for data som skall smoothas
*y*
y-varden for data som skall smoothas
*aperture*
hur manga sampel som skall medlvardesbildas for varje punkt i
smoothingen
*axis*
axel som skall smoothas langs
*N*
Vilken grad skallpolynomet ha
"""
import scipy
newdata = np.empty_like(y)
size = x.shape[axis] - 1
if aperture % 2 == 0:
odd = 0
else:
odd = 1
wid = aperture // 2
for i in range(x.shape[axis]):
start = min(max(i - wid, 0), max(0, size - aperture))
stop = max(min(i + wid + odd, size), aperture)
assert stop - start == aperture
poly = scipy.polyfit(x[start:stop], y[start:stop], N)
newdata[i] = scipy.polyval(poly, x[i])
return newdata
def poly_smooth_magphase(x, data, aperture, axis=0, N=3):
"""Smoothar magnitud och vinkel hos *data* med hjalp av
:func:`poly_smooth`.
"""
m = poly_smooth(x, abs(data), aperture, axis, N)
p = poly_smooth(x, unwrap_phase(data), aperture, axis, N)
return m * exp(1j * p)
def smooth_magphase(data, aperture, axis=0):
"""Smoothar magnitud och vinkel hos *data* med hjalp av
:func:`smooth`.
"""
m = smooth(abs(data), aperture, axis)
p = smooth(unwrap_phase(data), aperture, axis)
return m * exp(1j * p)
def linear_extrapolate(x, y, xi):
x = np.asanyarray(x)
y = np.asanyarray(y)
xi = np.asanyarray(xi)
idx0 = np.searchsorted(x[1:-1], xi)
idx1 = idx0 + 1
x0 = x[idx0]
x1 = x[idx1]
y0 = y[idx0]
y1 = y[idx1]
d0 = (xi - x0)
d1 = (x1 - xi)
d = (x1 - x0)
return (d0 * y1 + d1 * y0) / d
def interpolate(x, y, xi):
x = np.asanyarray(x)
y = np.asanyarray(y)
xi = np.asanyarray(xi)
if xi.min() < x.min():
raise ValueError("Can not interpolate below lowest value")
elif xi.max() > x.max():
raise ValueError("Can not interpolate below highest value")
else:
return linear_extrapolate(x, y, xi)
#def check_is_multi_matrix(x):
# """A multimatrix is a matrix on the last N indices"""
def firstpos(x, N=2):
return as_strided(x, x.shape[:-N], x.strides[:-N])
def firstelement(x, N=2):
return as_strided(x, x.shape[-N:], x.strides[-N:])
def get_shape_helper(x, N):
if N == 0:
return tuple()
else:
return x[-N:]
def get_dims_helper(x, N):
if N == 0:
return tuple()
else:
return x[:-N]
class broadcast_matrices(object):
def __init__(self, a, N=2):
arrays = a
if all(isinstance(x, _hfarray) for x in a):
arrays = make_same_dims_list(arrays)
else:
raise Exception("Can only broadcast hfarrays")
try:
N[0]
except TypeError:
N = [N] * len(a)
self.Nlist = Nlist = N
_matrixshapes = [get_shape_helper(x.shape, Nelem)
for x, Nelem in zip(arrays, Nlist)]
_matrixstrides = [get_shape_helper(x.strides, Nelem)
for x, Nelem in zip(arrays, Nlist)]
self._matrixshapes = _matrixshapes
self._matrixstrides = _matrixstrides
dimslist = [x.dims for x, Nelem in zip(arrays, Nlist)]
firstelems = broadcast_arrays(*[firstpos(x, Nelem)
for x, Nelem in zip(arrays, Nlist)])
self._broadcasted = broadcasted = []
for o, endshapes, endstrides, dims in zip(firstelems, _matrixshapes,
_matrixstrides, dimslist):
x = as_strided(o, o.shape + endshapes, o.strides + endstrides)
broadcasted.append(hfarray(x, dims=dims, copy=False))
self.outershape = broadcasted[0].shape[:-Nlist[0]]
def __iter__(self):
broadcasted = self._broadcasted
to_enumerate = firstpos(broadcasted[0], self.Nlist[0])
for index, _ in np.ndenumerate(to_enumerate):
yield [x.__getitem__(index) for x in broadcasted]
def inv(A):
inv = linalg.inv
result = inv(A)
result = hfarray(result, dims=A.dims)
return result
def matrix_multiply_old(A, B):
dot = np.dot
res = broadcast_matrices((A, B))
x = dot(firstelement(A), firstelement(B))
resdims = res._broadcasted[0].dims
resempty = np.empty(res.outershape + x.shape, dtype=x.dtype)
result = hfarray(resempty, dims=resdims)
for a, b, r in broadcast_matrices((A, B, result)):
r[...] = dot(a, b)
return result
def matrix_multiply(a, b):
"""Multiply arrays of matrices.
a and b are hfarrays containing dimensions DimMatrix_i and DimMatrix_j.
Matrix multiplication is done by broadcasting the other dimensions first.
"""
A, B = make_same_dims(a, b)
res = np.einsum("...ij,...jk->...ik", A, B)
return hfarray(res, dims=A.dims)
def flatten_non_matrix(A):
out = np.ascontiguousarray(A)
shape = (int(np.multiply.reduce(A.shape[:-2])),) + A.shape[-2:]
out.shape = shape
return out
def det(A):
det = linalg.det
result = det(A)
result = hfarray(result, dims=A.dims[:-2])
return result
def solve_Ab(A, b, squeeze=True):
AA, bb = make_same_dims(A, b)
x = np.linalg.solve(AA, bb)
result = hfarray(x, dims=bb.dims)
if squeeze:
result = result.squeeze()
return result
def lstsq(A, b, squeeze=True):
lstsq = np.linalg.lstsq
res = broadcast_matrices((A, b))
x, residuals, rank, s = lstsq(firstelement(res._broadcasted[0]), firstelement(res._broadcasted[1]))
xdims = res._broadcasted[0].dims
xempty = np.empty(res.outershape + x.shape, dtype=x.dtype)
xresult = hfarray(xempty, dims=xdims)
for a, b, rx in broadcast_matrices((A, b, xresult)):
rx[...], _, _, _ = lstsq(a, b)
return xresult
if __name__ == '__main__':
a = array([[[1., 2], [3, 4]]])
da = array([0.])
b = array([[[4., 3], [2, 1]],
[[5, 3], [2, 1]],
[[7, 3], [2, 1]]])
db = array([0, 0, 0.])
m = broadcast_matrices((a, b))
m2 = broadcast_matrices((b, db), (2, 0))
| [
"numpy.angle",
"numpy.empty",
"numpy.einsum",
"hftools.dataset.arrayobj.make_same_dims",
"numpy.mean",
"numpy.exp",
"numpy.multiply.reduce",
"numpy.linalg.solve",
"numpy.empty_like",
"scipy.polyval",
"hftools.dataset.hfarray",
"scipy.polyfit",
"numpy.ndenumerate",
"numpy.lib.stride_tricks.... | [((1200, 1216), 'numpy.angle', 'np.angle', (['Z', 'deg'], {}), '(Z, deg)\n', (1208, 1216), True, 'import numpy as np\n'), ((1387, 1420), 'hftools.dataset.make_same_dims_list', 'make_same_dims_list', (['[a, b, c, d]'], {}), '([a, b, c, d])\n', (1406, 1420), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((1546, 1570), 'numpy.zeros', 'zeros', (['maxshape', 'a.dtype'], {}), '(maxshape, a.dtype)\n', (1551, 1570), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((1737, 1760), 'hftools.dataset.hfarray', 'hfarray', (['res'], {'dims': 'dims'}), '(res, dims=dims)\n', (1744, 1760), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((4128, 4160), 'hftools.dataset.hfarray', 'hfarray', (['result2'], {'dims': 'data.dims'}), '(result2, dims=data.dims)\n', (4135, 4160), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((4988, 5013), 'hftools.dataset.arrayobj.make_same_dims', 'make_same_dims', (['freq', 'var'], {}), '(freq, var)\n', (5002, 5013), False, 'from hftools.dataset.arrayobj import _hfarray, make_same_dims\n'), ((5558, 5577), 'numpy.asanyarray', 'np.asanyarray', (['data'], {}), '(data)\n', (5571, 5577), True, 'import numpy as np\n'), ((5592, 5611), 'numpy.empty_like', 'np.empty_like', (['data'], {}), '(data)\n', (5605, 5611), True, 'import numpy as np\n'), ((6502, 6518), 'numpy.empty_like', 'np.empty_like', (['y'], {}), '(y)\n', (6515, 6518), True, 'import numpy as np\n'), ((7557, 7573), 'numpy.asanyarray', 'np.asanyarray', (['x'], {}), '(x)\n', (7570, 7573), True, 'import numpy as np\n'), ((7582, 7598), 'numpy.asanyarray', 'np.asanyarray', (['y'], {}), '(y)\n', (7595, 7598), True, 'import numpy as np\n'), ((7608, 7625), 'numpy.asanyarray', 'np.asanyarray', (['xi'], {}), '(xi)\n', (7621, 7625), True, 'import numpy as np\n'), ((7637, 7665), 'numpy.searchsorted', 'np.searchsorted', (['x[1:-1]', 
'xi'], {}), '(x[1:-1], xi)\n', (7652, 7665), True, 'import numpy as np\n'), ((7882, 7898), 'numpy.asanyarray', 'np.asanyarray', (['x'], {}), '(x)\n', (7895, 7898), True, 'import numpy as np\n'), ((7907, 7923), 'numpy.asanyarray', 'np.asanyarray', (['y'], {}), '(y)\n', (7920, 7923), True, 'import numpy as np\n'), ((7933, 7950), 'numpy.asanyarray', 'np.asanyarray', (['xi'], {}), '(xi)\n', (7946, 7950), True, 'import numpy as np\n'), ((8322, 8365), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['x', 'x.shape[:-N]', 'x.strides[:-N]'], {}), '(x, x.shape[:-N], x.strides[:-N])\n', (8332, 8365), False, 'from numpy.lib.stride_tricks import broadcast_arrays, as_strided\n'), ((8405, 8448), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['x', 'x.shape[-N:]', 'x.strides[-N:]'], {}), '(x, x.shape[-N:], x.strides[-N:])\n', (8415, 8448), False, 'from numpy.lib.stride_tricks import broadcast_arrays, as_strided\n'), ((10311, 10339), 'hftools.dataset.hfarray', 'hfarray', (['result'], {'dims': 'A.dims'}), '(result, dims=A.dims)\n', (10318, 10339), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((10545, 10594), 'numpy.empty', 'np.empty', (['(res.outershape + x.shape)'], {'dtype': 'x.dtype'}), '(res.outershape + x.shape, dtype=x.dtype)\n', (10553, 10594), True, 'import numpy as np\n'), ((10608, 10639), 'hftools.dataset.hfarray', 'hfarray', (['resempty'], {'dims': 'resdims'}), '(resempty, dims=resdims)\n', (10615, 10639), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((10979, 10999), 'hftools.dataset.arrayobj.make_same_dims', 'make_same_dims', (['a', 'b'], {}), '(a, b)\n', (10993, 10999), False, 'from hftools.dataset.arrayobj import _hfarray, make_same_dims\n'), ((11010, 11047), 'numpy.einsum', 'np.einsum', (['"""...ij,...jk->...ik"""', 'A', 'B'], {}), "('...ij,...jk->...ik', A, B)\n", (11019, 11047), True, 'import numpy as np\n'), ((11059, 11084), 'hftools.dataset.hfarray', 
'hfarray', (['res'], {'dims': 'A.dims'}), '(res, dims=A.dims)\n', (11066, 11084), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((11124, 11147), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['A'], {}), '(A)\n', (11144, 11147), True, 'import numpy as np\n'), ((11321, 11354), 'hftools.dataset.hfarray', 'hfarray', (['result'], {'dims': 'A.dims[:-2]'}), '(result, dims=A.dims[:-2])\n', (11328, 11354), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((11422, 11442), 'hftools.dataset.arrayobj.make_same_dims', 'make_same_dims', (['A', 'b'], {}), '(A, b)\n', (11436, 11442), False, 'from hftools.dataset.arrayobj import _hfarray, make_same_dims\n'), ((11451, 11474), 'numpy.linalg.solve', 'np.linalg.solve', (['AA', 'bb'], {}), '(AA, bb)\n', (11466, 11474), True, 'import numpy as np\n'), ((11488, 11512), 'hftools.dataset.hfarray', 'hfarray', (['x'], {'dims': 'bb.dims'}), '(x, dims=bb.dims)\n', (11495, 11512), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((11833, 11882), 'numpy.empty', 'np.empty', (['(res.outershape + x.shape)'], {'dtype': 'x.dtype'}), '(res.outershape + x.shape, dtype=x.dtype)\n', (11841, 11882), True, 'import numpy as np\n'), ((11897, 11924), 'hftools.dataset.hfarray', 'hfarray', (['xempty'], {'dims': 'xdims'}), '(xempty, dims=xdims)\n', (11904, 11924), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((12078, 12105), 'numpy.array', 'array', (['[[[1.0, 2], [3, 4]]]'], {}), '([[[1.0, 2], [3, 4]]])\n', (12083, 12105), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((12114, 12126), 'numpy.array', 'array', (['[0.0]'], {}), '([0.0])\n', (12119, 12126), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((12134, 12197), 'numpy.array', 'array', (['[[[4.0, 3], [2, 1]], [[5, 3], [2, 1]], [[7, 3], [2, 1]]]'], {}), '([[[4.0, 3], [2, 1]], [[5, 3], [2, 
1]], [[7, 3], [2, 1]]])\n', (12139, 12197), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((12236, 12254), 'numpy.array', 'array', (['[0, 0, 0.0]'], {}), '([0, 0, 0.0])\n', (12241, 12254), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((1159, 1175), 'numpy.asanyarray', 'np.asanyarray', (['z'], {}), '(z)\n', (1172, 1175), True, 'import numpy as np\n'), ((1178, 1191), 'numpy.exp', 'exp', (['(1.0j * b)'], {}), '(1.0j * b)\n', (1181, 1191), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((2570, 2598), 'numpy.exp', 'exp', (['(1.0j * pi * ang / 180.0)'], {}), '(1.0j * pi * ang / 180.0)\n', (2573, 2598), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((2779, 2807), 'numpy.exp', 'exp', (['(1.0j * pi * ang / 180.0)'], {}), '(1.0j * pi * ang / 180.0)\n', (2782, 2807), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((3136, 3157), 'numpy.exp', 'exp', (['(1.0j * phase / 2)'], {}), '(1.0j * phase / 2)\n', (3139, 3157), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((3680, 3708), 'numpy.array', 'array', (['a.flat[0]'], {'copy': '(False)'}), '(a.flat[0], copy=False)\n', (3685, 3708), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((3933, 3947), 'numpy.array', 'array', (['add_pos'], {}), '(add_pos)\n', (3938, 3947), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((3949, 3971), 'numpy.array', 'array', (['(result + 2 * pi)'], {}), '(result + 2 * pi)\n', (3954, 3971), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((3973, 3986), 'numpy.array', 'array', (['result'], {}), '(result)\n', (3978, 3986), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((4011, 4025), 'numpy.array', 'array', (['add_neg'], {}), '(add_neg)\n', (4016, 4025), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((4027, 4050), 'numpy.array', 'array', (['(result1 - 2 * pi)'], {}), '(result1 - 2 * pi)\n', (4032, 4050), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((4052, 4066), 
'numpy.array', 'array', (['result1'], {}), '(result1)\n', (4057, 4066), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((6848, 6894), 'scipy.polyfit', 'scipy.polyfit', (['x[start:stop]', 'y[start:stop]', 'N'], {}), '(x[start:stop], y[start:stop], N)\n', (6861, 6894), False, 'import scipy\n'), ((6916, 6941), 'scipy.polyval', 'scipy.polyval', (['poly', 'x[i]'], {}), '(poly, x[i])\n', (6929, 6941), False, 'import scipy\n'), ((7245, 7258), 'numpy.exp', 'exp', (['(1.0j * p)'], {}), '(1.0j * p)\n', (7248, 7258), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((7501, 7514), 'numpy.exp', 'exp', (['(1.0j * p)'], {}), '(1.0j * p)\n', (7504, 7514), False, 'from numpy import pi, exp, array, zeros, sqrt\n'), ((10151, 10179), 'numpy.ndenumerate', 'np.ndenumerate', (['to_enumerate'], {}), '(to_enumerate)\n', (10165, 10179), True, 'import numpy as np\n'), ((1684, 1703), 'hftools.dataset.DimMatrix_i', 'DimMatrix_i', (['"""i"""', '(2)'], {}), "('i', 2)\n", (1695, 1703), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((1705, 1724), 'hftools.dataset.DimMatrix_j', 'DimMatrix_j', (['"""j"""', '(2)'], {}), "('j', 2)\n", (1716, 1724), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((5845, 5887), 'numpy.mean', 'np.mean', (['data[i - wid:i + wid + odd]', 'axis'], {}), '(data[i - wid:i + wid + odd], axis)\n', (5852, 5887), True, 'import numpy as np\n'), ((8808, 8835), 'hftools.dataset.make_same_dims_list', 'make_same_dims_list', (['arrays'], {}), '(arrays)\n', (8827, 8835), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((9814, 9872), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['o', '(o.shape + endshapes)', '(o.strides + endstrides)'], {}), '(o, o.shape + endshapes, o.strides + endstrides)\n', (9824, 9872), False, 'from numpy.lib.stride_tricks import broadcast_arrays, as_strided\n'), ((9904, 9937), 
'hftools.dataset.hfarray', 'hfarray', (['x'], {'dims': 'dims', 'copy': '(False)'}), '(x, dims=dims, copy=False)\n', (9911, 9937), False, 'from hftools.dataset import make_same_dims_list, hfarray, DimMatrix_i, DimMatrix_j\n'), ((11165, 11197), 'numpy.multiply.reduce', 'np.multiply.reduce', (['A.shape[:-2]'], {}), '(A.shape[:-2])\n', (11183, 11197), True, 'import numpy as np\n')] |
import PIL
from PIL.Image import Image
import numpy as np
import torch
import matplotlib.pyplot as plt
import cv2
from imutils import face_utils
def detect_faces(image, model):
    """Run the given face-detection model on an image.

    Returns whatever the detector produces: the bounding boxes and their
    associated confidence scores, as a (boxes, probabilities) pair.
    """
    boxes, confidences = model.detect(image)
    return boxes, confidences
def rect_to_bb(rect):
    """Convert a dlib-style rectangle into an OpenCV-style box.

    dlib exposes left/top/right/bottom accessors; OpenCV convention is a
    (x, y, w, h) tuple, which is what this returns.
    """
    left = rect.left()
    top = rect.top()
    width = rect.right() - left
    height = rect.bottom() - top
    return (left, top, width, height)
def shape_to_np(shape, dtype="int"):
    """Convert a dlib 68-point landmark shape into a (68, 2) numpy array.

    Each row holds the (x, y) coordinates of one facial landmark.
    """
    points = np.zeros((68, 2), dtype=dtype)
    for idx in range(68):
        part = shape.part(idx)
        points[idx] = (part.x, part.y)
    return points
# this is for a single image
def do_mtcnn_detect(mtcnn, img, can_detect, cant_detect_paths, image_name, image_types, detected_images_number, total_images_number):
    """Run MTCNN face/landmark detection on a single image and record stats.

    Saves an annotated matplotlib figure to ./output_images/ (suffixed
    "_undetected" when no landmarks are found), appends undetected image
    names to ``cant_detect_paths``, and updates the per-class
    ``detected_images_number`` / ``total_images_number`` counters in place.

    Returns:
        The updated ``can_detect`` count. (Bug fix: ints are immutable in
        Python, so the original in-place increment was silently lost to the
        caller; callers should now do ``can_detect = do_mtcnn_detect(...)``.)
    """
    boxes, probabilities, landmarks_points = mtcnn.detect(img, landmarks=True)
    fig, ax = plt.subplots(figsize=(16, 12))
    ax.imshow(img)
    ax.axis('off')
    if landmarks_points is not None:
        for box, landmark in zip(boxes, landmarks_points):
            ax.scatter(landmark[:, 0], landmark[:, 1], s=35)
            # NOTE(review): increments once per detected face, not per image —
            # confirm whether a per-image count was intended.
            can_detect = can_detect + 1
            print(can_detect)
        plt.savefig("./output_images/pic_%s.png" % (str(image_name)))
    else:
        if cant_detect_paths is not None:
            cant_detect_paths.append(image_name)
        plt.savefig("./output_images/pic_%s_undetected.png" % (str(image_name)))
        print(cant_detect_paths)
    # Per-class counters are lists, so in-place updates are visible to the
    # caller. A class matches when its name is a substring of the image name.
    if landmarks_points is not None:
        for idx, image_type in enumerate(image_types):
            if str(image_type) in image_name:
                detected_images_number[idx] = detected_images_number[idx] + 1
    for idx, image_type in enumerate(image_types):
        if str(image_type) in image_name:
            total_images_number[idx] = total_images_number[idx] + 1
    return can_detect
def do_dlib_detect(detector, predictor, image_name, img):
    """Detect faces with dlib, draw boxes and 68 landmarks, and show the result.

    Mutates ``img`` in place (rectangles, labels, landmark dots) and blocks
    on ``cv2.waitKey(0)`` until a key is pressed. ``image_name`` is currently
    unused but kept for interface compatibility with do_mtcnn_detect.
    """
    rects = detector(img, 1)
    for (i, rect) in enumerate(rects):
        shape = predictor(img, rect)
        shape = face_utils.shape_to_np(shape)
        # Convert dlib's rectangle to an OpenCV-style (x, y, w, h) box
        # and draw the face bounding box.
        (x, y, w, h) = face_utils.rect_to_bb(rect)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Label the face number just above the box.
        cv2.putText(img, "Face #{}".format(i + 1), (x - 10, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # Draw each of the 68 facial landmarks as a small dot.
        # (Bug fix: removed leftover debug print that fired per landmark.)
        for (x, y) in shape:
            cv2.circle(img, (x, y), 1, (250, 0, 0), -1)
    # Show the annotated image with face detections + facial landmarks.
    cv2.imshow("Output", img)
    cv2.waitKey(0)
"cv2.circle",
"cv2.waitKey",
"numpy.zeros",
"imutils.face_utils.shape_to_np",
"imutils.face_utils.rect_to_bb",
"cv2.rectangle",
"cv2.imshow",
"matplotlib.pyplot.subplots"
] | [((630, 660), 'numpy.zeros', 'np.zeros', (['(68, 2)'], {'dtype': 'dtype'}), '((68, 2), dtype=dtype)\n', (638, 660), True, 'import numpy as np\n'), ((1176, 1206), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (1188, 1206), True, 'import matplotlib.pyplot as plt\n'), ((3351, 3376), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'img'], {}), "('Output', img)\n", (3361, 3376), False, 'import cv2\n'), ((3381, 3395), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3392, 3395), False, 'import cv2\n'), ((2607, 2636), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (2629, 2636), False, 'from imutils import face_utils\n'), ((2790, 2817), 'imutils.face_utils.rect_to_bb', 'face_utils.rect_to_bb', (['rect'], {}), '(rect)\n', (2811, 2817), False, 'from imutils import face_utils\n'), ((2826, 2884), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (2839, 2884), False, 'import cv2\n'), ((3230, 3273), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(1)', '(250, 0, 0)', '(-1)'], {}), '(img, (x, y), 1, (250, 0, 0), -1)\n', (3240, 3273), False, 'import cv2\n')] |
# import modules
import numpy as np
import scipy.fftpack as fft
def mkwhite(t, fs=48000):
    """Generate t seconds of peak-normalized Gaussian white noise.

    Args:
        t: duration in seconds.
        fs: sample rate in Hz.
    Returns:
        A 1-D numpy array of int(t * fs) samples scaled so max |x| == 1.
    """
    n_samples = int(t * fs)
    noise = np.random.randn(n_samples)
    return noise / np.max(np.abs(noise))
def mkpink(t, fs=48000):
    """Generate t seconds of peak-normalized pink (1/f) noise.

    White noise is shaped in the frequency domain with a 1/sqrt(f)
    magnitude envelope (the DC bin keeps unit gain), transformed back to
    the time domain, and rescaled so max |x| == 1.
    """
    n_samples = int(t * fs)
    spectrum = fft.fft(mkwhite(t, fs))
    # Frequency grid for bins 1..n_samples-1; bin 0 (DC) keeps gain 1.
    freqs = np.arange(start=fs / n_samples, stop=fs, step=fs / n_samples)
    envelope = np.concatenate((np.array([1]), 1 / np.sqrt(freqs)))
    shaped = np.real(fft.ifft(spectrum * envelope))
    return shaped / np.max(np.abs(shaped))
# TODO mkTSP
| [
"numpy.abs",
"numpy.random.randn",
"scipy.fftpack.fft",
"scipy.fftpack.ifft",
"numpy.array",
"numpy.arange"
] | [((125, 145), 'numpy.random.randn', 'np.random.randn', (['tap'], {}), '(tap)\n', (140, 145), True, 'import numpy as np\n'), ((285, 299), 'scipy.fftpack.fft', 'fft.fft', (['white'], {}), '(white)\n', (292, 299), True, 'import scipy.fftpack as fft\n'), ((166, 179), 'numpy.abs', 'np.abs', (['white'], {}), '(white)\n', (172, 179), True, 'import numpy as np\n'), ((459, 473), 'scipy.fftpack.ifft', 'fft.ifft', (['PINK'], {}), '(PINK)\n', (467, 473), True, 'import scipy.fftpack as fft\n'), ((494, 506), 'numpy.abs', 'np.abs', (['pink'], {}), '(pink)\n', (500, 506), True, 'import numpy as np\n'), ((335, 348), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (343, 348), True, 'import numpy as np\n'), ((360, 409), 'numpy.arange', 'np.arange', ([], {'start': '(fs / tap)', 'stop': 'fs', 'step': '(fs / tap)'}), '(start=fs / tap, stop=fs, step=fs / tap)\n', (369, 409), True, 'import numpy as np\n')] |
"""Note: Keep in sync with changes to VTracePolicyGraph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import gym
import copy
import ray
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.evaluation.policy_graph import PolicyGraph
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph, \
LearningRateSchedule
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils.annotations import override
def kl_div(p, q):
    """Kullback-Leibler divergence D(P || Q) for discrete probability dists

    Assumes the probability dist is over the last dimension.
    Adapted from: https://gist.github.com/swayson/86c296aa354a555536e6765bbe726ff7

    Args:
        p, q: array-likes of shape [B, N, A]; the divergence is taken over
            the final axis, yielding an array of shape [B, N].
    Returns:
        np.ndarray of per-(batch, agent) KL divergences.
    """
    # Bug fix 1: np.float was deprecated and removed in NumPy 1.24+;
    # use the builtin float instead.
    # Bug fix 2: np.asarray can return the caller's own array when the dtype
    # already matches, so the in-place zeroing below would corrupt caller
    # data; np.array always copies.
    p = np.array(p, dtype=float)
    q = np.array(q, dtype=float)
    mask = np.where(q == 0)
    # Zero p wherever q is zero (D(P||Q) is undefined there), then
    # renormalize p over the distribution axis. Prevents division by 0 errors.
    p[mask] = 0
    p = p / p.sum(axis=2, keepdims=1)
    return np.sum(np.where((p != 0) & (q != 0), p * np.log(p / q), 0), axis=-1)
def agent_name_to_visibility_idx(name, self_id):
    """Map another agent's name to its column in this agent's visibility array.

    Agent numbers are read from character 6 of the name (e.g. "agent-3" -> 3).
    The current agent has no column of its own, so agents numbered above it
    shift down by one.
    """
    other_num = int(name[6])
    own_num = int(self_id[6])
    return other_num - 1 if other_num > own_num else other_num
def agent_name_to_idx(name):
    """Extract the numeric agent index from a name such as "agent-2"."""
    return int(name[6])
class A3CLoss(object):
    """Standard A3C actor-critic loss: policy gradient + value error - entropy."""
    def __init__(self,
                 action_dist,
                 actions,
                 advantages,
                 v_target,
                 vf,
                 vf_loss_coeff=0.5,
                 entropy_coeff=0.01):
        # Policy-gradient term: advantage-weighted log-likelihood of the
        # actions actually taken (negated since we minimize).
        action_log_likelihood = action_dist.logp(actions)
        self.pi_loss = -tf.reduce_sum(action_log_likelihood * advantages)
        # Value-function term: half the summed squared TD error.
        value_error = vf - v_target
        self.vf_loss = 0.5 * tf.reduce_sum(tf.square(value_error))
        # Entropy bonus encourages exploration, hence it is subtracted.
        self.entropy = tf.reduce_sum(action_dist.entropy())
        self.total_loss = (self.pi_loss
                           + vf_loss_coeff * self.vf_loss
                           - entropy_coeff * self.entropy)
class MOALoss(object):
    # Supervised loss for the Model of Other Agents (MOA) prediction head.
    def __init__(self, action_logits, true_actions, num_actions,
                 loss_weight=1.0, others_visibility=None):
        """Train MOA model with supervised cross entropy loss on a trajectory.
        The model is trying to predict others' actions at timestep t+1 given all
        actions at timestep t.

        Args:
            action_logits: predicted logits over others' actions, [B, N, A].
            true_actions: integer action matrix, [B, N+1]; column 0 is this
                agent's own action, columns 1..N are the other agents'.
            num_actions: size A of the discrete action space.
            loss_weight: scalar multiplier applied to the mean loss.
            others_visibility: optional [B, N] 0/1 mask; loss entries for
                non-visible agents are zeroed out.
        Returns:
            A scalar loss tensor (cross-entropy loss).
        """
        # Remove the prediction for the final step, since t+1 is not known for
        # this step.
        action_logits = action_logits[:-1, :, :]  # [B, N, A]
        # Remove first agent (self) and first action, because we want to predict
        # the t+1 actions of other agents from all actions at t.
        true_actions = true_actions[1:, 1:]  # [B, N]
        # Compute softmax cross entropy
        flat_logits = tf.reshape(action_logits, [-1, num_actions])
        flat_labels = tf.reshape(true_actions, [-1])
        self.ce_per_entry = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=flat_labels, logits=flat_logits)
        # Zero out the loss if the other agent isn't visible to this one.
        if others_visibility is not None:
            # Remove first entry in ground truth visibility and flatten
            # (aligns the mask with the sliced true_actions above).
            others_visibility = tf.reshape(others_visibility[1:, :], [-1])
            self.ce_per_entry *= tf.cast(others_visibility, tf.float32)
        self.total_loss = tf.reduce_mean(self.ce_per_entry) * loss_weight
        # NOTE(review): this tf.print op is never fetched or wired into a
        # control dependency, so in graph mode it likely never executes —
        # confirm whether this logging is expected to work.
        tf.print("MOA CE loss", self.total_loss, [self.total_loss])
class A3CPolicyGraph(LearningRateSchedule, TFPolicyGraph):
    """A3C policy graph extended with a Model of Other Agents (MOA) head and a
    causal social-influence intrinsic reward that is added onto the
    environment rewards during trajectory postprocessing."""
    def __init__(self, observation_space, action_space, config):
        # Builds the full TF1 graph: policy LSTM, MOA LSTM, both losses,
        # placeholders, and stats. Graph-construction order matters here.
        config = dict(ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG, **config)
        self.config = config
        self.sess = tf.get_default_session()
        # Extract info from config
        self.num_other_agents = config['num_other_agents']
        self.agent_id = config['agent_id']
        # Extract influence options
        cust_opts = config['model']['custom_options']
        self.moa_weight = cust_opts['moa_weight']
        self.train_moa_only_when_visible = cust_opts['train_moa_only_when_visible']
        self.influence_reward_clip = cust_opts['influence_reward_clip']
        self.influence_divergence_measure = cust_opts['influence_divergence_measure']
        self.influence_reward_weight = cust_opts['influence_reward_weight']
        self.influence_curriculum_steps = cust_opts['influence_curriculum_steps']
        self.influence_only_when_visible = cust_opts['influence_only_when_visible']
        self.inf_scale_start = cust_opts['influence_scaledown_start']
        self.inf_scale_end = cust_opts['influence_scaledown_end']
        self.inf_scale_final_val = cust_opts['influence_scaledown_final_val']
        # Use to compute increasing influence curriculum weight
        self.steps_processed = 0
        # Setup the policy
        self.observations = tf.placeholder(
            tf.float32, [None] + list(observation_space.shape))
        # Add other agents actions placeholder for MOA preds
        # Add 1 to include own action so it can be conditioned on. Note: agent's
        # own actions will always form the first column of this tensor.
        self.others_actions = tf.placeholder(tf.int32,
                                             shape=(None, self.num_other_agents + 1),
                                             name="others_actions")
        # 0/1 multiplier array representing whether each agent is visible to
        # the current agent.
        if self.train_moa_only_when_visible:
            self.others_visibility = tf.placeholder(tf.int32,
                shape=(None, self.num_other_agents),
                name="others_visibility")
        else:
            self.others_visibility = None
        dist_class, self.num_actions = ModelCatalog.get_action_dist(
            action_space, self.config["model"])
        prev_actions = ModelCatalog.get_action_placeholder(action_space)
        prev_rewards = tf.placeholder(tf.float32, [None], name="prev_reward")
        # Compute output size of model of other agents (MOA)
        self.moa_dim = self.num_actions * self.num_other_agents
        # We now create two models, one for the policy, and one for the model
        # of other agents (MOA)
        self.rl_model, self.moa = ModelCatalog.get_double_lstm_model({
            "obs": self.observations,
            "others_actions": self.others_actions,
            "prev_actions": prev_actions,
            "prev_rewards": prev_rewards,
            "is_training": self._get_is_training_placeholder(),
        }, observation_space, self.num_actions, self.moa_dim,
            self.config["model"], lstm1_name="policy", lstm2_name="moa")
        action_dist = dist_class(self.rl_model.outputs)
        self.action_probs = tf.nn.softmax(self.rl_model.outputs)
        self.vf = self.rl_model.value_function()
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          tf.get_variable_scope().name)
        # Setup the policy loss
        if isinstance(action_space, gym.spaces.Box):
            ac_size = action_space.shape[0]
            actions = tf.placeholder(tf.float32, [None, ac_size], name="ac")
        elif isinstance(action_space, gym.spaces.Discrete):
            actions = tf.placeholder(tf.int64, [None], name="ac")
        else:
            raise UnsupportedSpaceException(
                "Action space {} is not supported for A3C.".format(
                    action_space))
        advantages = tf.placeholder(tf.float32, [None], name="advantages")
        self.v_target = tf.placeholder(tf.float32, [None], name="v_target")
        self.rl_loss = A3CLoss(action_dist, actions, advantages, self.v_target,
                               self.vf, self.config["vf_loss_coeff"],
                               self.config["entropy_coeff"])
        # Setup the MOA loss
        self.moa_preds = tf.reshape(  # Reshape to [B,N,A]
            self.moa.outputs, [-1, self.num_other_agents, self.num_actions])
        self.moa_loss = MOALoss(self.moa_preds, self.others_actions,
                                self.num_actions, loss_weight=self.moa_weight,
                                others_visibility=self.others_visibility)
        self.moa_action_probs = tf.nn.softmax(self.moa_preds)
        # Total loss
        self.total_loss = self.rl_loss.total_loss + self.moa_loss.total_loss
        # Initialize TFPolicyGraph
        loss_in = [
            ("obs", self.observations),
            ("others_actions", self.others_actions),
            ("actions", actions),
            ("prev_actions", prev_actions),
            ("prev_rewards", prev_rewards),
            ("advantages", advantages),
            ("value_targets", self.v_target),
        ]
        if self.train_moa_only_when_visible:
            loss_in.append(('others_visibility', self.others_visibility))
        LearningRateSchedule.__init__(self, self.config["lr"],
                                      self.config["lr_schedule"])
        TFPolicyGraph.__init__(
            self,
            observation_space,
            action_space,
            self.sess,
            obs_input=self.observations,
            action_sampler=action_dist.sample(),
            action_prob=action_dist.sampled_action_prob(),
            loss=self.total_loss,
            model=self.rl_model,
            loss_inputs=loss_in,
            state_inputs=self.rl_model.state_in + self.moa.state_in,
            state_outputs=self.rl_model.state_out + self.moa.state_out,
            prev_action_input=prev_actions,
            prev_reward_input=prev_rewards,
            seq_lens=self.rl_model.seq_lens,
            max_seq_len=self.config["model"]["max_seq_len"])
        # TF variable so the last computed total influence shows up in stats.
        self.total_influence = tf.get_variable("total_influence", initializer=tf.constant(0.0))
        self.stats = {
            "cur_lr": tf.cast(self.cur_lr, tf.float64),
            "policy_loss": self.rl_loss.pi_loss,
            "policy_entropy": self.rl_loss.entropy,
            "grad_gnorm": tf.global_norm(self._grads),
            "var_gnorm": tf.global_norm(self.var_list),
            "vf_loss": self.rl_loss.vf_loss,
            "vf_explained_var": explained_variance(self.v_target, self.vf),
            "moa_loss": self.moa_loss.total_loss,
            "total_influence": self.total_influence
        }
        self.sess.run(tf.global_variables_initializer())
    @override(PolicyGraph)
    def get_initial_state(self):
        # Concatenated initial RNN state for both the policy and MOA LSTMs.
        return self.rl_model.state_init + self.moa.state_init
    @override(TFPolicyGraph)
    def _build_compute_actions(self,
                               builder,
                               obs_batch,
                               state_batches=None,
                               prev_action_batch=None,
                               prev_reward_batch=None,
                               episodes=None):
        # Extends the base implementation to also feed others' actions and
        # the MOA LSTM's sequence lengths.
        state_batches = state_batches or []
        if len(self._state_inputs) != len(state_batches):
            raise ValueError(
                "Must pass in RNN state batches for placeholders {}, got {}".
                format(self._state_inputs, state_batches))
        builder.add_feed_dict(self.extra_compute_action_feed_dict())
        # Extract matrix of other agents' past actions, including agent's own
        if type(episodes) == dict and 'all_agents_actions' in episodes.keys():
            # Call from visualizer_rllib, change episodes format so it complies with the default format.
            self_index = agent_name_to_idx(self.agent_id)
            # First get own action
            all_actions = [episodes['all_agents_actions'][self_index]]
            others_actions = [e for i, e in enumerate(
                episodes['all_agents_actions']) if self_index != i]
            all_actions.extend(others_actions)
            all_actions = np.reshape(np.array(all_actions), [1, -1])
        else:
            own_actions = np.atleast_2d(np.array(
                [e.prev_action for e in episodes[self.agent_id]]))
            all_actions = self.extract_last_actions_from_episodes(
                episodes, own_actions=own_actions)
        builder.add_feed_dict({self._obs_input: obs_batch,
                               self.others_actions: all_actions})
        if state_batches:
            seq_lens = np.ones(len(obs_batch))
            builder.add_feed_dict({self._seq_lens: seq_lens,
                                   self.moa.seq_lens: seq_lens})
        if self._prev_action_input is not None and prev_action_batch:
            builder.add_feed_dict({self._prev_action_input: prev_action_batch})
        if self._prev_reward_input is not None and prev_reward_batch:
            builder.add_feed_dict({self._prev_reward_input: prev_reward_batch})
        builder.add_feed_dict({self._is_training: False})
        builder.add_feed_dict(dict(zip(self._state_inputs, state_batches)))
        fetches = builder.add_fetches([self._sampler] + self._state_outputs +
                                      [self.extra_compute_action_fetches()])
        return fetches[0], fetches[1:-1], fetches[-1]
    def _get_loss_inputs_dict(self, batch):
        # Override parent function to add seq_lens to tensor for additional LSTM
        loss_inputs = super(A3CPolicyGraph, self)._get_loss_inputs_dict(batch)
        loss_inputs[self.moa.seq_lens] = loss_inputs[self._seq_lens]
        return loss_inputs
    @override(TFPolicyGraph)
    def gradients(self, optimizer):
        # Global-norm gradient clipping over all trainable variables.
        grads = tf.gradients(self._loss, self.var_list)
        grads, _ = tf.clip_by_global_norm(grads, self.config["grad_clip"])
        clipped_grads = list(zip(grads, self.var_list))
        return clipped_grads
    @override(TFPolicyGraph)
    def extra_compute_grad_fetches(self):
        return {
            "stats": self.stats,
        }
    @override(TFPolicyGraph)
    def extra_compute_action_fetches(self):
        # Also fetch value-function predictions alongside sampled actions.
        return dict(
            TFPolicyGraph.extra_compute_action_fetches(self),
            **{"vf_preds": self.vf})
    def _value(self, ob, others_actions, prev_action, prev_reward, *args):
        # Run the value head for a single observation; *args carries the
        # policy LSTM state.
        feed_dict = {self.observations: [ob],
                     self.others_actions: [others_actions],
                     self.rl_model.seq_lens: [1],
                     self._prev_action_input: [prev_action],
                     self._prev_reward_input: [prev_reward]}
        assert len(args) == len(self.rl_model.state_in), \
            (args, self.rl_model.state_in)
        for k, v in zip(self.rl_model.state_in, args):
            feed_dict[k] = v
        vf = self.sess.run(self.vf, feed_dict)
        return vf[0]
    def extract_last_actions_from_episodes(self, episodes, batch_type=False,
                                           own_actions=None):
        """Pulls every other agent's previous actions out of structured data.
        Args:
            episodes: the structured data type. Typically a dict of episode
                objects.
            batch_type: if True, the structured data is a dict of tuples,
                where the second tuple element is the relevant dict containing
                previous actions.
            own_actions: an array of the agents own actions. If provided, will
                be the first column of the created action matrix.
        Returns: a real valued array of size [batch, num_other_agents] (meaning
            each agents' actions goes down one column, each row is a timestep)
        """
        if episodes is None:
            print("Why are there no episodes?")
            # NOTE(review): interactive debugger left in — remove before
            # running unattended training.
            import pdb
            pdb.set_trace()
        # Need to sort agent IDs so same agent is consistently in
        # same part of input space.
        agent_ids = sorted(episodes.keys())
        prev_actions = []
        for agent_id in agent_ids:
            if agent_id == self.agent_id:
                continue
            if batch_type:
                prev_actions.append(episodes[agent_id][1]['actions'])
            else:
                prev_actions.append(
                    [e.prev_action for e in episodes[agent_id]])
        all_actions = np.transpose(np.array(prev_actions))
        # Attach agents own actions as column 1
        if own_actions is not None:
            all_actions = np.hstack((own_actions, all_actions))
        return all_actions
    @override(PolicyGraph)
    def postprocess_trajectory(self,
                               sample_batch,
                               other_agent_batches=None,
                               episode=None):
        """Adds others' actions, visibility, and influence reward to the
        batch, then computes advantages (bootstrapping when not done)."""
        # Extract matrix of self and other agents' actions.
        own_actions = np.atleast_2d(np.array(sample_batch['actions']))
        own_actions = np.reshape(own_actions, [-1, 1])
        all_actions = self.extract_last_actions_from_episodes(
            other_agent_batches, own_actions=own_actions, batch_type=True)
        sample_batch['others_actions'] = all_actions
        if self.train_moa_only_when_visible:
            sample_batch['others_visibility'] = \
                self.get_agent_visibility_multiplier(sample_batch)
        # Compute causal social influence reward and add to batch.
        sample_batch = self.compute_influence_reward(sample_batch)
        completed = sample_batch["dones"][-1]
        if completed:
            last_r = 0.0
        else:
            # Bootstrap the value of the final state from the value head.
            next_state = []
            for i in range(len(self.rl_model.state_in)):
                next_state.append([sample_batch["state_out_{}".format(i)][-1]])
            prev_action = sample_batch['prev_actions'][-1]
            prev_reward = sample_batch['prev_rewards'][-1]
            last_r = self._value(sample_batch["new_obs"][-1],
                                 all_actions[-1], prev_action, prev_reward,
                                 *next_state)
        sample_batch = compute_advantages(sample_batch, last_r, self.config["gamma"],
                                           self.config["lambda"])
        return sample_batch
    def compute_influence_reward(self, trajectory):
        """Compute influence of this agent on other agents and add to rewards.
        """
        # Predict the next action for all other agents. Shape is [B, N, A]
        true_logits, true_probs = self.predict_others_next_action(trajectory)
        # Get marginal predictions where effect of self is marginalized out
        (marginal_logits,
         marginal_probs) = self.marginalize_predictions_over_own_actions(
             trajectory)  # [B, N, A]
        # Compute influence per agent/step ([B, N]) using different metrics
        # NOTE(review): influence_per_agent_step is unbound (NameError below)
        # if influence_divergence_measure is neither 'kl' nor 'jsd'.
        if self.influence_divergence_measure == 'kl':
            influence_per_agent_step = kl_div(true_probs, marginal_probs)
        elif self.influence_divergence_measure == 'jsd':
            mean_probs = 0.5 * (true_probs + marginal_probs)
            influence_per_agent_step = (0.5 * kl_div(true_probs, mean_probs) +
                                        0.5 * kl_div(marginal_probs, mean_probs))
        # TODO(natashamjaques): more policy comparison functions here.
        # Zero out influence for steps where the other agent isn't visible.
        if self.influence_only_when_visible:
            if 'others_visibility' in trajectory.keys():
                visibility = trajectory['others_visibility']
            else:
                visibility = self.get_agent_visibility_multiplier(trajectory)
            influence_per_agent_step *= visibility
        # Logging influence metrics
        total_influence = np.sum(influence_per_agent_step)
        self.total_influence.load(total_influence, session=self.sess)
        # Summarize and clip influence reward
        influence = np.sum(influence_per_agent_step, axis=-1)
        influence = np.clip(influence, -self.influence_reward_clip,
                            self.influence_reward_clip)
        # Get influence curriculum weight
        self.steps_processed += len(trajectory['obs'])
        inf_weight = self.current_influence_curriculum_weight()
        # Add to trajectory
        trajectory['rewards'] = trajectory['rewards'] + (influence * inf_weight)
        return trajectory
    def get_agent_visibility_multiplier(self, trajectory):
        """Build a [T, num_other_agents] 0/1 mask of which other agents were
        visible at each step, read from env infos['visible_agents']."""
        traj_len = len(trajectory['infos'])
        visibility = np.zeros((traj_len, self.num_other_agents))
        vis_lists = [info['visible_agents'] for info in trajectory['infos']]
        for i, v in enumerate(vis_lists):
            vis_agents = [agent_name_to_visibility_idx(a, self.agent_id) for a in v]
            visibility[i, vis_agents] = 1
        return visibility
    def current_influence_curriculum_weight(self):
        """ Computes multiplier for influence reward based on training steps
        taken and curriculum parameters.
        Returns: scalar float influence weight
        """
        if self.steps_processed < self.influence_curriculum_steps:
            percent = float(self.steps_processed) / self.influence_curriculum_steps
            return percent * self.influence_reward_weight
        # NOTE(review): self.influence_opts is never defined in __init__
        # (options were read from cust_opts into self.inf_scale_start etc.),
        # so reaching this branch raises AttributeError — confirm it should
        # read self.inf_scale_start instead.
        elif self.steps_processed > self.influence_opts['influence_scaledown_start']:
            percent = (self.steps_processed - self.inf_scale_start) \
                / float(self.inf_scale_end - self.inf_scale_start)
            diff = self.influence_reward_weight - self.inf_scale_final_val
            scaled = self.influence_reward_weight - diff * percent
            return max(self.inf_scale_final_val, scaled)
        else:
            return self.influence_reward_weight
    def marginalize_predictions_over_own_actions(self, trajectory):
        """Marginalize the MOA's predictions over this agent's own action by
        running counterfactual predictions for every possible own action and
        mixing them by the policy's action probabilities."""
        # Run policy to get probability of each action in original trajectory
        action_probs = self.get_action_probabilities(trajectory)
        # Normalize to reduce numerical inaccuracies
        action_probs = action_probs / action_probs.sum(axis=1, keepdims=1)
        others_actions = trajectory['others_actions'][:, 1:]
        traj = copy.deepcopy(trajectory)
        traj_len = len(trajectory['obs'])
        counter_preds = []
        counter_probs = []
        # Cycle through all possible actions and get predictions for what other
        # agents would do if this action was taken at each trajectory step.
        for i in range(self.num_actions):
            counters = np.tile([i], [traj_len, 1])
            traj['others_actions'] = np.hstack((counters, others_actions))
            preds, probs = self.predict_others_next_action(traj)
            counter_preds.append(preds)
            counter_probs.append(probs)
        counter_preds = np.array(counter_preds)
        counter_probs = np.array(counter_probs)
        marginal_preds = np.sum(counter_preds, axis=0)
        marginal_probs = np.sum(counter_probs, axis=0)
        # Multiply by probability of each action to renormalize probability
        # NOTE(review): two likely bugs here — (1) the trailing comma makes
        # tiled_probs a 1-tuple, which np.reshape happens to flatten; (2) the
        # hard-coded 4 looks like it should be self.num_other_agents. Confirm.
        tiled_probs = np.tile(action_probs, 4),
        tiled_probs = np.reshape(
            tiled_probs, [traj_len, self.num_other_agents, self.num_actions])
        marginal_preds = np.multiply(marginal_preds, tiled_probs)
        marginal_probs = np.multiply(marginal_probs, tiled_probs)
        # Normalize to reduce numerical inaccuracies
        marginal_probs = marginal_probs / marginal_probs.sum(axis=2, keepdims=1)
        return marginal_preds, marginal_probs
    def predict_others_next_action(self, trajectory):
        """Run the MOA head over a trajectory; returns (logits, probs) with
        shape [B, N, A]."""
        traj_len = len(trajectory['obs'])
        feed_dict = {
            self.observations: trajectory['obs'],
            self.others_actions: trajectory['others_actions'],
            self.moa.seq_lens: [traj_len],
            self._prev_action_input: trajectory['prev_actions'],
            self._prev_reward_input: trajectory['prev_rewards']
        }
        # MOA LSTM states follow the policy LSTM states in state_in_*.
        start_state = len(self.rl_model.state_in)
        for i, v in enumerate(self.moa.state_in):
            feed_dict[v] = [trajectory['state_in_' + str(i + start_state)][0, :]]
        return self.sess.run([self.moa_preds, self.moa_action_probs], feed_dict)
    def get_action_probabilities(self, trajectory):
        """Run the policy head over a trajectory; returns softmax action
        probabilities for this agent at each step."""
        traj_len = len(trajectory['obs'])
        feed_dict = {
            self.observations: trajectory['obs'],
            self.others_actions: trajectory['others_actions'],
            self.rl_model.seq_lens: [traj_len],
            self._prev_action_input: trajectory['prev_actions'],
            self._prev_reward_input: trajectory['prev_rewards']
        }
        for i, v in enumerate(self.rl_model.state_in):
            feed_dict[v] = [trajectory['state_in_' + str(i)][0, :]]
        return self.sess.run(self.action_probs, feed_dict)
| [
"tensorflow.reduce_sum",
"numpy.sum",
"tensorflow.print",
"tensorflow.reshape",
"tensorflow.get_variable_scope",
"numpy.clip",
"numpy.tile",
"tensorflow.clip_by_global_norm",
"tensorflow.nn.softmax",
"numpy.multiply",
"ray.rllib.utils.explained_variance.explained_variance",
"ray.rllib.utils.an... | [((987, 1016), 'numpy.asarray', 'np.asarray', (['p'], {'dtype': 'np.float'}), '(p, dtype=np.float)\n', (997, 1016), True, 'import numpy as np\n'), ((1025, 1054), 'numpy.asarray', 'np.asarray', (['q'], {'dtype': 'np.float'}), '(q, dtype=np.float)\n', (1035, 1054), True, 'import numpy as np\n'), ((1066, 1082), 'numpy.where', 'np.where', (['(q == 0)'], {}), '(q == 0)\n', (1074, 1082), True, 'import numpy as np\n'), ((10866, 10887), 'ray.rllib.utils.annotations.override', 'override', (['PolicyGraph'], {}), '(PolicyGraph)\n', (10874, 10887), False, 'from ray.rllib.utils.annotations import override\n'), ((10989, 11012), 'ray.rllib.utils.annotations.override', 'override', (['TFPolicyGraph'], {}), '(TFPolicyGraph)\n', (10997, 11012), False, 'from ray.rllib.utils.annotations import override\n'), ((13869, 13892), 'ray.rllib.utils.annotations.override', 'override', (['TFPolicyGraph'], {}), '(TFPolicyGraph)\n', (13877, 13892), False, 'from ray.rllib.utils.annotations import override\n'), ((14151, 14174), 'ray.rllib.utils.annotations.override', 'override', (['TFPolicyGraph'], {}), '(TFPolicyGraph)\n', (14159, 14174), False, 'from ray.rllib.utils.annotations import override\n'), ((14283, 14306), 'ray.rllib.utils.annotations.override', 'override', (['TFPolicyGraph'], {}), '(TFPolicyGraph)\n', (14291, 14306), False, 'from ray.rllib.utils.annotations import override\n'), ((16781, 16802), 'ray.rllib.utils.annotations.override', 'override', (['PolicyGraph'], {}), '(PolicyGraph)\n', (16789, 16802), False, 'from ray.rllib.utils.annotations import override\n'), ((3075, 3119), 'tensorflow.reshape', 'tf.reshape', (['action_logits', '[-1, num_actions]'], {}), '(action_logits, [-1, num_actions])\n', (3085, 3119), True, 'import tensorflow as tf\n'), ((3142, 3172), 'tensorflow.reshape', 'tf.reshape', (['true_actions', '[-1]'], {}), '(true_actions, [-1])\n', (3152, 3172), True, 'import tensorflow as tf\n'), ((3201, 3292), 
'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'flat_labels', 'logits': 'flat_logits'}), '(labels=flat_labels, logits=\n flat_logits)\n', (3247, 3292), True, 'import tensorflow as tf\n'), ((3720, 3779), 'tensorflow.print', 'tf.print', (['"""MOA CE loss"""', 'self.total_loss', '[self.total_loss]'], {}), "('MOA CE loss', self.total_loss, [self.total_loss])\n", (3728, 3779), True, 'import tensorflow as tf\n'), ((4028, 4052), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (4050, 4052), True, 'import tensorflow as tf\n'), ((5510, 5603), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, self.num_other_agents + 1)', 'name': '"""others_actions"""'}), "(tf.int32, shape=(None, self.num_other_agents + 1), name=\n 'others_actions')\n", (5524, 5603), True, 'import tensorflow as tf\n'), ((6175, 6239), 'ray.rllib.models.catalog.ModelCatalog.get_action_dist', 'ModelCatalog.get_action_dist', (['action_space', "self.config['model']"], {}), "(action_space, self.config['model'])\n", (6203, 6239), False, 'from ray.rllib.models.catalog import ModelCatalog\n'), ((6276, 6325), 'ray.rllib.models.catalog.ModelCatalog.get_action_placeholder', 'ModelCatalog.get_action_placeholder', (['action_space'], {}), '(action_space)\n', (6311, 6325), False, 'from ray.rllib.models.catalog import ModelCatalog\n'), ((6349, 6403), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""prev_reward"""'}), "(tf.float32, [None], name='prev_reward')\n", (6363, 6403), True, 'import tensorflow as tf\n'), ((7210, 7246), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.rl_model.outputs'], {}), '(self.rl_model.outputs)\n', (7223, 7246), True, 'import tensorflow as tf\n'), ((7960, 8013), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""advantages"""'}), "(tf.float32, [None], name='advantages')\n", (7974, 8013), True, 
'import tensorflow as tf\n'), ((8038, 8089), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""v_target"""'}), "(tf.float32, [None], name='v_target')\n", (8052, 8089), True, 'import tensorflow as tf\n'), ((8356, 8431), 'tensorflow.reshape', 'tf.reshape', (['self.moa.outputs', '[-1, self.num_other_agents, self.num_actions]'], {}), '(self.moa.outputs, [-1, self.num_other_agents, self.num_actions])\n', (8366, 8431), True, 'import tensorflow as tf\n'), ((8722, 8751), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.moa_preds'], {}), '(self.moa_preds)\n', (8735, 8751), True, 'import tensorflow as tf\n'), ((9345, 9432), 'ray.rllib.evaluation.tf_policy_graph.LearningRateSchedule.__init__', 'LearningRateSchedule.__init__', (['self', "self.config['lr']", "self.config['lr_schedule']"], {}), "(self, self.config['lr'], self.config[\n 'lr_schedule'])\n", (9374, 9432), False, 'from ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph, LearningRateSchedule\n'), ((13945, 13984), 'tensorflow.gradients', 'tf.gradients', (['self._loss', 'self.var_list'], {}), '(self._loss, self.var_list)\n', (13957, 13984), True, 'import tensorflow as tf\n'), ((14004, 14059), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', "self.config['grad_clip']"], {}), "(grads, self.config['grad_clip'])\n", (14026, 14059), True, 'import tensorflow as tf\n'), ((17141, 17173), 'numpy.reshape', 'np.reshape', (['own_actions', '[-1, 1]'], {}), '(own_actions, [-1, 1])\n', (17151, 17173), True, 'import numpy as np\n'), ((18277, 18367), 'ray.rllib.evaluation.postprocessing.compute_advantages', 'compute_advantages', (['sample_batch', 'last_r', "self.config['gamma']", "self.config['lambda']"], {}), "(sample_batch, last_r, self.config['gamma'], self.config[\n 'lambda'])\n", (18295, 18367), False, 'from ray.rllib.evaluation.postprocessing import compute_advantages\n'), ((19959, 19991), 'numpy.sum', 'np.sum', (['influence_per_agent_step'], {}), 
'(influence_per_agent_step)\n', (19965, 19991), True, 'import numpy as np\n'), ((20129, 20170), 'numpy.sum', 'np.sum', (['influence_per_agent_step'], {'axis': '(-1)'}), '(influence_per_agent_step, axis=-1)\n', (20135, 20170), True, 'import numpy as np\n'), ((20191, 20266), 'numpy.clip', 'np.clip', (['influence', '(-self.influence_reward_clip)', 'self.influence_reward_clip'], {}), '(influence, -self.influence_reward_clip, self.influence_reward_clip)\n', (20198, 20266), True, 'import numpy as np\n'), ((20720, 20763), 'numpy.zeros', 'np.zeros', (['(traj_len, self.num_other_agents)'], {}), '((traj_len, self.num_other_agents))\n', (20728, 20763), True, 'import numpy as np\n'), ((22378, 22403), 'copy.deepcopy', 'copy.deepcopy', (['trajectory'], {}), '(trajectory)\n', (22391, 22403), False, 'import copy\n'), ((23003, 23026), 'numpy.array', 'np.array', (['counter_preds'], {}), '(counter_preds)\n', (23011, 23026), True, 'import numpy as np\n'), ((23051, 23074), 'numpy.array', 'np.array', (['counter_probs'], {}), '(counter_probs)\n', (23059, 23074), True, 'import numpy as np\n'), ((23101, 23130), 'numpy.sum', 'np.sum', (['counter_preds'], {'axis': '(0)'}), '(counter_preds, axis=0)\n', (23107, 23130), True, 'import numpy as np\n'), ((23156, 23185), 'numpy.sum', 'np.sum', (['counter_probs'], {'axis': '(0)'}), '(counter_probs, axis=0)\n', (23162, 23185), True, 'import numpy as np\n'), ((23334, 23410), 'numpy.reshape', 'np.reshape', (['tiled_probs', '[traj_len, self.num_other_agents, self.num_actions]'], {}), '(tiled_probs, [traj_len, self.num_other_agents, self.num_actions])\n', (23344, 23410), True, 'import numpy as np\n'), ((23449, 23489), 'numpy.multiply', 'np.multiply', (['marginal_preds', 'tiled_probs'], {}), '(marginal_preds, tiled_probs)\n', (23460, 23489), True, 'import numpy as np\n'), ((23515, 23555), 'numpy.multiply', 'np.multiply', (['marginal_probs', 'tiled_probs'], {}), '(marginal_probs, tiled_probs)\n', (23526, 23555), True, 'import numpy as np\n'), ((1902, 
1938), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_prob * advantages)'], {}), '(log_prob * advantages)\n', (1915, 1938), True, 'import tensorflow as tf\n'), ((3522, 3564), 'tensorflow.reshape', 'tf.reshape', (['others_visibility[1:, :]', '[-1]'], {}), '(others_visibility[1:, :], [-1])\n', (3532, 3564), True, 'import tensorflow as tf\n'), ((3598, 3636), 'tensorflow.cast', 'tf.cast', (['others_visibility', 'tf.float32'], {}), '(others_visibility, tf.float32)\n', (3605, 3636), True, 'import tensorflow as tf\n'), ((3664, 3697), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.ce_per_entry'], {}), '(self.ce_per_entry)\n', (3678, 3697), True, 'import tensorflow as tf\n'), ((5887, 5979), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, self.num_other_agents)', 'name': '"""others_visibility"""'}), "(tf.int32, shape=(None, self.num_other_agents), name=\n 'others_visibility')\n", (5901, 5979), True, 'import tensorflow as tf\n'), ((7596, 7650), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, ac_size]'], {'name': '"""ac"""'}), "(tf.float32, [None, ac_size], name='ac')\n", (7610, 7650), True, 'import tensorflow as tf\n'), ((10323, 10355), 'tensorflow.cast', 'tf.cast', (['self.cur_lr', 'tf.float64'], {}), '(self.cur_lr, tf.float64)\n', (10330, 10355), True, 'import tensorflow as tf\n'), ((10484, 10511), 'tensorflow.global_norm', 'tf.global_norm', (['self._grads'], {}), '(self._grads)\n', (10498, 10511), True, 'import tensorflow as tf\n'), ((10538, 10567), 'tensorflow.global_norm', 'tf.global_norm', (['self.var_list'], {}), '(self.var_list)\n', (10552, 10567), True, 'import tensorflow as tf\n'), ((10646, 10688), 'ray.rllib.utils.explained_variance.explained_variance', 'explained_variance', (['self.v_target', 'self.vf'], {}), '(self.v_target, self.vf)\n', (10664, 10688), False, 'from ray.rllib.utils.explained_variance import explained_variance\n'), ((10825, 10858), 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), '()\n', (10856, 10858), True, 'import tensorflow as tf\n'), ((14384, 14432), 'ray.rllib.evaluation.tf_policy_graph.TFPolicyGraph.extra_compute_action_fetches', 'TFPolicyGraph.extra_compute_action_fetches', (['self'], {}), '(self)\n', (14426, 14432), False, 'from ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph, LearningRateSchedule\n'), ((16029, 16044), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (16042, 16044), False, 'import pdb\n'), ((16574, 16596), 'numpy.array', 'np.array', (['prev_actions'], {}), '(prev_actions)\n', (16582, 16596), True, 'import numpy as np\n'), ((16709, 16746), 'numpy.hstack', 'np.hstack', (['(own_actions, all_actions)'], {}), '((own_actions, all_actions))\n', (16718, 16746), True, 'import numpy as np\n'), ((17084, 17117), 'numpy.array', 'np.array', (["sample_batch['actions']"], {}), "(sample_batch['actions'])\n", (17092, 17117), True, 'import numpy as np\n'), ((22731, 22758), 'numpy.tile', 'np.tile', (['[i]', '[traj_len, 1]'], {}), '([i], [traj_len, 1])\n', (22738, 22758), True, 'import numpy as np\n'), ((22796, 22833), 'numpy.hstack', 'np.hstack', (['(counters, others_actions)'], {}), '((counters, others_actions))\n', (22805, 22833), True, 'import numpy as np\n'), ((23285, 23309), 'numpy.tile', 'np.tile', (['action_probs', '(4)'], {}), '(action_probs, 4)\n', (23292, 23309), True, 'import numpy as np\n'), ((1226, 1239), 'numpy.log', 'np.log', (['(p / q)'], {}), '(p / q)\n', (1232, 1239), True, 'import numpy as np\n'), ((2013, 2029), 'tensorflow.square', 'tf.square', (['delta'], {}), '(delta)\n', (2022, 2029), True, 'import tensorflow as tf\n'), ((7414, 7437), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (7435, 7437), True, 'import tensorflow as tf\n'), ((7733, 7776), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None]'], {'name': '"""ac"""'}), "(tf.int64, [None], name='ac')\n", (7747, 7776), True, 'import tensorflow as tf\n'), ((10259, 
10275), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (10270, 10275), True, 'import tensorflow as tf\n'), ((12312, 12333), 'numpy.array', 'np.array', (['all_actions'], {}), '(all_actions)\n', (12320, 12333), True, 'import numpy as np\n'), ((12398, 12456), 'numpy.array', 'np.array', (['[e.prev_action for e in episodes[self.agent_id]]'], {}), '([e.prev_action for e in episodes[self.agent_id]])\n', (12406, 12456), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from src.models import Frame, FrameBatch, Prediction
NUM_FRAMES = 10
class PredictionTest(unittest.TestCase):
def test_should_check_if_batch_frames_equivalent_to_number_of_predictions(
self):
batch = FrameBatch(frames=[Frame(1, np.ones((1, 1)), None)], info=None)
predictions = []
scores = []
self.assertRaises(AssertionError,
lambda x=None:
Prediction.predictions_from_batch_and_lists(
batch, predictions, scores))
def test_should_check_if_batch_frames_equivalent_to_number_of_scores(self):
batch = FrameBatch(frames=[Frame(1, np.ones((1, 1)), None)], info=None)
predictions = [['A', 'B']]
scores = []
self.assertRaises(AssertionError,
lambda x=None:
Prediction.predictions_from_batch_and_lists(
batch, predictions, scores))
def test_should_check_if_batch_frames_equivalent_to_no_of_boxes_if_given(
self):
batch = FrameBatch(frames=[Frame(1, np.ones((1, 1)), None)], info=None)
predictions = [['A', 'B']]
scores = [[1, 1]]
boxes = []
self.assertRaises(AssertionError,
lambda x=None:
Prediction.predictions_from_batch_and_lists(
batch, predictions, scores,
boxes=boxes))
def test_should_return_list_of_predictions_for_each_frame_in_batch(self):
batch = FrameBatch(frames=[Frame(1, np.ones((1, 1)), None)], info=None)
predictions = [['A', 'B']]
scores = [[1, 1]]
expected = [Prediction(batch.frames[0], predictions[0], scores[0])]
actual = Prediction.predictions_from_batch_and_lists(batch,
predictions,
scores)
self.assertEqual(expected,
actual)
| [
"src.models.Prediction.predictions_from_batch_and_lists",
"src.models.Prediction",
"numpy.ones"
] | [((1843, 1914), 'src.models.Prediction.predictions_from_batch_and_lists', 'Prediction.predictions_from_batch_and_lists', (['batch', 'predictions', 'scores'], {}), '(batch, predictions, scores)\n', (1886, 1914), False, 'from src.models import Frame, FrameBatch, Prediction\n'), ((1770, 1824), 'src.models.Prediction', 'Prediction', (['batch.frames[0]', 'predictions[0]', 'scores[0]'], {}), '(batch.frames[0], predictions[0], scores[0])\n', (1780, 1824), False, 'from src.models import Frame, FrameBatch, Prediction\n'), ((483, 554), 'src.models.Prediction.predictions_from_batch_and_lists', 'Prediction.predictions_from_batch_and_lists', (['batch', 'predictions', 'scores'], {}), '(batch, predictions, scores)\n', (526, 554), False, 'from src.models import Frame, FrameBatch, Prediction\n'), ((912, 983), 'src.models.Prediction.predictions_from_batch_and_lists', 'Prediction.predictions_from_batch_and_lists', (['batch', 'predictions', 'scores'], {}), '(batch, predictions, scores)\n', (955, 983), False, 'from src.models import Frame, FrameBatch, Prediction\n'), ((1383, 1471), 'src.models.Prediction.predictions_from_batch_and_lists', 'Prediction.predictions_from_batch_and_lists', (['batch', 'predictions', 'scores'], {'boxes': 'boxes'}), '(batch, predictions, scores,\n boxes=boxes)\n', (1426, 1471), False, 'from src.models import Frame, FrameBatch, Prediction\n'), ((293, 308), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (300, 308), True, 'import numpy as np\n'), ((712, 727), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (719, 727), True, 'import numpy as np\n'), ((1158, 1173), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (1165, 1173), True, 'import numpy as np\n'), ((1653, 1668), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (1660, 1668), True, 'import numpy as np\n')] |
import numpy as np
import scipy.fft
def levy_stable(alpha: float, beta: float, size: int, mu: float = 0.0, sigma: float = 1.0) -> np.ndarray:
"""
Generate random values sampled from an alpha-stable distribution.
Notice that this method is "exact", in the sense that is derived
directly from the definition of stable variable.
:param alpha: stability parameter in (0, 2]
:param beta: skewness parameter in [-1, -1]
:param mu: location parameter in (-inf, inf)
:param sigma: scale parameter in (0, inf)
:param size: size of resulting array
"""
if alpha == 2:
return mu + np.random.standard_normal(size) * np.sqrt(2.0) * sigma # variance is 2*sigma**2 when alpha=2 (Gaussian)
# Fails for alpha exactly equal to 1.0
# but works fine for alpha infinitesimally greater or lower than 1.0
radius = 1e-15 # <<< this number is *very* small
if np.absolute(alpha - 1.0) < radius:
# So doing this will make almost exactly no difference at all
alpha = 1.0 + radius
r1 = np.random.random(size)
r2 = np.random.random(size)
pi = np.pi
a = 1.0 - alpha
b = r1 - 0.5
c = a * b * pi
e = beta * np.tan(np.pi * alpha / 2.0)
f = (-(np.cos(c) + e * np.sin(c)) / (np.log(r2) * np.cos(b * pi))) ** (a / alpha)
g = np.tan(pi * b / 2.0)
h = np.tan(c / 2.0)
i = 1.0 - g ** 2.0
j = f * (2.0 * (g - h) * (g * h + 1.0) - (h * i - 2.0 * g) * e * 2.0 * h)
k = j / (i * (h ** 2.0 + 1.0)) + e * (f - 1.0)
return mu + sigma * k
def truncated_levy_stable(trunc: float, alpha: float, beta: float, size: int, mu: float = 0.0,
sigma: float = 1.0) -> np.ndarray:
"""
Create the empirical levy stable distribution with extremes truncated.
:param trunc: absolute value at which to truncate distribution. (truncation is symmetric)
:param alpha: stability parameter in (0, 2]
:param beta: skewness parameter in [-1, -1]
:param mu: location parameter in (-inf, inf)
:param sigma: scale parameter in (0, inf)
:param size: size of resulting array
"""
z = levy_stable(alpha=alpha, beta=beta, mu=mu, sigma=sigma, size=size)
too_big = np.where(np.abs(z) > trunc)[0]
while too_big.size > 0:
z[too_big] = levy_stable(alpha=alpha, beta=beta, mu=mu, sigma=sigma, size=too_big.size)
too_big_remaining = np.where(np.abs(z[too_big]) > trunc)[0]
too_big = too_big[too_big_remaining]
return z
def memory_efficient_truncated_levy_stable(trunc: float, alpha: float, beta: float, size: int,
mu: float = 0.0, sigma: float = 1.0, steps: int = 256) -> np.ndarray:
"""
Create the empirical levy stable distribution with extremes truncated.
To prevent large inefficient allocations of memory the distribution is generated in chunks.
:param trunc: absolute value at which to truncate distribution. (truncation is symmetric)
:param alpha: stability parameter in (0, 2]
:param beta: skewness parameter in [-1, -1]
:param mu: location parameter in (-inf, inf)
:param sigma: scale parameter in (0, inf)
:param size: size of resulting array
:param steps: number of chunks to generated the final array
"""
step_length = size // steps
remaining = size % steps
out = np.zeros(size)
for i in range(steps):
out[i * step_length:(i + 1) * step_length] = truncated_levy_stable(trunc=trunc, alpha=alpha, beta=beta,
size=step_length, mu=mu, sigma=sigma)
if remaining > 0:
out[-remaining:] = truncated_levy_stable(trunc=trunc, alpha=alpha, beta=beta,
size=remaining, mu=mu, sigma=sigma)
return out
def flm(H: float, alpha: float, N: int, trunc: float, scale: float = 1, C: float = 1, m: int = 256, M: int = 6000,
steps: int = 256) -> np.ndarray:
"""
Generate realizations of fractional levy motion, also know as linear fractional stable motion.
Please ensure that m * ( M + N ) is a power of 2 because ffts are most efficient then.
:param H: Hurst parameter. Also known as the self-similarity parameter
:param alpha: the tail-exponent of the stable distribution (between 0 and 2). Lower alpha = heavier tails
:param m: 1/m is the mesh size
:param M: kernel cut-off parameter
:param C: normalization parameter
:param N: size of sample
:param scale: scale parameter of Levy distribution
:param trunc: truncate levy distr at +/-trunc
:param steps: break down generation of levy stable samples into steps number of batches
"""
Na = m * (M + N)
if alpha < 0 or alpha > 2:
raise ValueError('Alpha must be greater than 0 and less than or equal to 2.')
mh = 1 / m
d = H - 1 / alpha
t0 = np.linspace(mh, 1, m) ** d
t1 = np.linspace(1 + mh, M, int((M - (1 + mh)) / mh) + 1)
t1 = t1 ** d - (t1 - 1) ** d
A = mh ** (1 / alpha) * np.concatenate((t0, t1))
C = C * (np.abs(A) ** alpha).sum() ** (-1 / alpha)
A *= C
A = scipy.fft.fft(A, n=Na)
Z = memory_efficient_truncated_levy_stable(trunc=trunc, alpha=alpha, beta=0, size=Na, mu=0, sigma=scale,
steps=steps)
Z = scipy.fft.fft(Z, Na)
w = np.real(scipy.fft.ifft(Z * A, Na))
return w[0:N * m:m]
| [
"numpy.absolute",
"numpy.abs",
"numpy.log",
"numpy.zeros",
"numpy.tan",
"numpy.random.random",
"numpy.random.standard_normal",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.sqrt"
] | [((1052, 1074), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (1068, 1074), True, 'import numpy as np\n'), ((1084, 1106), 'numpy.random.random', 'np.random.random', (['size'], {}), '(size)\n', (1100, 1106), True, 'import numpy as np\n'), ((1316, 1336), 'numpy.tan', 'np.tan', (['(pi * b / 2.0)'], {}), '(pi * b / 2.0)\n', (1322, 1336), True, 'import numpy as np\n'), ((1345, 1360), 'numpy.tan', 'np.tan', (['(c / 2.0)'], {}), '(c / 2.0)\n', (1351, 1360), True, 'import numpy as np\n'), ((3350, 3364), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (3358, 3364), True, 'import numpy as np\n'), ((908, 932), 'numpy.absolute', 'np.absolute', (['(alpha - 1.0)'], {}), '(alpha - 1.0)\n', (919, 932), True, 'import numpy as np\n'), ((1194, 1221), 'numpy.tan', 'np.tan', (['(np.pi * alpha / 2.0)'], {}), '(np.pi * alpha / 2.0)\n', (1200, 1221), True, 'import numpy as np\n'), ((4902, 4923), 'numpy.linspace', 'np.linspace', (['mh', '(1)', 'm'], {}), '(mh, 1, m)\n', (4913, 4923), True, 'import numpy as np\n'), ((5052, 5076), 'numpy.concatenate', 'np.concatenate', (['(t0, t1)'], {}), '((t0, t1))\n', (5066, 5076), True, 'import numpy as np\n'), ((1263, 1273), 'numpy.log', 'np.log', (['r2'], {}), '(r2)\n', (1269, 1273), True, 'import numpy as np\n'), ((1276, 1290), 'numpy.cos', 'np.cos', (['(b * pi)'], {}), '(b * pi)\n', (1282, 1290), True, 'import numpy as np\n'), ((2216, 2225), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (2222, 2225), True, 'import numpy as np\n'), ((625, 656), 'numpy.random.standard_normal', 'np.random.standard_normal', (['size'], {}), '(size)\n', (650, 656), True, 'import numpy as np\n'), ((659, 671), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (666, 671), True, 'import numpy as np\n'), ((1233, 1242), 'numpy.cos', 'np.cos', (['c'], {}), '(c)\n', (1239, 1242), True, 'import numpy as np\n'), ((2400, 2418), 'numpy.abs', 'np.abs', (['z[too_big]'], {}), '(z[too_big])\n', (2406, 2418), True, 'import numpy as np\n'), ((1249, 
1258), 'numpy.sin', 'np.sin', (['c'], {}), '(c)\n', (1255, 1258), True, 'import numpy as np\n'), ((5090, 5099), 'numpy.abs', 'np.abs', (['A'], {}), '(A)\n', (5096, 5099), True, 'import numpy as np\n')] |
import cv2
import pyautogui
from PIL import ImageGrab, Image
import numpy as np
import platform
import psutil
import os
import time
PLATFORM = platform.system()
if PLATFORM:
import win32gui
from winguiauto import findTopWindow
def windowEnumerationHandler(hwnd, top_windows):
top_windows.append((hwnd, win32gui.GetWindowText(hwnd)))
def set_top_window(title):
top_windows = []
win32gui.EnumWindows(windowEnumerationHandler, top_windows)
for i in top_windows:
if title in i[1].lower():
win32gui.ShowWindow(i[0],5)
win32gui.SetForegroundWindow(i[0])
return True
else:
return False
def find_lushi_window(title):
hwnd = findTopWindow(title)
rect = win32gui.GetWindowPlacement(hwnd)[-1]
image = ImageGrab.grab(rect)
image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2GRAY)
return rect, image
# elif platform.system() == 'Darwin':
# import psutil
# from Cocoa import NSRunningApplication, NSApplicationActivateIgnoringOtherApps
#
# def find_lushi_window(title):
# for p in psutil.process_iter():
# if p.name == title:
# pid = p.pid
# app = NSRunningApplication.runningApplicationWithProcessIdentifier_(pid)
# app.activateWithOptions_(NSApplicationActivateIgnoringOtherApps)
# else:
# raise ValueError("Hearthstone is not running")
else:
raise ValueError(f"Plafform {platform.platform()} is not supported yet")
def find_icon_location(lushi, icon, confidence):
result = cv2.matchTemplate(lushi, icon, cv2.TM_CCOEFF_NORMED)
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(result)
if maxVal > confidence:
(startX, startY) = maxLoc
endX = startX + icon.shape[1]
endY = startY + icon.shape[0]
return True, (startX+endX)//2, (startY+endY)//2, maxVal
else:
return False, None, None, maxVal
def find_relative_loc(title='炉石传说'):
pos = pyautogui.position()
rect, _ = find_lushi_window(title)
print((pos[0]-rect[0], pos[1]-rect[1]))
def move2loc(x, y, title='炉石传说'):
rect, _ = find_lushi_window(title)
loc = (x + rect[0], y + rect[1])
pyautogui.moveTo(loc)
def restart_game(lang):
if lang == 'eng':
bn = 'Battle.net'
hs = 'Hearthstone'
elif lang == 'chs':
bn = "战网"
hs = "炉石传说"
else:
raise ValueError(f"Language {lang} not supported")
icon_path = os.path.join(f'imgs_{lang}', 'icons', 'start_game_icon.png')
icon = cv2.imread(icon_path)
icon = cv2.cvtColor(np.array(icon), cv2.COLOR_BGR2GRAY)
for p in psutil.process_iter():
if p.name() == 'Hearthstone.exe':
p.kill()
print("hearthstone killed")
time.sleep(10)
bn_found = set_top_window(bn)
if bn_found:
while True:
image = ImageGrab.grab()
image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2GRAY)
success, x, y, conf = find_icon_location(image, icon, 0.9)
if success:
pyautogui.click(x, y)
time.sleep(5)
set_top_window(hs)
break
if __name__ == "__main__":
restart_game("chs") | [
"win32gui.SetForegroundWindow",
"cv2.minMaxLoc",
"os.path.join",
"cv2.matchTemplate",
"win32gui.ShowWindow",
"psutil.process_iter",
"win32gui.GetWindowText",
"winguiauto.findTopWindow",
"win32gui.EnumWindows",
"pyautogui.position",
"PIL.ImageGrab.grab",
"time.sleep",
"pyautogui.click",
"pl... | [((144, 161), 'platform.system', 'platform.system', ([], {}), '()\n', (159, 161), False, 'import platform\n'), ((1652, 1704), 'cv2.matchTemplate', 'cv2.matchTemplate', (['lushi', 'icon', 'cv2.TM_CCOEFF_NORMED'], {}), '(lushi, icon, cv2.TM_CCOEFF_NORMED)\n', (1669, 1704), False, 'import cv2\n'), ((1744, 1765), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['result'], {}), '(result)\n', (1757, 1765), False, 'import cv2\n'), ((2067, 2087), 'pyautogui.position', 'pyautogui.position', ([], {}), '()\n', (2085, 2087), False, 'import pyautogui\n'), ((2287, 2308), 'pyautogui.moveTo', 'pyautogui.moveTo', (['loc'], {}), '(loc)\n', (2303, 2308), False, 'import pyautogui\n'), ((2557, 2617), 'os.path.join', 'os.path.join', (['f"""imgs_{lang}"""', '"""icons"""', '"""start_game_icon.png"""'], {}), "(f'imgs_{lang}', 'icons', 'start_game_icon.png')\n", (2569, 2617), False, 'import os\n'), ((2629, 2650), 'cv2.imread', 'cv2.imread', (['icon_path'], {}), '(icon_path)\n', (2639, 2650), False, 'import cv2\n'), ((2724, 2745), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (2743, 2745), False, 'import psutil\n'), ((2854, 2868), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2864, 2868), False, 'import time\n'), ((421, 480), 'win32gui.EnumWindows', 'win32gui.EnumWindows', (['windowEnumerationHandler', 'top_windows'], {}), '(windowEnumerationHandler, top_windows)\n', (441, 480), False, 'import win32gui\n'), ((761, 781), 'winguiauto.findTopWindow', 'findTopWindow', (['title'], {}), '(title)\n', (774, 781), False, 'from winguiauto import findTopWindow\n'), ((851, 871), 'PIL.ImageGrab.grab', 'ImageGrab.grab', (['rect'], {}), '(rect)\n', (865, 871), False, 'from PIL import ImageGrab, Image\n'), ((2675, 2689), 'numpy.array', 'np.array', (['icon'], {}), '(icon)\n', (2683, 2689), True, 'import numpy as np\n'), ((797, 830), 'win32gui.GetWindowPlacement', 'win32gui.GetWindowPlacement', (['hwnd'], {}), '(hwnd)\n', (824, 830), False, 'import win32gui\n'), ((901, 916), 
'numpy.array', 'np.array', (['image'], {}), '(image)\n', (909, 916), True, 'import numpy as np\n'), ((2960, 2976), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {}), '()\n', (2974, 2976), False, 'from PIL import ImageGrab, Image\n'), ((325, 353), 'win32gui.GetWindowText', 'win32gui.GetWindowText', (['hwnd'], {}), '(hwnd)\n', (347, 353), False, 'import win32gui\n'), ((565, 593), 'win32gui.ShowWindow', 'win32gui.ShowWindow', (['i[0]', '(5)'], {}), '(i[0], 5)\n', (584, 593), False, 'import win32gui\n'), ((609, 643), 'win32gui.SetForegroundWindow', 'win32gui.SetForegroundWindow', (['i[0]'], {}), '(i[0])\n', (637, 643), False, 'import win32gui\n'), ((1545, 1564), 'platform.platform', 'platform.platform', ([], {}), '()\n', (1562, 1564), False, 'import platform\n'), ((3010, 3025), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3018, 3025), True, 'import numpy as np\n'), ((3158, 3179), 'pyautogui.click', 'pyautogui.click', (['x', 'y'], {}), '(x, y)\n', (3173, 3179), False, 'import pyautogui\n'), ((3196, 3209), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3206, 3209), False, 'import time\n')] |
# future
from __future__ import annotations
# stdlib
from collections.abc import Sequence
import os
from pathlib import Path
from typing import Any
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
import capnp
from nacl.signing import VerifyKey
import numpy as np
# relative
from ....core.adp.entity import Entity
from ....core.adp.entity_list import EntityList
from ....lib.numpy.array import arrow_deserialize as numpy_deserialize
from ....lib.numpy.array import arrow_serialize as numpy_serialize
from ...adp.vm_private_scalar_manager import VirtualMachinePrivateScalarManager
from ...common.serde.deserialize import CAPNP_END_MAGIC_HEADER
from ...common.serde.deserialize import CAPNP_START_MAGIC_HEADER
from ...common.serde.serializable import serializable
from ...common.uid import UID
from ...pointer.pointer import Pointer
from ..ancestors import AutogradTensorAncestor
from ..lazy_repeat_array import lazyrepeatarray
from ..passthrough import AcceptableSimpleType # type: ignore
from ..passthrough import PassthroughTensor # type: ignore
from ..passthrough import SupportedChainType # type: ignore
from ..passthrough import is_acceptable_simple_type # type: ignore
from .adp_tensor import ADPTensor
from .initial_gamma import InitialGammaTensor
from .initial_gamma import IntermediateGammaTensor
from .single_entity_phi import SingleEntityPhiTensor
@serializable(recursive_serde=True)
class TensorWrappedNDimEntityPhiTensorPointer(Pointer):
__name__ = "TensorWrappedNDimEntityPhiTensorPointer"
__module__ = "syft.core.tensor.autodp.ndim_entity_phi"
__attr_allowlist__ = ("min_vals", "max_vals", "entities")
# TODO :should create serialization for Entity List
def __init__(
self,
entities: EntityList,
min_vals: np.typing.ArrayLike,
max_vals: np.typing.ArrayLike,
client: Any,
# scalar_manager: Optional[VirtualMachinePrivateScalarManager] = None,
id_at_location: Optional[UID] = None,
object_type: str = "",
tags: Optional[List[str]] = None,
description: str = "",
public_shape: Optional[Tuple[int, ...]] = None,
public_dtype: Optional[np.dtype] = None,
):
super().__init__(
client=client,
id_at_location=id_at_location,
object_type=object_type,
tags=tags,
description=description,
)
self.min_vals = min_vals
self.max_vals = max_vals
self.entities = entities
# self.scalar_manager = scalar_manager
self.public_shape = public_shape
self.public_dtype = public_dtype
@serializable(capnp_bytes=True)
class NDimEntityPhiTensor(PassthroughTensor, AutogradTensorAncestor, ADPTensor):
PointerClassOverride = TensorWrappedNDimEntityPhiTensorPointer
__attr_allowlist__ = ["child", "min_vals", "max_vals", "entities"]
__slots__ = (
"child",
"min_vals",
"max_vals",
"entities",
)
def __init__(
self,
child: Sequence,
entities: Union[List[Entity], EntityList],
min_vals: np.ndarray,
max_vals: np.ndarray,
row_type: SingleEntityPhiTensor = SingleEntityPhiTensor, # type: ignore
) -> None:
# child = the actual private data
super().__init__(child)
# lazyrepeatarray matching the shape of child
if not isinstance(min_vals, lazyrepeatarray):
min_vals = lazyrepeatarray(data=min_vals, shape=child.shape) # type: ignore
if not isinstance(max_vals, lazyrepeatarray):
max_vals = lazyrepeatarray(data=max_vals, shape=child.shape) # type: ignore
self.min_vals = min_vals
self.max_vals = max_vals
if not isinstance(entities, EntityList):
entities = EntityList.from_objs(entities)
self.entities = entities
# if scalar_manager is None:
# self.scalar_manager = VirtualMachinePrivateScalarManager()
# else:
# self.scalar_manager = scalar_manager
@staticmethod
def from_rows(rows: Sequence) -> NDimEntityPhiTensor:
if len(rows) < 1 or not isinstance(rows[0], SingleEntityPhiTensor):
raise Exception(
"NDimEntityPhiTensor.from_rows requires a list of SingleEntityPhiTensors"
)
# create lazyrepeatarrays of the first element
first_row = rows[0]
min_vals = lazyrepeatarray(
data=first_row.min_vals,
shape=tuple([len(rows)] + list(first_row.min_vals.shape)),
)
max_vals = lazyrepeatarray(
data=first_row.max_vals,
shape=tuple([len(rows)] + list(first_row.max_vals.shape)),
)
# collect entities and children into numpy arrays
entity_list = []
child_list = []
for row in rows:
entity_list.append(row.entity)
child_list.append(row.child)
entities = EntityList.from_objs(entities=entity_list)
child = np.stack(child_list)
# use new constructor
return NDimEntityPhiTensor(
child=child,
min_vals=min_vals,
max_vals=max_vals,
entities=entities,
)
def init_pointer(
self,
client: Any,
id_at_location: Optional[UID] = None,
object_type: str = "",
tags: Optional[List[str]] = None,
description: str = "",
) -> TensorWrappedNDimEntityPhiTensorPointer:
return TensorWrappedNDimEntityPhiTensorPointer(
# Arguments specifically for SEPhiTensor
entities=self.entities,
min_vals=self.min_vals,
max_vals=self.max_vals,
# scalar_manager=self.scalar_manager,
# Arguments required for a Pointer to work
client=client,
id_at_location=id_at_location,
object_type=object_type,
tags=tags,
description=description,
)
@property
def gamma(self) -> InitialGammaTensor:
"""Property to cast this tensor into a GammaTensor"""
return self.create_gamma()
def copy(self, order: Optional[str] = "K") -> NDimEntityPhiTensor:
"""Return copy of the given object"""
return NDimEntityPhiTensor(
child=self.child.copy(order=order),
min_vals=self.min_vals.copy(order=order),
max_vals=self.max_vals.copy(order=order),
entities=self.entities.copy(order=order),
)
def all(self) -> bool:
return self.child.all()
def any(self) -> bool:
return self.child.any()
def copy_with(self, child: np.ndarray) -> NDimEntityPhiTensor:
new_tensor = self.copy()
new_tensor.child = child
return new_tensor
def create_gamma(
self, scalar_manager: Optional[VirtualMachinePrivateScalarManager] = None
) -> InitialGammaTensor:
"""Return a new Gamma tensor based on this phi tensor"""
# if scalar_manager is None:
# scalar_manager = self.scalar_manager
# Gamma expects an entity for each scalar
# entities = np.array([self.entity] * np.array(self.child.shape).prod()).reshape(
# self.shape
# )
# TODO: update InitialGammaTensor to handle EntityList
return InitialGammaTensor(
values=self.child,
min_vals=self.min_vals,
max_vals=self.max_vals,
entities=self.entities,
# scalar_manager=scalar_manager,
)
def publish(
self, acc: Any, sigma: float, user_key: VerifyKey
) -> AcceptableSimpleType:
print("PUBLISHING TO GAMMA:")
print(self.child)
return self.gamma.publish(acc=acc, sigma=sigma, user_key=user_key)
@property
def value(self) -> np.ndarray:
return self.child
def astype(self, np_type: np.dtype) -> NDimEntityPhiTensor:
return self.__class__(
child=self.child.astype(np_type),
entities=self.entities,
min_vals=self.min_vals.astype(np_type),
max_vals=self.max_vals.astype(np_type),
# scalar_manager=self.scalar_manager,
)
@property
def shape(self) -> Tuple[Any, ...]:
return self.child.shape
def __repr__(self) -> str:
"""Pretty print some information, optimized for Jupyter notebook viewing."""
return (
f"{self.__class__.__name__}(child={self.child.shape}, "
+ f"min_vals={self.min_vals}, max_vals={self.max_vals})"
)
def __eq__(self, other: Any) -> Union[NDimEntityPhiTensor, IntermediateGammaTensor]:
# TODO: what about entities and min / max values?
if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
gamma_output = False
if is_acceptable_simple_type(other):
result = self.child == other
else:
# check entities match, if they dont gamma_output = True
#
result = self.child == other.child
if isinstance(result, InitialGammaTensor):
gamma_output = True
if not gamma_output:
# min_vals=self.min_vals * 0.0,
# max_vals=self.max_vals * 0.0 + 1.0,
return self.copy_with(child=result)
else:
return self.copy_with(child=result).gamma
else:
raise Exception(
"Tensor dims do not match for __eq__: "
+ f"{len(self.child)} != {len(other.child)}"
)
def __add__(
self, other: SupportedChainType
) -> Union[NDimEntityPhiTensor, IntermediateGammaTensor]:
# if the tensor being added is also private
if isinstance(other, NDimEntityPhiTensor):
if self.entities != other.entities:
return self.gamma + other.gamma
return NDimEntityPhiTensor(
child=self.child + other.child,
min_vals=self.min_vals + other.min_vals,
max_vals=self.max_vals + other.max_vals,
entities=self.entities,
# scalar_manager=self.scalar_manager,
)
# if the tensor being added is a public tensor / int / float / etc.
elif is_acceptable_simple_type(other):
return NDimEntityPhiTensor(
child=self.child + other,
min_vals=self.min_vals + other,
max_vals=self.max_vals + other,
entities=self.entities,
# scalar_manager=self.scalar_manager,
)
elif isinstance(other, IntermediateGammaTensor):
return self.gamma + other
else:
print("Type is unsupported:" + str(type(other)))
raise NotImplementedError
@staticmethod
def get_capnp_schema() -> type:
here = os.path.dirname(__file__)
root_dir = Path(here) / ".." / ".." / ".." / ".." / ".." / "capnp"
return capnp.load(str(root_dir / "ndept.capnp"))
@staticmethod
def chunk_bytes(
data: Sequence, field_name: str, builder: capnp.lib.capnp._DynamicStructBuilder
) -> List:
CHUNK_SIZE = int(5.12e8) # capnp max for a List(Data) field
list_size = len(data) // CHUNK_SIZE + 1
data_lst = builder.init(field_name, list_size)
idx = 0
while len(data) > CHUNK_SIZE:
data_lst[idx] = data[:CHUNK_SIZE]
data = data[CHUNK_SIZE:]
idx += 1
else:
data_lst[0] = data
return data_lst
@staticmethod
def combine_bytes(capnp_list: List[bytes]) -> bytes:
# TODO: make sure this doesn't copy, perhaps allocate a fixed size buffer
# and move the bytes into it as we go
bytes_value = b""
for value in capnp_list:
bytes_value += value
return bytes_value
@staticmethod
def serde_magic_header() -> str:
return (
f"{CAPNP_START_MAGIC_HEADER}"
+ f"{NDimEntityPhiTensor.__name__}"
+ f"{CAPNP_END_MAGIC_HEADER}"
)
def _object2bytes(self) -> bytes:
schema = NDimEntityPhiTensor.get_capnp_schema()
rows, rows_size = numpy_serialize(self.child, get_bytes=True)
min_vals, min_vals_size = numpy_serialize(self.min_vals.data, get_bytes=True)
max_vals, max_vals_size = numpy_serialize(self.max_vals.data, get_bytes=True)
entities_indexed, entities_indexed_size = numpy_serialize(
self.entities.entities_indexed, get_bytes=True
)
one_hot_lookup = self.entities.one_hot_lookup
ndept_struct: capnp.lib.capnp._StructModule = schema.NDEPT # type: ignore
ndept_msg = ndept_struct.new_message()
metadata_schema = ndept_struct.TensorMetadata
child_metadata = metadata_schema.new_message()
min_vals_metadata = metadata_schema.new_message()
max_vals_metadata = metadata_schema.new_message()
entities_metadata = metadata_schema.new_message()
# this is how we dispatch correct deserialization of bytes
ndept_msg.magicHeader = NDimEntityPhiTensor.serde_magic_header()
NDimEntityPhiTensor.chunk_bytes(rows, "child", ndept_msg)
child_metadata.dtype = str(self.child.dtype)
child_metadata.decompressedSize = rows_size
ndept_msg.childMetadata = child_metadata
NDimEntityPhiTensor.chunk_bytes(min_vals, "minVals", ndept_msg)
min_vals_metadata.dtype = str(self.min_vals.data.dtype)
min_vals_metadata.decompressedSize = min_vals_size
ndept_msg.minValsMetadata = min_vals_metadata
NDimEntityPhiTensor.chunk_bytes(max_vals, "maxVals", ndept_msg)
max_vals_metadata.dtype = str(self.max_vals.data.dtype)
max_vals_metadata.decompressedSize = max_vals_size
ndept_msg.maxValsMetadata = max_vals_metadata
NDimEntityPhiTensor.chunk_bytes(entities_indexed, "entitiesIndexed", ndept_msg)
entities_metadata.dtype = str(self.entities.entities_indexed.dtype)
entities_metadata.decompressedSize = entities_indexed_size
ndept_msg.entitiesIndexedMetadata = entities_metadata
oneHotLookupList = ndept_msg.init("oneHotLookup", len(one_hot_lookup))
for i, entity in enumerate(one_hot_lookup):
oneHotLookupList[i] = (
entity if not getattr(entity, "name", None) else entity.name # type: ignore
)
return ndept_msg.to_bytes()
    @staticmethod
    def _bytes2object(buf: bytes) -> NDimEntityPhiTensor:
        """Deserialize a capnp-encoded buffer back into an NDimEntityPhiTensor.

        Inverse of the serialization routine above: each payload (child data,
        min/max bounds, entity index) was chunked into bytes alongside a
        metadata struct recording its dtype and decompressed size, which is
        used here to reconstruct the numpy arrays.
        """
        schema = NDimEntityPhiTensor.get_capnp_schema()
        ndept_struct: capnp.lib.capnp._StructModule = schema.NDEPT  # type: ignore
        ndept_msg = ndept_struct.from_bytes(buf)
        # Raw tensor values.
        child_metadata = ndept_msg.childMetadata
        child = numpy_deserialize(
            NDimEntityPhiTensor.combine_bytes(ndept_msg.child),
            child_metadata.decompressedSize,
            child_metadata.dtype,
        )
        # Lower bounds, wrapped as a lazily-broadcast array of the child's shape.
        min_vals_metadata = ndept_msg.minValsMetadata
        min_vals = lazyrepeatarray(
            numpy_deserialize(
                NDimEntityPhiTensor.combine_bytes(ndept_msg.minVals),
                min_vals_metadata.decompressedSize,
                min_vals_metadata.dtype,
            ),
            child.shape,
        )
        # Upper bounds, same treatment as min_vals.
        max_vals_metadata = ndept_msg.maxValsMetadata
        max_vals = lazyrepeatarray(
            numpy_deserialize(
                NDimEntityPhiTensor.combine_bytes(ndept_msg.maxVals),
                max_vals_metadata.decompressedSize,
                max_vals_metadata.dtype,
            ),
            child.shape,
        )
        # Per-element entity indices plus the index -> name lookup table.
        entities_metadata = ndept_msg.entitiesIndexedMetadata
        entities_indexed = numpy_deserialize(
            NDimEntityPhiTensor.combine_bytes(ndept_msg.entitiesIndexed),
            entities_metadata.decompressedSize,
            entities_metadata.dtype,
        )
        one_hot_lookup = np.array(ndept_msg.oneHotLookup)
        entity_list = EntityList(one_hot_lookup, entities_indexed)
        return NDimEntityPhiTensor(
            child=child, min_vals=min_vals, max_vals=max_vals, entities=entity_list
        )
| [
"numpy.stack",
"os.path.dirname",
"numpy.array",
"pathlib.Path"
] | [((5085, 5105), 'numpy.stack', 'np.stack', (['child_list'], {}), '(child_list)\n', (5093, 5105), True, 'import numpy as np\n'), ((11034, 11059), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (11049, 11059), False, 'import os\n'), ((16159, 16191), 'numpy.array', 'np.array', (['ndept_msg.oneHotLookup'], {}), '(ndept_msg.oneHotLookup)\n', (16167, 16191), True, 'import numpy as np\n'), ((11079, 11089), 'pathlib.Path', 'Path', (['here'], {}), '(here)\n', (11083, 11089), False, 'from pathlib import Path\n')] |
import math
import cv2
import numpy as np
from SolarPaperRecord import SolarPaperRecord
class SolarMediumPaperRecord(SolarPaperRecord):
    """Medium-format solar paper record.

    Builds affine transforms between pixel coordinates on the scanned record
    image and value coordinates (burn-hole distance ``r``, minutes elapsed
    since the record start time).

    Changes vs. original: ``== None`` replaced with the ``is None`` identity
    check and stray trailing semicolons / redundant line continuations
    removed; behaviour is otherwise unchanged.
    """

    # Lazily-created shared instance holding the standard reference record.
    paper_record = None

    def __init__(self, image=None, file_name=None, name=None):
        super(SolarMediumPaperRecord, self).__init__(image, file_name, name)
        self.image_to_value = None  # 2x3 matrix mapping image coords to value coords
        self.value_to_image = None  # 2x3 matrix mapping value coords to image coords
        self.image_r = 0.
        self.image_x0 = 1253.  # estimated centre position at 06:00
        self.image_y0 = 1899.
        # Direction vector along the time axis, scaled so that projecting a
        # point onto it yields minutes (the full sweep spans 12 h = 720 min).
        self.min_v = (577. - 1253., 408. - 1899.)
        self.v_len = math.sqrt(self.min_v[0] * self.min_v[0] + self.min_v[1] * self.min_v[1])
        self.min_v = (self.min_v[0] / self.v_len / self.v_len * 12 * 60,
                      self.min_v[1] / self.v_len / self.v_len * 12 * 60)
        # Unit vector along the burn-hole (radial) direction, perpendicular
        # to the time-axis vector above.
        self.r_v = (408. - 1899., 1253. - 577.)
        self.r_v = (self.r_v[0] / self.v_len, self.r_v[1] / self.v_len)
        # Homogeneous transform (x, y) => (r, minutes); first row is the
        # radial component, second the minutes component.
        self.v_matrix = np.array([
            [self.r_v[0], self.r_v[1], -(self.image_x0 * self.r_v[0] + self.image_y0 * self.r_v[1])],
            [self.min_v[0], self.min_v[1], -(self.image_x0 * self.min_v[0] + self.image_y0 * self.min_v[1])],
            [0, 0, 1]])
        self.inv_v_matrix = np.linalg.inv(self.v_matrix)
        self.min_r = -88  # minimum allowed burn-hole distance from the centre
        self.max_r = 48.75448697  # maximum allowed burn-hole distance from the centre
        self.value_region = (self.min_r, 0, self.max_r, 12 * 60)  # crop region in value coords
        self.v_t0 = 6  # start time of the record (used for text output)

    def standard(self):
        """Return the shared standard record, loading it on first access."""
        if SolarMediumPaperRecord.paper_record is None:  # `is None`, not `== None`
            SolarMediumPaperRecord.paper_record = SolarMediumPaperRecord(file_name='../sample/solar_medium_00.jpg')
            print('create SolarMediumPaperRecord standard')
        return SolarMediumPaperRecord.paper_record

    # Convert a coordinate on the image to a value coordinate (r, minutes),
    # or None if no image transform is available.
    def value_coordiante(self, image_coordiante):
        self.create_image_value_matrix()
        if self.image_matrix is None:
            return None
        # Projective transform: raw image -> rectified image -> value space.
        v0 = self.image_matrix @ np.array([image_coordiante[0], image_coordiante[1], 1])
        v1 = self.v_matrix @ v0
        return v1

    # Convert a value coordinate back to a coordinate on the image,
    # or None if no image transform is available.
    def image_coordiante(self, value_coordiante):
        self.create_image_value_matrix()
        if self.image_matrix is None:
            return None
        v0 = self.inv_v_matrix @ np.array([value_coordiante[0], value_coordiante[1], 1])
        v1 = self.image_matrix_inv @ v0
        return v1
| [
"numpy.linalg.inv",
"numpy.array",
"math.sqrt"
] | [((589, 661), 'math.sqrt', 'math.sqrt', (['(self.min_v[0] * self.min_v[0] + self.min_v[1] * self.min_v[1])'], {}), '(self.min_v[0] * self.min_v[0] + self.min_v[1] * self.min_v[1])\n', (598, 661), False, 'import math\n'), ((989, 1208), 'numpy.array', 'np.array', (['[[self.r_v[0], self.r_v[1], -(self.image_x0 * self.r_v[0] + self.image_y0 *\n self.r_v[1])], [self.min_v[0], self.min_v[1], -(self.image_x0 * self.\n min_v[0] + self.image_y0 * self.min_v[1])], [0, 0, 1]]'], {}), '([[self.r_v[0], self.r_v[1], -(self.image_x0 * self.r_v[0] + self.\n image_y0 * self.r_v[1])], [self.min_v[0], self.min_v[1], -(self.\n image_x0 * self.min_v[0] + self.image_y0 * self.min_v[1])], [0, 0, 1]])\n', (997, 1208), True, 'import numpy as np\n'), ((1249, 1277), 'numpy.linalg.inv', 'np.linalg.inv', (['self.v_matrix'], {}), '(self.v_matrix)\n', (1262, 1277), True, 'import numpy as np\n'), ((1972, 2027), 'numpy.array', 'np.array', (['[image_coordiante[0], image_coordiante[1], 1]'], {}), '([image_coordiante[0], image_coordiante[1], 1])\n', (1980, 2027), True, 'import numpy as np\n'), ((2256, 2311), 'numpy.array', 'np.array', (['[value_coordiante[0], value_coordiante[1], 1]'], {}), '([value_coordiante[0], value_coordiante[1], 1])\n', (2264, 2311), True, 'import numpy as np\n')] |
import numpy as np
import soccer3d
from soccer3d.tracking import Detection, find_tracks, smooth_trajectory, convert_to_MOT
from os.path import join
import utils.camera as cam_utils
import utils.misc as misc_utils
import json
import argparse
from tqdm import tqdm
import glog
import matplotlib.pyplot as plt
# Command-line options: dataset location and the pixel-distance threshold
# used when merging tracklets.
parser = argparse.ArgumentParser(description='Calibrate a soccer video')
parser.add_argument('--path_to_data', default='/home/krematas/Mountpoints/grail/data/barcelona', help='path')
parser.add_argument('--dist_thresh', type=int, default=50, help='Distance threshold for merging tracklets (in pixels)')
opt, _ = parser.parse_known_args()

# Load video metadata and discard low-confidence pose detections.
db = soccer3d.YoutubeVideo(opt.path_to_data)
db.digest_metadata()
db.refine_poses(keypoint_thresh=7, score_thresh=0.4, neck_thresh=0.4)

# ----------------------------------------------------------------------------------------------------------------------
# Gather all souces, boxes
glog.info('Tracking players')

# Wrap every pose of every frame in a Detection; `dets_per_frame` groups them
# frame-by-frame for the tracker, `dets` keeps a flat list of all detections.
dets = []
dets_per_frame = []
for i in range(db.n_frames):
    basename = db.frame_basenames[i]
    poses = db.poses[basename]
    __detection_list = []
    for j in range(len(poses)):
        cur_det = Detection(poses[j], basename, i)
        # Mesh name encodes "<frame basename>_<detection index>".
        cur_det.mesh_name = '{0}_{1:05d}'.format(basename, j)
        __detection_list.append(cur_det)
        dets.append(cur_det)
    dets_per_frame.append(__detection_list)

# Link per-frame detections into tracks using the pixel-distance threshold.
new_tracklets = find_tracks(dets_per_frame, db.frame_basenames, dist_thresh=opt.dist_thresh)

# ----------------------------------------------------------------------------------------------------------------------
# Save tracks (MOT-format matrix) and dump a visualization video.
mot_matrix = convert_to_MOT(new_tracklets, db.n_frames)
db.dump_video('tracks', scale=2, mot_tracks=mot_matrix)

# ----------------------------------------------------------------------------------------------------------------------
# 3DTrajectory smoothing
glog.info('Smoothing 3D trajectories')

# Per-frame output: list of {mesh name, smoothed x/y/z} dicts.
data_out = {i: [] for i in db.frame_basenames}

fig = plt.figure()
ax = fig.add_subplot(111)
for i in tqdm(range(len(new_tracklets))):
    # Lift the neck keypoint (row 1 of the lifted keypoints) of every
    # detection in the track to 3D using the per-frame camera calibration.
    neck_pos = []
    for j in range(len(new_tracklets[i])):
        frame_index = new_tracklets[i][j].frame_index
        basename = db.frame_basenames[frame_index]
        cam_data = db.calib[basename]
        cam = cam_utils.Camera(basename, cam_data['A'], cam_data['R'], cam_data['T'], db.shape[0], db.shape[1])
        kp_3d = misc_utils.lift_keypoints_in_3d(cam, new_tracklets[i][j].keypoints)
        neck_pos.append(kp_3d[1, :])
    neck_pos = np.array(neck_pos)

    # Smooth trajectory
    smoothed_positions = smooth_trajectory(new_tracklets[i], neck_pos)

    # Record the smoothed (x, y, z) per frame, keyed by frame basename.
    for j in range(len(new_tracklets[i])):
        data_out[new_tracklets[i][j].frame].append({'mesh': new_tracklets[i][j].mesh_name, 'x': smoothed_positions[0, j],
                                                    'y': smoothed_positions[1, j], 'z': smoothed_positions[2, j]})

    # Plot the ground-plane (x, z) path of this track.
    ax.plot(smoothed_positions[0, :], smoothed_positions[2, :], 'o')

plt.show()

with open(join(db.path_to_dataset, 'players', 'metadata', 'position.json'), 'w') as outfile:
    json.dump(data_out, outfile)
| [
"json.dump",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"soccer3d.tracking.find_tracks",
"glog.info",
"soccer3d.tracking.smooth_trajectory",
"soccer3d.tracking.convert_to_MOT",
"matplotlib.pyplot.figure",
"numpy.array",
"utils.misc.lift_keypoints_in_3d",
"soccer3d.YoutubeVideo",
"soc... | [((318, 381), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calibrate a soccer video"""'}), "(description='Calibrate a soccer video')\n", (341, 381), False, 'import argparse\n'), ((654, 693), 'soccer3d.YoutubeVideo', 'soccer3d.YoutubeVideo', (['opt.path_to_data'], {}), '(opt.path_to_data)\n', (675, 693), False, 'import soccer3d\n'), ((935, 964), 'glog.info', 'glog.info', (['"""Tracking players"""'], {}), "('Tracking players')\n", (944, 964), False, 'import glog\n'), ((1398, 1474), 'soccer3d.tracking.find_tracks', 'find_tracks', (['dets_per_frame', 'db.frame_basenames'], {'dist_thresh': 'opt.dist_thresh'}), '(dets_per_frame, db.frame_basenames, dist_thresh=opt.dist_thresh)\n', (1409, 1474), False, 'from soccer3d.tracking import Detection, find_tracks, smooth_trajectory, convert_to_MOT\n'), ((1626, 1668), 'soccer3d.tracking.convert_to_MOT', 'convert_to_MOT', (['new_tracklets', 'db.n_frames'], {}), '(new_tracklets, db.n_frames)\n', (1640, 1668), False, 'from soccer3d.tracking import Detection, find_tracks, smooth_trajectory, convert_to_MOT\n'), ((1873, 1911), 'glog.info', 'glog.info', (['"""Smoothing 3D trajectories"""'], {}), "('Smoothing 3D trajectories')\n", (1882, 1911), False, 'import glog\n'), ((1967, 1979), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1977, 1979), True, 'import matplotlib.pyplot as plt\n'), ((2971, 2981), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2979, 2981), True, 'import matplotlib.pyplot as plt\n'), ((2505, 2523), 'numpy.array', 'np.array', (['neck_pos'], {}), '(neck_pos)\n', (2513, 2523), True, 'import numpy as np\n'), ((2574, 2619), 'soccer3d.tracking.smooth_trajectory', 'smooth_trajectory', (['new_tracklets[i]', 'neck_pos'], {}), '(new_tracklets[i], neck_pos)\n', (2591, 2619), False, 'from soccer3d.tracking import Detection, find_tracks, smooth_trajectory, convert_to_MOT\n'), ((3080, 3108), 'json.dump', 'json.dump', (['data_out', 'outfile'], {}), '(data_out, 
outfile)\n', (3089, 3108), False, 'import json\n'), ((1171, 1203), 'soccer3d.tracking.Detection', 'Detection', (['poses[j]', 'basename', 'i'], {}), '(poses[j], basename, i)\n', (1180, 1203), False, 'from soccer3d.tracking import Detection, find_tracks, smooth_trajectory, convert_to_MOT\n'), ((2270, 2372), 'utils.camera.Camera', 'cam_utils.Camera', (['basename', "cam_data['A']", "cam_data['R']", "cam_data['T']", 'db.shape[0]', 'db.shape[1]'], {}), "(basename, cam_data['A'], cam_data['R'], cam_data['T'], db.\n shape[0], db.shape[1])\n", (2286, 2372), True, 'import utils.camera as cam_utils\n'), ((2385, 2452), 'utils.misc.lift_keypoints_in_3d', 'misc_utils.lift_keypoints_in_3d', (['cam', 'new_tracklets[i][j].keypoints'], {}), '(cam, new_tracklets[i][j].keypoints)\n', (2416, 2452), True, 'import utils.misc as misc_utils\n'), ((2993, 3057), 'os.path.join', 'join', (['db.path_to_dataset', '"""players"""', '"""metadata"""', '"""position.json"""'], {}), "(db.path_to_dataset, 'players', 'metadata', 'position.json')\n", (2997, 3057), False, 'from os.path import join\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import StandardScaler
import time
import timeit
start = timeit.default_timer()
t0 = time.clock()
#file read
DS = pd.read_csv('diabetes.csv')
#preprocessing --replacing zeroes
pd.DataFrame.hist(DS, figsize=[20,20])
noZero= ['Glucose', 'BloodPressure','SkinThickness','BMI','Insulin']
classifiers=[]
y_pred = []
acc =[]
max=-1
for C in noZero:
DS[C]= DS[C].replace(0, np.NaN)
mean=int(DS[C].mean(skipna=True))
DS[C]=DS[C].replace(np.NaN,mean)
#splitting DataSet
X = np.array(DS.drop(['Outcome'],1))
Y = np.array(DS['Outcome'])
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,random_state=0,test_size=0.2)
for x in range(1,61):
clf = neighbors.KNeighborsClassifier(n_neighbors = x, n_jobs = -1, algorithm = 'auto')
classifiers.append(clf)
clf_model_1 = clf.fit(X_train, Y_train)
yPred= cross_val_predict(clf_model_1, X, Y, cv=10)
y_pred.append(yPred)
score = accuracy_score(Y, yPred)
acc.append(score)
if max < acc[x-1]:
max = acc[x-1]
position = x
print('Accuracy with k = %f is %f'%(x, acc[x-1]))
plt.plot(range(1,61),acc, color='red', marker='o', linestyle='dashed',linewidth=3, markersize=12)
plt.xlabel('K-Neighbor')
plt.ylabel('Accuracy')
plt.savefig('diabetics.pdf')
plt.show(block=True)
print('Max Accuracy is with k = %f and Accuracy is %f'%(position, acc[position-1]))
#Feature scaling- when distance and normalization is involved
scale_x= StandardScaler()
X_train=scale_x.fit_transform(X_train)
X_test=scale_x.transform(X_test)
#defining the model using KNN
#Y_test=sqrt(12.4)--12-1 taking odd to classify accurately
classifier=KNeighborsClassifier(n_neighbors=11, p=2, metric='euclidean')
#Fit model
classifier.fit(X_train, Y_train)
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='euclidean',
metric_params=None, n_jobs=1,n_neighbors=11,p=2,
weights='uniform')
#predicting the test results
Y_pred =cross_val_predict(classifier,X,Y,cv=10)
#evaluating-- accuracy
cm=confusion_matrix(Y,Y_pred)
print(cm)
stop = timeit.default_timer()
print('Time: ', stop - start)
print (time.clock() - t0, "seconds") | [
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.show",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"timeit.default_timer",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"pandas.DataFrame.hist",
"time.clock",
"sklearn.model_selection.cross_val_p... | [((433, 455), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (453, 455), False, 'import timeit\n'), ((461, 473), 'time.clock', 'time.clock', ([], {}), '()\n', (471, 473), False, 'import time\n'), ((490, 517), 'pandas.read_csv', 'pd.read_csv', (['"""diabetes.csv"""'], {}), "('diabetes.csv')\n", (501, 517), True, 'import pandas as pd\n'), ((553, 592), 'pandas.DataFrame.hist', 'pd.DataFrame.hist', (['DS'], {'figsize': '[20, 20]'}), '(DS, figsize=[20, 20])\n', (570, 592), True, 'import pandas as pd\n'), ((895, 918), 'numpy.array', 'np.array', (["DS['Outcome']"], {}), "(DS['Outcome'])\n", (903, 918), True, 'import numpy as np\n'), ((955, 1008), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'random_state': '(0)', 'test_size': '(0.2)'}), '(X, Y, random_state=0, test_size=0.2)\n', (971, 1008), False, 'from sklearn.model_selection import train_test_split\n'), ((1555, 1579), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""K-Neighbor"""'], {}), "('K-Neighbor')\n", (1565, 1579), True, 'import matplotlib.pyplot as plt\n'), ((1580, 1602), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1590, 1602), True, 'import matplotlib.pyplot as plt\n'), ((1603, 1631), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""diabetics.pdf"""'], {}), "('diabetics.pdf')\n", (1614, 1631), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1652), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (1640, 1652), True, 'import matplotlib.pyplot as plt\n'), ((1810, 1826), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1824, 1826), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2001, 2062), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(11)', 'p': '(2)', 'metric': '"""euclidean"""'}), "(n_neighbors=11, p=2, metric='euclidean')\n", (2021, 2062), False, 'from 
sklearn.neighbors import KNeighborsClassifier\n'), ((2109, 2255), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'algorithm': '"""auto"""', 'leaf_size': '(30)', 'metric': '"""euclidean"""', 'metric_params': 'None', 'n_jobs': '(1)', 'n_neighbors': '(11)', 'p': '(2)', 'weights': '"""uniform"""'}), "(algorithm='auto', leaf_size=30, metric='euclidean',\n metric_params=None, n_jobs=1, n_neighbors=11, p=2, weights='uniform')\n", (2129, 2255), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2330, 2372), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['classifier', 'X', 'Y'], {'cv': '(10)'}), '(classifier, X, Y, cv=10)\n', (2347, 2372), False, 'from sklearn.model_selection import cross_val_predict\n'), ((2397, 2424), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y', 'Y_pred'], {}), '(Y, Y_pred)\n', (2413, 2424), False, 'from sklearn.metrics import classification_report, accuracy_score, confusion_matrix\n'), ((2443, 2465), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2463, 2465), False, 'import timeit\n'), ((1039, 1113), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', ([], {'n_neighbors': 'x', 'n_jobs': '(-1)', 'algorithm': '"""auto"""'}), "(n_neighbors=x, n_jobs=-1, algorithm='auto')\n", (1069, 1113), False, 'from sklearn import neighbors\n'), ((1203, 1246), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['clf_model_1', 'X', 'Y'], {'cv': '(10)'}), '(clf_model_1, X, Y, cv=10)\n', (1220, 1246), False, 'from sklearn.model_selection import cross_val_predict\n'), ((1284, 1308), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y', 'yPred'], {}), '(Y, yPred)\n', (1298, 1308), False, 'from sklearn.metrics import classification_report, accuracy_score, confusion_matrix\n'), ((2505, 2517), 'time.clock', 'time.clock', ([], {}), '()\n', (2515, 2517), False, 'import time\n')] |
import sys
import timeit
import numpy as np
from global_vars import *
from MLP.global_vars import *
from MLP.MLP import MLP
from MLP.count_success import count_success
def process(model, file_name, test_set, label_set):
    """Evaluate the model on a test set, logging per-sample error details.

    Writes one "<error_idx>\t<n_error>" line per label sample to
    ``log_full_path + "_detail/" + file_name`` and times both the raw
    inference and the evaluation phase separately.

    Args:
        model: object exposing ``test_model(test_set)`` -> predictions.
        file_name: basename of the per-sample error log file.
        test_set: inputs forwarded unchanged to ``model.test_model``.
        label_set: ground-truth labels; split column-wise into
            ``size_output_layer / 4`` chunks for bit-level comparison
            by ``count_success``.

    Returns:
        ``(success, total_errors, [inference_time, test_time])`` on
        success; implicitly ``None`` if any exception occurs (the error
        is printed, preserving the original behaviour).
    """
    try:
        # `with` guarantees the log file is closed even when a write or the
        # evaluation raises (the original leaked the handle in that case).
        with open(log_full_path + "_detail/" + file_name, "w") as detail_file:
            inference_time = timeit.default_timer()
            predict_set = model.test_model(test_set)
            inference_time = timeit.default_timer() - inference_time

            test_time = timeit.default_timer()
            success, error_idx, n_error = count_success(
                predict_set,
                np.hsplit(label_set, int(size_output_layer / 4)),
                countBit=True)
            for idx in range(len(label_set)):
                detail_file.write(str(error_idx[idx]) + "\t" + str(n_error[idx]) + "\n")
            test_time = timeit.default_timer() - test_time
            return success, np.sum(np.array(n_error)), [inference_time, test_time]
    except Exception as ex:
        _, _, tb = sys.exc_info()
        print("[MLP:process:" + str(tb.tb_lineno) + "] " + str(ex) + "\n\n")
| [
"timeit.default_timer",
"numpy.array",
"sys.exc_info"
] | [((329, 351), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (349, 351), False, 'import timeit\n'), ((479, 501), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (499, 501), False, 'import timeit\n'), ((420, 442), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (440, 442), False, 'import timeit\n'), ((776, 798), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (796, 798), False, 'import timeit\n'), ((934, 948), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (946, 948), False, 'import sys\n'), ((841, 858), 'numpy.array', 'np.array', (['n_error'], {}), '(n_error)\n', (849, 858), True, 'import numpy as np\n')] |
'''
A number of functions related to computing catchments from a field of flow directions.
The function 'main' is intended to be the function called externally
Created on Feb 9, 2016
@author: thomasriddick
'''
import numpy as np
import warnings
import os.path as path
from Dynamic_HD_Scripts.interface.fortran_interface \
import f2py_manager
from Dynamic_HD_Scripts.base import field
from Dynamic_HD_Scripts.base import iodriver
from Dynamic_HD_Scripts.interface.cpp_interface.libs \
import compute_catchments_wrapper as cc_ccp_wrap
from Dynamic_HD_Scripts.context import fortran_source_path
def compute_catchments_cpp(field, loop_logfile):
    """Label the catchment of every grid cell using the C++ implementation.

    Input:
    field: numpy-like; field of river flow directions
    loop_logfile: string; the path to a file in which to store any loops found
    Output:
    A numpy array containing the unordered catchments calculated

    Delegates to the queue-based C++ algorithm, which numbers catchments in
    order of discovery and records any circular flow paths it finds in the
    given logfile.
    """
    print("Writing circular flow log file to {0}".format(loop_logfile))
    flow_directions = np.ascontiguousarray(field, dtype=np.float64)
    catchment_labels = np.empty(shape=field.shape, dtype=np.int32)
    cc_ccp_wrap.compute_catchments_cpp(catchment_labels,
                                       flow_directions,
                                       loop_logfile)
    return catchment_labels
def compute_catchments(field,loop_logfile,circ_flow_check_period=1000):
    """Compute the unordered catchments on all grid points

    Input:
    field: numpy-like; field of river flow directions
    loop_logfile: string; the path to a file in which to store any loops found
    circ_flow_check_period (optional): integer; how often (in terms of steps
        along a river) to check if the river is going in a loop
    Output:
    A tuple consisting of a numpy array of the number of each of the six catchment
    types found and a second numpy array containing the unordered catchments calculated
    themselves.

    Use a fortran module to calculate the unordered catchements (labelling them by
    order of discovery) on all the grid points in the supplied array of river flow
    direction. Store any loops found in the named logfile. (The axis swap is required
    to ensure the data is processed in the correct orientation).
    """
    f2py_mngr = f2py_manager.f2py_manager(path.join(fortran_source_path,
                                                "mod_compute_catchments.f90"),
                                      func_name="compute_catchments")
    # Swap axes so the Fortran routine sees the expected orientation.
    field = np.swapaxes(np.asarray(field,dtype=np.int64),0,1)
    print("Writing circular flow log file to {0}".format(loop_logfile))
    catchment_types,catchments = \
        f2py_mngr.run_current_function_or_subroutine(field,
                                                     circ_flow_check_period,
                                                     loop_logfile)
    # Swap back to the original orientation and wrap as a masked array.
    catchments = np.ma.array(np.swapaxes(catchments,0,1))
    return catchment_types,catchments
def check_catchment_types(catchment_types,logfile=None):
    """Check the catchment types returned for potential problems

    Input:
    catchment_types: numpy-like array; the number of each catchment type found
        (first four entries: coast/ocean/local/unknown sink counts; entries
        five and six: flow-over-pole and circular-flow counts)
    logfile(optional): string; full path to a logfile to record catchment_type information
    Return: None

    The output is both printed to screen and saved to a file if one is specified.
    Warnings are given for unknown flow directions, flows over the pole and
    circular flows.

    Bug fix: the warning for unknown flow directions previously tested
    `name == "flow over pole"` inside the coast/ocean/local/unknown loop,
    which could never match, so the warning was unreachable; it now fires
    when the "unknown" sink count is non-zero, matching the warning text.
    """
    catchment_type_names = ["coast","ocean","local","unknown"]
    other_type_names = ["flow over pole","circular flow"]
    output = ""
    for name, count in zip(catchment_type_names,catchment_types[0:4].tolist()):
        output += "Number of {0} type sinks found: {1} \n".format(name,count)
        if name == "unknown" and count > 0:
            warnings.warn("Unknown flow direction detected!")
    for name, count in zip(other_type_names,catchment_types[4:6]):
        output += "Number of {0}s found: {1} \n".format(name,count)
        if count > 0:
            warnings.warn("{0} detected!".format(name))
    output = output.rstrip('\n')
    print(output)
    if logfile:
        with open(logfile,'w') as f:
            print("Logging catchment type counts in file {0}".format(logfile))
            f.write(output)
def renumber_catchments_by_size(catchments,loop_logfile=None):
    """Renumber catchments according to their size in terms of number of cells

    Input:
    catchments: numpy-like array; catchments to be relabelled
    loop_logfile: string; the full path to the existing loop_logfile (optional)
    Returns: Relabelled catchments

    Label catchments in order of descending size using a fortran function
    to assist a time critical part of the procedure that can't be done in
    numpy effectively. Also opens the existing loop logfile and changes the
    catchment numbers with loops to reflect the new catchment labelling.
    """
    f2py_mngr = f2py_manager.f2py_manager(path.join(fortran_source_path,
                                                "mod_compute_catchments.f90"),
                                      func_name="relabel_catchments")
    # Count the number of cells carrying each old catchment label.
    catch_nums = np.arange(np.amax(catchments)+1)
    counts = np.bincount(catchments.flatten())
    # Structured array pairing each old label with its cell count and
    # (to be filled in) its new label.
    catchments_sizes = np.empty(len(catch_nums),
                                dtype=[('catch_nums',int),
                                       ('new_catch_nums',int),
                                       ('counts',int)])
    catchments_sizes['catch_nums'] = catch_nums
    catchments_sizes['counts'] = counts
    catchments_sizes.sort(order='counts')
    # After the ascending sort by count, assign descending new labels so
    # the largest catchment receives label 1.
    catchments_sizes['new_catch_nums'] = np.arange(len(catchments_sizes['catch_nums']),
                                                   0,-1)
    catchments = np.asfortranarray(catchments,np.int32)
    # Old-label -> new-label lookup table, re-sorted by old label so it can
    # be indexed directly by the old catchment number.
    old_to_new_label_map = np.asfortranarray(np.copy(np.sort(catchments_sizes,
                                                         order='catch_nums'))['new_catch_nums'],np.int32)
    # In-place relabelling of the catchments array via the Fortran routine.
    f2py_mngr.run_current_function_or_subroutine(catchments,
                                                 old_to_new_label_map)
    if loop_logfile is not None:
        with open(loop_logfile,'r') as f:
            next(f)  # skip the header line
            loops = [int(line.strip()) for line in f]
        #-1 to account for differing array offset between Fortran and python
        loops = [str(old_to_new_label_map[old_loop_num])+'\n' for old_loop_num in loops]
        # Rewrite the loop logfile using the new catchment numbering.
        with open(loop_logfile,'w') as f:
            f.write('Loops found in catchments:\n')
            f.writelines(loops)
    return catchments
def advanced_main(filename,fieldname,output_filename,output_fieldname,
                  loop_logfile,use_cpp_alg=True):
    """Generate size-ordered numbered catchments from a river directions file.

    Same pipeline as `main`, but loads and writes fields through the advanced
    iodriver interface with explicit input and output field names.

    Inputs:
    filename: string; input file of river directions
    fieldname: string; name of the river-directions field in the input file
    output_filename: string; target file for the numbered catchments
    output_fieldname: string; name of the catchments field in the output file
    loop_logfile: string; loop logfile to write/update
    use_cpp_alg: bool; use the C++ implementation if True, else the Fortran one
    Returns: Nothing
    """
    rdirs = iodriver.advanced_field_loader(filename,
                                           field_type='Generic',
                                           fieldname=fieldname)
    # NOTE(review): nlat/nlon are retrieved but never used below.
    nlat,nlon = rdirs.get_grid_dimensions()
    if use_cpp_alg:
        catchments = compute_catchments_cpp(rdirs.get_data(),
                                             loop_logfile)
    else:
        catchment_types, catchments = compute_catchments(rdirs.get_data(),loop_logfile)
        # Catchment-type diagnostics go to <output basename>.log
        check_catchment_types(catchment_types,logfile=path.splitext(output_filename)[0]+".log")
    numbered_catchments = field.Field(renumber_catchments_by_size(catchments,loop_logfile),
                                      grid=rdirs.get_grid())
    iodriver.advanced_field_writer(target_filename=output_filename,field=numbered_catchments,
                                   fieldname=output_fieldname)
def main(filename,output_filename,loop_logfile,use_cpp_code=True,grid_type='HD',**grid_kwargs):
    """Generates a file with numbered catchments from a given river flow direction file

    Inputs:
    filename: string; the input file of river directions
    output_filename: string; the target file for the output numbered catchments
    loop_logfile: string; an input file of catchments with loop to be updated
    use_cpp_code: bool; use the Cpp code if True otherwise use the Fortran code
    grid_type: string; a keyword giving the type of grid being used
    **grid_kwargs(optional): keyword dictionary; the parameter of the grid to
        be used (if required)
    Returns: Nothing

    Produces the numbered catchments where the numbering is in descending order of size;
    also update the loop log file to reflect the relabelling of catchements and runs a
    check on the type of catchments generated (which are placed in a log file with the
    same basename as the output catchments but with the extension '.log').
    """
    rdirs = iodriver.load_field(filename,
                                file_type=iodriver.get_file_extension(filename),
                                field_type='Generic',grid_type=grid_type,**grid_kwargs)
    if use_cpp_code:
        catchments = compute_catchments_cpp(rdirs.get_data(),
                                             loop_logfile)
    else:
        catchment_types, catchments = compute_catchments(rdirs.get_data(),loop_logfile)
        # Catchment-type diagnostics go to <output basename>.log
        check_catchment_types(catchment_types,logfile=path.splitext(output_filename)[0]+".log")
    numbered_catchments = field.Field(renumber_catchments_by_size(catchments,loop_logfile),
                                      grid=grid_type,
                                      **grid_kwargs)
    iodriver.write_field(filename=output_filename,field=numbered_catchments,
                         file_type=iodriver.get_file_extension(output_filename))
| [
"os.path.join",
"numpy.empty",
"numpy.asarray",
"numpy.asfortranarray",
"Dynamic_HD_Scripts.base.iodriver.advanced_field_loader",
"Dynamic_HD_Scripts.base.iodriver.advanced_field_writer",
"numpy.amax",
"numpy.sort",
"Dynamic_HD_Scripts.base.iodriver.get_file_extension",
"numpy.swapaxes",
"os.pat... | [((1413, 1456), 'numpy.empty', 'np.empty', ([], {'shape': 'field.shape', 'dtype': 'np.int32'}), '(shape=field.shape, dtype=np.int32)\n', (1421, 1456), True, 'import numpy as np\n'), ((6157, 6196), 'numpy.asfortranarray', 'np.asfortranarray', (['catchments', 'np.int32'], {}), '(catchments, np.int32)\n', (6174, 6196), True, 'import numpy as np\n'), ((7098, 7186), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_loader', 'iodriver.advanced_field_loader', (['filename'], {'field_type': '"""Generic"""', 'fieldname': 'fieldname'}), "(filename, field_type='Generic', fieldname=\n fieldname)\n", (7128, 7186), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((7803, 7926), 'Dynamic_HD_Scripts.base.iodriver.advanced_field_writer', 'iodriver.advanced_field_writer', ([], {'target_filename': 'output_filename', 'field': 'numbered_catchments', 'fieldname': 'output_fieldname'}), '(target_filename=output_filename, field=\n numbered_catchments, fieldname=output_fieldname)\n', (7833, 7926), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((1546, 1591), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['field'], {'dtype': 'np.float64'}), '(field, dtype=np.float64)\n', (1566, 1591), True, 'import numpy as np\n'), ((2654, 2714), 'os.path.join', 'path.join', (['fortran_source_path', '"""mod_compute_catchments.f90"""'], {}), "(fortran_source_path, 'mod_compute_catchments.f90')\n", (2663, 2714), True, 'import os.path as path\n'), ((2867, 2900), 'numpy.asarray', 'np.asarray', (['field'], {'dtype': 'np.int64'}), '(field, dtype=np.int64)\n', (2877, 2900), True, 'import numpy as np\n'), ((3245, 3274), 'numpy.swapaxes', 'np.swapaxes', (['catchments', '(0)', '(1)'], {}), '(catchments, 0, 1)\n', (3256, 3274), True, 'import numpy as np\n'), ((5346, 5406), 'os.path.join', 'path.join', (['fortran_source_path', '"""mod_compute_catchments.f90"""'], {}), "(fortran_source_path, 'mod_compute_catchments.f90')\n", (5355, 5406), True, 'import os.path as path\n'), ((4180, 
4229), 'warnings.warn', 'warnings.warn', (['"""Unknown flow direction detected!"""'], {}), "('Unknown flow direction detected!')\n", (4193, 4229), False, 'import warnings\n'), ((5561, 5580), 'numpy.amax', 'np.amax', (['catchments'], {}), '(catchments)\n', (5568, 5580), True, 'import numpy as np\n'), ((9073, 9110), 'Dynamic_HD_Scripts.base.iodriver.get_file_extension', 'iodriver.get_file_extension', (['filename'], {}), '(filename)\n', (9100, 9110), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((9846, 9890), 'Dynamic_HD_Scripts.base.iodriver.get_file_extension', 'iodriver.get_file_extension', (['output_filename'], {}), '(output_filename)\n', (9873, 9890), False, 'from Dynamic_HD_Scripts.base import iodriver\n'), ((6249, 6294), 'numpy.sort', 'np.sort', (['catchments_sizes'], {'order': '"""catch_nums"""'}), "(catchments_sizes, order='catch_nums')\n", (6256, 6294), True, 'import numpy as np\n'), ((7604, 7634), 'os.path.splitext', 'path.splitext', (['output_filename'], {}), '(output_filename)\n', (7617, 7634), True, 'import os.path as path\n'), ((9493, 9523), 'os.path.splitext', 'path.splitext', (['output_filename'], {}), '(output_filename)\n', (9506, 9523), True, 'import os.path as path\n')] |
import os
import numpy as np
import logging
import cv2
import shutil
import json
import copy
import sys
import re
from matplotlib import pyplot as plt
import imageio
from collections import Counter
# ファイル出力ログ用
file_logger = logging.getLogger("message").getChild(__name__)
logger = logging.getLogger("__main__").getChild(__name__)
level = {0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG}
# 人物ソート
def sort(cnt, _display_idx, _iidx, sorted_idxs, now_str, interval, subdir, json_path, json_size, number_people_max, reverse_specific_dict, order_specific_dict, start_json_name, start_frame, pred_multi_ary, pred_multi_z_ary, pred_multi_xy_ary, pred_multi_frame_ary, frame_imgs, max_conf_ary, max_conf_color_ary, org_width, org_height, past_data, past_depths, past_depths_z, png_lib, verbose):
# 該当シーンのJSONデータを読み込む
file_name = re.sub(r'\d{12}', "{0:012d}".format(cnt), start_json_name)
_file = os.path.join(json_path, file_name)
try:
data = json.load(open(_file))
# 過去データ上書きしないデータも保持
org_data = json.load(open(_file))
except Exception as e:
logger.warning("JSON読み込み失敗のため、空データ読み込み, %s %s", _file, e)
data = json.load(open("json/all_empty_keypoints.json"))
org_data = json.load(open("json/all_empty_keypoints.json"))
for i in range(len(data["people"]), number_people_max):
# 足りない分は空データを埋める
data["people"].append(json.load(open("json/one_keypoints.json")))
org_data["people"].append(json.load(open("json/one_keypoints.json")))
logger.info("人体別処理: iidx: %s file: %s ------", _iidx, file_name)
# 並べ直したindex用配列反転有無
is_all_reverses = [False for x in range(number_people_max)]
# 並べ直したindex用配列反転有無(上半身のみ)
is_upper_reverses = [False for x in range(number_people_max)]
# 並べ直したindex用配列反転有無(下半身のみ)
is_lower_reverses = [False for x in range(number_people_max)]
# 現在反転中か否か(上半身)
is_now_upper_reversed = [False for x in range(number_people_max)]
# 現在反転中か否か(下半身)
is_now_lower_reversed = [False for x in range(number_people_max)]
# インデックス並び替え -------------------------
# 開始時
if _iidx == 0:
# 前回のXYを保持
past_data = data["people"]
# 前回の深度を保持
past_depths = pred_multi_ary[0]
# 前回の深度(センターZ)を保持
past_depths_z = pred_multi_z_ary[0]
# 最初は左から順番に0番目,1番目と並べる
# FIXME 初期表示時の左から順番に並べたい
# first_sorted_idxs = sort_first_idxs(data["people"])
# logger.info("first_sorted_idxs: %s", first_sorted_idxs)
for pidx in range(number_people_max):
# 最初はインデックスの通りに並べる
sorted_idxs[_iidx][pidx] = pidx
past_depth_idx = -1
next_depth_idx = -1
else:
# 前回の深度
past_depth_idx = _iidx - (_iidx % interval)
# 次回の深度
next_depth_idx = _iidx + interval - (_iidx % interval)
if next_depth_idx >= json_size - start_frame:
# 最後は同じ値をnextとして見る
next_depth_idx = json_size - start_frame - 1
if _iidx in order_specific_dict:
# 順番指定リストに該当フレームがある場合
for key_idx, person_idx in enumerate(order_specific_dict[_iidx]):
# Openposeのデータの順番に応じたソート順を指定する
sorted_idxs[_iidx][key_idx] = person_idx
# 反転はさせない
is_all_reverses[key_idx] = False
is_upper_reverses[key_idx] = False
is_lower_reverses[key_idx] = False
# logger.info("_iidx: %s, _display_idx: %s, key_idx: %s, person_idx: %s", _iidx, _display_idx, key_idx, person_idx )
file_logger.warning("※※{0:05d}F目 順番指定あり {2}".format(_iidx, _display_idx, order_specific_dict[_iidx]))
# logger.info("_iidx: %s, _display_idx: %s, sorted_idxs[_iidx]: %s", _iidx, _display_idx, sorted_idxs[_iidx] )
else:
# 前回のXYと深度から近いindexを算出
sorted_idxs[_iidx], is_all_reverses, is_upper_reverses, is_lower_reverses = calc_nearest_idxs(
sorted_idxs[_iidx - 1], past_data, data["people"], pred_multi_ary[past_depth_idx], pred_multi_ary[next_depth_idx], max_conf_ary, max_conf_color_ary, frame_imgs[(_iidx - 1) % interval], frame_imgs[_iidx % interval])
logger.info("**_iidx: %s(%s), past_depth_idx: %s, next_depth_idx: %s, sorted_idxs: %s, all: %s, upper: %s, lower: %s", _iidx, _display_idx,
past_depth_idx, next_depth_idx, sorted_idxs[_iidx], is_all_reverses, is_upper_reverses, is_lower_reverses)
# 現在データ
now_data = [[] for x in range(number_people_max)]
# 過去を引き継いだ現在データ
all_now_data = [[] for x in range(number_people_max)]
# 過去を引き継いだ現在深度
all_now_depths = [[] for x in range(number_people_max)]
# 過去を引き継いだ現在深度のxy
all_now_depths_xy = [[] for x in range(number_people_max)]
# 過去を引き継いだ現在センターZ
all_now_depths_z = [[] for x in range(number_people_max)]
# インデックス出力 ------------------------------
for pidx, sidx in enumerate(sorted_idxs[_iidx]):
logger.debug("reverse_specific_dict _iidx: %s, pidx: %s, in: %s", _iidx, pidx, (_iidx in reverse_specific_dict and pidx in reverse_specific_dict[_iidx]))
if _iidx in reverse_specific_dict and pidx in reverse_specific_dict[_iidx]:
if 'R' in reverse_specific_dict[_iidx][pidx]:
logger.debug("反転指定対象フレーム【R】: %s(%s) rsd: %s", _iidx, _display_idx, reverse_specific_dict[_iidx][pidx])
# 全身反転
is_all_reverses[pidx] = True
is_upper_reverses[pidx] = False
is_lower_reverses[pidx] = False
elif 'U' in reverse_specific_dict[_iidx][pidx]:
logger.debug("反転指定対象フレーム【U】: %s(%s) rsd: %s", _iidx, _display_idx, reverse_specific_dict[_iidx][pidx])
# 上半身反転
is_all_reverses[pidx] = False
is_upper_reverses[pidx] = True
is_lower_reverses[pidx] = False
elif 'L' in reverse_specific_dict[_iidx][pidx]:
logger.debug("反転指定対象フレーム【L】: %s(%s) rsd: %s", _iidx, _display_idx, reverse_specific_dict[_iidx][pidx])
# 下半身反転
is_all_reverses[pidx] = False
is_upper_reverses[pidx] = False
is_lower_reverses[pidx] = True
elif 'N' in reverse_specific_dict[_iidx][pidx]:
logger.debug("反転指定対象フレーム【N】: %s(%s) rsd: %s", _iidx, _display_idx, reverse_specific_dict[_iidx][pidx])
# 反転なし
is_all_reverses[pidx] = False
is_upper_reverses[pidx] = False
is_lower_reverses[pidx] = False
else:
logger.warning("反転指定対象フレーム【EMPTY】: %s(%s) rsd: %s", _iidx, _display_idx, reverse_specific_dict[_iidx][pidx])
# 反転なし
is_all_reverses[pidx] = False
is_upper_reverses[pidx] = False
is_lower_reverses[pidx] = False
if is_all_reverses[pidx]:
# 現在の反転状況(全身反転)
is_now_upper_reversed[pidx] = True
is_now_lower_reversed[pidx] = True
else:
# 現在の反転状況(上下別々反転)
is_now_upper_reversed[pidx] = is_upper_reverses[pidx]
is_now_lower_reversed[pidx] = is_lower_reverses[pidx]
logger.debug("_iidx: %s(%s), upper: %s, lower: %s", _iidx, _display_idx, is_now_upper_reversed, is_now_lower_reversed)
else:
# 現在データ(sidxで振り分け済み)
now_sidx_data = data["people"][sidx]["pose_keypoints_2d"]
if _iidx > 0:
# とりあえず何らかのデータがある場合
# 過去データ
past_pidx_data = past_data[sorted_idxs[_iidx - 1][pidx]]["pose_keypoints_2d"]
for o in range(0,len(now_sidx_data),3):
oidx = int(o/3)
if now_sidx_data[o] == now_sidx_data[o+1] == 0 and oidx in [1,2,3,4,5,6,7,8,9,10,11,12,13]:
logger.debug("過去PU: pidx: %s, sidx:%s, o: %s, ns: %s, pp: %s, np: %s, ps: %s", pidx, sidx, oidx, now_sidx_data[o], past_pidx_data[o], data["people"][pidx]["pose_keypoints_2d"][o], past_data[sidx]["pose_keypoints_2d"][o])
logger.debug("sidx: %s, now_sidx_data: %s", sidx, now_sidx_data)
# XもYも0の場合、過去から引っ張ってくる
# 反転対応済みのINDEXに設定する
now_sidx_data[o] = past_pidx_data[o]
now_sidx_data[o+1] = past_pidx_data[o+1]
now_sidx_data[o+2] = past_pidx_data[o+2] - 0.1
logger.debug("反転再チェック: %s(%s) ----------------------------", _iidx, _display_idx)
# 前回のXYと深度から近いindexを算出
# 埋まってない部分を補完して、改めて反転再算出
# 既に並べ終わってるので、少し底上げして厳しめにチェックする
_, is_retake_all_reverses, is_retake_upper_reverses, is_retake_lower_reverses = \
calc_nearest_idxs([0], [past_data[pidx]], [data["people"][sidx]], [pred_multi_ary[past_depth_idx][sidx]], [pred_multi_ary[next_depth_idx][sidx]], None, max_conf_color_ary, frame_imgs[(_iidx - 1) % interval], frame_imgs[_iidx % interval], 0.03)
is_all_reverses[pidx] = is_retake_all_reverses[0]
is_upper_reverses[pidx] = is_retake_upper_reverses[0]
is_lower_reverses[pidx] = is_retake_lower_reverses[0]
logger.debug("**反転再チェック: _iidx: %s, pidx: %s, all: %s, upper: %s, lower: %s", _iidx, pidx, is_all_reverses[pidx], is_upper_reverses[pidx], is_lower_reverses[pidx])
if is_all_reverses[pidx]:
logger.debug("全身判定 true")
# 全身反転の場合
if is_upper_reverses[pidx] != is_lower_reverses[pidx]:
logger.debug("全身判定 上半身・下半身違いでクリア")
# 上半身と下半身で反転が違う場合、反転クリア
is_now_upper_reversed[pidx] = False
is_now_lower_reversed[pidx] = False
else:
# 反転状況が同じ場合は、反転採用
is_now_upper_reversed[pidx] = True
is_now_lower_reversed[pidx] = True
else:
is_now_upper_reversed[pidx] = is_upper_reverses[pidx]
is_now_lower_reversed[pidx] = is_lower_reverses[pidx]
else:
# 反転対象外の場合、クリア
is_now_upper_reversed[pidx] = False
is_now_lower_reversed[pidx] = False
logger.info("**反転確定:pidx: %s, is_now_upper_reversed: %s, is_now_lower_reversed: %s", pidx, is_now_upper_reversed[pidx], is_now_lower_reversed[pidx])
# # トレース失敗の場合、クリア
# if (is_all_reverse == False and (is_upper_reverse or (is_upper_reverse == False and is_now_upper_reversed[pidx] ))) and (targetdata[2*3] == 0 or targetdata[3*3] == 0 or targetdata[5*3] == 0 or targetdata[6*3] == 0) :
# logger.debug("上半身ひじまでのトレース失敗のため、上半身反転フラグクリア %s(%s) data: %s", _iidx, _display_idx, targetdata)
# is_upper_reverses[pidx] = False
# is_now_upper_reversed[pidx] = False
# if (is_all_reverse == False or (is_lower_reverse or (is_lower_reverse == False and is_now_lower_reversed[pidx] ))) and (targetdata[8*3] == 0 or targetdata[9*3] == 0 or targetdata[11*3] == 0 or targetdata[12*3] == 0) :
# logger.debug("下半身ひざまでのトレース失敗のため、下半身反転フラグクリア %s(%s) data: %s", _iidx, _display_idx, targetdata)
# is_lower_reverses[pidx] = False
# is_now_lower_reversed[pidx] = False
logger.debug("_iidx: %s(%s), sidx: %s, pidx: %s, upper: %s, lower: %s", _iidx, _display_idx, sidx, pidx, is_now_upper_reversed[pidx], is_now_lower_reversed[pidx])
logger.debug("is_now_upper_reversed: %s, is_now_lower_reversed: %s", is_now_upper_reversed, is_now_lower_reversed)
# 反転判定が終わった後、出力処理
for pidx, sidx in enumerate(sorted_idxs[_iidx]):
# 指定ありの場合、メッセージ追加
reverse_specific_str = ""
if _iidx in reverse_specific_dict and pidx in reverse_specific_dict[_iidx]:
reverse_specific_str = "【指定】"
if is_now_upper_reversed[pidx] and is_now_lower_reversed[pidx]:
file_logger.warning("※※{0:05d}F目 {2}番目の人物、全身反転 [{0}:{2},R]{3}".format( _iidx, _display_idx, pidx, reverse_specific_str))
elif is_now_upper_reversed[pidx] and is_now_lower_reversed[pidx] == False :
file_logger.warning("※※{0:05d}F目 {2}番目の人物、上半身反転 [{0}:{2},U]{3}".format( _iidx, _display_idx, pidx, reverse_specific_str))
elif is_now_upper_reversed[pidx] == False and is_now_lower_reversed[pidx]:
file_logger.warning("※※{0:05d}F目 {2}番目の人物、下半身反転 [{0}:{2},L]{3}".format( _iidx, _display_idx, pidx, reverse_specific_str))
else:
if len(reverse_specific_str) > 0:
file_logger.warning("※※{0:05d}F目 {2}番目の人物、反転なし [{0}:{2},N]{3}".format( _iidx, _display_idx, pidx, reverse_specific_str))
# 一旦空データを読む
outputdata = json.load(open("json/empty_keypoints.json"))
# 一旦空データを読む
all_outputdata = json.load(open("json/empty_keypoints.json"))
# 過去の上書きがない元データ
org_sidx_data = org_data["people"][sidx]["pose_keypoints_2d"]
# 出力用深度(とりあえず全部0)
outputdepths = [0 for x in range(18)]
# 出力用深度センターZ(とりあえず全部0)
outputdepths_z = [0 for x in range(18)]
# 出力用深度XY(X,Yの配列が入る)
outputdepths_xy = [[] for x in range(18)]
for o in range(0,len(outputdata["people"][0]["pose_keypoints_2d"]),3):
# デフォルトのXINDEX
oidx = int(o/3)
if is_now_upper_reversed[pidx] and is_now_lower_reversed[pidx]:
oidx = OPENPOSE_REVERSE_ALL[oidx]
elif is_now_upper_reversed[pidx] and is_now_lower_reversed[pidx] == False:
# 反転している場合、反転INDEX(上半身)
oidx = OPENPOSE_REVERSE_UPPER[oidx]
elif is_now_upper_reversed[pidx] == False and is_now_lower_reversed[pidx]:
# 反転している場合、反転INDEX(下半身)
oidx = OPENPOSE_REVERSE_LOWER[oidx]
# 出力データはオリジナルデータのみコピー
outputdata["people"][0]["pose_keypoints_2d"][o] = org_sidx_data[oidx*3]
outputdata["people"][0]["pose_keypoints_2d"][o+1] = org_sidx_data[oidx*3+1]
outputdata["people"][0]["pose_keypoints_2d"][o+2] = org_sidx_data[oidx*3+2]
# 過去引継データもとりあえずオリジナルデータコピー
all_outputdata["people"][0]["pose_keypoints_2d"][o] = org_sidx_data[oidx*3]
all_outputdata["people"][0]["pose_keypoints_2d"][o+1] = org_sidx_data[oidx*3+1]
all_outputdata["people"][0]["pose_keypoints_2d"][o+2] = org_sidx_data[oidx*3+2]
if _iidx % interval == 0:
# 深度元データ
outputdepths[oidx] = pred_multi_ary[_iidx][sidx][oidx]
outputdepths_z[oidx] = pred_multi_z_ary[_iidx][sidx][oidx]
# logger.info("_iidx: %s, sidx: %s, oidx: %s, len(pred_multi_xy_ary[_iidx]): %s, len(pred_multi_xy_ary[_iidx][sidx]: %s", _iidx, sidx, oidx, len(pred_multi_xy_ary[_iidx]), len(pred_multi_xy_ary[_iidx][sidx]))
if len(pred_multi_xy_ary[_iidx][sidx]) > 0:
outputdepths_xy[oidx] = pred_multi_xy_ary[_iidx][sidx][oidx]
logger.debug("outputdata %s", outputdata["people"][0]["pose_keypoints_2d"])
# 出力順番順に並べなおしてリストに設定
now_data[sidx] = outputdata
all_now_data[sidx] = all_outputdata
if _iidx % interval == 0:
all_now_depths[sidx] = outputdepths
all_now_depths_z[sidx] = outputdepths_z
all_now_depths_xy[sidx] = outputdepths_xy
# 詰め直し
now_sorted_datas = {}
now_sorted_all_datas = {}
now_sorted_all_depths = {}
now_sorted_all_depths_xy = {}
now_sorted_all_depths_z = {}
for pidx, sidx in enumerate(sorted_idxs[_iidx]):
# 現在データ
now_sorted_datas[sidx] = now_data[pidx]["people"][0]
# 現在データ(過去引継)
now_sorted_all_datas[sidx] = all_now_data[pidx]["people"][0]
# 現在深度
now_sorted_all_depths[sidx] = all_now_depths[pidx]
# 現在深度XY
now_sorted_all_depths_xy[sidx] = all_now_depths_xy[pidx]
# 現在深度センターZ
now_sorted_all_depths_z[sidx] = all_now_depths_z[pidx]
# 過去データからの引継
for pidx, sidx in enumerate(sorted_idxs[_iidx]):
now_sorted_data = now_sorted_datas[pidx]["pose_keypoints_2d"]
now_sorted_all_data = now_sorted_all_datas[pidx]["pose_keypoints_2d"]
past_sorted_data = past_data[pidx]["pose_keypoints_2d"]
logger.debug("*** iidx: %s(%s) pidx: %s, sidx: %s, np: %s, pp: %s", _iidx, _display_idx, pidx, sidx, now_sorted_all_data[1*3], past_sorted_data[1*3])
for o in range(0,len(now_sorted_all_data),3):
if now_sorted_all_data[o] == 0 and now_sorted_all_data[o+1] == 0 and int(o/3) in [1,2,3,4,5,6,7,8,9,11,12,13,16,17]:
# 値がない場合、過去引継ぎデータは過去データをコピーする
logger.debug("***過去データ引継 iidx: %s(%s) pidx: %s, sidx: %s, np: %s, pp: %s", _iidx, _display_idx, pidx, sidx, now_sorted_all_data[o], past_sorted_data[o])
now_sorted_all_data[o] = past_sorted_data[o]
now_sorted_all_data[o+1] = past_sorted_data[o+1]
now_sorted_all_data[o+2] = 0.3
if now_sorted_all_data[o] > org_width or now_sorted_all_data[o] < 0 \
or now_sorted_all_data[o+1] > org_height or now_sorted_all_data[o+1] < 0 :
# 画像範囲外のデータが取れた場合、とりあえず0を入れ直す
now_sorted_data[o] = 0
now_sorted_data[o+1] = 0
now_sorted_data[o+2] = 0
now_sorted_all_data[o] = 0
now_sorted_all_data[o+1] = 0
now_sorted_all_data[o+2] = 0
# 深度が0の場合、過去深度をコピーする
if _iidx % interval == 0:
now_sorted_one_depths = now_sorted_all_depths[pidx]
now_sorted_one_depths_z = now_sorted_all_depths_z[pidx]
past_sorted_depths = past_depths[pidx]
past_sorted_depths_z = past_depths_z[pidx]
for didx, d in enumerate(now_sorted_one_depths):
if d == 0:
logger.debug("depth copy iidx: %s(%s) pidx: %s, sidx: %s, p: %s", _iidx, _display_idx, pidx, sidx, past_sorted_depths)
now_sorted_one_depths[didx] = past_sorted_depths[didx]
for didx, d in enumerate(now_sorted_one_depths_z):
if d == 0:
logger.debug("depth copy iidx: %s(%s) pidx: %s, sidx: %s, p: %s", _iidx, _display_idx, pidx, sidx, past_sorted_depths_z)
now_sorted_one_depths_z[didx] = past_sorted_depths_z[didx]
# if _iidx > 0 and _iidx + 1 < len(openpose_2d):
# # まだ次フレームがある場合、足異常チェック
# _next_file = os.path.join(json_path, openpose_filenames[_op_idx+1])
# if not os.path.isfile(_next_file): raise Exception("No file found!!, {0}".format(_next_file))
# try:
# next_data = json.load(open(_next_file))
# except Exception as e:
# logger.warning("JSON読み込み失敗のため、空データ読み込み, %s %s", _next_file, e)
# next_data = json.load(open("json/all_empty_keypoints.json"))
# for i in range(len(next_data["people"]), number_people_max):
# # 足りない分は空データを埋める
# next_data["people"].append(json.load(open("json/one_keypoints.json")))
# # 足異常チェック(リセットは行わない)
# is_result_oneside, is_result_crosses = calc_leg_irregular([0], [past_data[pidx]], [now_sorted_datas[pidx]], next_data["people"], 1, False)
# logger.debug("足異常再チェック %s, %s, 片寄せ: %s, 交差: %s", _iidx, pidx, is_result_oneside, is_result_crosses)
# if True in is_result_oneside or True in is_result_crosses:
# if True in is_result_oneside:
# # 片寄せの可能性がある場合、前回データをコピー
# file_logger.warning("※※{0:05d}F目 {2}番目の人物、片寄せ可能性あり".format( _iidx, _display_idx, pidx))
# if True in is_result_crosses:
# # 交差の可能性がある場合、前回データをコピー
# file_logger.warning("※※{0:05d}F目 {2}番目の人物、交差可能性あり".format( _iidx, _display_idx, pidx))
# for _lval in [8,9,10,11,12,13]:
# # logger.info("足異常:過去データコピー iidx: %s(%s) pidx: %s, sidx: %s, nn: %s, pn: %s, now: %s, past: %s", _iidx, _display_idx, pidx, sidx, org_sidx_data[1*3], past_pidx_data[1*3], org_sidx_data, past_pidx_data)
# # 信頼度は半分
# conf = past_pidx_data[_lval*3+2]/2
# # 出力用データ
# now_sorted_data[_lval*3] = past_pidx_data[_lval*3]
# now_sorted_data[_lval*3+1] = past_pidx_data[_lval*3+1]
# now_sorted_data[_lval*3+2] = conf if 0 < conf < 0.3 else 0.3
# # 過去引継データ
# now_sorted_all_datas[_lval*3] = past_pidx_data[_lval*3]
# now_sorted_all_datas[_lval*3+1] = past_pidx_data[_lval*3+1]
# now_sorted_all_datas[_lval*3+2] = conf if 0 < conf < 0.3 else 0.3
# 首の位置が一番よく取れてるので、首の位置を出力する
display_nose_pos = {}
for pidx, sidx in enumerate(sorted_idxs[_iidx]):
# データがある場合、そのデータ
display_nose_pos[sidx] = [now_data[pidx]["people"][0]["pose_keypoints_2d"][1*3], now_data[pidx]["people"][0]["pose_keypoints_2d"][1*3+1]]
# インデックス対応分のディレクトリ作成
idx_path = '{0}/{1}_{3}_idx{2:02d}/json/{4}'.format(os.path.dirname(json_path), os.path.basename(json_path), sidx+1, now_str, file_name)
os.makedirs(os.path.dirname(idx_path), exist_ok=True)
# 出力
# json.dump(data, open(idx_path,'w'), indent=4)
json.dump(now_data[pidx], open(idx_path,'w'), indent=4)
if _iidx % interval == 0:
# 深度データ
depth_idx_path = '{0}/{1}_{3}_idx{2:02d}/depth.txt'.format(os.path.dirname(json_path), os.path.basename(json_path), pidx+1, now_str)
# 追記モードで開く
depthf = open(depth_idx_path, 'a')
# 深度データを文字列化する
# logger.debug("pred_multi_ary[_idx]: %s", pred_multi_ary[_idx])
# logger.debug("pred_multi_ary[_idx][sidx]: %s", pred_multi_ary[_idx][sidx])
# logger.info("all_now_depths pidx: %s, :%s", pidx, all_now_depths[pidx])
pred_str_ary = [ str(x) for x in now_sorted_all_depths[pidx] ]
# 一行分を追記
depthf.write("{0}, {1}\n".format(_display_idx, ','.join(pred_str_ary)))
depthf.close()
# ------------------
# 深度データ(センターZ)
depthz_idx_path = '{0}/{1}_{3}_idx{2:02d}/depth_z.txt'.format(os.path.dirname(json_path), os.path.basename(json_path), pidx+1, now_str)
# 追記モードで開く
depthzf = open(depthz_idx_path, 'a')
# 深度データを文字列化する
# logger.debug("pred_multi_ary[_idx]: %s", pred_multi_ary[_idx])
# logger.debug("pred_multi_ary[_idx][sidx]: %s", pred_multi_ary[_idx][sidx])
# logger.info("all_now_depths pidx: %s, :%s", pidx, all_now_depths[pidx])
pred_z_str_ary = [ str(x) for x in now_sorted_all_depths_z[pidx] ]
# 一行分を追記
depthzf.write("{0}, {1}\n".format(_display_idx, ','.join(pred_z_str_ary)))
depthzf.close()
# 深度画像保存 -----------------------
if _iidx % interval == 0 and level[verbose] <= logging.INFO and len(pred_multi_frame_ary[_iidx]) > 0:
# Plot result
plt.cla()
plt.clf()
ii = plt.imshow(pred_multi_frame_ary[_iidx], interpolation='nearest')
plt.colorbar(ii)
# 散布図のようにして、出力に使ったポイントを明示
DEPTH_COLOR = ["#33FF33", "#3333FF", "#FFFFFF", "#FFFF33", "#FF33FF", "#33FFFF", "#00FF00", "#0000FF", "#666666", "#FFFF00", "#FF00FF", "#00FFFF"]
for pidx, sidx in enumerate(sorted_idxs[_iidx]):
for pred_joint in now_sorted_all_depths_xy[pidx]:
plt.scatter(pred_joint[0], pred_joint[1], s=5, c=DEPTH_COLOR[pidx])
plotName = "{0}/depth_{1:012d}.png".format(subdir, cnt)
plt.savefig(plotName)
logger.debug("Save: {0}".format(plotName))
png_lib.append(imageio.imread(plotName))
# # アニメーションGIF用に区間分保持
for mm in range(interval - 1):
png_lib.append(imageio.imread(plotName))
plt.close()
file_logger.warning("**{0:05d}F目の出力順番: [{0}:{2}], 位置: {3}".format(_iidx, _display_idx, ','.join(map(str, sorted_idxs[_iidx])), sorted(display_nose_pos.items()) ))
# 今回全データを返す
return all_now_data, all_now_depths, all_now_depths_z
# 0F目を左から順番に並べた人物INDEXを取得する
def sort_first_idxs(now_datas):
most_common_idxs = []
th = 0.3
# 最終的な左からのINDEX
result_nearest_idxs = [-1 for x in range(len(now_datas))]
# 比較対象INDEX(最初は0(左端)を起点とする)
target_x = [ 0 for x in range(int(len(now_datas[0]["pose_keypoints_2d"]))) ]
# 人数分チェック
for _idx in range(len(now_datas)):
now_nearest_idxs = []
# 関節位置Xでチェック
for o in range(0,len(now_datas[0]["pose_keypoints_2d"]),3):
is_target = True
x_datas = []
for _pnidx in range(len(now_datas)):
if _pnidx not in result_nearest_idxs:
# 人物のうち、まだ左から並べられていない人物だけチェック対象とする
x_data = now_datas[_pnidx]["pose_keypoints_2d"][o]
x_conf = now_datas[_pnidx]["pose_keypoints_2d"][o+2]
if x_conf > th and is_target:
# 信頼度が一定以上あって、これまでも追加されている場合、追加
x_datas.append(x_data)
else:
# 一度でも信頼度が満たない場合、チェック対象外
is_target = False
else:
# 既に並べられている人物の場合、比較対象にならない値を設定する
x_datas.append(sys.maxsize)
# logger.info("sort_first_idxs: _idx: %s, x_datas: %s, is_target: %s", _idx, x_datas, is_target)
if is_target:
# 最終的に対象のままである場合、ひとつ前の人物に近い方のINDEXを取得する
now_nearest_idxs.append(get_nearest_idx(x_datas, target_x[o]))
# logger.info("sort_first_idxs: _idx: %s, now_nearest_idxs: %s", _idx, now_nearest_idxs)
if len(now_nearest_idxs) > 0:
# チェック対象件数がある場合、最頻出INDEXをチェックする
most_common_idxs = Counter(now_nearest_idxs).most_common()
logger.debug("sort_first_idxs: _idx: %s, most_common_idxs: %s", _idx, most_common_idxs)
# 最頻出INDEX
result_nearest_idxs[_idx] = most_common_idxs[0][0]
# 次の比較元として、再頻出INDEXの人物を対象とする
target_x = now_datas[most_common_idxs[0][0]]["pose_keypoints_2d"]
logger.debug("sort_first_idxs: result_nearest_idxs: %s", result_nearest_idxs)
if -1 in result_nearest_idxs:
# 不採用になって判定できなかったデータがある場合
for _nidx, _nval in enumerate(result_nearest_idxs):
if _nval == -1:
# 該当値が-1(判定不可)の場合
for _cidx in range(len(now_datas)):
logger.debug("_nidx: %s, _nval: %s, _cidx: %s, _cidx not in nearest_idxs: %s", _nidx, _nval, _cidx, _cidx not in result_nearest_idxs)
# INDEXを頭から順に見ていく(正0, 正1 ... 正n, 逆0, 逆1 ... 逆n)
if _cidx not in result_nearest_idxs:
# 該当INDEXがリストに無い場合、設定
result_nearest_idxs[_nidx] = _cidx
break
return result_nearest_idxs
# 前回のXYから片足寄せであるか判断する
def calc_leg_oneside(past_sorted_idxs, past_data, now_data, is_oneside_reset=False):
# ひざと足首のペア
LEG_IDXS = [[9,12],[10,13]]
# 過去のX位置データ
is_past_oneside = False
for _pidx, _idx in enumerate(past_sorted_idxs):
past_xyc = past_data[_idx]["pose_keypoints_2d"]
for _lidx, _lvals in enumerate(LEG_IDXS):
logger.debug("past _idx: %s, _lidx: %s, %sx: %s, %sx: %s, %sy: %s, %sy:%s", _idx, _lidx, _lvals[0], past_xyc[_lvals[0]*3], _lvals[1], past_xyc[_lvals[1]*3], _lvals[0], past_xyc[_lvals[0]*3+1], _lvals[1], past_xyc[_lvals[1]*3+1])
if past_xyc[_lvals[0]*3] > 0 and past_xyc[_lvals[1]*3] > 0 and past_xyc[_lvals[0]*3+1] > 0 and past_xyc[_lvals[1]*3+1] > 0 \
and abs(past_xyc[_lvals[0]*3] - past_xyc[_lvals[1]*3]) < 10 and abs(past_xyc[_lvals[0]*3+1] - past_xyc[_lvals[1]*3+1]) < 10:
logger.debug("過去片寄せ: %s(%s), (%s,%s), (%s,%s)", _pidx, _lidx, past_xyc[_lvals[0]*3], past_xyc[_lvals[1]*3], past_xyc[_lvals[0]*3+1], past_xyc[_lvals[1]*3+1] )
# 誰かの足が片寄せっぽいならば、FLG=ON
is_past_oneside = True
is_leg_onesides = [ False for x in range(len(now_data)) ]
# 今回のX位置データ
for _idx in range(len(now_data)):
now_xyc = now_data[_idx]["pose_keypoints_2d"]
is_now_oneside_cnt = 0
for _lidx, _lvals in enumerate(LEG_IDXS):
logger.debug("now _idx: %s, _lidx: %s, %sx: %s, %sx: %s, %sy: %s, %sy:%s", _idx, _lidx, _lvals[0], now_xyc[_lvals[0]*3], _lvals[1], now_xyc[_lvals[1]*3], _lvals[0], now_xyc[_lvals[0]*3+1], _lvals[1], now_xyc[_lvals[1]*3+1])
if now_xyc[_lvals[0]*3] > 0 and now_xyc[_lvals[1]*3] > 0 and now_xyc[_lvals[0]*3+1] > 0 and now_xyc[_lvals[1]*3+1] > 0 \
and abs(now_xyc[_lvals[0]*3] - now_xyc[_lvals[1]*3]) < 10 and abs(now_xyc[_lvals[0]*3+1] - now_xyc[_lvals[1]*3+1]) < 10:
# 両ひざ、両足首のX位置、Y位置がほぼ同じである場合
logger.debug("現在片寄せ: %s(%s), (%s,%s), (%s,%s)", _idx, _lidx, now_xyc[_lvals[0]*3], now_xyc[_lvals[1]*3], now_xyc[_lvals[0]*3+1], now_xyc[_lvals[1]*3+1] )
is_now_oneside_cnt += 1
if is_now_oneside_cnt == len(LEG_IDXS) and is_past_oneside == False:
# フラグを立てる
is_leg_onesides[_idx] = True
for _lidx, _lval in enumerate([8,9,10,11,12,13]):
# リセットFLG=ONの場合、足の位置を一旦全部クリア
if is_oneside_reset:
now_xyc[_lval*3] = 0
now_xyc[_lval*3+1] = 0
now_xyc[_lval*3+2] = 0
return is_leg_onesides
# 前回のXYから足関節が異常であるか判断する
def calc_leg_irregular(past_sorted_idxs, past_data, now_data, next_data, people_size, is_reset=False):
now_sotred_data = [ [] for x in range(people_size) ]
for _idx in range(people_size):
# 過去の人物に近い現在INDEXを取得(上半身のみで判定)
most_common_idxs = calc_upper_most_common_idxs(people_size, past_data, now_data[_idx])
for mci in range(len(most_common_idxs)):
now_idx = most_common_idxs[mci][0]
# logger.debug("mci: %s, now_idx: %s", mci, now_idx)
# logger.debug("now_sotred_data[now_idx]: %s", now_sotred_data[now_idx])
# logger.debug("len(now_sotred_data[now_idx]): %s", len(now_sotred_data[now_idx]))
if len(now_sotred_data[now_idx]) == 0:
# まだ未設定の場合、ソート済みデータリストの該当INDEX箇所に設定
now_sotred_data[now_idx] = now_data[_idx]
break
# 現在の人物分のデータを用意する
next_sotred_data = [ [] for x in range(people_size) ]
for _idx in range(people_size):
# 現在の人物に近い未来INDEXを取得(上半身のみで判定)
most_common_idxs = calc_upper_most_common_idxs(people_size, now_sotred_data, next_data[_idx])
logger.debug("next most_common_idxs: %s, next_data[_idx]: %s", most_common_idxs, next_data[_idx])
for mci in range(len(most_common_idxs)):
next_idx = most_common_idxs[mci][0]
# logger.debug("mci: %s, next_idx: %s", mci, next_idx)
# logger.debug("next_sotred_data[next_idx]: %s", next_sotred_data[next_idx])
# logger.debug("len(next_sotred_data[next_idx]): %s", len(next_sotred_data[next_idx]))
if len(next_sotred_data[next_idx]) == 0:
# まだ未設定の場合、ソート済みデータリストの該当INDEX箇所に設定
next_sotred_data[next_idx] = next_data[_idx]
break
# logger.debug("past_data: %s", past_data)
# logger.debug("now_data: %s", now_data)
# logger.debug("now_sotred_data: %s", now_sotred_data)
# logger.debug("next_data: %s", next_data)
# logger.debug("next_sotred_data: %s", next_sotred_data)
# ひざと足首のペア
LEG_IDXS = [[9,12],[10,13]]
is_leg_crosses = [ False for x in range(people_size) ]
is_leg_onesides = [ False for x in range(len(now_data)) ]
for _idx, (past_d, now_d, next_d) in enumerate(zip(past_data, now_sotred_data, next_sotred_data)):
# logger.debug("past_d: %s", past_d)
# logger.debug("now_d: %s", now_d)
# logger.debug("next_d: %s", next_d)
past_xyc = past_d["pose_keypoints_2d"]
now_xyc = now_d["pose_keypoints_2d"]
next_xyc = next_d["pose_keypoints_2d"]
is_now_cross_cnt = 0
is_now_oneside_cnt = 0
for _lidx, _lvals in enumerate(LEG_IDXS):
_lrightx = _lvals[0]*3
_lleftx = _lvals[1]*3
logger.debug("past _idx: %s, _lidx: %s, %sx: %s, %sx: %s, %sy: %s, %sy:%s", _idx, _lidx, _lvals[0], past_xyc[_lrightx], _lvals[1], past_xyc[_lleftx], _lvals[0], past_xyc[_lrightx+1], _lvals[1], past_xyc[_lleftx+1])
logger.debug("now _idx: %s, _lidx: %s, %sx: %s, %sx: %s, %sy: %s, %sy:%s", _idx, _lidx, _lvals[0], now_xyc[_lrightx], _lvals[1], now_xyc[_lleftx], _lvals[0], now_xyc[_lrightx+1], _lvals[1], now_xyc[_lleftx+1])
logger.debug("next _idx: %s, _lidx: %s, %sx: %s, %sx: %s, %sy: %s, %sy:%s", _idx, _lidx, _lvals[0], next_xyc[_lrightx], _lvals[1], next_xyc[_lleftx], _lvals[0], next_xyc[_lrightx+1], _lvals[1], next_xyc[_lleftx+1])
# logger.debug("abs(past_xyc[_lrightx] - now_xyc[_lrightx]): %s, abs(past_xyc[_lrightx] - now_xyc[_lleftx]: %s, :%s", abs(past_xyc[_lrightx] - now_xyc[_lrightx]), abs(past_xyc[_lrightx] - now_xyc[_lleftx]), abs(past_xyc[_lrightx] - now_xyc[_lrightx]) > abs(past_xyc[_lrightx] - now_xyc[_lleftx]))
# logger.debug("abs(past_xyc[_lleftx] - now_xyc[_lleftx]): %s, abs(past_xyc[_lleftx] - now_xyc[_lrightx]): %s, :%s", abs(past_xyc[_lleftx] - now_xyc[_lleftx]), abs(past_xyc[_lleftx] - now_xyc[_lrightx]), abs(past_xyc[_lrightx] - next_xyc[_lrightx]) < abs(past_xyc[_lrightx] - next_xyc[_lleftx]))
# logger.debug("abs(past_xyc[_lrightx] - next_xyc[_lrightx]): %s, abs(past_xyc[_lrightx] - next_xyc[_lleftx]): %s, :%s", abs(past_xyc[_lrightx] - next_xyc[_lrightx]), abs(past_xyc[_lrightx] - next_xyc[_lleftx]), abs(past_xyc[_lleftx] - now_xyc[_lleftx]) > abs(past_xyc[_lleftx] - now_xyc[_lrightx]))
# logger.debug("abs(past_xyc[_lleftx] - next_xyc[_lleftx]): %s, abs(past_xyc[_lleftx] - next_xyc[_lrightx]): %s, :%s", abs(past_xyc[_lleftx] - next_xyc[_lleftx]), abs(past_xyc[_lleftx] - next_xyc[_lrightx]), abs(past_xyc[_lleftx] - next_xyc[_lleftx]) < abs(past_xyc[_lleftx] - next_xyc[_lrightx]))
if now_xyc[_lrightx] > 0 and now_xyc[_lleftx] > 0 and past_xyc[_lrightx] > 0 and past_xyc[_lleftx] > 0 and next_xyc[_lrightx] > 0 and next_xyc[_lleftx] > 0 :
if abs(past_xyc[_lrightx] - now_xyc[_lrightx]) > abs(past_xyc[_lrightx] - now_xyc[_lleftx]) and \
abs(past_xyc[_lrightx] - next_xyc[_lrightx]) < abs(past_xyc[_lrightx] - next_xyc[_lleftx]) and \
abs(past_xyc[_lleftx] - now_xyc[_lleftx]) > abs(past_xyc[_lleftx] - now_xyc[_lrightx]) and \
abs(past_xyc[_lleftx] - next_xyc[_lleftx]) < abs(past_xyc[_lleftx] - next_xyc[_lrightx]) :
# 過去と現在で、反対方向の足の位置の方が近く、かつ過去と未来で、同じ方向の足の位置が近い場合、現在のみ交差しているとみなす
logger.info("!!足データ交差あり: %s(%s), nowx:(%s,%s), pastx:(%s,%s), nextx:(%s,%s)", _idx, _lidx, now_xyc[_lrightx], now_xyc[_lleftx], past_xyc[_lrightx], past_xyc[_lleftx], next_xyc[_lrightx], next_xyc[_lleftx] )
is_now_cross_cnt += 1
else:
logger.debug("××足データ交差なし: %s(%s), nowx:(%s,%s), pastx:(%s,%s), nextx:(%s,%s)", _idx, _lidx, now_xyc[_lrightx], now_xyc[_lleftx], past_xyc[_lrightx], past_xyc[_lleftx], next_xyc[_lrightx], next_xyc[_lleftx] )
if abs(now_xyc[_lrightx] - now_xyc[_lleftx]) < 10 and abs(now_xyc[_lrightx+1] - now_xyc[_lleftx+1]) < 10 \
and abs(past_xyc[_lrightx] - past_xyc[_lleftx]) > 10 and abs(past_xyc[_lrightx+1] - past_xyc[_lleftx+1]) > 10:
# 両ひざ、両足首のX位置、Y位置がほぼ同じである場合
logger.info("!!足データ片寄せあり: %s(%s), nowx:(%s,%s), pastx:(%s,%s), nextx:(%s,%s)", _idx, _lidx, now_xyc[_lrightx], now_xyc[_lleftx], past_xyc[_lrightx], past_xyc[_lleftx], next_xyc[_lrightx], next_xyc[_lleftx] )
is_now_oneside_cnt += 1
else:
logger.debug("××足データ片寄せなし: %s(%s), nowx:(%s,%s), pastx:(%s,%s), nextx:(%s,%s)", _idx, _lidx, now_xyc[_lrightx], now_xyc[_lleftx], past_xyc[_lrightx], past_xyc[_lleftx], next_xyc[_lrightx], next_xyc[_lleftx] )
if is_now_cross_cnt > 0:
# フラグを立てる
is_leg_crosses[_idx] = True
for _lidx, _lval in enumerate([8,9,10,11,12,13]):
# リセットFLG=ONの場合、足の位置を一旦全部クリア
if is_reset:
now_xyc[_lval*3] = 0
now_xyc[_lval*3+1] = 0
now_xyc[_lval*3+2] = 0
if is_now_oneside_cnt == len(LEG_IDXS):
# フラグを立てる
is_leg_onesides[_idx] = True
for _lidx, _lval in enumerate([8,9,10,11,12,13]):
# リセットFLG=ONの場合、足の位置を一旦全部クリア
if is_reset:
now_xyc[_lval*3] = 0
now_xyc[_lval*3+1] = 0
now_xyc[_lval*3+2] = 0
return is_leg_onesides, is_leg_crosses
def calc_upper_most_common_idxs(people_size, past_datas, now_datas):
if people_size == 1:
return [(0, 1)]
# 過去データの上半身関節で、現在データと最も近いINDEXのリストを生成
now_nearest_idxs = []
most_common_idxs = []
# logger.debug("calc_upper_most_common_idxs now_datas: %s", now_datas)
# logger.debug("calc_upper_most_common_idxs past_datas: %s", past_datas)
# # 位置データ(全身+手足)
# for _idx in [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,2,3,4,5,6,7,8,9,10,11,12,13]:
# 位置データ(上半身X)
for _idx in [0,1,2,3,4,5,6,7,8,11,16,17]:
one_data = now_datas["pose_keypoints_2d"][_idx*3]
past_person = []
for p in past_datas:
# logger.debug("p: %s, c: %s", p, c)
if _idx < len(p["pose_keypoints_2d"]):
pdata = p["pose_keypoints_2d"][_idx*3]
past_person.append(pdata)
# 今回データがないものはチェック対象外
if len(past_person) > 0 and 0 not in past_person and one_data > 0:
logger.debug("upper: %s, one_data %s", past_person, one_data)
now_nearest_idxs.append(get_nearest_idx(past_person, one_data))
else:
# logger.debug("%s:: past_person対象外: %s, x_data %s", dimensional, past_person, x_data)
pass
if len(now_nearest_idxs) > 0:
most_common_idxs = Counter(now_nearest_idxs).most_common()
# 頻出で振り分けた後、件数が足りない場合(全部どれか1つに寄せられている場合)
if len(most_common_idxs) < people_size:
# logger.debug("頻出カウント不足: len(most_common_idxs): %s, len(conf_idxs): %s ", len(most_common_idxs), len(conf_idxs))
for c in range(people_size):
is_existed = False
for m, mci in enumerate(most_common_idxs):
if c == most_common_idxs[m][0]:
is_existed = True
break
if is_existed == False:
# 存在しないインデックスだった場合、追加
most_common_idxs.append( (c, 0) )
logger.debug("upper: most_common_idxs: %s, now_nearest_idxs: %s", most_common_idxs, now_nearest_idxs)
return most_common_idxs
# 左右反転させたINDEX
OPENPOSE_REVERSE_ALL = {
0: 0,
1: 1,
2: 5,
3: 6,
4: 7,
5: 2,
6: 3,
7: 4,
8: 11,
9: 12,
10: 13,
11: 8,
12: 9,
13: 10,
14: 15,
15: 14,
16: 17,
17: 16,
18: 18
}
# 上半身のみ左右反転させたINDEX
OPENPOSE_REVERSE_UPPER = {
0: 0,
1: 1,
2: 5,
3: 6,
4: 7,
5: 2,
6: 3,
7: 4,
8: 8,
9: 9,
10: 10,
11: 11,
12: 12,
13: 13,
14: 15,
15: 14,
16: 17,
17: 16,
18: 18
}
# 下半身のみ左右反転させたINDEX
OPENPOSE_REVERSE_LOWER = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 11,
9: 12,
10: 13,
11: 8,
12: 9,
13: 10,
14: 14,
15: 15,
16: 16,
17: 17,
18: 18
}
# 通常INDEX
OPENPOSE_NORMAL = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
11: 11,
12: 12,
13: 13,
14: 14,
15: 15,
16: 16,
17: 17,
18: 18
}
# Compute, from the previous frame's XY and depth, the nearest person index for each current person
def calc_nearest_idxs(past_sorted_idxs, past_data, now_data, past_pred_ary, now_pred_ary, max_conf_ary, max_conf_color_ary, past_frame, now_frame, limit_correction=0.0):
    """Match each person in the current frame to a person in the previous frame.

    Builds candidate joint lists for the previous and current frames in four
    variants (normal, fully mirrored, lower-body mirrored, upper-body
    mirrored), then — from the highest-confidence current person down —
    votes on the nearest past index via XY position, falling back to
    depth data and then colour data when the positional vote is weak.
    `max_conf_color_ary` is updated in place with colours sampled at the
    joints whose confidence exceeds the stored maximum.

    Args:
        past_sorted_idxs: person ordering decided for the previous frame.
        past_data / now_data: per-person dicts with "pose_keypoints_2d"
            (flat x, y, confidence triples).
        past_pred_ary / now_pred_ary: per-person depth estimates.
        max_conf_ary: per-person per-joint best confidence so far (or None).
        max_conf_color_ary: colours sampled at the best-confidence joints;
            mutated in place.
        past_frame / now_frame: images used for colour sampling.
        limit_correction: additive adjustment applied to all thresholds.

    Returns:
        (result_nearest_idxs, result_is_all_reverses,
         result_is_upper_reverses, result_is_lower_reverses)
    """
    # logger.debug("past_data: %s", past_data)
    # 前回の人物データ(前回のソート順に対応させる)
    # 左右反転もチェックするので、2倍。
    past_x_ary = [[] for x in range(len(past_data) * 2)]
    past_y_ary = [[] for x in range(len(past_data) * 2)]
    past_conf_ary = [[] for x in range(len(past_data) * 2)]
    # 下半身だけ回転しているパターン用
    past_lower_x_ary = [[] for x in range(len(past_data) * 2)]
    past_lower_y_ary = [[] for x in range(len(past_data) * 2)]
    past_lower_conf_ary = [[] for x in range(len(past_data) * 2)]
    # 上半身だけ回転しているパターン用
    past_upper_x_ary = [[] for x in range(len(past_data) * 2)]
    past_upper_y_ary = [[] for x in range(len(past_data) * 2)]
    past_upper_conf_ary = [[] for x in range(len(past_data) * 2)]
    # 過去画像の色情報リスト
    past_colors = [[] for x in range(len(past_data))]
    # 過去の首位置リスト
    past_necks = [0 for x in range(len(past_data))]
    for _idx, _idxv in enumerate(past_sorted_idxs):
        # logger.debug("past_data[_idx]: %s", past_data[_idx])
        past_xyc = past_data[_idx]["pose_keypoints_2d"]
        # logger.debug("_idx: %s, past_xyc: %s", _idx, past_xyc)
        # 正データ
        for o in range(0,len(past_xyc),3):
            # logger.debug("_idx: %s, o: %s", _idx, o)
            # 全身反転用
            past_x_ary[_idx].append(past_xyc[o])
            past_y_ary[_idx].append(past_xyc[o+1])
            past_conf_ary[_idx].append(past_xyc[o+2])
            # 下半身反転用
            past_lower_x_ary[_idx].append(past_xyc[o])
            past_lower_y_ary[_idx].append(past_xyc[o+1])
            past_lower_conf_ary[_idx].append(past_xyc[o+2])
            # 上半身反転用
            past_upper_x_ary[_idx].append(past_xyc[o])
            past_upper_y_ary[_idx].append(past_xyc[o+1])
            past_upper_conf_ary[_idx].append(past_xyc[o+2])
            # 色情報
            if 0 < int(past_xyc[o+1]) < past_frame.shape[0] and 0 < int(past_xyc[o]) < past_frame.shape[1]:
                past_colors[_idx].append(past_frame[int(past_xyc[o+1]),int(past_xyc[o])])
                # 最高信頼度が書き換えられたら、色値も書き換える
                if max_conf_ary is not None:
                    if max_conf_ary[_idx][int(o/3)] < past_xyc[o+2]:
                        max_conf_color_ary[_idx][int(o/3)] = past_frame[int(past_xyc[o+1]),int(past_xyc[o])]
            else:
                # logger.warn("_idx: %s, o: %s, int(past_xyc[o+1]): %s, int(past_xyc[o]): %s", _idx, o, int(past_xyc[o+1]), int(past_xyc[o]))
                past_colors[_idx].append(np.array([0,0,0]))
            # 首位置
            if int(o/3) == 1:
                past_necks[_idx] = past_xyc[o]
        # 反転データ
        for o in range(0,len(past_xyc),3):
            # logger.debug("_idx: %s, o: %s", _idx, o)
            past_x_ary[_idx + len(now_data)].append(past_xyc[OPENPOSE_REVERSE_ALL[int(o/3)]*3])
            past_y_ary[_idx + len(now_data)].append(past_xyc[OPENPOSE_REVERSE_ALL[int(o/3)]*3+1])
            # 反転は信頼度を下げる
            past_conf_ary[_idx + len(now_data)].append(past_xyc[OPENPOSE_REVERSE_ALL[int(o/3)]*3+2] - 0.1)
        # 下半身反転データ
        for o in range(0,len(past_xyc),3):
            # logger.debug("_idx: %s, o: %s", _idx, o)
            past_lower_x_ary[_idx + len(now_data)].append(past_xyc[OPENPOSE_REVERSE_LOWER[int(o/3)]*3])
            past_lower_y_ary[_idx + len(now_data)].append(past_xyc[OPENPOSE_REVERSE_LOWER[int(o/3)]*3+1])
            # 反転は信頼度を下げる
            past_lower_conf_ary[_idx + len(now_data)].append(past_xyc[OPENPOSE_REVERSE_LOWER[int(o/3)]*3+2] - 0.1)
        # 上半身反転データ
        for o in range(0,len(past_xyc),3):
            # logger.debug("_idx: %s, o: %s", _idx, o)
            past_upper_x_ary[_idx + len(now_data)].append(past_xyc[OPENPOSE_REVERSE_UPPER[int(o/3)]*3])
            past_upper_y_ary[_idx + len(now_data)].append(past_xyc[OPENPOSE_REVERSE_UPPER[int(o/3)]*3+1])
            # 反転は信頼度を下げる
            past_upper_conf_ary[_idx + len(now_data)].append(past_xyc[OPENPOSE_REVERSE_UPPER[int(o/3)]*3+2] - 0.1)
    logger.debug("max_conf_color_ary: %s", max_conf_color_ary)
    logger.debug("past_x: %s", np.array(past_x_ary)[:,1])
    # logger.debug("past_x_ary: %s", past_x_ary)
    # logger.debug("past_y_ary: %s", past_y_ary)
    # 今回の人物データ
    # 全身左右反転もチェックするので、2倍。
    now_x_ary = [[] for x in range(len(now_data) * 2)]
    now_y_ary = [[] for x in range(len(now_data) * 2)]
    now_conf_ary = [[] for x in range(len(now_data) * 2)]
    # 下半身だけ回転しているパターン用
    now_lower_x_ary = [[] for x in range(len(now_data) * 2)]
    now_lower_y_ary = [[] for x in range(len(now_data) * 2)]
    now_lower_conf_ary = [[] for x in range(len(now_data) * 2)]
    # 上半身だけ回転しているパターン用
    now_upper_x_ary = [[] for x in range(len(now_data) * 2)]
    now_upper_y_ary = [[] for x in range(len(now_data) * 2)]
    now_upper_conf_ary = [[] for x in range(len(now_data) * 2)]
    # 現在画像の色情報リスト
    now_colors = [[] for x in range(len(now_data))]
    # 現在の首X位置リスト
    now_necks = [0 for x in range(len(now_data))]
    for _idx in range(len(now_data)):
        now_xyc = now_data[_idx]["pose_keypoints_2d"]
        # logger.debug("_idx: %s, now_xyc: %s", _idx, now_xyc)
        # 正データ
        for o in range(0,len(now_xyc),3):
            # logger.debug("_idx: %s, o: %s", _idx, o)
            now_x_ary[_idx].append(now_xyc[o])
            now_y_ary[_idx].append(now_xyc[o+1])
            now_conf_ary[_idx].append(now_xyc[o+2])
            # 下半身反転用
            now_lower_x_ary[_idx].append(now_xyc[o])
            now_lower_y_ary[_idx].append(now_xyc[o+1])
            now_lower_conf_ary[_idx].append(now_xyc[o+2])
            # 上半身反転用
            now_upper_x_ary[_idx].append(now_xyc[o])
            now_upper_y_ary[_idx].append(now_xyc[o+1])
            now_upper_conf_ary[_idx].append(now_xyc[o+2])
            # 色情報
            if 0 <= int(now_xyc[o+1]) < now_frame.shape[0] and 0 <= int(now_xyc[o]) < now_frame.shape[1]:
                now_colors[_idx].append(now_frame[int(now_xyc[o+1]),int(now_xyc[o])])
            else:
                now_colors[_idx].append(np.array([0,0,0]))
            # 首位置
            if int(o/3) == 1:
                now_necks[_idx] = now_xyc[o]
        # 反転データ
        for o in range(0,len(now_xyc),3):
            # logger.debug("_idx: %s, rev_idx: %s, o: %s, len(now_x_ary): %s, len(now_xyc): %s, OPENPOSE_REVERSE_ALL[o]: %s", _idx, _idx + len(now_data), o, len(now_x_ary), len(now_xyc), OPENPOSE_REVERSE_ALL[int(o/3)])
            now_x_ary[_idx + len(now_data)].append(now_xyc[OPENPOSE_REVERSE_ALL[int(o/3)]*3])
            now_y_ary[_idx + len(now_data)].append(now_xyc[OPENPOSE_REVERSE_ALL[int(o/3)]*3+1])
            # 反転は信頼度をすこし下げる
            now_conf_ary[_idx + len(now_data)].append(now_xyc[OPENPOSE_REVERSE_ALL[int(o/3)]*3+2] - 0.1)
        # 下半身反転データ
        for o in range(0,len(now_xyc),3):
            # logger.debug("_idx: %s, o: %s", _idx, o)
            now_lower_x_ary[_idx + len(now_data)].append(now_xyc[OPENPOSE_REVERSE_LOWER[int(o/3)]*3])
            now_lower_y_ary[_idx + len(now_data)].append(now_xyc[OPENPOSE_REVERSE_LOWER[int(o/3)]*3+1])
            now_lower_conf_ary[_idx + len(now_data)].append(now_xyc[OPENPOSE_REVERSE_LOWER[int(o/3)]*3+2] - 0.1)
        # 上半身反転データ
        for o in range(0,len(now_xyc),3):
            # logger.debug("_idx: %s, o: %s", _idx, o)
            now_upper_x_ary[_idx + len(now_data)].append(now_xyc[OPENPOSE_REVERSE_UPPER[int(o/3)]*3])
            now_upper_y_ary[_idx + len(now_data)].append(now_xyc[OPENPOSE_REVERSE_UPPER[int(o/3)]*3+1])
            now_upper_conf_ary[_idx + len(now_data)].append(now_xyc[OPENPOSE_REVERSE_UPPER[int(o/3)]*3+2] - 0.1)
    logger.debug("now_x: %s", np.array(now_x_ary)[:,1])
    # 過去の深度データ
    past_pred = []
    for _pidx, _idx in enumerate(past_sorted_idxs):
        past_pred.append(past_pred_ary[_idx])
    # logger.debug("past_pred: %s,", past_pred)
    # logger.debug("org_past_conf: %s,", org_past_conf)
    # 信頼度の高い順に人物インデックスを割り当てていく
    avg_conf_ary = []
    for con in now_conf_ary:
        # 体幹ほど重みをつけて平均値を求める
        avg_conf_ary.append(np.average(np.array(con), weights=[0.5,2.0,0.5,0.3,0.1,0.5,0.3,0.1,0.8,0.3,0.1,0.8,0.3,0.1,0.1,0.1,0.1,0.1]))
    # 信頼度の低い順のインデックス番号
    conf_idxs = np.argsort(avg_conf_ary)
    logger.debug("avg_conf_ary: %s", avg_conf_ary)
    logger.debug("conf_idxs: %s", conf_idxs)
    # # 信頼度の高い順に人物インデックスを割り当てていく
    # normal_avg_conf_ary = []
    # for con in now_conf_ary[0:len(now_data)]:
    #     # 体幹ほど重みをつけて平均値を求める
    #     normal_avg_conf_ary.append(np.average(np.array(con), weights=[0.5,0.8,0.5,0.3,0.1,0.5,0.3,0.1,0.8,0.3,0.1,0.8,0.3,0.1,0.1,0.1,0.1,0.1]))
    # # 信頼度の低い順のインデックス番号
    # normal_conf_idxs = np.argsort(normal_avg_conf_ary)
    # conf_idxs = [-1 for x in range(len(now_conf_ary))]
    # for _ncidx in range(len(normal_conf_idxs)):
    #     # 正データ
    #     conf_idxs[_ncidx] = normal_conf_idxs[_ncidx]+len(now_data)
    #     # 反転データ
    #     conf_idxs[_ncidx+len(now_data)] = normal_conf_idxs[_ncidx]
    # logger.debug("normal_avg_conf_ary: %s", normal_avg_conf_ary)
    # logger.debug("normal_conf_idxs: %s", normal_conf_idxs)
    # logger.debug("conf_idxs: %s", conf_idxs)
    nearest_idxs = [-1 for x in range(len(conf_idxs))]
    is_upper_reverses = [False for x in range(len(conf_idxs))]
    is_lower_reverses = [False for x in range(len(conf_idxs))]
    most_common_idxs = []
    # logger.debug("past_pred_ary: %s", past_pred_ary)
    # logger.debug("now_pred_ary: %s", now_pred_ary)
    # XY正の判定用
    XY_LIMIT = 0.73 + limit_correction
    # XY上半身・下半身のみ反転用。やや厳しめ
    REV_LIMIT = 0.83 + limit_correction
    # 深度判定用。甘め
    D_LIMIT = 0.61 + limit_correction
    # 色度判定用。甘め
    C_LIMIT = 0.61 + limit_correction
    logger.debug("XY_LIMIT: %s, REV_LIMIT: %s, C_LIMIT: %s", XY_LIMIT, REV_LIMIT, C_LIMIT)
    # 複数人数のソートであるか
    is_multi_sort = len(past_sorted_idxs) > 1
    # 首位置がほとんど同じものは優先採用
    for ncidx in range(len(now_necks)):
        for pcidx in range(len(past_necks)):
            if abs(past_necks[pcidx] - now_necks[ncidx]) < 3:
                # 首位置がほとんど動いていない場合、優先採用
                logger.debug("首優先採用: ncidx: %s, now: %s, pcidx: %s, past: %s", ncidx, now_necks[ncidx], pcidx, past_necks[pcidx])
                nearest_idxs[ncidx] = pcidx
                break
    # 信頼度の低い順の逆順(信頼度降順)に人物を当てはめていく
    cidx = len(conf_idxs) - 1
    cidxcnt = 0
    while cidx >= 0 and cidxcnt < len(conf_idxs):
        now_conf_idx = conf_idxs[cidx]
        now_x = now_x_ary[now_conf_idx]
        now_y = now_y_ary[now_conf_idx]
        now_conf = now_conf_ary[now_conf_idx]
        logger.debug("cidx: %s, now_conf_idx: %s ----------------------------------------", cidx, now_conf_idx )
        logger.debug("now_x: %s", now_x)
        # 過去データの当該関節で、現在データと最も近いINDEXのリストを生成
        now_nearest_idxs, most_common_idxs, is_y = calc_most_common_idxs( is_multi_sort, conf_idxs, now_x, now_y, now_conf, past_x_ary, past_y_ary, past_lower_conf_ary, OPENPOSE_NORMAL, XY_LIMIT)
        sum_most_common_idxs, most_common_per, same_frame_per, top_frame, second_frame, is_top = \
            get_most_common_frames(now_nearest_idxs, most_common_idxs, conf_idxs)
        logger.debug("len(now_nearest_idxs): %s, all_size: %s, per: %s", len(now_nearest_idxs), (len(now_x) + ( 0 if is_y == False else len(now_y) )), len(now_nearest_idxs) / (len(now_x) + ( 0 if is_y == False else len(now_y) )))
        logger.info("now_nearest_idxs: %s, most_common_per: %s", now_nearest_idxs, most_common_per)
        if most_common_per < XY_LIMIT or len(now_nearest_idxs) / (len(now_x) + ( 0 if is_y == False else len(now_y) )) < 0.25:
            # 再頻出が指定未満、チェック対象件数が指定未満、のいずれかの場合、
            # 上半身と下半身で回転が違っている可能性あり。
            logger.info("下半身反転データチェック cidx: %s, now_conf_idx: %s", cidx, now_conf_idx)
            # 下半身だけ反転しているデータで比較する
            now_lower_x = now_lower_x_ary[now_conf_idx]
            now_lower_y = now_lower_y_ary[now_conf_idx]
            now_lower_conf = now_lower_conf_ary[now_conf_idx]
            lower_now_nearest_idxs, lower_most_common_idxs, is_y = calc_most_common_idxs(is_multi_sort, conf_idxs, now_lower_x, now_lower_y, now_lower_conf, past_lower_x_ary, past_lower_y_ary, past_conf_ary, OPENPOSE_REVERSE_LOWER, REV_LIMIT )
            sum_lower_most_common_idxs, lower_most_common_per, lower_same_frame_per, lower_top_frame, lower_second_frame, is_top_lower = \
                get_most_common_frames(lower_now_nearest_idxs, lower_most_common_idxs, conf_idxs)
            logger.info("lower_most_common_per: %s, most_common_per: %s", lower_most_common_per, most_common_per)
            if lower_most_common_per > REV_LIMIT and lower_most_common_per > most_common_per:
                # # 下半身反転データも同じINDEXで、より精度が高い場合、採用
                if (now_x[2] == 0 or now_x[3] == 0 or now_x[5] == 0 or now_x[6] == 0):
                    # 上半身がない場合、全身反転とする
                    now_nearest_idxs = []
                    for lnni in lower_now_nearest_idxs:
                        now_nearest_idxs.append(lnni + len(now_data))
                    most_common_idxs = Counter(now_nearest_idxs).most_common()
                    for c in range(len(conf_idxs)):
                        is_existed = False
                        for m, mci in enumerate(most_common_idxs):
                            if c == most_common_idxs[m][0]:
                                is_existed = True
                                break
                        if is_existed == False:
                            # 存在しないインデックスだった場合、追加
                            most_common_idxs.append( (c, 0) )
                    logger.info("*下半身→全身反転データ採用: now_nearest_idxs: %s, most_common_idxs: %s", now_nearest_idxs, most_common_idxs)
                else:
                    now_nearest_idxs = lower_now_nearest_idxs
                    most_common_idxs = lower_most_common_idxs
                    is_lower_reverses[now_conf_idx] = True
                    logger.info("*下半身反転データ採用: lower_now_nearest_idxs: %s, lower_most_common_idxs: %s, is_lower_reverses: %s", lower_now_nearest_idxs, lower_most_common_idxs, is_lower_reverses)
            else:
                # 信頼度が最後のものはチェックしない
                # 精度が高くない場合、上半身反転データチェック
                logger.info("上半身反転データチェック cidx: %s, now_conf_idx: %s", cidx, now_conf_idx)
                # 上半身だけ反転しているデータで比較する
                now_upper_x = now_upper_x_ary[now_conf_idx]
                now_upper_y = now_upper_y_ary[now_conf_idx]
                now_upper_conf = now_upper_conf_ary[now_conf_idx]
                upper_now_nearest_idxs, upper_most_common_idxs, is_y = calc_most_common_idxs(is_multi_sort, conf_idxs, now_upper_x, now_upper_y, now_upper_conf, past_upper_x_ary, past_upper_y_ary, past_upper_conf_ary, OPENPOSE_REVERSE_UPPER, REV_LIMIT)
                sum_upper_most_common_idxs, upper_most_common_per, upper_same_frame_per, upper_top_frame, upper_second_frame, is_top_upper = \
                    get_most_common_frames(upper_now_nearest_idxs, upper_most_common_idxs, conf_idxs)
                logger.info("upper_most_common_per: %s, most_common_per: %s", upper_most_common_per, most_common_per)
                if upper_most_common_per > REV_LIMIT and upper_most_common_per > most_common_per:
                    # 上半身反転データも同じINDEXで、より精度が高い場合、採用
                    if (now_x[8] == 0 or now_x[9] == 0 or now_x[11] == 0 or now_x[12] == 0):
                        # 下半身がない場合、全身反転とする
                        now_nearest_idxs = []
                        for unni in upper_now_nearest_idxs:
                            now_nearest_idxs.append(unni + len(now_data))
                        most_common_idxs = Counter(now_nearest_idxs).most_common()
                        for c in range(len(conf_idxs)):
                            is_existed = False
                            for m, mci in enumerate(most_common_idxs):
                                if c == most_common_idxs[m][0]:
                                    is_existed = True
                                    break
                            if is_existed == False:
                                # 存在しないインデックスだった場合、追加
                                most_common_idxs.append( (c, 0) )
                        logger.info("*上半身→全身反転データ採用: now_nearest_idxs: %s, most_common_idxs: %s", now_nearest_idxs, most_common_idxs)
                    else:
                        now_nearest_idxs = upper_now_nearest_idxs
                        most_common_idxs = upper_most_common_idxs
                        is_upper_reverses[now_conf_idx] = True
                        logger.info("*上半身反転データ採用: upper_now_nearest_idxs: %s, upper_most_common_idxs: %s, is_upper_reverses: %s", upper_now_nearest_idxs, upper_most_common_idxs, is_upper_reverses)
                else:
                    # logger.debug("most_common_idxs: %s, lower_most_common_idxs: %s, upper_most_common_idxs: %s", most_common_idxs, lower_most_common_idxs, upper_most_common_idxs )
                    # # TOP1.2で上位を占めているか
                    # logger.info("再検査:: same_frame_per: %s, len(now_x): %s, top: %s, second: %s, is_top: %s", same_frame_per, int(len(conf_idxs)/2), top_frame, second_frame, is_top)
                    # if is_top:
                    #     logger.info("全身TOP2の最頻出同一枠のため全身採用: same_frame_per: %s, top: %s, second: %s", same_frame_per, most_common_idxs[1][0] % len(now_data), most_common_idxs[1][0] % len(now_data))
                    #     is_upper_reverses[now_conf_idx] = False
                    #     is_lower_reverses[now_conf_idx] = False
                    # else:
                    # 下半身反転も上半身反転もダメな場合、深度チェック
                    logger.info("深度データチェック cidx: %s, now_conf_idx: %s", cidx, now_conf_idx)
                    # 深度データは反転保持していないので、半分にする
                    now_depth = now_pred_ary[int(now_conf_idx % len(now_data))]
                    depth_now_nearest_idxs, depth_most_common_idxs = calc_depth_most_common_idxs(conf_idxs, now_depth, now_conf, past_pred, past_conf_ary, now_nearest_idxs)
                    sum_depth_most_common_idxs, depth_most_common_per, depth_same_frame_per, depth_top_frame, depth_second_frame, is_top_depth = \
                        get_most_common_frames(depth_now_nearest_idxs, depth_most_common_idxs, conf_idxs)
                    logger.info("depth_most_common_per: %s, most_common_per: %s", depth_most_common_per, most_common_per)
                    if depth_most_common_per > D_LIMIT and depth_most_common_per > most_common_per:
                        now_nearest_idxs = depth_now_nearest_idxs
                        most_common_idxs = depth_most_common_idxs
                        logger.info("*深度データ採用: depth_now_nearest_idxs: %s, depth_most_common_idxs: %s", depth_now_nearest_idxs, depth_most_common_idxs)
                    else:
                        # 下半身反転も上半身反転も深度推定ダメな場合、色チェック
                        logger.info("色データチェック cidx: %s, now_conf_idx: %s", cidx, now_conf_idx)
                        # 色データは反転保持していないので、半分にする
                        now_color = now_colors[int(now_conf_idx % len(now_data))]
                        color_now_nearest_idxs, color_most_common_idxs = calc_color_most_common_idxs(conf_idxs, now_color, now_conf, max_conf_color_ary, past_conf_ary, now_nearest_idxs)
                        sum_color_most_common_idxs = 0
                        for lmci_data in color_most_common_idxs:
                            sum_color_most_common_idxs += lmci_data[1]
                        sum_most_common_idxs = 0
                        for mci_data in most_common_idxs:
                            sum_most_common_idxs += mci_data[1]
                        color_most_common_per = 0 if sum_color_most_common_idxs == 0 else color_most_common_idxs[0][1] / sum_color_most_common_idxs
                        most_common_per = 0 if sum_most_common_idxs == 0 else most_common_idxs[0][1] / sum_most_common_idxs
                        logger.info("color_most_common_per: %s, most_common_per: %s", color_most_common_per, most_common_per)
                        # color_most_common_perの下限は甘め
                        if color_most_common_per > C_LIMIT and color_most_common_per > most_common_per:
                            now_nearest_idxs = color_now_nearest_idxs
                            most_common_idxs = color_most_common_idxs
                            is_upper_reverses[now_conf_idx] = False
                            is_lower_reverses[now_conf_idx] = False
                            logger.info("*色データ採用: color_now_nearest_idxs: %s, color_most_common_idxs: %s", color_now_nearest_idxs, color_most_common_idxs)
                        else:
                            # どのパターンも採用できなかった場合、採用なしで次にいく
                            logger.info("採用なし")
                            now_nearest_idxs = [0]
                            most_common_idxs = [(0,0)]
                        # # 深度データも駄目だったので、とりあえずこれまでの中でもっとも確率の高いのを採用する
                        # if most_common_idxs[0][0] in nearest_idxs and lower_most_common_per > most_common_per:
                        #     now_nearest_idxs = lower_now_nearest_idxs
                        #     most_common_idxs = lower_most_common_idxs
                        #     is_lower_reverses[now_conf_idx] = True
                        #     logger.info("*深度データ不採用→下半身反転データ採用: lower_now_nearest_idxs: %s, lower_most_common_idxs: %s, is_lower_reverses: %s", lower_now_nearest_idxs, lower_most_common_idxs, is_lower_reverses)
                        # elif most_common_idxs[0][0] in nearest_idxs and upper_most_common_per > most_common_per:
                        #     now_nearest_idxs = upper_now_nearest_idxs
                        #     most_common_idxs = upper_most_common_idxs
                        #     is_upper_reverses[now_conf_idx] = True
                        #     logger.info("*深度データ不採用→上半身反転データ採用: upper_now_nearest_idxs: %s, upper_most_common_idxs: %s, is_upper_reverses: %s", upper_now_nearest_idxs, upper_most_common_idxs, is_upper_reverses)
                        # else:
                        #     logger.info("*深度データ不採用→全身データ採用: upper_now_nearest_idxs: %s, upper_most_common_idxs: %s, is_upper_reverses: %s", upper_now_nearest_idxs, upper_most_common_idxs, is_upper_reverses)
        logger.debug("cidx: %s, most_common_idx: %s", cidx, most_common_idxs)
        is_passed = False
        # 最も多くヒットしたINDEXを処理対象とする
        for cmn_idx in range(len(most_common_idxs)):
            # 入れようとしているINDEXが、採用枠(前半)か不採用枠(後半)か
            if now_conf_idx < len(now_data):
                # 採用枠(前半)の場合
                check_ary = nearest_idxs[0: len(now_data)]
            else:
                # 不採用枠(後半)の場合
                check_ary = nearest_idxs[len(now_data): len(now_data)*2]
            logger.debug("nearest_idxs: %s, most_common_idxs[cmn_idx][0]: %s, check_ary: %s", nearest_idxs, most_common_idxs[cmn_idx][0], check_ary )
            is_idx_existed = False
            for ca in check_ary:
                logger.debug("ca: %s, ca / len(now): %s, most / len(now): %s", ca, ca % len(now_data), most_common_idxs[cmn_idx][0] % len(now_data))
                if ca >= 0 and ca % len(now_data) == most_common_idxs[cmn_idx][0] % len(now_data):
                    # 同じ枠に既に同じINDEXの候補が居る場合、TRUE
                    is_idx_existed = True
                    break
            if most_common_idxs[cmn_idx][0] in nearest_idxs or is_idx_existed:
                # 同じINDEXが既にリストにある場合
                # もしくは入れようとしているINDEXが反対枠の同じ並び順にいるか否か
                # logger.info("次点繰り上げ cmn_idx:%s, val: %s, nearest_idxs: %s", cmn_idx, most_common_idxs[cmn_idx][0], nearest_idxs)
                # continue
                logger.info("既出スキップ cmn_idx:%s, val: %s, nearest_idxs: %s", cmn_idx, most_common_idxs[cmn_idx][0], nearest_idxs)
                # 既出の場合、これ以上チェックできないので、次にいく
                cidx -= 1
                break
            elif most_common_idxs[cmn_idx][1] > 0:
                # 同じINDEXがリストにまだない場合
                logger.info("採用 cmn_idx:%s, val: %s, nearest_idxs: %s", cmn_idx, most_common_idxs[cmn_idx][0], nearest_idxs)
                # 採用の場合、cidx減算
                is_passed = True
                cidx -= 1
                break
            else:
                logger.info("再頻出ゼロ cmn_idx:%s, val: %s, nearest_idxs: %s", cmn_idx, most_common_idxs[cmn_idx][0], nearest_idxs)
                # 最頻出がない場合、これ以上チェックできないので、次にいく
                cidx -= 1
                break
        logger.info("結果: near: %s, cmn_idx: %s, val: %s, most_common_idxs: %s", now_conf_idx, cmn_idx, most_common_idxs[cmn_idx][0], most_common_idxs)
        if is_passed:
            # 信頼度の高いINDEXに該当する最多ヒットINDEXを設定
            nearest_idxs[now_conf_idx] = most_common_idxs[cmn_idx][0]
        # 現在のループ回数は必ず加算
        cidxcnt += 1
        logger.debug("now_conf_idx: %s, cidx: %s, cidxcnt: %s, nearest_idxs: %s ---------------------", now_conf_idx, cidx, cidxcnt, nearest_idxs)
    logger.debug("nearest_idxs: %s", nearest_idxs)
    if -1 in nearest_idxs:
        # 不採用になって判定できなかったデータがある場合
        for _nidx, _nval in enumerate(nearest_idxs):
            if _nval == -1:
                # 該当値が-1(判定不可)の場合
                for _cidx in range(len(conf_idxs)):
                    logger.debug("_nidx: %s, _nval: %s, _cidx: %s, _cidx not in nearest_idxs: %s", _nidx, _nval, _cidx, _cidx not in nearest_idxs)
                    # INDEXを頭から順に見ていく(正0, 正1 ... 正n, 逆0, 逆1 ... 逆n)
                    if _cidx not in nearest_idxs:
                        # 入れようとしているINDEXが、採用枠(前半)か不採用枠(後半)か
                        if now_conf_idx < len(now_data):
                            # 採用枠(前半)の場合
                            check_ary = nearest_idxs[len(now_data): len(now_data)*2]
                        else:
                            # 不採用枠(後半)の場合
                            check_ary = nearest_idxs[0: len(now_data)]
                        logger.debug("nearest_idxs: %s, _cidx: %s, check_ary: %s", nearest_idxs, _cidx, check_ary )
                        is_idx_existed = False
                        for ca in check_ary:
                            logger.debug("ca: %s, ca / len(now): %s, _cidx / len(now): %s", ca, ca % len(now_data), _cidx % len(now_data))
                            if ca >= 0 and ca % len(now_data) == _cidx % len(now_data):
                                # 同じ枠に既に同じINDEXの候補が居る場合、TRUE
                                is_idx_existed = True
                                break
                        if is_idx_existed == False:
                            # 該当INDEXがリストに無い場合、設定
                            nearest_idxs[_nidx] = _cidx
                            break
    logger.debug("is_upper_reverses: %s, is_lower_reverses: %s", is_upper_reverses, is_lower_reverses)
    logger.debug("past_sorted_idxs: %s nearest_idxs(retake): %s", past_sorted_idxs, nearest_idxs)
    # 最終的に人数分だけ残したINDEXリスト
    result_nearest_idxs = [-1 for x in range(len(now_data))]
    result_is_all_reverses = [False for x in range(len(now_data))]
    result_is_upper_reverses = [False for x in range(len(now_data))]
    result_is_lower_reverses = [False for x in range(len(now_data))]
    for _ridx in range(len(now_data)):
        # # 反転の可能性があるので、人数で割った余りを設定する
        sidx = int(nearest_idxs[_ridx] % len(now_data))
        if _ridx < len(now_data):
            # 自分より前に、自分と同じINDEXが居る場合、次のINDEXを引っ張り出す
            s = 1
            # NOTE(review): nearest_idxs[_ridx+s] can run past the end of
            # nearest_idxs if every later entry collides — confirm bounds.
            while sidx in result_nearest_idxs[0:_ridx+1]:
                newsidx = int(nearest_idxs[_ridx+s] % len(now_data))
                logger.info("INDEX重複のため、次点繰り上げ: %s, sidx: %s, newsidx: %s", _ridx, sidx, newsidx)
                sidx = newsidx
                s += 1
        result_nearest_idxs[_ridx] = sidx
        result_is_upper_reverses[sidx] = is_upper_reverses[_ridx]
        result_is_lower_reverses[sidx] = is_lower_reverses[_ridx]
        idx_target = OPENPOSE_NORMAL
        if result_is_upper_reverses[sidx] and result_is_lower_reverses[sidx]:
            # 全身反転
            idx_target = OPENPOSE_REVERSE_ALL
        elif result_is_upper_reverses[sidx] and result_is_lower_reverses[sidx] == False:
            # 反転している場合、反転INDEX(上半身)
            idx_target = OPENPOSE_REVERSE_UPPER
        elif result_is_upper_reverses[sidx] == False and result_is_lower_reverses[sidx]:
            # 反転している場合、反転INDEX(下半身)
            idx_target = OPENPOSE_REVERSE_LOWER
        # 上下の左右が合っているか
        # NOTE(review): now_x here is whatever the LAST iteration of the
        # cidx loop above left behind — it does not necessarily belong to
        # sidx/_ridx. Confirm whether now_x_ary[...] was intended instead.
        if is_match_left_right(now_x, idx_target) == False:
            # 上下の左右があってない場合、とりあえず反転クリア
            result_is_upper_reverses[sidx] = False
            result_is_lower_reverses[sidx] = False
        result_is_all_reverses[sidx] = True if nearest_idxs[_ridx] >= len(now_data) and is_upper_reverses[_ridx] == False and is_lower_reverses[_ridx] == False else False
    logger.info("result_nearest_idxs: %s, all: %s, upper: %s, lower: %s", result_nearest_idxs, result_is_all_reverses, result_is_upper_reverses, result_is_lower_reverses)
    return result_nearest_idxs, result_is_all_reverses, result_is_upper_reverses, result_is_lower_reverses
# Check that the upper body and the lower body agree on left/right direction
def is_match_left_right(now_x, idx_target):
    """Return False when the upper and lower body clearly face opposite ways.

    For each joint pair the flag is True when both joints are detected and
    the right-side joint sits to the left of the left-side joint (negative
    x difference). Returns False only for an explicit upper/lower mismatch
    (shoulder agrees with elbow, hip agrees with knee, but the two halves
    disagree); any other layout counts as a match.

    Args:
        now_x: per-joint x coordinates for one person (0 = not detected).
        idx_target: index map (normal / mirrored variant) selecting joints.

    Returns:
        bool — True when orientation is consistent or ambiguous,
        False on an explicit upper/lower mismatch.
    """
    shoulder = now_x[idx_target[2]] > 0 and now_x[idx_target[5]] > 0 and (now_x[idx_target[2]] - now_x[idx_target[5]]) < 0
    elbow = now_x[idx_target[3]] > 0 and now_x[idx_target[6]] > 0 and (now_x[idx_target[3]] - now_x[idx_target[6]]) < 0
    hip = now_x[idx_target[8]] > 0 and now_x[idx_target[11]] > 0 and (now_x[idx_target[8]] - now_x[idx_target[11]]) < 0
    knee = now_x[idx_target[9]] > 0 and now_x[idx_target[12]] > 0 and (now_x[idx_target[9]] - now_x[idx_target[12]]) < 0

    if shoulder == elbow == hip == knee:
        logger.debug("方向統一: shoulder: %s, elbow: %s, hip: %s, knee: %s, x: %s", shoulder, elbow, hip, knee, now_x)
        # BUGFIX: this branch previously fell through and implicitly
        # returned None; the consistent case is now an explicit True.
        return True
    if shoulder == elbow and shoulder != hip and hip == knee:
        logger.debug("上下で方向ずれあり: shoulder: %s, elbow: %s, hip: %s, knee: %s, x: %s", shoulder, elbow, hip, knee, now_x)
        return False
    logger.debug("上下バラバラ: shoulder: %s, elbow: %s, hip: %s, knee: %s, x: %s", shoulder, elbow, hip, knee, now_x)
    # Not an explicit mismatch, so tentatively treat as a match
    return True
# Compare past and current data and compute the most frequent nearest index
def calc_most_common_idxs(is_multi_sort, conf_idxs, now_x, now_y, now_confs, past_x_ary, past_y_ary, past_conf_ary, idx_target, limit_th):
    """Vote on which past person candidate the current person is closest to.

    Runs up to four one-dimensional votes and returns as soon as one is
    conclusive: X with a strict confidence threshold, X with no threshold,
    Y strict, then Y with no threshold as the final fallback.

    Args:
        is_multi_sort: True when more than one person is being sorted.
        conf_idxs: candidate index list (normal + mirrored, 2x person count).
        now_x / now_y / now_confs: current person's joint data.
        past_x_ary / past_y_ary / past_conf_ary: per-candidate past joint data.
        idx_target: index map (normal / mirrored) applied to past joints.
        limit_th: acceptance threshold for the winning vote share.

    Returns:
        (now_nearest_idxs, most_common_idxs, is_y) — the raw vote list,
        its frequency table, and whether the winning vote used Y data.
    """
    now_nearest_idxs = []
    most_common_idxs = []

    # X direction, strict threshold, excluding low-confidence past joints ---
    now_nearest_idxs, most_common_idxs = \
        calc_one_dimensional_most_common_idxs("x", is_multi_sort, conf_idxs, now_x, now_confs, past_x_ary, past_conf_ary, now_nearest_idxs, idx_target, 0.3)

    sum_most_common_idxs, most_common_per, same_frame_per, top_frame, second_frame, is_top = \
        get_most_common_frames(now_nearest_idxs, most_common_idxs, conf_idxs)

    if (most_common_per > limit_th or same_frame_per > limit_th) and len(now_nearest_idxs) >= len(now_x) * 0.2:
        # Enough joints voted and the winner clears the threshold
        return now_nearest_idxs, most_common_idxs, False

    # X direction, no threshold (includes low-confidence past joints) -------
    now_nearest_idxs, most_common_idxs = \
        calc_one_dimensional_most_common_idxs("x", is_multi_sort, conf_idxs, now_x, now_confs, past_x_ary, past_conf_ary, now_nearest_idxs, idx_target, 0.0)

    sum_most_common_idxs, most_common_per, same_frame_per, top_frame, second_frame, is_top = \
        get_most_common_frames(now_nearest_idxs, most_common_idxs, conf_idxs)

    if (most_common_per > limit_th or same_frame_per > limit_th) and len(now_nearest_idxs) >= len(now_x) * 0.2:
        return now_nearest_idxs, most_common_idxs, False

    # Y direction, strict threshold -----------------------------------------
    now_nearest_idxs, most_common_idxs = \
        calc_one_dimensional_most_common_idxs("y", is_multi_sort, conf_idxs, now_y, now_confs, past_y_ary, past_conf_ary, now_nearest_idxs, idx_target, 0.3)

    # BUGFIX: the stats were previously NOT recomputed after the Y pass, so
    # the check below compared against stale X-direction percentages.
    sum_most_common_idxs, most_common_per, same_frame_per, top_frame, second_frame, is_top = \
        get_most_common_frames(now_nearest_idxs, most_common_idxs, conf_idxs)

    if (most_common_per > limit_th or same_frame_per > limit_th) and len(now_nearest_idxs) >= len(now_y) * 0.2:
        return now_nearest_idxs, most_common_idxs, True

    # Y direction, no threshold: last resort --------------------------------
    now_nearest_idxs, most_common_idxs = \
        calc_one_dimensional_most_common_idxs("y", is_multi_sort, conf_idxs, now_y, now_confs, past_y_ary, past_conf_ary, now_nearest_idxs, idx_target, 0.0)

    return now_nearest_idxs, most_common_idxs, True
# Compute the most frequent nearest index along a single axis (x or y)
def calc_one_dimensional_most_common_idxs(dimensional, is_multi_sort, conf_idxs, now_datas, now_confs, past_datas, past_confs, now_nearest_idxs, idx_target, th):
    """Vote, joint by joint along one axis, for the nearest past candidate.

    For each target joint whose confidence exceeds *th* on both sides, the
    past candidate with the closest coordinate gets one vote. In multi-sort
    mode the joint set is trunk-centred and three extra votes are cast using
    the per-person average coordinate. The frequency table is padded with
    zero-count entries so that every candidate in ``conf_idxs`` appears.

    Args:
        dimensional: "x" or "y" — used only for log messages.
        is_multi_sort: True when more than one person is being sorted.
        conf_idxs: candidate index list (normal + mirrored).
        now_datas / now_confs: current person's coordinates and confidences.
        past_datas / past_confs: per-candidate past coordinates/confidences.
        now_nearest_idxs: ignored — reset to [] immediately (votes from a
            previous call are NOT carried over).
        idx_target: index map applied to past joints and confidences.
        th: minimum confidence for a joint to take part in the vote.

    Returns:
        (now_nearest_idxs, most_common_idxs)
    """
    logger.debug("calc_one_dimensional_most_common_idxs: %s, th=%s", dimensional, th)
    # この前の頻出は引き継がない
    now_nearest_idxs = []
    # 過去データの当該関節で、現在データと最も近いINDEXのリストを生成
    most_common_idxs = []
    # 判定対象は全身
    TARGET_IDX = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
    if is_multi_sort == True:
        # 複数人数トレースの場合、体幹中心にソートする
        TARGET_IDX = [1,2,3,5,6,8,9,10,11,12,13,1,1,1]
    # 位置データ(+体幹)
    for _idx in TARGET_IDX:
        one_data = now_datas[_idx]
        past_person = []
        zero_cnt = 0
        for p, c in zip(past_datas, past_confs):
            # logger.debug("p: %s, c: %s", p, c)
            if _idx < len(p):
                if c[idx_target[_idx]] > th:
                    # 信頼度が一定以上の場合、判定対象
                    past_person.append(p[idx_target[_idx]])
                else:
                    past_person.append(0)
                if past_person[-1] == 0:
                    zero_cnt += 1
        if len(past_person) > 0 and len(past_person) > zero_cnt and one_data > 0 and now_confs[_idx] > th:
            logger.debug("_idx: %s, %s: %s, one_data %s", _idx, dimensional, past_person, one_data)
            now_nearest_idxs.append(get_nearest_idx(past_person, one_data))
        else:
            logger.debug("×対象外 _idx: %s, %s: %s, one_data %s", _idx, dimensional, past_person, one_data)
            pass
    if len(now_nearest_idxs) > 0:
        most_common_idxs = Counter(now_nearest_idxs).most_common()
    if is_multi_sort == True:
        # 複数人数トレースの場合、全体の中心もチェックする
        past_persons_avg = []
        for p, c in zip(past_datas, past_confs):
            p_sum = 0
            p_cnt = 0
            for _idx in TARGET_IDX:
                if _idx < len(p):
                    if c[idx_target[_idx]] > th:
                        # 信頼度が一定以上の場合、判定対象
                        p_sum += p[idx_target[_idx]]
                        p_cnt += 1
            # 平均値を求める
            if p_cnt > 0:
                past_persons_avg.append(p_sum / p_cnt)
            else:
                past_persons_avg.append(0)
            # NOTE(review): zero_cnt is NOT reset here — it continues from
            # the last joint iteration of the loop above; confirm intent.
            if past_persons_avg[-1] == 0:
                zero_cnt += 1
        now_avg = 0
        n_sum = 0
        n_cnt = 0
        for _idx in TARGET_IDX:
            if now_confs[_idx] > th:
                # 信頼度が一定以上の場合、判定対象
                n_sum += now_datas[_idx]
                n_cnt += 1
        # 平均値を求める
        if n_cnt > 0:
            now_avg = n_sum / n_cnt
        # TOPの枠
        top_frame = -1 if len(most_common_idxs) <= 0 else most_common_idxs[0][0] % int(len(conf_idxs)/2)
        # 多めに求める
        for cnt in range(3):
            # NOTE(review): now_confs[_idx] below uses the leftover _idx
            # from the loop above (last TARGET_IDX entry) — confirm intent.
            if len(past_persons_avg) > 0 and len(past_persons_avg) > zero_cnt and now_avg > 0 and now_confs[_idx] > th:
                logger.debug("avg _idx: %s, %s: %s, one_data %s", _idx, dimensional, past_persons_avg, now_avg)
                avg_nearest_idx = get_nearest_idx(past_persons_avg, now_avg)
                # 現在の枠
                now_frame = avg_nearest_idx % int(len(conf_idxs)/2)
                if top_frame == now_frame:
                    # TOPの枠と、現在の枠が同じ場合、TOPの枠を設定する
                    now_nearest_idxs.append(most_common_idxs[0][0])
                else:
                    now_nearest_idxs.append(avg_nearest_idx)
            else:
                logger.debug("×avg対象外 _idx: %s, %s: %s, one_data %s", _idx, dimensional, past_persons_avg, now_avg)
                pass
    if len(now_nearest_idxs) > 0:
        most_common_idxs = Counter(now_nearest_idxs).most_common()
    # 頻出で振り分けた後、件数が足りない場合(全部どれか1つに寄せられている場合)
    if len(most_common_idxs) < len(conf_idxs):
        # logger.debug("頻出カウント不足: len(most_common_idxs): %s, len(conf_idxs): %s ", len(most_common_idxs), len(conf_idxs))
        for c in range(len(conf_idxs)):
            is_existed = False
            for m, mci in enumerate(most_common_idxs):
                if c == most_common_idxs[m][0]:
                    is_existed = True
                    break
            if is_existed == False:
                # 存在しないインデックスだった場合、追加
                most_common_idxs.append( (c, 0) )
    logger.debug("%s:: len(most_common_idxs): %s, len(conf_idxs): %s, len(now_nearest_idxs): %s, dimensional,len(now_datas): %s", dimensional, len(most_common_idxs), len(conf_idxs), len(now_nearest_idxs), len(now_datas))
    logger.debug("%s:: now_nearest_idxs: %s, most_common_idxs: %s", dimensional, now_nearest_idxs, most_common_idxs)
    return now_nearest_idxs, most_common_idxs
def get_most_common_frames(now_nearest_idxs, most_common_idxs, conf_idxs):
    """Summarize a vote frequency table into per-frame statistics.

    Candidate indices i and i + len(conf_idxs)/2 refer to the same person
    "frame" (normal vs mirrored variant), so counts landing in the same
    frame as the winner are pooled together.

    Args:
        now_nearest_idxs: raw vote list (unused here beyond logging context).
        most_common_idxs: Counter.most_common()-style (index, count) list,
            assumed non-empty.
        conf_idxs: candidate index list; half its length is the person count.

    Returns:
        (sum_most_common_idxs, most_common_per, same_frame_per,
         top_frame, second_frame, is_top); second_frame is -1 when there is
         no runner-up.
    """
    top_frame = most_common_idxs[0][0] % int(len(conf_idxs)/2)
    # Pool the counts of every candidate that maps to the winner's frame
    sum_most_common_idxs = 0
    same_frames_most_common_idxs = 0
    for smidx in range(len(most_common_idxs)):
        now_frame = most_common_idxs[smidx][0] % int(len(conf_idxs)/2)
        if top_frame == now_frame:
            same_frames_most_common_idxs += most_common_idxs[smidx][1]
        sum_most_common_idxs += most_common_idxs[smidx][1]
    logger.debug("sum_most_common_idxs: %s, same_frames_most_common_idxs: %s", sum_most_common_idxs, same_frames_most_common_idxs)
    # Share of the total vote that fell into the winner's frame
    same_frame_per = 0 if sum_most_common_idxs == 0 else same_frames_most_common_idxs / sum_most_common_idxs
    # Find the runner-up frame, resolving ties in favour of the winner's frame
    second_frame = -1  # BUGFIX: was unbound (NameError below) when most_common_idxs has a single entry
    smidx = 1
    while smidx < len(most_common_idxs):
        # logger.debug("smidx: %s, most_common_idxs[1][1]: %s, most_common_idxs[smidx][1]: %s", smidx, most_common_idxs[1][1], most_common_idxs[smidx][1])
        if most_common_idxs[1][1] == most_common_idxs[smidx][1]:
            # 2nd place ties with a lower-ranked candidate
            second_frame = most_common_idxs[1][0] % int(len(conf_idxs)/2)
            third_frame = most_common_idxs[smidx][0] % int(len(conf_idxs)/2)
            # Prefer the tied candidate that shares the winner's frame
            second_frame = third_frame if top_frame == third_frame else second_frame
            smidx += 1
        else:
            second_frame = most_common_idxs[1][0] % int(len(conf_idxs)/2)
            break
    most_common_per = 0 if sum_most_common_idxs == 0 else (most_common_idxs[0][1]) / sum_most_common_idxs
    logger.debug("top_frame: %s, second_frame: %s, sum_most_common_idxs: %s, most_common_per: %s", top_frame, second_frame, sum_most_common_idxs, most_common_per)
    # NOTE(review): this compares the raw COUNT against 0.7, so any winner
    # with at least one vote passes; the original comment suggests a 70%
    # share (most_common_per > 0.7) was intended — confirm before changing.
    is_top = most_common_idxs[0][1] > 0.7 or same_frame_per > 0.73
    logger.debug("same_frame_per: %s, len(now_datas): %s, top: %s, second: %s, is_top: %s", same_frame_per, int(len(conf_idxs)/2), top_frame, second_frame, is_top)
    return sum_most_common_idxs, most_common_per, same_frame_per, top_frame, second_frame, is_top
# Judge person identity from colour-difference data
def calc_color_most_common_idxs(conf_idxs, now_clr, now_conf, past_color_ary, past_conf_ary, now_nearest_idxs):
    """Vote on person identity by comparing sampled joint colours.

    For each of the 18 joints, the current colour sample is matched against
    each past candidate's stored colour and the nearest candidate gets one
    vote. Joints with no current colour (all-zero sample) are skipped.

    Args:
        conf_idxs: candidate index list (normal + mirrored).
        now_clr: per-joint colour samples for the current person
            (assumes numpy BGR triples — the `(c_data > 0).all()` test
            relies on elementwise comparison).
        now_conf: current person's confidences (unused here).
        past_color_ary: per-candidate stored joint colours.
        past_conf_ary: per-candidate confidences (unused here).
        now_nearest_idxs: ignored — reset to [] (positional votes are not
            carried over).

    Returns:
        (now_nearest_idxs, most_common_idxs) — the vote list and its
        frequency table, padded with zero-count entries so every candidate
        in conf_idxs appears.
    """
    # The positional (XY) vote is not carried over
    now_nearest_idxs = []
    most_common_idxs = []
    # Removed unused local `th = 0.1` (never referenced in this function)
    # Colour data (whole body)
    for c_idx in [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]:
        if c_idx < len(now_clr):
            c_data = now_clr[c_idx]
            past_colors = []
            for p in past_color_ary:
                if c_idx < len(p):
                    # logger.debug("c_idx: %s, p[c_idx]: %s, c_data: %s", c_idx, p[c_idx], c_data)
                    past_colors.append(p[c_idx])
            # Joints with no current colour sample are excluded from the vote
            if len(past_colors) > 0 and (c_data > 0).all():
                logger.debug("_idx: %s, c: %s, one_data %s", c_idx, past_colors, c_data)
                now_nearest_idxs.append(get_nearest_idx_ary(past_colors, c_data))
            else:
                logger.debug("past_colors対象外: %s, c_data %s", past_colors, c_data)

    if len(now_nearest_idxs) > 0:
        most_common_idxs = Counter(now_nearest_idxs).most_common()

    logger.debug("c:: now_nearest_idxs: %s, most_common_idxs: %s, ", now_nearest_idxs, most_common_idxs)

    # Pad the frequency table when some candidates received no votes at all
    if len(most_common_idxs) < len(conf_idxs):
        # logger.debug("頻出カウント不足: len(most_common_idxs): %s, len(conf_idxs): %s ", len(most_common_idxs), len(conf_idxs))
        for c in range(len(conf_idxs)):
            is_existed = False
            for m, mci in enumerate(most_common_idxs):
                if c == most_common_idxs[m][0]:
                    is_existed = True
                    break
            if is_existed == False:
                # Append the missing index with a zero count
                most_common_idxs.append( (c, 0) )
    return now_nearest_idxs, most_common_idxs
# person matching based on depth data
def calc_depth_most_common_idxs(conf_idxs, now_depth, now_conf, past_depth_ary, past_conf_ary, now_nearest_idxs):
    """
    For each joint's depth value, find the nearest past candidate and
    return the per-joint matches plus their frequency ranking.

    Returns (now_nearest_idxs, most_common_idxs) where most_common_idxs
    is a Counter.most_common() list padded with (idx, 0) entries so every
    candidate in conf_idxs appears at least once.
    """
    # frequent XY matches are not carried over; start fresh
    # (the now_nearest_idxs parameter is kept only for signature compatibility)
    now_nearest_idxs = []
    most_common_idxs = []
    # depth data (excluding extremity joints)
    for d_idx in [0,1,2,3,5,6,8,9,11,12,14,15,16,17]:
        if d_idx < len(now_depth):
            d_data = now_depth[d_idx]
            past_depths = []
            for p in past_depth_ary:
                if d_idx < len(p):
                    past_depths.append(p[d_idx])
            # skip joints with missing current or past data
            if len(past_depths) > 0 and 0 not in past_depths and d_data > 0:
                logger.debug("past_depths: %s, d_data %s", past_depths, d_data)
                now_nearest_idxs.append(get_nearest_idx(past_depths, d_data))
            else:
                logger.debug("past_depths対象外: %s, d_data %s", past_depths, d_data)
    if len(now_nearest_idxs) > 0:
        most_common_idxs = Counter(now_nearest_idxs).most_common()
    logger.debug("d:: now_nearest_idxs: %s, most_common_idxs: %s, ", now_nearest_idxs, most_common_idxs)
    # after frequency ranking, pad when candidates are missing
    # (i.e. all matches collapsed onto a subset of candidates)
    if len(most_common_idxs) < len(conf_idxs):
        for c in range(len(conf_idxs)):
            is_existed = any(c == mci[0] for mci in most_common_idxs)
            if is_existed == False:
                # missing index: append with a zero count
                most_common_idxs.append( (c, 0) )
    return now_nearest_idxs, most_common_idxs
def get_nearest_idx(target_list, num):
    """
    Return the index of the element of target_list closest to num.

    @param target_list: sequence of numeric values
    @param num: target value
    @return index of the nearest value
    """
    # compute absolute differences, then take the index of the minimum
    diffs = np.abs(np.asarray(target_list) - num)
    return diffs.argmin()
def get_nearest_idx_ary(target_list, num_ary):
    """
    Return the index of the array in target_list closest to num_ary.

    Per-channel absolute differences are rounded to the nearest ten,
    each channel votes for its closest candidate, and the majority
    vote wins.

    @param target_list: sequence of numeric arrays (candidates)
    @param num_ary: target array
    @return index of the nearest candidate array
    """
    # absolute per-channel differences, rounded to the tens place
    rounded_diffs = [
        np.round(np.abs(candidate - num_ary), decimals=-1)
        for candidate in target_list
    ]
    # per-channel winner (candidate index with the smallest difference)
    per_channel_winners = np.asarray(rounded_diffs).argmin(axis=0)
    # majority vote across channels
    return np.argmax(np.bincount(per_channel_winners))
"matplotlib.pyplot.savefig",
"numpy.abs",
"matplotlib.pyplot.clf",
"os.path.basename",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"os.path.dirname",
"imageio.imread",
"numpy.asarray",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.colorbar",
"numpy.argsort",
"matplotlib.pyplot.c... | [((964, 998), 'os.path.join', 'os.path.join', (['json_path', 'file_name'], {}), '(json_path, file_name)\n', (976, 998), False, 'import os\n'), ((49125, 49149), 'numpy.argsort', 'np.argsort', (['avg_conf_ary'], {}), '(avg_conf_ary)\n', (49135, 49149), True, 'import numpy as np\n'), ((225, 253), 'logging.getLogger', 'logging.getLogger', (['"""message"""'], {}), "('message')\n", (242, 253), False, 'import logging\n'), ((282, 311), 'logging.getLogger', 'logging.getLogger', (['"""__main__"""'], {}), "('__main__')\n", (299, 311), False, 'import logging\n'), ((23552, 23561), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (23559, 23561), True, 'from matplotlib import pyplot as plt\n'), ((23570, 23579), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (23577, 23579), True, 'from matplotlib import pyplot as plt\n'), ((23593, 23657), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pred_multi_frame_ary[_iidx]'], {'interpolation': '"""nearest"""'}), "(pred_multi_frame_ary[_iidx], interpolation='nearest')\n", (23603, 23657), True, 'from matplotlib import pyplot as plt\n'), ((23666, 23682), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ii'], {}), '(ii)\n', (23678, 23682), True, 'from matplotlib import pyplot as plt\n'), ((24149, 24170), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plotName'], {}), '(plotName)\n', (24160, 24170), True, 'from matplotlib import pyplot as plt\n'), ((24404, 24415), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24413, 24415), True, 'from matplotlib import pyplot as plt\n'), ((86495, 86512), 'numpy.bincount', 'np.bincount', (['idxs'], {}), '(idxs)\n', (86506, 86512), True, 'import numpy as np\n'), ((21556, 21582), 'os.path.dirname', 'os.path.dirname', (['json_path'], {}), '(json_path)\n', (21571, 21582), False, 'import os\n'), ((21584, 21611), 'os.path.basename', 'os.path.basename', (['json_path'], {}), '(json_path)\n', (21600, 21611), False, 'import os\n'), ((21661, 21686), 'os.path.dirname', 
'os.path.dirname', (['idx_path'], {}), '(idx_path)\n', (21676, 21686), False, 'import os\n'), ((24246, 24270), 'imageio.imread', 'imageio.imread', (['plotName'], {}), '(plotName)\n', (24260, 24270), False, 'import imageio\n'), ((45003, 45023), 'numpy.array', 'np.array', (['past_x_ary'], {}), '(past_x_ary)\n', (45011, 45023), True, 'import numpy as np\n'), ((48568, 48587), 'numpy.array', 'np.array', (['now_x_ary'], {}), '(now_x_ary)\n', (48576, 48587), True, 'import numpy as np\n'), ((86362, 86386), 'numpy.asarray', 'np.asarray', (['target_list2'], {}), '(target_list2)\n', (86372, 86386), True, 'import numpy as np\n'), ((21971, 21997), 'os.path.dirname', 'os.path.dirname', (['json_path'], {}), '(json_path)\n', (21986, 21997), False, 'import os\n'), ((21999, 22026), 'os.path.basename', 'os.path.basename', (['json_path'], {}), '(json_path)\n', (22015, 22026), False, 'import os\n'), ((22737, 22763), 'os.path.dirname', 'os.path.dirname', (['json_path'], {}), '(json_path)\n', (22752, 22763), False, 'import os\n'), ((22765, 22792), 'os.path.basename', 'os.path.basename', (['json_path'], {}), '(json_path)\n', (22781, 22792), False, 'import os\n'), ((24008, 24075), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pred_joint[0]', 'pred_joint[1]'], {'s': '(5)', 'c': 'DEPTH_COLOR[pidx]'}), '(pred_joint[0], pred_joint[1], s=5, c=DEPTH_COLOR[pidx])\n', (24019, 24075), True, 'from matplotlib import pyplot as plt\n'), ((24369, 24393), 'imageio.imread', 'imageio.imread', (['plotName'], {}), '(plotName)\n', (24383, 24393), False, 'import imageio\n'), ((38960, 38985), 'collections.Counter', 'Counter', (['now_nearest_idxs'], {}), '(now_nearest_idxs)\n', (38967, 38985), False, 'from collections import Counter\n'), ((48982, 48995), 'numpy.array', 'np.array', (['con'], {}), '(con)\n', (48990, 48995), True, 'import numpy as np\n'), ((75226, 75251), 'collections.Counter', 'Counter', (['now_nearest_idxs'], {}), '(now_nearest_idxs)\n', (75233, 75251), False, 'from collections import 
Counter\n'), ((77294, 77319), 'collections.Counter', 'Counter', (['now_nearest_idxs'], {}), '(now_nearest_idxs)\n', (77301, 77319), False, 'from collections import Counter\n'), ((81683, 81708), 'collections.Counter', 'Counter', (['now_nearest_idxs'], {}), '(now_nearest_idxs)\n', (81690, 81708), False, 'from collections import Counter\n'), ((83532, 83557), 'collections.Counter', 'Counter', (['now_nearest_idxs'], {}), '(now_nearest_idxs)\n', (83539, 83557), False, 'from collections import Counter\n'), ((86131, 86150), 'numpy.abs', 'np.abs', (['(t - num_ary)'], {}), '(t - num_ary)\n', (86137, 86150), True, 'import numpy as np\n'), ((26382, 26407), 'collections.Counter', 'Counter', (['now_nearest_idxs'], {}), '(now_nearest_idxs)\n', (26389, 26407), False, 'from collections import Counter\n'), ((43403, 43422), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (43411, 43422), True, 'import numpy as np\n'), ((46954, 46973), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (46962, 46973), True, 'import numpy as np\n'), ((85716, 85739), 'numpy.asarray', 'np.asarray', (['target_list'], {}), '(target_list)\n', (85726, 85739), True, 'import numpy as np\n'), ((53989, 54014), 'collections.Counter', 'Counter', (['now_nearest_idxs'], {}), '(now_nearest_idxs)\n', (53996, 54014), False, 'from collections import Counter\n'), ((56594, 56619), 'collections.Counter', 'Counter', (['now_nearest_idxs'], {}), '(now_nearest_idxs)\n', (56601, 56619), False, 'from collections import Counter\n')] |
# Copyright <NAME> 2013.
"""Decoding (inference) algorithms."""
import numpy as np
import pyximport
pyximport.install(setup_args={'include_dirs': np.get_include()})
from .bestfirst import bestfirst
from .viterbi import viterbi
# Registry mapping decoder names to their (Cython-compiled) implementations.
DECODERS = {"bestfirst": bestfirst,
            "viterbi": viterbi}
| [
"numpy.get_include"
] | [((148, 164), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (162, 164), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function, absolute_import
import itertools
from decimal import Decimal
from copy import copy
import numpy as np
from .TreeNode import TreeNode
from .TreeStyle import TreeStyle, COLORS2
from .StyleChecker import StyleChecker
from .Coords import Coords
from .TreeParser import TreeParser, FastTreeParser
from .TreeWriter import NewickWriter
from .Treemod import TreeMod
from .PCM import PCM
from .Rooter import Rooter
from .NodeAssist import NodeAssist
from .utils import ToytreeError, fuzzy_match_tipnames, normalize_values
from .Render import ToytreeMark
from .CanvasSetup import CanvasSetup
"""
Test for speed improvements:
- reduce deepcopies
- reduce traversals.
"""
class ToyTree(object):
"""
Toytree class object.
Parameters:
-----------
newick: (str, file, URL, or ToyTree)
A newick or nexus formatted string, or file handle or URL of file
containing correctly formatted string. A toytree can also be reloaded
from another toytree object.
tree_format: int
Format of the newick tree structure to be parsed.
Attributes:
-----------
...
Functions:
----------
...
"""
    def __init__(self, newick=None, tree_format=0, **kwargs):
        """
        Build a ToyTree from the input data.

        newick may be a TreeNode (detached and used directly), another
        ToyTree (treenode reused, draw style inherited), a newick/nexus
        string, file path or URL, or None for an empty tree.
        tree_format is the ete3-style newick parsing format.
        """
        # if loading from a Toytree then inherit that trees draw style
        inherit_style = False
        # load from a TreeNode and detach. Must have .idx attributes on nodes.
        if isinstance(newick, TreeNode):
            self.treenode = newick.detach()
        # load TreeNode from a ToyTree (user should just use .copy())
        elif isinstance(newick, ToyTree):
            self.treenode = newick.treenode
            inherit_style = True
        # parse a str, URL, or file
        elif isinstance(newick, (str, bytes)):
            self.treenode = TreeParser(newick, tree_format).treenodes[0]
        # make an empty tree
        else:
            self.treenode = TreeNode()
        # init dimensions and cache to be filled during coords update
        self.nnodes = 0
        self.ntips = 0
        self.idx_dict = {}
        # set tips order if fixing for multi-tree plotting (default None)
        # self._fixed_order = None
        # self._fixed_idx = list(range(self.ntips))
        # if fixed_order:
        #     if not isinstance(fixed_order, (list, tuple)):
        #         raise ToytreeError("fixed_order arg should be a list")
        #     self._set_fixed_order(fixed_order)
        # ladderize the tree unless user fixed order and wants it not.
        # if not self._fixed_order:
        self.treenode.ladderize()
        # Object for storing default plot settings or saved styles.
        # Calls several update functions when self.draw() to fit canvas.
        if inherit_style:
            self.style = newick.style
        else:
            self.style = TreeStyle(tree_style='n')
        # Object for plot coordinates. Calls .update() whenever tree modified.
        self._coords = Coords(self)
        self._coords.update()
        # if not kwargs.get("copy"):
        # Object for modifying trees beyond root, prune, drop
        self.mod = TreeMod(self)
        self.pcm = PCM(self)
# --------------------------------------------------------------------
# Class definitions
# --------------------------------------------------------------------
# ... could add __repr__, __iter__, __next__, but .tree has most already
def __str__(self):
""" return ascii tree ... (not sure whether to keep this) """
return self.treenode.__str__()
def __len__(self):
""" return len of Tree (ntips) """
return len(self.treenode)
# def _set_fixed_order(self, fixed_order):
# """
# Setting fixed_idx is important for when nodes are rotated, and edges
# are different lengths, b/c it allows updating coords to match up.
# """
# if fixed_order:
# if set(fixed_order) != set(self.treenode.get_leaf_names()):
# raise ToytreeError(
# "fixed_order must include same tipnames as tree")
# self._fixed_order = fixed_order
# names = self.treenode.get_leaf_names()[::-1]
# self._fixed_idx = [names.index(i) for i in self._fixed_order]
# --------------------------------------------------------------------
# properties are not changeable by the user
# --------------------------------------------------------------------
@property
def features(self):
feats = set()
for node in self.treenode.traverse():
feats.update(node.features)
return feats
# @property
# def nnodes(self):
# "The total number of nodes in the tree including tips and root."
# return self._nnodes
# # return sum(1 for i in self.treenode.traverse())
# @property
# def ntips(self):
# "The number of tip nodes in the tree."
# return self._ntips
# # return sum(1 for i in self.treenode.get_leaves())
@property
def newick(self, tree_format=0):
"Returns newick represenation of the tree in its current state."
# checks one of root's children for features and extra feats.
if self.treenode.children:
features = {"name", "dist", "support", "height", "idx"}
testnode = self.treenode.children[0]
extrafeat = {i for i in testnode.features if i not in features}
features.update(extrafeat)
return self.treenode.write(format=tree_format)
# --------------------------------------------------------------------
# functions to return values from the ete3 .treenode object ----------
# --------------------------------------------------------------------
    def write(self, handle=None, tree_format=0, features=None, dist_formatter=None):
        """
        Write newick string representation of the tree.

        Parameters:
        -----------
        handle (str):
            A string file name to write output to. If None then newick is
            returned as a string.
        tree_format (int):
            Format of the newick string. See ete3 tree formats. Default=0.
        features (list, set, or tuple):
            Features of treenodes that should be written to the newick string
            in NHX format. Examples include "height", "idx", or other features
            you may have saved to treenodes.

        Returns the newick string when no handle is given; returns None
        after writing to a file, and silently returns None for an empty
        tree (a root with no children).
        """
        if self.treenode.children:
            # features = {"name", "dist", "support", "height", "idx"}
            # testnode = self.treenode.children[0]
            # extrafeat = {i for i in testnode.features if i not in features}
            # features.update(extrafeat)

            # get newick string
            writer = NewickWriter(
                treenode=self.treenode,
                tree_format=tree_format,
                features=features,
                dist_formatter=dist_formatter,
            )
            newick = writer.write_newick()

            # write to file or return as string
            if handle:
                with open(handle, 'w') as out:
                    out.write(newick)
            else:
                return newick
def get_edges(self):
"""
Returns an array with paired edges (parent, child).
"""
return self._coords.edges
# def get_edge_lengths(self):
# """
# Returns edge length values from tree object in node plot order. To
# modify edge length values you must modify nodes in the .treenode object
# directly.
# """
# return self.get_node_values('dist', True, True)
def get_edge_values(self, feature='idx', normalize=False):
"""
Returns edge values in the order they are plotted (see .get_edges())
Parameters:
-----------
feature (str):
The node feature to return for each edge, e.g., idx, dist, Ne.
normalize (bool):
This will normalize the values to be binned within a range that
makes it easier to visualize when plotted as node sizes or edge
widths. In the range(2, 12) typically.
"""
elist = []
for eidx in self._coords.edges[:, 1]:
node = self.idx_dict[eidx]
elist.append(
(getattr(node, feature) if hasattr(node, feature) else "")
)
elist = np.array(elist)
if normalize:
elist = normalize_values(elist)
return elist
    def get_edge_values_mapped(self, node_mapping=None, include_stem=True):
        """
        Enter a dictionary mapping node 'idx' or tuple of tipnames to values
        that you want mapped to the stem and descendant edges that node.
        Edge values are returned in proper plot order to be entered to the
        edge_colors or edge_widths arguments to draw(). To see node idx values
        use node_labels=True in draw(). If dictionary keys are integers it is
        assumed they are node idxs.

        Note: it is safer to use tip labels to identify clades than node idxs
        since tree tranformations (e.g., rooting) can change the mapping of
        idx values to nodes on the tree.

        This function is most convenient for applying values to clades. To
        instead map values to specific edges (e.g., a single internal edge)
        it will be easier to use tre.get_edge_values() and then to set the
        values of the internal edges manually.

        Example 1:
            tre = toytree.tree("((a,b),(c,d));")
            tre.get_edge_values_mapped({5: 'green', 6: 'red'})
            # ['green', 'green', 'green', 'red', 'red', 'red']

        Example 2:
            tre = toytree.tree("((a,b),(c,d));")
            tre.get_edge_values_mapped({(a, b): 'green', (c, d): 'red'})
            # ['green', 'green', 'green', 'red', 'red', 'red']

        Example 3:
            tre = toytree.tree("((a,b),(c,d));")
            tre.get_edge_values_mapped({10, 13})
            # ['green', 'green', 'green', 'red', 'red', 'red']
        """
        # one slot per edge; unmapped edges stay None
        values = [None] * self._coords.edges.shape[0]
        if node_mapping is None:
            return values

        # a bare set of idxs: assign colors from the default palette
        if isinstance(node_mapping, set):
            cols = iter(COLORS2)
            node_mapping = {i: next(cols) for i in node_mapping}

        # build a map of {node idx: value}, resolving tipname keys to the
        # idx of their MRCA node
        rmap = {}
        for key in node_mapping:
            # if it is a node idx
            if isinstance(key, int):
                rmap[key] = node_mapping[key]
            else:
                ns = NodeAssist(self, key, None, None)
                kidx = ns.get_mrca().idx
                rmap[kidx] = node_mapping[key]

        # spread each mapped value over the node's stem and descendant edges
        for idx in self.idx_dict:
            node = self.idx_dict[idx]
            if idx in rmap:
                # add value to stem edge
                if include_stem:
                    if not node.is_root():
                        values[idx] = rmap[idx]

                # add value to descendants edges
                for desc in node.get_descendants():
                    values[desc.idx] = rmap[idx]
        return values
def get_edge_values_from_dict(self, node_value_dict=None, include_stem=True):
"""
No longer supported. See get_edge_values_mapped()
"""
print("Warning: get_edge_values_from_dict no longer supported."
" See get_edge_values_mapped() as a replacement.")
return self.get_edge_values_mapped(node_value_dict, include_stem)
def get_mrca_idx_from_tip_labels(self, names=None, wildcard=None, regex=None):
"""
Returns the node idx label of the most recent common ancestor node
for the clade that includes the selected tips. Arguments can use fuzzy
name matching: a list of tip names, wildcard selector, or regex string.
"""
ns = NodeAssist(self, names, wildcard, regex)
return ns.get_mrca().idx
# if not any([names, wildcard, regex]):
# raise ToytreeError("at least one argument required")
# node = fuzzy_match_tipnames(
# self, names, wildcard, regex, True, False)
# return node.idx
def get_node_descendant_idxs(self, idx=None):
"""
Returns a list of idx labels descendant from a selected node.
"""
ndict = self.get_feature_dict("idx", None)
node = ndict[idx]
return [idx] + [i.idx for i in node.get_descendants()]
def get_node_coordinates(self, layout=None, use_edge_lengths=True):
"""
Returns coordinate locations of nodes in the tree as an array. Each
row is an (x, y) coordinate, ordered by the 'idx' feature of nodes.
The first ntips rows are the tip coordinates, which can also be
returned using .get_tip_coordinates().
"""
# if layout argument then set style and update coords.
if layout is None:
layout = self.style.layout
if layout == 'c':
return self._coords.get_radial_coords(use_edge_lengths)
else:
return self._coords.get_linear_coords(layout, use_edge_lengths)
    def get_node_values(
        self,
        feature=None,
        show_root=False,
        show_tips=False,
        ):
        """
        Returns node values from tree object in node plot order. To modify
        values you must modify the .treenode object directly by setting new
        'features'. For example

        for node in ttree.treenode.traverse():
            node.add_feature("PP", 100)

        By default node and tip values are hidden (set to "") so that they
        are not shown on the tree plot. To include values for these nodes
        use the 'show_root'=True, or 'show_tips'=True arguments.

        tree.get_node_values("support", True, True)
        """
        # return input if feature does not exist
        # if feature is None:
        #     feature = ""
        # else:
        #     feature = str(feature)

        # access nodes in the order they will be plotted
        ndict = self.get_node_dict(return_internal=True, return_nodes=True)
        nodes = [ndict[i] for i in range(self.nnodes)[::-1]]

        # get features; nodes lacking the feature map to an empty string
        if feature:
            vals = [getattr(i, feature) if hasattr(i, feature)
                    else "" for i in nodes]
        else:
            vals = [" " for i in nodes]

        # apply hiding rules: blank out root/tip values unless requested
        if not show_root:
            vals = [i if not j.is_root() else "" for i, j in zip(vals, nodes)]
        if not show_tips:
            vals = [i if not j.is_leaf() else "" for i, j in zip(vals, nodes)]

        # convert float to ints for prettier printing unless all floats
        # raise exception and skip if there are true strings (names);
        # best-effort: leave values unchanged on any failure
        try:
            if all([Decimal(str(i)) % 1 == 0 for i in vals if i]):
                vals = [int(i) if isinstance(i, float) else i for i in vals]
        except Exception:
            pass

        return np.array(vals)
def get_feature_dict(self, key_attr=None, values_attr=None):
"""
Returns a dictionary in which features from nodes can be selected
as the keys or values. By default it returns {node: node}, but if you
select key_attr="name" then it returns {node.name: node} and if you
enter key_attr="name" values_attr="idx" it returns a dict with
{node.name: node.idx}.
"""
ndict = {}
for node in self.treenode.traverse():
if key_attr:
key = getattr(node, key_attr)
else:
key = node
if values_attr:
value = getattr(node, values_attr)
else:
value = node
# add to dict
ndict[key] = value
return ndict
    def get_node_dict(self, return_internal=False, return_nodes=False, keys_as_names=False):
        """
        Return node labels as a dictionary mapping {idx: name} where idx is
        the order of nodes in 'preorder' traversal. Used internally by the
        func .get_node_values() to return values in proper order.

        Parameters:
        -----------
        return_internal (bool):
            If True all nodes are returned, if False only tips.
        return_nodes: (bool)
            If True returns TreeNodes, if False return node names.
        keys_as_names: (bool)
            If True keys are names, if False keys are node idx labels.

        Raises ToytreeError when keys_as_names=True and node names are not
        unique (duplicate names would silently collapse dict entries).
        """
        if return_internal:
            nodes = [i for i in self.treenode.traverse("preorder")]

            # names must be unique
            if keys_as_names:
                names = [i.name for i in nodes]
                if len(names) != len(set(names)):
                    raise ToytreeError(
                        "cannot return node dict with names as keys "
                        "because node names are not all unique "
                        "(some may not be set)"
                    )
            if return_nodes:
                if keys_as_names:
                    return {i.name: i for i in nodes}
                else:
                    return {i.idx: i for i in nodes}
            else:
                return {i.idx: i.name for i in nodes}
        else:
            # tips only; note keys_as_names is not applied on this branch
            nodes = [i for i in self.treenode.traverse("preorder") if i.is_leaf()]
            if return_nodes:
                return {i.idx: i for i in nodes}
            else:
                return {i.idx: i.name for i in nodes}
def get_tip_coordinates(self, layout=None, use_edge_lengths=True):
"""
Returns coordinates of the tip positions for a tree. If no argument
for axis then a 2-d array is returned. The first column is the x
coordinates the second column is the y-coordinates. If you enter an
argument for axis then a 1-d array will be returned of just that axis.
"""
# one could imagine a very simple method like this, but to accomodate
# circ and unrooted layout we'll want a better option.
# return np.arange(self.ntips) + self.style.xbaseline + ybase...
# if no layout provided then use current style
if layout is None:
layout = self.style.layout
# get coordinates array
coords = self.get_node_coordinates(layout, use_edge_lengths)
return coords[:self.ntips]
def get_tip_labels(self, idx=None):
"""
Returns tip labels in the order they will be plotted on the tree, i.e.,
starting from zero axis and counting up by units of 1 (bottom to top
in right-facing trees; left to right in down-facing). If 'idx' is
indicated then a list of tip labels descended from that node will be
returned, instead of all tip labels. This is useful in combination
with other functions that select nodes/clades of the tree based on a
list of tip labels. You can use the toytree draw() command with
tip_labels='idx' or tip_labels=True to see idx labels plotted on nodes.
Parameters:
idx (int): index label of a node.
"""
if idx is not None:
treenode = self.idx_dict[idx]
# if self._fixed_order:
# return [str(i) for i in self._fixed_order if i in
# treenode.get_leaf_names()]
# else:
return [str(i) for i in treenode.get_leaf_names()[::-1]]
else:
# if self._fixed_order:
# return [str(i) for i in self._fixed_order]
# else:
return [str(i) for i in self.treenode.get_leaf_names()[::-1]]
    def set_node_values(self, feature, values=None, default=None):
        """
        Set values for a node attribute and RETURNS A COPY of the tree with
        node values modified. If the attribute does not yet exist
        and you set vaues for only some nodes then a null values ("") will
        be set to all other nodes. You cannot set "idx" (this is used
        internally by toytree to draw trees). You can use this to set names,
        change node distances ("dist") or heights ("height"; which will modify
        dist values to do so). If values is a single value it will be set for
        all nodes, otherwise it should be a dictionary of idx numbers as keys
        and values as values.

        Example:
        --------
        tre.set_node_values(feature="Ne", default=5000)
        tre.set_node_values(feature="Ne", values={0:1e5, 1:1e6, 2:1e3})
        tre.set_node_values(feature="Ne", values={0:1e5, 1:1e6}, default=5000)
        tre.set_node_values(feature="Ne", values={'r0':1e5, 'r1':1e6})

        Parameters:
        -----------
        feature (str):
            The name of the node attribute to modify (cannot be 'idx').
        values (dict):
            A dictionary of {node: value}. To select nodes you can use either
            integer values corresponding to the node 'idx' labels, or strings
            corresponding to the node 'name' labels.
            Note: use tree.draw(node_labels='idx') to see idx labels on tree.
        default (int, str, float):
            You can use a default value to be filled for all other nodes not
            listed in the values dictionary.

        Returns:
        ----------
        A ToyTree object is returned with the node values modified.
        """
        # make a copy
        nself = self.copy()

        # make default ndict using idxs, regardless of values
        ndict = nself.idx_dict

        # if first value is a string then use name_dict instead of idx_dict
        if values:
            val0 = list(values.keys())[0]
            if isinstance(val0, (str, bytes)):
                ndict = {ndict[i].name: ndict[i] for i in ndict}
            elif isinstance(val0, int):
                pass
            else:
                raise ToytreeError("dictionary keys should be int or str")

        # find special cases
        if feature == "idx":
            raise ToytreeError("cannot modify idx values.")
        if feature == "height":
            raise ToytreeError("modifying heights not supported, use dist.")

        # set everyone to a default value for this attribute
        if default is not None:
            for key in ndict:
                node = ndict[key]
                node.add_feature(feature, default)

        # set specific values
        if values:
            if not isinstance(values, dict):
                # non-dict values argument: warn but do not raise
                print(
                    "Values should be a dictionary. Use default to set"
                    " a single value.")
            else:
                # check that all keys are valid
                for nidx in values:
                    if nidx not in ndict:
                        raise ToytreeError(
                            "node idx or name {} not in tree".format(nidx))

                # or, set everyone to a null value
                for key in ndict:
                    if not hasattr(ndict[key], feature):
                        node = ndict[key]
                        node.add_feature(feature, "")

                # then set selected nodes to new values
                for key, val in values.items():
                    node = ndict[key]
                    node.add_feature(feature, val)
        return nself
    def copy(self):
        """ Returns a new ToyTree equivalent to a deepcopy (but faster) """
        # copy treenodes w/ topology, node attrs, nnodes, ntips, and idx_dict;
        # the 'copy' kwarg is passed through to __init__
        nself = ToyTree(
            self.treenode._clone(),
            # fixed_order=self._fixed_order,
            copy=True,
        )

        # update style dicts
        nself.style = self.style.copy()

        # update coords by copying instead of coords.update
        # nself._coords.edges = nself._coords.get_edges()
        # nself._coords.verts = self._coords.verts.copy()
        return nself
# def copy(self):
# """ returns a deepcopy of the tree object"""
# return deepcopy(self)
def is_rooted(self):
"""
Returns False if the tree is unrooted.
"""
if len(self.treenode.children) > 2:
return False
return True
    def is_bifurcating(self, include_root=True):
        """
        Returns False if there is a polytomy in the tree, including if the tree
        is unrooted (basal polytomy), unless you use the include_root=False
        argument.
        """
        # expected node counts for a strictly bifurcating tree with n tips:
        # rooted: 2n - 1 total nodes; unrooted: 2n - 2.
        ctn1 = -1 + (2 * len(self))
        ctn2 = -2 + (2 * len(self))
        if self.is_rooted():
            return bool(ctn1 == sum(1 for i in self.treenode.traverse()))
        if include_root:
            # count the basal trichotomy as if it were resolved by a root
            return bool(ctn2 == -1 + sum(1 for i in self.treenode.traverse()))
        return bool(ctn2 == sum(1 for i in self.treenode.traverse()))
# --------------------------------------------------------------------
# functions to modify the ete3 tree - MUST CALL ._coords.update()
# --------------------------------------------------------------------
def ladderize(self, direction=0):
"""
Ladderize tree (order descendants) so that top child has fewer
descendants than the bottom child in a left to right tree plot.
To reverse this pattern use direction=1.
"""
nself = self.copy()
nself.treenode.ladderize(direction=direction)
# nself._fixed_order = None
nself._coords.update()
return nself
def collapse_nodes(self, min_dist=1e-6, min_support=0):
"""
Returns a copy of the tree where internal nodes with dist <= min_dist
are deleted, resulting in a collapsed tree. e.g.:
newtre = tre.collapse_nodes(min_dist=0.001)
newtre = tre.collapse_nodes(min_support=50)
"""
nself = self.copy()
for node in nself.treenode.traverse():
if not node.is_leaf():
if (node.dist <= min_dist) | (node.support < min_support):
node.delete()
nself._coords.update()
return nself
    def prune(self, names=None, wildcard=None, regex=None):
        """
        Returns a copy of a subtree of the existing tree that includes
        only the selected tips and minimal edges needed to connect them.
        You can select the tip names using either names, wildcard or regex.
        Parameters:
        -----------
        names: list of tip names.
        wildcard: a string to match using wildcard characters like *
        regex: a regular expression to match multiple names.
        # examples:
        tre = toytree.rtree.imbtree(ntips=10)
        ptre = tre.prune(names=['r1', 'r2', 'r3', 'r6'])
        ptre = tre.prune(regex='r[0-3]')
        Returns:
        ----------
        toytree.Toytree.ToyTree
        """
        # make a deepcopy of the tree
        nself = self.copy()
        # return if nothing to drop
        if not any([names, wildcard, regex]):
            return nself
        # get matching names list with fuzzy match
        nas = NodeAssist(nself, names, wildcard, regex)
        tipnames = nas.get_tipnames()
        # refuse degenerate selections: every tip, or no tip at all
        if len(tipnames) == len(nself):
            raise ToytreeError("You cannot drop all tips from the tree.")
        if not tipnames:
            raise ToytreeError("No tips selected.")
        # keep only the selected tips; preserve_branch_length folds removed
        # internal edge lengths into the retained edges
        nself.treenode.prune(tipnames, preserve_branch_length=True)
        nself._coords.update()
        return nself
    def drop_tips(self, names=None, wildcard=None, regex=None):
        """
        Returns a copy of the tree with the selected tips removed. The entered
        value can be a name or list of names. To prune on an internal node to
        create a subtree see the .prune() function instead.
        Parameters:
        -----------
        names: list of tip names.
        wildcard: a string to match using wildcard characters like *
        regex: a regular expression to match multiple names.
        Examples:
        ----------
        tre = toytree.rtree.imbtree(ntips=10)
        ptre = tre.drop_tips(names=['r1', 'r2', 'r3', 'r6'])
        ptre = tre.drop_tips(regex='r[0-3]')
        Returns:
        ----------
        toytree.Toytree.ToyTree
        """
        # make a deepcopy of the tree
        nself = self.copy()
        # return if nothing to drop
        if not any([names, wildcard, regex]):
            return nself
        # get matching names list with fuzzy match
        nas = NodeAssist(nself, names, wildcard, regex)
        tipnames = nas.get_tipnames()
        if len(tipnames) == len(nself):
            raise ToytreeError("You cannot drop all tips from the tree.")
        if not tipnames:
            raise ToytreeError("No tips selected.")
        # invert the selection: keep every tip that was NOT matched
        keeptips = [i for i in nself.get_tip_labels() if i not in tipnames]
        nself.treenode.prune(keeptips, preserve_branch_length=True)
        nself._coords.update()
        return nself
# TODO: could swap or reverse .children node attr to swap_children & update
    def rotate_node(
        self,
        names=None,
        wildcard=None,
        regex=None,
        idx=None):
        # modify_tree=False,
        """
        Returns a ToyTree with the selected node rotated for plotting.
        tip colors do not align correct currently if nodes are rotated...
        """
        # make a copy; map tip name -> current plot order index
        revd = {j: i for (i, j) in enumerate(self.get_tip_labels())}
        neworder = {}
        # get node to rotate
        treenode = fuzzy_match_tipnames(
            self, names, wildcard, regex, True, True)
        children = treenode.up.children
        # tip names and tip order-indices grouped by sister clade
        names = [[j.name for j in i.get_leaves()] for i in children]
        nidxs = [[revd[i] for i in j] for j in names]
        # get size of the big clade
        move = max((len(i) for i in nidxs))
        if len(nidxs[0]) > len(nidxs[1]):
            move = min((len(i) for i in nidxs))
        # newdict: cycle the concatenated index list by `move` positions and
        # reassign the shifted indices to the affected tips
        cnames = list(itertools.chain(*names))
        tdict = {i: None for i in cnames}
        cycle = itertools.cycle(itertools.chain(*nidxs))
        for m in range(move):
            next(cycle)
        for t in cnames:
            tdict[t] = next(cycle)
        # tips outside the rotated clades keep their original positions
        for key in revd:
            if key in tdict:
                neworder[key] = tdict[key]
            else:
                neworder[key] = revd[key]
        revd = {j: i for (i, j) in neworder.items()}
        neworder = [revd[i] for i in range(self.ntips)]
        # returns a new tree (i.e., copy) modified w/ a fixed order
        # NOTE(review): fixed_order is commented out below, so the computed
        # neworder is currently unused -- confirm intended behavior.
        nself = ToyTree(self.newick)  #, fixed_order=neworder)
        nself._coords.update()
        return nself
def resolve_polytomy(
self,
dist=1.0,
support=100,
recursive=True):
"""
Returns a copy of the tree with all polytomies randomly resolved.
Does not transform tree in-place.
"""
nself = self.copy()
nself.treenode.resolve_polytomy(
default_dist=dist,
default_support=support,
recursive=recursive)
nself._coords.update()
return nself
# def speciate(self, idx, name=None, dist_prop=0.5):
# """
# Split an edge to create a new tip in the tree as in a speciation event.
# """
# # make a copy of the toytree
# nself = self.copy()
# # get Treenodes of selected node and parent
# ndict = nself.get_feature_dict('idx')
# node = ndict[idx]
# parent = node.up
# # get new node species name
# if not name:
# if node.is_leaf():
# name = node.name + ".sis"
# else:
# names = nself.get_tip_labels(idx=idx)
# name = "{}.sis".format("_".join(names))
# # create new speciation node between them at dist_prop dist.
# newnode = parent.add_child(
# name=parent.name + ".spp",
# dist=node.dist * dist_prop
# )
# # connect original node to speciation node.
# node.up = newnode
# node.dist = node.dist - newnode.dist
# newnode.add_child(node)
# # drop original node from original parent child list
# parent.children.remove(node)
# # add new tip node (new sister) and set same dist as onode
# newnode.add_child(
# name=name,
# dist=node.up.height,
# )
# # update toytree coordinates
# nself._coords.update()
# return nself
def unroot(self):
"""
Returns a copy of the tree unrooted. Does not transform tree in-place.
"""
nself = self.copy()
# updated unroot function to preserve support values to root node
nself.treenode.unroot()
nself.treenode.ladderize()
nself._coords.update()
return nself
    def root(
        self,
        names=None,
        wildcard=None,
        regex=None,
        resolve_root_dist=True,
        edge_features=["support"],
        ):
        """
        (Re-)root a tree by moving the tree anchor (real or phantom root node)
        to a new split in the tree.
        Rooting location can be selected by entering
        a list of tipnames descendant from a node, or using wildcard or regex
        to get a list of tipnames.
        names: (list) (default=None)
            A list of tip names. Root is placed along edge to mrca node.
        wildcard: (str) (default=None)
            A string matching multiple tip names. Root is placed along edge to
            the mrca node of selected taxa.
        regex: (str) (default=None)
            A regex string matching multiple tip names. Root is placed along
            edge to the mrca node of selected taxa.
        resolve_root_dist: (float or bool) (default=True)
            Length along the edge at which to place the new root, or a boolean
            indicating auto methods. Default is True, which means to use mid-
            point rooting along the edge. False will root at the ancestral node
            creating a zero length edge. A float value will place the new node
            at a point along the edge starting from the ancestral node. A float
            value greater than the edge length will raise an error.
        edge_features: (list) (default=["support"])
            Node labels in this list are treated as edge labels (e.g., support
            values represent support for a split/edge in the tree). This effects
            how labels are moved when the tree is re-rooted. By default support
            values are treated as edge features and moved to preserve clade
            supports when the tree is re-rooted. Other node labels, such as
            names do not make sense to shift in this way. New splits that are
            created by rooting are set to 100 by default.
        Example:
        To root on a clade that includes the samples "1-A" and "1-B" you can
        do any of the following:
        rtre = tre.root(outgroup=["1-A", "1-B"])
        rtre = tre.root(wildcard="1-")
        rtre = tre.root(regex="1-[A,B]")
        """
        # NOTE(review): edge_features has a mutable default list -- the same
        # list object is shared across calls; verify no caller mutates it.
        # insure edge_features is an iterable
        if not edge_features:
            edge_features = []
        if isinstance(edge_features, (str, int, float)):
            edge_features = [edge_features]
        # make a deepcopy of the tree and pass to Rooter class
        nself = self.copy()
        # Rooter performs the re-rooting; the finished tree is on .tree
        rooter = Rooter(
            nself,
            (names, wildcard, regex),
            resolve_root_dist,
            edge_features,
        )
        return rooter.tree
# --------------------------------------------------------------------
# Draw functions imported, but docstring here
# --------------------------------------------------------------------
    def draw(
        self,
        tree_style=None,
        height=None,
        width=None,
        axes=None,
        layout=None,
        tip_labels=None,
        tip_labels_colors=None,
        tip_labels_style=None,
        tip_labels_align=None,
        node_labels=None,
        node_labels_style=None,
        node_sizes=None,
        node_colors=None,
        node_style=None,
        node_hover=None,
        node_markers=None,
        edge_colors=None,
        edge_widths=None,
        edge_type=None,
        edge_style=None,
        edge_align_style=None,
        use_edge_lengths=None,
        scalebar=None,
        padding=None,
        xbaseline=None,
        ybaseline=None,
        admixture_edges=None,
        shrink=None,
        fixed_order=None,
        fixed_position=None,
        **kwargs):
        """
        Plot a Toytree tree, returns a tuple of Toyplot (Canvas, Axes) objects.
        Parameters:
        -----------
        tree_style (or ts): str
            One of several preset styles for tree plotting. The default is 'n'
            (normal). Other options include 'c' (coalescent), 'd' (dark), and
            'm' (multitree). You also create your own TreeStyle objects.
            The tree_style sets a default set of styling on top of which other
            arguments passed to draw() will override when plotting.
        height: int (optional; default=None)
            If None the plot height is autosized. If 'axes' arg is used then
            tree is drawn on an existing Canvas, Axes and this arg is ignored.
        width: int (optional; default=None)
            Similar to height (above).
        axes: Toyplot.Cartesian (default=None)
            A toyplot cartesian axes object. If provided tree is drawn on it.
            If not provided then a new Canvas and Cartesian axes are created
            and returned with the tree plot added to it.
        use_edge_lengths: bool (default=False)
            Use edge lengths from .treenode (.get_edge_lengths) else
            edges are set to length >=1 to make tree ultrametric.
        tip_labels: [True, False, list]
            If True then the tip labels from .treenode are added to the plot.
            If False no tip labels are added. If a list of tip labels
            is provided it must be the same length as .get_tip_labels().
        tip_labels_colors:
            ...
        tip_labels_style:
            ...
        tip_labels_align:
            ...
        node_labels: [True, False, list]
            If True then nodes are shown, if False then nodes are suppressed
            If a list of node labels is provided it must be the same length
            and order as nodes in .get_node_values(). Node labels can be
            generated in the proper order using the the .get_node_labels()
            function from a Toytree tree to draw info from the tree features.
            For example: node_labels=tree.get_node_labels("support").
        node_sizes: [int, list, None]
            If None then nodes are not shown, otherwise, if node_labels
            then node_size can be modified. If a list of node sizes is
            provided it must be the same length and order as nodes in
            .get_node_dict().
        node_colors: [list]
            Use this argument only if you wish to set different colors for
            different nodes, in which case you must enter a list of colors
            as string names or HEX values the length and order of nodes in
            .get_node_dict(). If all nodes will be the same color then use
            instead the node_style dictionary:
            e.g., node_style={"fill": 'red'}
        node_style: [dict]
            ...
        node_hover: [True, False, list, dict]
            Default is True in which case node hover will show the node
            values. If False then no hover is shown. If a list or dict
            is provided (which should be in node order) then the values
            will be shown in order. If a dict then labels can be provided
            as well.
        admixture_edges: [tuple, list]
            Admixture edges will add colored edges to the plot in the style
            of the 'edge_align_style'. These will be drawn from (source, dest,
            height, width, color). Example: [(4, 3, 50000, 3, 'red')]
        """
        # update kwargs to merge it with user-entered arguments:
        userargs = {
            "height": height,
            "width": width,
            "layout": layout,
            "tip_labels": tip_labels,
            "tip_labels_colors": tip_labels_colors,
            "tip_labels_align": tip_labels_align,
            "tip_labels_style": tip_labels_style,
            "node_labels": node_labels,
            "node_labels_style": node_labels_style,
            "node_sizes": node_sizes,
            "node_colors": node_colors,
            "node_hover": node_hover,
            "node_style": node_style,
            "node_markers": node_markers,
            "edge_type": edge_type,
            "edge_colors": edge_colors,
            "edge_widths": edge_widths,
            "edge_style": edge_style,
            "edge_align_style": edge_align_style,
            "use_edge_lengths": use_edge_lengths,
            "scalebar": scalebar,
            "padding": padding,
            "xbaseline": xbaseline,
            "ybaseline": ybaseline,
            "admixture_edges": admixture_edges,
            "shrink": shrink,
            "fixed_order": fixed_order,
            "fixed_position": fixed_position
        }
        # shortcut name for tree style
        if kwargs.get("ts"):
            tree_style = kwargs.get("ts")
        # use a base style preset over which other options override
        if tree_style:
            curstyle = TreeStyle(tree_style[0])
        # or use current tree settings (DEFAULT unless changed by user)
        else:
            curstyle = self.style.copy()
        # optionally override current style with style args entered to draw()
        kwargs.update(userargs)
        # dict-valued styles are stored under a leading-underscore key
        user = dict([
            ("_" + i, j) if isinstance(j, dict) else (i, j)
            for (i, j) in kwargs.items() if j is not None
        ])
        curstyle.update(user)
        # warn user if they entered kwargs that aren't supported:
        allkeys = list(userargs.keys()) + ["debug", "ts"]
        unrecognized = [i for i in kwargs if i not in allkeys]
        if unrecognized:
            print("unrecognized arguments skipped: {}"
            "\ncheck the docs, argument names may have changed."
            .format(unrecognized))
        # update coords based on layout
        edges = self._coords.get_edges()
        if layout == 'c':
            verts = self._coords.get_radial_coords(curstyle.use_edge_lengths)
        else:
            verts = self._coords.get_linear_coords(
                curstyle.layout,
                curstyle.use_edge_lengths,
                fixed_order,
                fixed_position,
            )
        # check all styles
        fstyle = StyleChecker(self, curstyle).style
        # debugging returns the mark and prints the modified kwargs
        if kwargs.get('debug'):
            print(user)
            return fstyle
        # get canvas and axes
        cs = CanvasSetup(self, axes, fstyle)
        canvas = cs.canvas
        axes = cs.axes
        # generate toyplot Mark
        mark = ToytreeMark(ntable=verts, etable=edges, **fstyle.to_dict())
        # add mark to axes
        axes.add_mark(mark)
        return canvas, axes, mark
class RawTree():
    """
    Barebones tree object that parses newick strings faster, assigns idx
    labels to all nodes, and can write the tree back to newick.
    """
    def __init__(self, newick, tree_format=0):
        # FastTreeParser presumably returns an ete3-like TreeNode -- confirm
        self.treenode = FastTreeParser(newick, tree_format).treenode
        # len(treenode) presumably counts tips; a binary tree with t tips
        # has 2t - 1 nodes total
        self.ntips = len(self.treenode)
        self.nnodes = (len(self.treenode) * 2) - 1
        self.update_idxs()
    def write(self, tree_format=5, dist_formatter=None):
        """Serialize the tree back to a newick string."""
        # get newick string
        writer = NewickWriter(
            treenode=self.treenode,
            tree_format=tree_format,
            dist_formatter=dist_formatter,
        )
        newick = writer.write_newick()
        return newick
    def update_idxs(self):
        "set root idx highest, tip idxs lowest ordered as ladderized"
        # n internal nodes - 1
        idx = self.nnodes - 1
        # from root to tips label idx; unnamed nodes get their idx as name
        for node in self.treenode.traverse("levelorder"):
            if not node.is_leaf():
                node.add_feature("idx", idx)
                if not node.name:
                    node.name = str(idx)
                idx -= 1
        # external nodes: lowest numbers are for tips (0-N)
        for node in self.treenode.iter_leaves():
            node.add_feature("idx", idx)
            if not node.name:
                node.name = str(idx)
            idx -= 1
    def copy(self):
        # shallow copy via copy.copy -- the treenode itself is shared
        return copy(self)
| [
"numpy.array",
"itertools.chain",
"copy.copy"
] | [((8505, 8520), 'numpy.array', 'np.array', (['elist'], {}), '(elist)\n', (8513, 8520), True, 'import numpy as np\n'), ((15108, 15122), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (15116, 15122), True, 'import numpy as np\n'), ((44978, 44988), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (44982, 44988), False, 'from copy import copy\n'), ((30070, 30093), 'itertools.chain', 'itertools.chain', (['*names'], {}), '(*names)\n', (30085, 30093), False, 'import itertools\n'), ((30169, 30192), 'itertools.chain', 'itertools.chain', (['*nidxs'], {}), '(*nidxs)\n', (30184, 30192), False, 'import itertools\n')] |
import argparse
import numpy as np
from PIL.JpegImagePlugin import JpegImageFile
from torch.utils.data import Dataset
from datasets import AVAILABLE_MODALITIES, AVAILABLE_MODALITIES_DIM, AVAILABLE_DATASETS
from datasets import UtdMhadDataset, MmactDataset
def calculate_means_stds(dataset: Dataset, dim=None):
    """
    Compute each sample's mean and std (over the given dimensions) and
    average them across the whole dataset.
    :param dataset: Dataset yielding (data, label) pairs
    :param dim: dimensions over which each sample is reduced
    :return: mean, std
    """
    sample_means, sample_stds = [], []
    for sample, _label in dataset:
        if isinstance(sample, JpegImageFile):
            # PIL images have no .mean()/.std(); convert to an array first
            pixels = np.array(sample)
            sample_means.append(pixels.mean())
            sample_stds.append(pixels.std())
        else:
            sample_means.append(sample.mean(dim))
            sample_stds.append(sample.std(dim))
    # average per-sample statistics; keep per-dim values when dim was given
    axis = 0 if dim is not None else None
    return np.array(sample_means).mean(axis), np.array(sample_stds).mean(axis)
def get_dataset_class(dataset_name):
    """Map a dataset name ('utd_mhad' or 'mmact') to its Dataset class."""
    if dataset_name == 'utd_mhad':
        return UtdMhadDataset
    if dataset_name == 'mmact':
        return MmactDataset
    raise Exception('Unsupported dataset: %s' % dataset_name)
if __name__ == '__main__':
    # Set argument parser
    parser = argparse.ArgumentParser(prog='PROG')
    parser.add_argument('--dataset', choices=AVAILABLE_DATASETS, default='utd_mhad')
    parser.add_argument('--modality', choices=AVAILABLE_MODALITIES, required=True)
    args = parser.parse_args()
    # resolve the dataset class and the per-modality reduction dims
    SelectedDataset = get_dataset_class(args.dataset)
    selectedDim = AVAILABLE_MODALITIES_DIM[AVAILABLE_MODALITIES.index(args.modality)]
    # compute and report dataset-wide normalization statistics
    train_dataset = SelectedDataset(modality=args.modality)
    mean, std = calculate_means_stds(train_dataset, dim=selectedDim)
    print('Mean: %s' % mean)
    print('Std: %s' % std)
| [
"datasets.AVAILABLE_MODALITIES.index",
"numpy.array",
"argparse.ArgumentParser"
] | [((1288, 1324), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""PROG"""'}), "(prog='PROG')\n", (1311, 1324), False, 'import argparse\n'), ((1622, 1663), 'datasets.AVAILABLE_MODALITIES.index', 'AVAILABLE_MODALITIES.index', (['args.modality'], {}), '(args.modality)\n', (1648, 1663), False, 'from datasets import AVAILABLE_MODALITIES, AVAILABLE_MODALITIES_DIM, AVAILABLE_DATASETS\n'), ((646, 660), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (654, 660), True, 'import numpy as np\n'), ((841, 856), 'numpy.array', 'np.array', (['means'], {}), '(means)\n', (849, 856), True, 'import numpy as np\n'), ((904, 918), 'numpy.array', 'np.array', (['stds'], {}), '(stds)\n', (912, 918), True, 'import numpy as np\n')] |
import numpy as np
import nltk
import string
from nltk.tokenize import sent_tokenize
import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import json
def generate_summary(query, docids, concept, all_docs, num_sentence, snomed=True):
    """
    Build an extractive summary about `concept` from the given documents.

    Candidate sentences mentioning the concept are pulled from document
    titles and abstracts, vectorized with tf-idf over stemmed tokens,
    scored against the query by cosine similarity, and selected greedily
    with an MMR-style trade-off (lambda = 0.5) between relevance and
    redundancy.

    :param query: free-text query string.
    :param docids: document ids to draw from (sorted in place; capped at 100).
    :param concept: concept identifier to locate in the documents.
    :param all_docs: mapping from docid to document record.
    :param num_sentence: number of sentences to select.
    :param snomed: use SNOMED concept spans if True, phrase spans otherwise.
    :return: list of selected candidate dicts ('sent', 'span', 'pos', 'c_span').
    """
    stemmer = PorterStemmer()
    docids.sort()
    docids = docids[:min(100, len(docids))]
    all_title_cand, all_abs_cand = extract_sent_candids(docids, concept, all_docs, snomed)
    all_title_text = [cand['sent'] for cand in all_title_cand]
    all_abs_text = [cand['sent'] for cand in all_abs_cand]
    # Row 0 is the query; the remaining rows are the candidate sentences.
    all_text = [query] + all_title_text + all_abs_text
    all_cand = all_title_cand + all_abs_cand
    raw_text = []
    for t in all_text:
        # tokenize, strip punctuation and stem before tf-idf
        words = word_tokenize(t)
        words = [w for w in words if w not in string.punctuation]
        words = [stemmer.stem(w) for w in words]
        raw_text.append(' '.join(words))
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(raw_text)
    query = X[0, :].reshape((1, -1))
    cand = X[1:, :]
    sim = cosine_similarity(query, cand)
    # Similarity cache: the diagonal holds query-candidate similarity; -1
    # marks candidate-candidate cells not yet filled by select_sentence().
    # (np.float was removed in NumPy 1.24 -- use the builtin float.)
    sim_mat = -np.ones((len(all_cand), len(all_cand)), dtype=float)
    for i in range(len(all_cand)):
        sim_mat[i, i] = sim[0, i]
    l = 0.5
    selected_sentence = []
    while len(selected_sentence) < num_sentence:
        new_sent_id = select_sentence(cand, selected_sentence, sim_mat, l)
        selected_sentence.append(new_sent_id)
    return [all_cand[sent_id] for sent_id in selected_sentence]
def select_sentence(cand, selected_sentence, sim_mat, l):
    """
    Pick the next sentence under an MMR-style criterion.

    :param cand: matrix of candidate sentence tf-idf vectors (only used
        once at least one sentence has been selected).
    :param selected_sentence: row indices already selected, in order.
    :param sim_mat: square array; the diagonal holds each candidate's
        similarity to the query, off-diagonal cells cache candidate-
        candidate similarities (-1 means "not computed yet").
    :param l: trade-off (lambda) between query relevance and redundancy.
    :return: index of the best next sentence.
    """
    best_sent = 0
    # np.NINF was removed in NumPy 2.0; -np.inf is the portable spelling.
    best_score = -np.inf
    if not selected_sentence:
        # First pick: plain argmax of query similarity (on the diagonal).
        for i in range(sim_mat.shape[0]):
            score = sim_mat[i, i]
            if score > best_score:
                best_score = score
                best_sent = i
        return best_sent
    # Cache similarities between the most recently selected sentence and
    # all candidates so later iterations can reuse them.
    sim_list = cosine_similarity(cand[selected_sentence[-1], :].reshape(1, -1), cand)
    sim_mat[selected_sentence[-1], :] = sim_list
    for i in range(sim_mat.shape[0]):
        if i in selected_sentence:
            continue
        query_sim = sim_mat[i, i]
        # Redundancy term: max similarity to any already-selected sentence.
        sim = []
        for c in selected_sentence:
            sim.append(sim_mat[c, i])
            assert sim_mat[c, i] != -1
        max_sim = np.amax(sim)
        score = l * query_sim - (1 - l) * max_sim
        if score > best_score:
            best_score = score
            best_sent = i
    return best_sent
def extract_sent_candids(docids, concept, all_docs, snomed):
    """
    Gather candidate sentences that mention `concept` from each document's
    title and abstract.

    :param snomed: if True use SNOMED concept span annotations, otherwise
        the phrase span annotations.
    :return: (title_candidates, abstract_candidates) lists of sentence dicts.
    """
    all_title_cand = []
    all_abs_cand = []
    for i, docid in enumerate(docids):
        doc = all_docs[docid]
        # titles/abstracts appear to be stored as 1-element lists -- confirm
        title = doc['title'][0]
        title_sent = split_and_span(title)
        if snomed:
            title_cand = find_candidates(title_sent, concept, doc['title_spans'])
        else:
            title_cand = find_candidates(title_sent, concept, doc['title_phrase_spans'])
        all_title_cand += title_cand
        abstract = doc['abstract'][0]
        # 'abstract_sen' holds JSON-encoded sentence boundary offsets
        abstract_sent = abstract_split(abstract, json.loads(doc['abstract_sen']))
        if snomed:
            abstract_cand = find_candidates(abstract_sent, concept, doc['abstract_spans'])
        else:
            abstract_cand = find_candidates(abstract_sent, concept, doc['abstract_phrase_spans'])
        all_abs_cand += abstract_cand
    return all_title_cand, all_abs_cand
def split_and_span(content):
    """
    Split text into sentences and record each sentence's word-offset span.

    :return: list of dicts {"sent": str, "span": [start, end] inclusive
        whitespace-token offsets, "pos": sentence index, "c_span": []}.
    """
    # (removed an unused `words = np.array(content.split())` local)
    sents = sent_tokenize(content)
    sent_list = []
    pos = 0
    for i, s in enumerate(sents):
        s_words = s.split()
        # span is inclusive on both ends, measured in whitespace tokens
        span = [pos, pos + len(s_words) - 1]
        pos = pos + len(s_words)
        sent_list.append({"sent": s, "span": span, "pos": i, "c_span": []})
    return sent_list
def abstract_split(abstract, abstract_sen):
    """
    Cut an abstract into sentences using precomputed token boundaries.

    abstract_sen holds the token offsets of sentence starts plus a final
    end-of-text offset; sentence i covers tokens
    [abstract_sen[i], abstract_sen[i+1]).
    """
    tokens = abstract.split(" ")
    sent_list = []
    for pos, (start, end) in enumerate(zip(abstract_sen, abstract_sen[1:])):
        sent_list.append({
            "sent": " ".join(tokens[start:end]),
            "span": [start, end - 1],
            "pos": pos,
            "c_span": [],
        })
    return sent_list
def find_candidates(sent_list, target_concept_id, concepts_spans):
    """
    Collect the sentences that mention the target concept.

    Side effect: appends sentence-relative concept spans onto each
    matching sentence's "c_span" list in sent_list.
    """
    # token spans where the target concept was annotated
    target_spans = [entry['span'] for entry in concepts_spans
                    if target_concept_id in entry['cui_list']]
    # attribute each concept span to the first sentence fully containing it,
    # recording the span relative to that sentence's start
    for span in target_spans:
        for sent in sent_list:
            s_start, s_end = sent['span'][0], sent['span'][1]
            if span[0] >= s_start and span[1] <= s_end:
                sent['c_span'].append([span[0] - s_start, span[1] - s_start])
                break
    return [sent for sent in sent_list if sent['c_span']]
| [
"sklearn.metrics.pairwise.cosine_similarity",
"nltk.stem.PorterStemmer",
"json.loads",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.amax",
"nltk.tokenize.sent_tokenize",
"nltk.tokenize.word_tokenize"
] | [((432, 447), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (445, 447), False, 'from nltk.stem import PorterStemmer\n'), ((1070, 1087), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (1085, 1087), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1198, 1228), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['query', 'cand'], {}), '(query, cand)\n', (1215, 1228), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((3689, 3711), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['content'], {}), '(content)\n', (3702, 3711), False, 'from nltk.tokenize import sent_tokenize\n'), ((880, 896), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['t'], {}), '(t)\n', (893, 896), False, 'from nltk.tokenize import word_tokenize\n'), ((2504, 2516), 'numpy.amax', 'np.amax', (['sim'], {}), '(sim)\n', (2511, 2516), True, 'import numpy as np\n'), ((3276, 3307), 'json.loads', 'json.loads', (["doc['abstract_sen']"], {}), "(doc['abstract_sen'])\n", (3286, 3307), False, 'import json\n')] |
import pandas as pd
import numpy as np
from xloil import *
import typing
@converter(pd.DataFrame, register=True)
class PDFrame:
    """
    Converter which takes tables with horizontal records to pandas dataframes.
    **PDFrame(element, headings, index)**
    Examples
    --------
    ::
        @xlo.func
        def array1(x: xlo.PDFrame(int)):
            pass
        @xlo.func
        def array2(y: xlo.PDFrame(float, headings=True)):
            pass
        @xlo.func
        def array3(z: xlo.PDFrame(str, index='Index')):
            pass
    Parameters
    ----------
    element : type
        Pandas performance can be improved by explicitly specifying
        a type. In particular, creation of a homogenously typed
        Dataframe does not require copying the data. Not currently
        implemented!
    headings : bool
        Specifies that the first row should be interpreted as column
        headings
    index : various
        Is used in a call to pandas.DataFrame.set_index()
    """
    def __init__(self, element=None, headings=True, index=None):
        # TODO: use element_type in the dataframe construction
        self._element_type = element
        self._headings = headings
        self._index = index
    def read(self, x):
        """Convert an Excel value to a DataFrame (pass-through if already one)."""
        # A converter should check if provided value is already of the correct type.
        # This can happen as xlOil expands cache strings before calling user converters
        if isinstance(x, pd.DataFrame):
            return x
        elif isinstance(x, ExcelArray):
            df = None
            idx = self._index
            if self._headings:
                if x.nrows < 2:
                    raise Exception("Expected at least 2 rows")
                # first row becomes the column labels, remaining rows the data
                headings = x[0,:].to_numpy(dims=1)
                data = {headings[i]: x[1:, i].to_numpy(dims=1) for i in range(x.ncols)}
                if idx is not None and idx in data:
                    # build the index directly from the matching column so
                    # the later set_index() call can be skipped
                    index = data.pop(idx)
                    df = pd.DataFrame(data, index=index).rename_axis(idx)
                    idx = None
                else:
                    # This will do a copy. The copy can be avoided by monkey
                    # patching pandas - see stackoverflow
                    df = pd.DataFrame(data)
            else:
                df = pd.DataFrame(x.to_numpy())
            if idx is not None:
                df.set_index(idx, inplace=True)
            return df
        raise CannotConvert(f"Unsupported type: {type(x)!r}")
    def write(self, val):
        """Lay a DataFrame out as a 2D object array for writing to Excel."""
        # Construct this array
        #   [filler]     [col_labels]
        #   [row_labels] [values]
        row_labels = val.index.values[:, np.newaxis]
        if self._headings:
            col_labels = val.columns.values
            # filler pads the corner above the (possibly multi-level) index
            filler_size = (np.atleast_2d(col_labels).shape[0], row_labels.shape[1])
            filler = np.full(filler_size, ' ', dtype=object)
            # Write the name of the index in the top left
            filler[0, 0] = val.index.name
            return np.block([[filler, col_labels], [row_labels, val.values]])
        else:
            return np.block([[row_labels, val.values]])
@converter(target=pd.Timestamp, register=True)
class PandasTimestamp:
    """
    There is no need to use this class directly in annotations, rather
    use ``pandas.Timestamp``
    """
    def read(self, val):
        # pd.Timestamp accepts datetimes, strings and numeric epoch values
        return pd.Timestamp(val)
    def write(self, val):
        # hand a plain python datetime back to Excel
        return val.to_pydatetime()
| [
"numpy.full",
"pandas.DataFrame",
"numpy.block",
"pandas.Timestamp",
"numpy.atleast_2d"
] | [((3427, 3444), 'pandas.Timestamp', 'pd.Timestamp', (['val'], {}), '(val)\n', (3439, 3444), True, 'import pandas as pd\n'), ((2884, 2923), 'numpy.full', 'np.full', (['filler_size', '""" """'], {'dtype': 'object'}), "(filler_size, ' ', dtype=object)\n", (2891, 2923), True, 'import numpy as np\n'), ((3060, 3118), 'numpy.block', 'np.block', (['[[filler, col_labels], [row_labels, val.values]]'], {}), '([[filler, col_labels], [row_labels, val.values]])\n', (3068, 3118), True, 'import numpy as np\n'), ((3152, 3188), 'numpy.block', 'np.block', (['[[row_labels, val.values]]'], {}), '([[row_labels, val.values]])\n', (3160, 3188), True, 'import numpy as np\n'), ((2263, 2281), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2275, 2281), True, 'import pandas as pd\n'), ((2806, 2831), 'numpy.atleast_2d', 'np.atleast_2d', (['col_labels'], {}), '(col_labels)\n', (2819, 2831), True, 'import numpy as np\n'), ((2000, 2031), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'index'}), '(data, index=index)\n', (2012, 2031), True, 'import pandas as pd\n')] |
from PIL import Image
import numpy as np
from time import time
import os
# recolor a folder of images by (r,g,b) * (shift_kern) element-wise.
# copy recolored images to out_root.
def build_shift(in_root, out_root, shift_kern):
    """
    Recolor every image under in_root by multiplying its (r, g, b) channels
    element-wise with shift_kern, clipping to [0, 255], and saving the
    results under out_root with the same category/file layout.

    :param in_root: root folder containing category subfolders of images.
    :param out_root: destination root (created if missing).
    :param shift_kern: sequence of 3 per-channel multipliers (r, g, b).

    Fixes vs the previous version:
    - pixels are widened before multiplying; multiplying the raw uint8
      array wrapped around modulo 256, so the >255 clamp never fired.
    - the array is no longer reshaped to (width, height, 3); PIL arrays
      are (height, width, 3), and that reshape scrambled non-square images.
    """
    if not os.path.exists(out_root):
        os.mkdir(out_root)
    kern = np.asarray(shift_kern)
    for root, categories, files in os.walk(in_root):
        tic = time()
        for file in files:
            img = Image.open(os.path.join(root, file))
            # widen dtype so the per-channel multiply cannot wrap
            pixels = np.asarray(img, dtype=np.int64)
            # broadcast kern over the trailing channel axis, clamp, and
            # convert back to uint8 for Image.fromarray
            shifted = np.clip(pixels * kern, 0, 255).astype(np.uint8)
            out = Image.fromarray(shifted, 'RGB')
            out_dir = os.path.join(out_root, os.path.basename(root))
            if not os.path.exists(out_dir):
                os.mkdir(out_dir)
            outname = os.path.join(out_dir, file)
            out.save(outname)
            print("Saved to: {} ({:>40.2f})".format(outname, time() - tic))
if __name__ == '__main__':
    # (r, g, b) multipliers: (0, 0, 1) keeps only the blue channel
    shift_kern = [0,0,1]
    num_cat = 15
    in_root = 'images_out_{}'.format(num_cat)
    out_root = 'images_shift_blue_{}'.format(num_cat)
    build_shift(in_root, out_root, shift_kern)
| [
"os.mkdir",
"os.path.basename",
"os.walk",
"os.path.exists",
"time.time",
"numpy.where",
"numpy.array",
"os.path.join",
"numpy.concatenate"
] | [((367, 383), 'os.walk', 'os.walk', (['in_root'], {}), '(in_root)\n', (374, 383), False, 'import os\n'), ((260, 284), 'os.path.exists', 'os.path.exists', (['out_root'], {}), '(out_root)\n', (274, 284), False, 'import os\n'), ((286, 304), 'os.mkdir', 'os.mkdir', (['out_root'], {}), '(out_root)\n', (294, 304), False, 'import os\n'), ((399, 405), 'time.time', 'time', ([], {}), '()\n', (403, 405), False, 'from time import time\n'), ((706, 739), 'numpy.where', 'np.where', (['(cha_0 > 255)', '(255)', 'cha_0'], {}), '(cha_0 > 255, 255, cha_0)\n', (714, 739), True, 'import numpy as np\n'), ((898, 931), 'numpy.where', 'np.where', (['(cha_1 > 255)', '(255)', 'cha_1'], {}), '(cha_1 > 255, 255, cha_1)\n', (906, 931), True, 'import numpy as np\n'), ((1092, 1125), 'numpy.where', 'np.where', (['(cha_2 > 255)', '(255)', 'cha_2'], {}), '(cha_2 > 255, 255, cha_2)\n', (1100, 1125), True, 'import numpy as np\n'), ((463, 487), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (475, 487), False, 'import os\n'), ((1207, 1252), 'numpy.concatenate', 'np.concatenate', (['(cha_0, cha_1, cha_2)'], {'axis': '(2)'}), '((cha_0, cha_1, cha_2), axis=2)\n', (1221, 1252), True, 'import numpy as np\n'), ((1307, 1329), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (1323, 1329), False, 'import os\n'), ((549, 562), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (557, 562), True, 'import numpy as np\n'), ((1394, 1416), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (1410, 1416), False, 'import os\n'), ((1452, 1474), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (1468, 1474), False, 'import os\n'), ((1569, 1575), 'time.time', 'time', ([], {}), '()\n', (1573, 1575), False, 'from time import time\n')] |
import os
import gc
import time
import sys
import math
import torch
import numpy as np
from tqdm import tqdm
from KGDataset import get_dataset
from sampler import ConstructGraph, EvalDataset
from utils import CommonArgParser, thread_wrapped_func
import torch.multiprocessing as mp
class ArgParser(CommonArgParser):
    """Extends the shared CommonArgParser with options used by candidate
    inference: hop count, expansion limits, worker counts and output naming."""
    def __init__(self):
        super(ArgParser, self).__init__()
        self.add_argument('--has_edge_importance', action='store_true',
                          help='Allow providing edge importance score for each edge during training.'
                               'The positive score will be adjusted '
                               'as pos_score = pos_score * edge_importance')
        self.add_argument('--valid', action='store_true',
                          help='Evaluate the model on the validation set in the training.')
        self.add_argument('--num_hops', type=int, default=2, help='.')
        self.add_argument('--expand_factors', type=int, default=1000000, help='.')
        self.add_argument('--num_workers', type=int, default=16, help='.')
        self.add_argument('--print_on_screen', action='store_true', help='')
        self.add_argument('--num_candidates', type=int, default=20000, help='')
        self.add_argument('--save_file', type=str, default="test_tail_candidate", help='')
def prepare_save_path(args):
    """
    Ensure args.save_path exists.

    Uses os.makedirs(exist_ok=True) instead of the race-prone
    exists()+mkdir() pattern; this also creates intermediate directories
    when save_path is nested.
    """
    os.makedirs(args.save_path, exist_ok=True)
def infer(args, samplers, save_paths):
    """
    Walk the evaluation samplers, collect the candidate entities for every
    (s, p, o) triple, report a running hit-rate of the gold answer among
    the candidates, and save [spo | candidates] rows to save_paths[0].

    Returns (candidates, spos) torch tensors. (The previous version
    returned np.save's result, which is None, breaking the
    `candidates, spos = infer(...)` unpacking in main().)
    """
    candidates, spos = [], []
    find, total = 0, 0
    for sampler in samplers:
        for candidate, is_find, spo in tqdm(sampler, disable=not args.print_on_screen, total=sampler.num_edges):
            candidates.append(candidate.unsqueeze(0))
            spos.append(spo)
            if is_find == 1:
                find += 1
            total += 1
            if total % 100 == 0:
                # running fraction of triples whose answer is in the candidates
                print("%d/%d=%f" % (find, total, float(find)/float(total)))
    candidates = torch.cat(candidates, axis=0)
    spos = torch.cat(spos, axis=0)
    # persist [spo | candidates] rows before returning
    ret = torch.cat([spos, candidates], axis=1).numpy()
    np.save(save_paths[0], ret)
    return candidates, spos
@thread_wrapped_func
def infer_mp(args, samplers, save_paths, rank=0, mode='Test'):
    """Per-process entry point: run :func:`infer` inside a worker process."""
    multi_process = args.num_proc > 1
    if multi_process:
        # Cap intra-op threads so concurrent workers do not oversubscribe CPUs.
        torch.set_num_threads(args.num_thread)
    infer(args, samplers, save_paths)
def main():
    """Build candidate sets for validation triples and dump them to disk.

    Workflow:
      1. Parse arguments and make sure the output directory exists.
      2. Load the dataset and construct its graph plus degree tables.
      3. Create one (or ``args.num_proc``) 'tail-batch' evaluation samplers
         and run :func:`infer` over each; ``infer`` saves the resulting
         candidate matrix as a ``.npy`` file.
    """
    args = ArgParser().parse_args()
    prepare_save_path(args)
    dataset = get_dataset(args.data_path, args.dataset, args.format,
                          args.delimiter, args.data_files, False)
    g, in_degree, out_degree = ConstructGraph(dataset, args)
    if args.valid or args.test:
        eval_dataset = EvalDataset(g, dataset, args)
    if args.num_proc > 1:
        if args.valid:
            valid_samplers, save_paths = [], []
            for i in range(args.num_proc):
                valid_sampler = eval_dataset.create_sampler('valid', args.batch_size_eval,
                                                            args.num_hops,
                                                            args.expand_factors,
                                                            'tail-batch',
                                                            in_degree,
                                                            num_workers=args.num_workers,
                                                            num_candidates=args.num_candidates,
                                                            rank=i, ranks=args.num_proc)
                save_file = args.save_file + '_' + \
                    str(args.num_candidates) + '_' + str(i) + '.npy'
                # wikikg90M ships its own processed/ layout; every other
                # dataset lives directly under <data_path>/<dataset>/.
                if args.dataset == 'wikikg90M':
                    save_path = os.path.join(args.data_path, 'wikikg90m-v2/processed/', save_file)
                else:
                    save_path = os.path.join(args.data_path, args.dataset, save_file)
                save_paths.append(save_path)
                valid_samplers.append(valid_sampler)
            procs = []
            for i in range(args.num_proc):
                proc = mp.Process(target=infer_mp, args=(
                    args, [valid_samplers[i]], [save_paths[i]]))
                procs.append(proc)
                proc.start()
            for proc in procs:
                proc.join()
    else:
        if args.valid:
            valid_sampler = eval_dataset.create_sampler('valid', args.batch_size_eval,
                                                        args.num_hops,
                                                        args.expand_factors,
                                                        'tail-batch',
                                                        in_degree,
                                                        num_workers=args.num_workers,
                                                        num_candidates=args.num_candidates)
            save_file = args.save_file + '_' + str(args.num_candidates) + '.npy'
            if args.dataset == 'wikikg90M':
                save_path = os.path.join(args.data_path, 'wikikg90m-v2/processed/', save_file)
            else:
                save_path = os.path.join(args.data_path, args.dataset, save_file)
            # infer() saves its own result to save_path.  The previous
            # ``candidates, spos = infer(...)`` crashed with a TypeError
            # because infer() does not return two values.
            infer(args, [valid_sampler], [save_path])


if __name__ == '__main__':
    main()
| [
"sampler.EvalDataset",
"os.mkdir",
"tqdm.tqdm",
"numpy.save",
"sampler.ConstructGraph",
"os.path.exists",
"torch.cat",
"torch.set_num_threads",
"torch.multiprocessing.Process",
"os.path.join",
"KGDataset.get_dataset"
] | [((1951, 1980), 'torch.cat', 'torch.cat', (['candidates'], {'axis': '(0)'}), '(candidates, axis=0)\n', (1960, 1980), False, 'import torch\n'), ((1992, 2015), 'torch.cat', 'torch.cat', (['spos'], {'axis': '(0)'}), '(spos, axis=0)\n', (2001, 2015), False, 'import torch\n'), ((2083, 2110), 'numpy.save', 'np.save', (['save_paths[0]', 'ret'], {}), '(save_paths[0], ret)\n', (2090, 2110), True, 'import numpy as np\n'), ((2400, 2499), 'KGDataset.get_dataset', 'get_dataset', (['args.data_path', 'args.dataset', 'args.format', 'args.delimiter', 'args.data_files', '(False)'], {}), '(args.data_path, args.dataset, args.format, args.delimiter, args\n .data_files, False)\n', (2411, 2499), False, 'from KGDataset import get_dataset\n'), ((2552, 2581), 'sampler.ConstructGraph', 'ConstructGraph', (['dataset', 'args'], {}), '(dataset, args)\n', (2566, 2581), False, 'from sampler import ConstructGraph, EvalDataset\n'), ((1363, 1393), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (1377, 1393), False, 'import os\n'), ((1403, 1427), 'os.mkdir', 'os.mkdir', (['args.save_path'], {}), '(args.save_path)\n', (1411, 1427), False, 'import os\n'), ((1590, 1662), 'tqdm.tqdm', 'tqdm', (['sampler'], {'disable': '(not args.print_on_screen)', 'total': 'sampler.num_edges'}), '(sampler, disable=not args.print_on_screen, total=sampler.num_edges)\n', (1594, 1662), False, 'from tqdm import tqdm\n'), ((2231, 2269), 'torch.set_num_threads', 'torch.set_num_threads', (['args.num_thread'], {}), '(args.num_thread)\n', (2252, 2269), False, 'import torch\n'), ((2637, 2666), 'sampler.EvalDataset', 'EvalDataset', (['g', 'dataset', 'args'], {}), '(g, dataset, args)\n', (2648, 2666), False, 'from sampler import ConstructGraph, EvalDataset\n'), ((2026, 2063), 'torch.cat', 'torch.cat', (['[spos, candidates]'], {'axis': '(1)'}), '([spos, candidates], axis=1)\n', (2035, 2063), False, 'import torch\n'), ((4038, 4116), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 
'infer_mp', 'args': '(args, [valid_samplers[i]], [save_paths[i]])'}), '(target=infer_mp, args=(args, [valid_samplers[i]], [save_paths[i]]))\n', (4048, 4116), True, 'import torch.multiprocessing as mp\n'), ((4997, 5063), 'os.path.join', 'os.path.join', (['args.data_path', '"""wikikg90m-v2/processed/"""', 'save_file'], {}), "(args.data_path, 'wikikg90m-v2/processed/', save_file)\n", (5009, 5063), False, 'import os\n'), ((5110, 5163), 'os.path.join', 'os.path.join', (['args.data_path', 'args.dataset', 'save_file'], {}), '(args.data_path, args.dataset, save_file)\n', (5122, 5163), False, 'import os\n'), ((3676, 3742), 'os.path.join', 'os.path.join', (['args.data_path', '"""wikikg90m-v2/processed/"""', 'save_file'], {}), "(args.data_path, 'wikikg90m-v2/processed/', save_file)\n", (3688, 3742), False, 'import os\n'), ((3797, 3850), 'os.path.join', 'os.path.join', (['args.data_path', 'args.dataset', 'save_file'], {}), '(args.data_path, args.dataset, save_file)\n', (3809, 3850), False, 'import os\n')] |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy
# Axis bounds for the starting growth rate (population gained per year).
X_MIN = 40.0
X_STEP = 20.0
X_MAX = 300.0

# Sweep of starting growth rates, expressed per year and per month.
growth_per_year = numpy.linspace(X_MIN, X_MAX, 1001)
x_tick_positions = numpy.arange(0.0, X_MAX + X_STEP * 0.5, X_STEP)
growth_per_month = growth_per_year / 12.0

# Accumulate the months needed to clear 10 population levels: reaching
# level i adds a flat bonus of i people per month on top of the base
# rate, and every level costs 100 population points.
months_to_city = numpy.zeros_like(growth_per_month)
for level_bonus in range(10):
    monthly_rate = growth_per_month + level_bonus
    months_to_city += 100.0 / monthly_rate

# Round the y-axis limit up to the next whole year (multiple of 12).
y_limit = (numpy.max(months_to_city) // 12 + 1) * 12.0
y_tick_positions = numpy.arange(0.0, y_limit + 18.0, 12.0)

plt.plot(growth_per_year, months_to_city)
plt.xlabel('Starting growth per year (flat + chance * 3)')
plt.xticks(x_tick_positions)
plt.yticks(y_tick_positions)
plt.ylabel('Months to city')
plt.xlim(xmin=X_MIN)
plt.ylim(ymin=0, ymax=y_limit)
plt.grid(True)
plt.show()
| [
"matplotlib.pyplot.xlim",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yticks",
"numpy.max",
"numpy.arange",
"matplotlib.pyplot.xticks",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotl... | [((170, 204), 'numpy.linspace', 'numpy.linspace', (['x_min', 'x_max', '(1001)'], {}), '(x_min, x_max, 1001)\n', (184, 204), False, 'import numpy\n'), ((215, 262), 'numpy.arange', 'numpy.arange', (['(0.0)', '(x_max + x_step * 0.5)', 'x_step'], {}), '(0.0, x_max + x_step * 0.5, x_step)\n', (227, 262), False, 'import numpy\n'), ((339, 379), 'numpy.zeros_like', 'numpy.zeros_like', (['starting_pop_per_month'], {}), '(starting_pop_per_month)\n', (355, 379), False, 'import numpy\n'), ((691, 727), 'numpy.arange', 'numpy.arange', (['(0.0)', '(ymax + 18.0)', '(12.0)'], {}), '(0.0, ymax + 18.0, 12.0)\n', (703, 727), False, 'import numpy\n'), ((731, 777), 'matplotlib.pyplot.plot', 'plt.plot', (['starting_pop_per_year', 'required_time'], {}), '(starting_pop_per_year, required_time)\n', (739, 777), True, 'import matplotlib.pyplot as plt\n'), ((779, 837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Starting growth per year (flat + chance * 3)"""'], {}), "('Starting growth per year (flat + chance * 3)')\n", (789, 837), True, 'import matplotlib.pyplot as plt\n'), ((839, 857), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xticks'], {}), '(xticks)\n', (849, 857), True, 'import matplotlib.pyplot as plt\n'), ((859, 877), 'matplotlib.pyplot.yticks', 'plt.yticks', (['yticks'], {}), '(yticks)\n', (869, 877), True, 'import matplotlib.pyplot as plt\n'), ((879, 907), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Months to city"""'], {}), "('Months to city')\n", (889, 907), True, 'import matplotlib.pyplot as plt\n'), ((909, 929), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmin': 'x_min'}), '(xmin=x_min)\n', (917, 929), True, 'import matplotlib.pyplot as plt\n'), ((931, 958), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)', 'ymax': 'ymax'}), '(ymin=0, ymax=ymax)\n', (939, 958), True, 'import matplotlib.pyplot as plt\n'), ((962, 976), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (970, 976), True, 'import matplotlib.pyplot as plt\n'), ((978, 
988), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (986, 988), True, 'import matplotlib.pyplot as plt\n'), ((638, 662), 'numpy.max', 'numpy.max', (['required_time'], {}), '(required_time)\n', (647, 662), False, 'import numpy\n')] |
import sys
sys.path.append("../")
import tensorflow as tf
from tensorflow.python.platform import flags
import numpy as np
from sklearn.tree import DecisionTreeClassifier
if sys.version_info.major==2:
from Queue import PriorityQueue
else:
from queue import PriorityQueue
from z3 import *
import os
import copy
from adf_utils.config import census, credit, bank
from adf_baseline.lime import lime_tabular
from adf_model.tutorial_models import dnn
from adf_data.census import census_data
from adf_data.credit import credit_data
from adf_data.bank import bank_data
from adf_utils.utils_tf import model_argmax
from adf_tutorial.utils import cluster
FLAGS = flags.FLAGS
def seed_test_input(dataset, cluster_num, limit):
    """
    Select the seed inputs for fairness testing.

    Rows are drawn round-robin across the K-means clusters so the seeds
    cover the input space: position i of every cluster is consumed before
    moving on to position i + 1.

    :param dataset: the name of dataset
    :param cluster_num: number of clusters to build
    :param limit: the maximum number of seed inputs wanted
    :return: numpy array of row indices of the selected seed inputs
    """
    # build the clustering model
    clf = cluster(dataset, cluster_num)
    clusters = [np.where(clf.labels_ == i) for i in range(cluster_num)]
    rows = []
    max_size = max(len(c[0]) for c in clusters)
    for i in range(max_size):
        for c in clusters:
            # Stop exactly at `limit`.  The original checked only between
            # full round-robin passes and could overshoot the limit by up
            # to cluster_num - 1 seeds.
            if len(rows) == limit:
                return np.array(rows)
            if i < len(c[0]):
                rows.append(c[0][i])
    return np.array(rows)
def getPath(X, sess, x, preds, input, conf):
    """
    Derive the decision path a LIME surrogate tree takes for `input`.

    A local neighbourhood of `input` is sampled with LIME and labelled by
    the model under test; a decision tree is fitted on that neighbourhood
    and the root-to-leaf path for `input` is returned as a list of
    [feature_index, op, threshold, confidence] entries, where confidence
    is the fraction of training samples following the same branch.

    :param X: the whole inputs
    :param sess: TF session
    :param x: input placeholder
    :param preds: the model's symbolic output
    :param input: instance to interpret
    :param conf: the configuration of dataset
    :return: the path for the decision of the given instance
    """
    # Sample a local neighbourhood with LIME and label it with the model.
    explainer = lime_tabular.LimeTabularExplainer(X,
                                                   feature_names=conf.feature_name, class_names=conf.class_name, categorical_features=conf.categorical_features,
                                                   discretize_continuous=True)
    g_data = explainer.generate_instance(input, num_samples=5000)
    g_labels = model_argmax(sess, x, preds, g_data)
    # Fit the interpretable surrogate tree on the neighbourhood.
    surrogate = DecisionTreeClassifier(random_state=2019)
    surrogate.fit(g_data, g_labels)
    node_ids = surrogate.decision_path(np.array([input])).indices
    path = []
    # Walk consecutive (node, child) pairs along the decision path.  The
    # final node is a leaf (feature == -2) and contributes no predicate.
    for node, child in zip(node_ids, node_ids[1:]):
        feat = surrogate.tree_.feature[node]
        if feat == -2:
            continue
        left_count = surrogate.tree_.n_node_samples[surrogate.tree_.children_left[node]]
        right_count = surrogate.tree_.n_node_samples[surrogate.tree_.children_right[node]]
        left_confidence = 1.0 * left_count / (left_count + right_count)
        right_confidence = 1.0 - left_confidence
        if surrogate.tree_.children_left[node] == child:
            path.append([feat, "<=", surrogate.tree_.threshold[node], left_confidence])
        else:
            path.append([feat, ">", surrogate.tree_.threshold[node], right_confidence])
    return path
def check_for_error_condition(conf, sess, x, preds, t, sens):
    """
    Check whether a test case is an individual discriminatory instance.

    Every alternative value of the sensitive feature is substituted in
    turn; the instance is discriminatory as soon as one substitution
    changes the model's predicted label.

    :param conf: the configuration of dataset
    :param sess: TF session
    :param x: input placeholder
    :param preds: the model's symbolic output
    :param t: test case
    :param sens: the index of sensitive feature (1-based)
    :return: True iff it is an individual discriminatory instance
    """
    label = model_argmax(sess, x, preds, np.array([t]))
    lower, upper = conf.input_bounds[sens - 1]
    for val in range(lower, upper + 1):
        if val == t[sens - 1]:
            continue
        variant = copy.deepcopy(t)
        variant[sens - 1] = val
        if model_argmax(sess, x, preds, np.array([variant])) != label:
            return True
    return False
def global_solve(path_constraint, arguments, t, conf):
    """
    Solve the whole path constraint to synthesise a new instance
    (global generation).

    Every feature on the path is bounded by its configured input range and
    forced to the same side of each threshold as the path prescribes.

    :param path_constraint: the constraint of path
    :param arguments: the z3 Int variables, one per feature
    :param t: test case used as the template for unconstrained features
    :param conf: the configuration of dataset
    :return: new instance as a list of ints, or None if unsatisfiable
    """
    solver = Solver()
    for feat, op, threshold, _confidence in path_constraint:
        solver.add(arguments[feat] >= conf.input_bounds[feat][0])
        solver.add(arguments[feat] <= conf.input_bounds[feat][1])
        if op == "<=":
            solver.add(arguments[feat] <= threshold)
        else:
            solver.add(arguments[feat] > threshold)
    if solver.check() != sat:
        return None
    model = solver.model()
    tnew = copy.deepcopy(t)
    for i in range(len(arguments)):
        # z3 model lookup yields None for variables with no assignment;
        # keep the template value for those.
        if model[arguments[i]] == None:
            continue
        tnew[i] = int(model[arguments[i]].as_long())
    return tnew.astype('int').tolist()
def local_solve(path_constraint, arguments, t, index, conf):
    """
    Solve a single predicate of the path for local generation.

    Only the feature named by ``path_constraint[index]`` is re-solved; all
    predicates on the path mentioning the same feature are kept so the new
    value still satisfies them.

    :param path_constraint: the constraint of path
    :param arguments: the z3 Int variables, one per feature
    :param t: test case used as the template
    :param index: the index of the constraint for local generation
    :param conf: the configuration of dataset
    :return: new instance as a list of ints, or None if unsatisfiable
    """
    target_feat = path_constraint[index][0]
    solver = Solver()
    solver.add(arguments[target_feat] >= conf.input_bounds[target_feat][0])
    solver.add(arguments[target_feat] <= conf.input_bounds[target_feat][1])
    for feat, op, threshold, _confidence in path_constraint:
        if feat != target_feat:
            continue
        if op == "<=":
            solver.add(arguments[feat] <= threshold)
        else:
            solver.add(arguments[feat] > threshold)
    if solver.check() != sat:
        return None
    model = solver.model()
    tnew = copy.deepcopy(t)
    tnew[target_feat] = int(model[arguments[target_feat]].as_long())
    return tnew.astype('int').tolist()
def average_confidence(path_constraint):
    """
    Mean branch confidence (probability) over all predicates of a path.

    :param path_constraint: list of [feature, op, threshold, confidence]
    :return: the average of the confidence column as a float
    """
    confidences = [float(entry[3]) for entry in path_constraint]
    return np.mean(confidences)
def gen_arguments(conf):
    """
    Create one z3 integer variable per dataset feature.

    :param conf: the configuration of dataset
    :return: list of z3 Int variables, ordered like conf.feature_name
    """
    return [Int(conf.feature_name[i]) for i in range(conf.params)]
def symbolic_generation(dataset, sensitive_param, model_path, cluster_num, limit):
    """
    The implementation of symbolic generation for fairness testing.
    Seed inputs are explored via a priority queue; each dequeued instance
    is checked for individual discrimination, and new candidates are
    produced by negating predicates of its LIME decision path (local
    search negates one predicate, global search negates a high-confidence
    prefix predicate).  Results are saved as .npy files under ../results/.
    :param dataset: the name of dataset
    :param sensitive_param: the index of sensitive feature
    :param model_path: the path of testing model
    :param cluster_num: the number of clusters to form as well as the number of
            centroids to generate
    :param limit: the maximum number of test case
    """
    # Dataset loaders and their configurations, keyed by dataset name.
    data = {"census":census_data, "credit":credit_data, "bank":bank_data}
    data_config = {"census":census, "credit":credit, "bank":bank}
    # the rank for priority queue, rank1 is for seed inputs, rank2 for local, rank3 for global
    # (lower rank value = dequeued earlier, so local candidates are explored first)
    rank1 = 5
    rank2 = 1
    rank3 = 10
    # Confidence threshold: global search stops extending the prefix at the
    # first predicate whose confidence falls below T1.
    T1 = 0.3
    # prepare the testing data and model
    X, Y, input_shape, nb_classes = data[dataset]()
    # Keep only the first 80% of the data (the training split).
    p = X.shape[0] * 0.8
    p = int(p)
    X = X[:p]
    Y = Y[:p]
    # One z3 Int variable per feature, used by the constraint solvers.
    arguments = gen_arguments(data_config[dataset])
    # Rebuild the DNN graph and restore the trained weights.
    model = dnn(input_shape, nb_classes)
    x = tf.placeholder(tf.float32, shape=input_shape)
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))
    preds = model(x)
    tf.set_random_seed(1234)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.8
    sess = tf.Session(config=config)
    saver = tf.train.Saver()
    model_path = model_path + dataset + "/test.model"
    saver.restore(sess, model_path)
    # store the result of fairness testing
    # (sets for O(1) de-duplication, parallel lists to preserve order)
    global_disc_inputs = set()
    global_disc_inputs_list = []
    local_disc_inputs = set()
    local_disc_inputs_list = []
    tot_inputs = set()
    # select the seed input for fairness testing
    inputs = seed_test_input(dataset, cluster_num, limit)
    q = PriorityQueue() # low push first
    for inp in inputs[::-1]:
        q.put((rank1,X[inp].tolist()))
    # Paths already solved, to avoid re-querying the solver.
    visited_path = []
    # Counters for the number of local / global solver invocations.
    l_count = 0
    g_count = 0
    while len(tot_inputs) < limit and q.qsize() != 0:
        t = q.get()
        t_rank = t[0]
        t = np.array(t[1])
        # Is the dequeued instance individually discriminatory?
        found = check_for_error_condition(data_config[dataset], sess, x, preds, t, sensitive_param)
        # LIME decision path for this instance, used to derive candidates.
        p = getPath(X, sess, x, preds, t, data_config[dataset])
        # Key the instance by its non-sensitive features only.
        temp = copy.deepcopy(t.tolist())
        temp = temp[:sensitive_param - 1] + temp[sensitive_param:]
        tot_inputs.add(tuple(temp))
        if found:
            if (tuple(temp) not in global_disc_inputs) and (tuple(temp) not in local_disc_inputs):
                # rank > 2 means the instance came from a seed or global
                # candidate; otherwise it came from local search.
                if t_rank > 2:
                    global_disc_inputs.add(tuple(temp))
                    global_disc_inputs_list.append(temp)
                else:
                    local_disc_inputs.add(tuple(temp))
                    local_disc_inputs_list.append(temp)
                if len(tot_inputs) == limit:
                    break
            # local search
            # Negate each single predicate of the path (except ones on the
            # sensitive feature) and solve for a nearby instance.
            for i in range(len(p)):
                path_constraint = copy.deepcopy(p)
                c = path_constraint[i]
                if c[0] == sensitive_param - 1:
                    continue
                if c[1] == "<=":
                    c[1] = ">"
                    c[3] = 1.0 - c[3]
                else:
                    c[1] = "<="
                    c[3] = 1.0 - c[3]
                if path_constraint not in visited_path:
                    visited_path.append(path_constraint)
                    input = local_solve(path_constraint, arguments, t, i, data_config[dataset])
                    l_count += 1
                    if input != None:
                        # Higher average confidence -> larger rank value ->
                        # dequeued later (local candidates still precede
                        # seeds because rank2 + r < rank1).
                        r = average_confidence(path_constraint)
                        q.put((rank2 + r, input))
        # global search
        # Negate the last predicate of each high-confidence prefix of the
        # path and solve for a more distant instance.
        prefix_pred = []
        for c in p:
            if c[0] == sensitive_param - 1:
                continue
            if c[3] < T1:
                break
            n_c = copy.deepcopy(c)
            if n_c[1] == "<=":
                n_c[1] = ">"
                n_c[3] = 1.0 - c[3]
            else:
                n_c[1] = "<="
                n_c[3] = 1.0 - c[3]
            path_constraint = prefix_pred + [n_c]
            # filter out the path_constraint already solved before
            if path_constraint not in visited_path:
                visited_path.append(path_constraint)
                input = global_solve(path_constraint, arguments, t, data_config[dataset])
                g_count += 1
                if input != None:
                    # Higher confidence -> smaller rank value -> dequeued
                    # earlier among global candidates.
                    r = average_confidence(path_constraint)
                    q.put((rank3-r, input))
            prefix_pred = prefix_pred + [c]
    # create the folder for storing the fairness testing result
    if not os.path.exists('../results/'):
        os.makedirs('../results/')
    if not os.path.exists('../results/' + dataset + '/'):
        os.makedirs('../results/' + dataset + '/')
    if not os.path.exists('../results/'+ dataset + '/'+ str(sensitive_param) + '/'):
        os.makedirs('../results/' + dataset + '/'+ str(sensitive_param) + '/')
    # storing the fairness testing result
    np.save('../results/' + dataset + '/' + str(sensitive_param) + '/global_samples_symbolic.npy',
            np.array(global_disc_inputs_list))
    np.save('../results/' + dataset + '/' + str(sensitive_param) + '/local_samples_symbolic.npy',
            np.array(local_disc_inputs_list))
    # print the overview information of result
    print("Total Inputs are " + str(len(tot_inputs)))
    print("Total discriminatory inputs of global search- " + str(len(global_disc_inputs)), g_count)
    print("Total discriminatory inputs of local search- " + str(len(local_disc_inputs)), l_count)
def main(argv=None):
    """Entry point for ``tf.app.run``: launch symbolic generation with FLAGS."""
    run_config = dict(dataset=FLAGS.dataset,
                      sensitive_param=FLAGS.sens_param,
                      model_path=FLAGS.model_path,
                      cluster_num=FLAGS.cluster_num,
                      limit=FLAGS.sample_limit)
    symbolic_generation(**run_config)
if __name__ == '__main__':
    # Register the command-line flags consumed through FLAGS in main().
    flags.DEFINE_string('dataset', 'census', 'the name of dataset')
    flags.DEFINE_integer('sens_param', 9, 'sensitive index, index start from 1, 9 for gender, 8 for race.')
    flags.DEFINE_string('model_path', '../model/', 'the path for testing model')
    flags.DEFINE_integer('sample_limit', 1000, 'number of samples to search')
    flags.DEFINE_integer('cluster_num', 4, 'the number of clusters to form as well as the number of centroids to generate')
    # tf.app.run parses the flags and then invokes main(argv).
    tf.app.run()
tf.app.run() | [
"tensorflow.python.platform.flags.DEFINE_string",
"sklearn.tree.DecisionTreeClassifier",
"tensorflow.ConfigProto",
"sys.path.append",
"os.path.exists",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"queue.PriorityQueue",
"tensorflow.app.run",
"copy.deepcopy",
"tensorflow.train.Saver",
... | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((1014, 1043), 'adf_tutorial.utils.cluster', 'cluster', (['dataset', 'cluster_num'], {}), '(dataset, cluster_num)\n', (1021, 1043), False, 'from adf_tutorial.utils import cluster\n'), ((1458, 1472), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (1466, 1472), True, 'import numpy as np\n'), ((1952, 2135), 'adf_baseline.lime.lime_tabular.LimeTabularExplainer', 'lime_tabular.LimeTabularExplainer', (['X'], {'feature_names': 'conf.feature_name', 'class_names': 'conf.class_name', 'categorical_features': 'conf.categorical_features', 'discretize_continuous': '(True)'}), '(X, feature_names=conf.feature_name,\n class_names=conf.class_name, categorical_features=conf.\n categorical_features, discretize_continuous=True)\n', (1985, 2135), False, 'from adf_baseline.lime import lime_tabular\n'), ((2308, 2344), 'adf_utils.utils_tf.model_argmax', 'model_argmax', (['sess', 'x', 'preds', 'g_data'], {}), '(sess, x, preds, g_data)\n', (2320, 2344), False, 'from adf_utils.utils_tf import model_argmax\n'), ((2392, 2433), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(2019)'}), '(random_state=2019)\n', (2414, 2433), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((4892, 4908), 'copy.deepcopy', 'copy.deepcopy', (['t'], {}), '(t)\n', (4905, 4908), False, 'import copy\n'), ((6091, 6107), 'copy.deepcopy', 'copy.deepcopy', (['t'], {}), '(t)\n', (6104, 6107), False, 'import copy\n'), ((7740, 7768), 'adf_model.tutorial_models.dnn', 'dnn', (['input_shape', 'nb_classes'], {}), '(input_shape, nb_classes)\n', (7743, 7768), False, 'from adf_model.tutorial_models import dnn\n'), ((7777, 7822), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'input_shape'}), '(tf.float32, shape=input_shape)\n', (7791, 7822), True, 'import tensorflow as tf\n'), ((7831, 7883), 'tensorflow.placeholder', 
'tf.placeholder', (['tf.float32'], {'shape': '(None, nb_classes)'}), '(tf.float32, shape=(None, nb_classes))\n', (7845, 7883), True, 'import tensorflow as tf\n'), ((7909, 7933), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), '(1234)\n', (7927, 7933), True, 'import tensorflow as tf\n'), ((7947, 7963), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (7961, 7963), True, 'import tensorflow as tf\n'), ((8036, 8061), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (8046, 8061), True, 'import tensorflow as tf\n'), ((8074, 8090), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (8088, 8090), True, 'import tensorflow as tf\n'), ((8490, 8505), 'queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (8503, 8505), False, 'from queue import PriorityQueue\n'), ((12661, 12724), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset"""', '"""census"""', '"""the name of dataset"""'], {}), "('dataset', 'census', 'the name of dataset')\n", (12680, 12724), False, 'from tensorflow.python.platform import flags\n'), ((12729, 12836), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""sens_param"""', '(9)', '"""sensitive index, index start from 1, 9 for gender, 8 for race."""'], {}), "('sens_param', 9,\n 'sensitive index, index start from 1, 9 for gender, 8 for race.')\n", (12749, 12836), False, 'from tensorflow.python.platform import flags\n'), ((12837, 12913), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model_path"""', '"""../model/"""', '"""the path for testing model"""'], {}), "('model_path', '../model/', 'the path for testing model')\n", (12856, 12913), False, 'from tensorflow.python.platform import flags\n'), ((12918, 12991), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""sample_limit"""', '(1000)', '"""number of samples to search"""'], {}), "('sample_limit', 1000, 
'number of samples to search')\n", (12938, 12991), False, 'from tensorflow.python.platform import flags\n'), ((12996, 13124), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""cluster_num"""', '(4)', '"""the number of clusters to form as well as the number of centroids to generate"""'], {}), "('cluster_num', 4,\n 'the number of clusters to form as well as the number of centroids to generate'\n )\n", (13016, 13124), False, 'from tensorflow.python.platform import flags\n'), ((13121, 13133), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (13131, 13133), True, 'import tensorflow as tf\n'), ((1060, 1086), 'numpy.where', 'np.where', (['(clf.labels_ == i)'], {}), '(clf.labels_ == i)\n', (1068, 1086), True, 'import numpy as np\n'), ((3809, 3822), 'numpy.array', 'np.array', (['[t]'], {}), '([t])\n', (3817, 3822), True, 'import numpy as np\n'), ((8754, 8768), 'numpy.array', 'np.array', (['t[1]'], {}), '(t[1])\n', (8762, 8768), True, 'import numpy as np\n'), ((11372, 11401), 'os.path.exists', 'os.path.exists', (['"""../results/"""'], {}), "('../results/')\n", (11386, 11401), False, 'import os\n'), ((11411, 11437), 'os.makedirs', 'os.makedirs', (['"""../results/"""'], {}), "('../results/')\n", (11422, 11437), False, 'import os\n'), ((11449, 11494), 'os.path.exists', 'os.path.exists', (["('../results/' + dataset + '/')"], {}), "('../results/' + dataset + '/')\n", (11463, 11494), False, 'import os\n'), ((11504, 11546), 'os.makedirs', 'os.makedirs', (["('../results/' + dataset + '/')"], {}), "('../results/' + dataset + '/')\n", (11515, 11546), False, 'import os\n'), ((11865, 11898), 'numpy.array', 'np.array', (['global_disc_inputs_list'], {}), '(global_disc_inputs_list)\n', (11873, 11898), True, 'import numpy as np\n'), ((12010, 12042), 'numpy.array', 'np.array', (['local_disc_inputs_list'], {}), '(local_disc_inputs_list)\n', (12018, 12042), True, 'import numpy as np\n'), ((2582, 2599), 'numpy.array', 'np.array', (['[input]'], {}), 
'([input])\n', (2590, 2599), True, 'import numpy as np\n'), ((3956, 3972), 'copy.deepcopy', 'copy.deepcopy', (['t'], {}), '(t)\n', (3969, 3972), False, 'import copy\n'), ((10574, 10590), 'copy.deepcopy', 'copy.deepcopy', (['c'], {}), '(c)\n', (10587, 10590), False, 'import copy\n'), ((4057, 4073), 'numpy.array', 'np.array', (['[tnew]'], {}), '([tnew])\n', (4065, 4073), True, 'import numpy as np\n'), ((9641, 9657), 'copy.deepcopy', 'copy.deepcopy', (['p'], {}), '(p)\n', (9654, 9657), False, 'import copy\n'), ((6408, 6433), 'numpy.array', 'np.array', (['path_constraint'], {}), '(path_constraint)\n', (6416, 6433), True, 'import numpy as np\n')] |
#------------------------------------------------------------------------------------------------------------------
# train_test_plot_def
#
# MIT License
# Dr <NAME> (<EMAIL>)
#------------------------------------------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
from IPython.display import HTML
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as score
# Import LogisticRegression, KNeighborsClassifier, SVM, DecisionTreeClassifier, RandomForestClassifier, XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
import warnings
warnings.filterwarnings("ignore")
# Pre-defined Classifier Models
model_lrg = LogisticRegression(max_iter=10000)
model_knn = KNeighborsClassifier()
model_svm = svm.SVC()
model_dtr = DecisionTreeClassifier()
model_rfr = RandomForestClassifier()
model_xgb = XGBClassifier()
# List of pre-defined models
models = [model_lrg, model_knn, model_svm, model_dtr, model_rfr, model_xgb]
# List of pre-defined model names
model_names = ['Logistic Regression', 'K-Neighbors', 'Support Vector',
'Decision Tree', 'Random Forest', 'XGBoost']
# First letters of model names
model_ids = 'LKSDRX'
# Initialize an empty list of classification algorithms
algorithm_list = []
# Initialize an empty list for the accuracy of each algorithm
accuracy_list = []
def _plot_confusion_matrix(conf_mat, classes, normalize = False, title = 'Confusion Matrix',
cmap = plt.cm.Greens, size = 5):
"""
Plots confusion matrix for binary or multi-class classification
Parameters:
----------
conf_mat: confusion matrix, given test and predicted values of the target (dependent) variable
classes: comma separated unique class names of the target variable to be predicted
normalize: boolean flag indicating if normalization is to be applied
title: title of the confusion matrix plot
ax: axes object(s) of the plot
cmap: color map
size: integer controlling size of the plot and the labels proportionally
Returns:
-------
None
"""
fig, ax = plt.subplots(figsize = (size, size))
ax.set_title(title, fontsize = size + 8)
plt.tick_params(axis = 'x', labelsize = size + 8)
plt.tick_params(axis = 'y', labelsize = size + 8)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation = 45, size = size + 8)
plt.yticks(tick_marks, classes, size = size + 8)
plt.sca(ax)
fmt = '.2f' if normalize else 'd'
thresh = conf_mat.max() / 2.
for i, j in itertools.product(range(conf_mat.shape[0]), range(conf_mat.shape[1])):
ax.text(j, i, format(conf_mat[i, j], fmt),
horizontalalignment = "center",
color = "white" if conf_mat[i, j] > thresh else "black", size = size + 8)
ax.set_ylabel('True Label', fontsize = size + 8)
ax.set_xlabel('Predicted Label', fontsize = size + 8)
ax.imshow(conf_mat, interpolation = 'nearest', cmap = cmap)
plt.show()
return
def _compare_algos(algorithm_list, accuracy_list, size = 5):
"""
Plots algorithm names vs the testing accuracy figures
Parameters:
----------
algorithm_list: list of names of the algorithms
accuracy_list: list of accuracy values
size: integer controlling the size of the plot and the labels proportionally
Returns:
-------
None
"""
# Combine the list of algorithms and list of accuracy scores into a dataframe
# and sort the values based on accuracy score
df_accuracy = pd.DataFrame(list(zip(algorithm_list, accuracy_list)),
columns = ['Algorithm', 'Accuracy Score']).sort_values(by = ['Accuracy Score'], ascending = True)
# Plot
ax = df_accuracy.plot.barh('Algorithm', 'Accuracy Score', align = 'center', legend = False, color = 'g')
# Add the data labels
for i in ax.patches:
ax.text(i.get_width() + 0.02, i.get_y() + 0.2, str(round(i.get_width(), 2)), fontsize = 10)
# Set the limit
plt.xlim(0, 1.1)
# Set the lables
plt.xlabel('Test Accuracy Score')
# Set ticks
# Generate a list of ticks for y-axis
y_ticks = np.arange(len(algorithm_list))
plt.yticks(y_ticks, df_accuracy['Algorithm'], rotation = 0)
# Set title
plt.title('Algorithm performance')
# Turn of top and right frames
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
return
def train_test_plot_def(df, target, algos, size):
"""
Performs the following operations:
---------------------------------
1. Splits the dataframe into target (dependent variable) and predictors (independent variable)
2. Scales the values of independent variables (all input values must be numeric)
3. Splits the dataset into training and testing sets
4. Loops through the list of classification algorithms to
a) Train
b) Test
c) Evaluate and report performance
d) Plot Confusion Matrix
e) Plot feature importance (if it is available for this particular algorithm)
5. Shows comparative plot of accuracies for all the algorithms
Parameters:
----------
df (pandas dataframe): the whole dataset containing observations for both target and predictor variables
target (string): column name of the target variable in df, e.g. 'Species'
algos (comma separated character string): the first letters of classification algorithms to be applied, e.g. l,r,x
l: LogisticRegression
k: KNeighborsClassifier
s: Support Vector Machine
d: DecisionTreeClassifier
r: RandomForestClassifier
x: XGBClassifier
size (int): size of the plots, typical values are 5, 10, 15
Returns:
-------
None
Example:
-------
train_test_plot_def(iris_df, 'Species', 'l,r,x', 5)
where,
iris_df: input dataframe, e.g. iris_df = pd.read_csv('Iris.csv')
'Species': name of the target column in iris_df
'l,r,x': first letters of (L)ogisticRegression', (R)andomForestClassifier and (X)GBClassifier (case insensitive)
5: size of the plots generated
"""
# set X and y
y = df[target]
X = df.drop(target, axis = 1)
# scale X
X = StandardScaler().fit(X).transform(X)
# Split the data set into training and testing data sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42, stratify = y)
# Target class names
classes = np.unique(df[target])
algorithm_list = []
accuracy_list = []
algos_selected = algos.upper().split(',')
for i in range(len(algos_selected)):
this_algo = algos_selected[i].strip()
indx = model_ids.index(this_algo)
model = models[indx]
algorithm_list.append(model_names[indx])
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
disp_line = '<h1>' + model_names[indx] + '</h1>'
display(HTML(disp_line))
disp_line = '<h2>Scores:</h2>'
display(HTML(disp_line))
acc = accuracy_score(y_test, y_pred)
precision, recall, fscore, support = score(y_test, y_pred)
score_df = pd.DataFrame(list(zip(precision, recall, fscore, support)),
columns = ['Precision', 'Recall', 'F1-score', 'Support'])
score_df = pd.concat([pd.DataFrame(classes), score_df], axis = 1)
score_df.columns = ['Target Class', 'Precision', 'Recall', 'F1-score', 'Support']
display(HTML(score_df.to_html(index = False)))
accuracy_list.append(acc)
cm_model = confusion_matrix(y_test, y_pred)
_plot_confusion_matrix(cm_model, classes = classes,
title = model_names[indx] + '\nTest Accuracy: {:.2f}'.format(acc), size = size)
if hasattr(model, 'feature_importances_'):
fig, ax = plt.subplots(figsize = (size, size))
plt.tick_params(axis = 'x', labelsize = size + 8)
plt.tick_params(axis = 'y', labelsize = size + 8)
plt.xticks(size = size + 8)
plt.yticks(size = size + 8)
plt.xlabel('')
ax.set_title('Feature Importance (using '+ model_names[indx]+')', fontsize=size+10)
importances = pd.DataFrame(np.zeros((X_train.shape[1], 1)), columns = ['Importance'],
index = df.drop(target, axis = 1).columns)
importances.iloc[:,0] = model.feature_importances_
importances.sort_values(by = 'Importance', inplace = True, ascending = False)
top_importances = importances.head(10)
sns.barplot(x = 'Importance', y = top_importances.index, data = top_importances)
plt.show()
_compare_algos(algorithm_list, accuracy_list, size = size)
return | [
"matplotlib.pyplot.title",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.svm.SVC",
"matplotlib.pyplot.tick_params",
"sklearn.metrics.precision_recall_fscore_support",
"numpy.unique... | [((1095, 1128), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1118, 1128), False, 'import warnings\n'), ((1174, 1208), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(10000)'}), '(max_iter=10000)\n', (1192, 1208), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1221, 1243), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (1241, 1243), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1256, 1265), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (1263, 1265), False, 'from sklearn import svm\n'), ((1278, 1302), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1300, 1302), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1315, 1339), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (1337, 1339), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1352, 1367), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '()\n', (1365, 1367), False, 'from xgboost import XGBClassifier\n'), ((2662, 2696), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(size, size)'}), '(figsize=(size, size))\n', (2674, 2696), True, 'import matplotlib.pyplot as plt\n'), ((2748, 2793), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'labelsize': '(size + 8)'}), "(axis='x', labelsize=size + 8)\n", (2763, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2802, 2847), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'labelsize': '(size + 8)'}), "(axis='y', labelsize=size + 8)\n", (2817, 2847), True, 'import matplotlib.pyplot as plt\n'), ((2897, 2956), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)', 'size': '(size + 8)'}), '(tick_marks, classes, rotation=45, size=size + 8)\n', (2907, 2956), True, 'import 
matplotlib.pyplot as plt\n'), ((2965, 3011), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {'size': '(size + 8)'}), '(tick_marks, classes, size=size + 8)\n', (2975, 3011), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3029), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (3025, 3029), True, 'import matplotlib.pyplot as plt\n'), ((3567, 3577), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3575, 3577), True, 'import matplotlib.pyplot as plt\n'), ((4650, 4666), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.1)'], {}), '(0, 1.1)\n', (4658, 4666), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4730), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Test Accuracy Score"""'], {}), "('Test Accuracy Score')\n", (4707, 4730), True, 'import matplotlib.pyplot as plt\n'), ((4843, 4900), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y_ticks', "df_accuracy['Algorithm']"], {'rotation': '(0)'}), "(y_ticks, df_accuracy['Algorithm'], rotation=0)\n", (4853, 4900), True, 'import matplotlib.pyplot as plt\n'), ((4928, 4962), 'matplotlib.pyplot.title', 'plt.title', (['"""Algorithm performance"""'], {}), "('Algorithm performance')\n", (4937, 4962), True, 'import matplotlib.pyplot as plt\n'), ((7180, 7246), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)', 'stratify': 'y'}), '(X, y, test_size=0.2, random_state=42, stratify=y)\n', (7196, 7246), False, 'from sklearn.model_selection import train_test_split\n'), ((7297, 7318), 'numpy.unique', 'np.unique', (['df[target]'], {}), '(df[target])\n', (7306, 7318), True, 'import numpy as np\n'), ((7898, 7928), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7912, 7928), False, 'from sklearn.metrics import accuracy_score\n'), ((7974, 7995), 'sklearn.metrics.precision_recall_fscore_support', 'score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7979, 7995), 
True, 'from sklearn.metrics import precision_recall_fscore_support as score\n'), ((8444, 8476), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (8460, 8476), False, 'from sklearn.metrics import confusion_matrix\n'), ((7786, 7801), 'IPython.display.HTML', 'HTML', (['disp_line'], {}), '(disp_line)\n', (7790, 7801), False, 'from IPython.display import HTML\n'), ((7858, 7873), 'IPython.display.HTML', 'HTML', (['disp_line'], {}), '(disp_line)\n', (7862, 7873), False, 'from IPython.display import HTML\n'), ((8748, 8782), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(size, size)'}), '(figsize=(size, size))\n', (8760, 8782), True, 'import matplotlib.pyplot as plt\n'), ((8797, 8842), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'labelsize': '(size + 8)'}), "(axis='x', labelsize=size + 8)\n", (8812, 8842), True, 'import matplotlib.pyplot as plt\n'), ((8859, 8904), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'labelsize': '(size + 8)'}), "(axis='y', labelsize=size + 8)\n", (8874, 8904), True, 'import matplotlib.pyplot as plt\n'), ((8921, 8946), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(size + 8)'}), '(size=size + 8)\n', (8931, 8946), True, 'import matplotlib.pyplot as plt\n'), ((8961, 8986), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(size + 8)'}), '(size=size + 8)\n', (8971, 8986), True, 'import matplotlib.pyplot as plt\n'), ((9001, 9015), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (9011, 9015), True, 'import matplotlib.pyplot as plt\n'), ((9548, 9622), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Importance"""', 'y': 'top_importances.index', 'data': 'top_importances'}), "(x='Importance', y=top_importances.index, data=top_importances)\n", (9559, 9622), True, 'import seaborn as sns\n'), ((9654, 9664), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9662, 9664), True, 
'import matplotlib.pyplot as plt\n'), ((8192, 8213), 'pandas.DataFrame', 'pd.DataFrame', (['classes'], {}), '(classes)\n', (8204, 8213), True, 'import pandas as pd\n'), ((9164, 9195), 'numpy.zeros', 'np.zeros', (['(X_train.shape[1], 1)'], {}), '((X_train.shape[1], 1))\n', (9172, 9195), True, 'import numpy as np\n'), ((7042, 7058), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7056, 7058), False, 'from sklearn.preprocessing import StandardScaler\n')] |
"""
StateIO
Container to store state object.
Several useful functions are implemented in this class:
1. Saving intermediate results to files.
2. Recover workspace at any iteration (label set and unlabel set).
3. Recover workspace from the intermediate result file in case the program exits unexpectedly.
4. Gathering and checking the information stored in State object.
5. Print active learning progress: current_iteration, current_mean_performance, current_cost, etc.
"""
# Authors: <NAME>
# License: BSD 3 clause
from __future__ import division
import collections.abc
import copy
import os
import pickle
import sys
import numpy as np
import prettytable as pt
from .state import State
from ..index import IndexCollection, MultiLabelIndexCollection
from ..index.multi_label_tools import check_index_multilabel
from ..utils.interface import BaseCollection
__all__ = ['StateIO',
]
class StateIO:
"""
A class to store states.
Functions including:
1. Saving intermediate results to files.
2. Recover workspace at any iteration (label set and unlabel set).
3. Recover workspace from the intermediate result file in case the program exits unexpectedly.
4. Gathering and checking the information stored in State object.
5. Print active learning progress: current_iteration, current_mean_performance, current_cost, etc.
Parameters
----------
round: int
Number of k-fold experiments loop. 0 <= round < k
train_idx: array_like
Training index of one fold experiment.
test_idx: array_like
Testing index of one fold experiment.
init_L: array_like
Initial labeled index of one fold experiment.
init_U: array_like
Initial unlabeled index of one fold experiment.
initial_point: object, optional (default=None)
The performance before any querying.
If not specify, the initial point of different methods will be different.
saving_path: str, optional (default='.')
Path to save the intermediate files. If None is given, it will
not save the intermediate result.
check_flag: bool, optional (default=True)
Whether to check the validity of states.
verbose: bool, optional (default=True)
Whether to print query information during the AL process.
print_interval: int optional (default=1)
How many queries will trigger a print when verbose is True.
"""
def __init__(self, round, train_idx, test_idx, init_L, init_U, initial_point=None, saving_path=None,
check_flag=True, verbose=True, print_interval=1):
assert (isinstance(check_flag, bool))
assert (isinstance(verbose, bool))
self.__check_flag = check_flag
self.__verbose = verbose
self.__print_interval = print_interval
if self.__check_flag:
# check validity
assert (isinstance(train_idx, collections.Iterable))
assert (isinstance(test_idx, collections.Iterable))
assert (isinstance(init_U, collections.Iterable))
assert (isinstance(init_L, collections.Iterable))
assert (isinstance(round, int) and round >= 0)
self.round = round
self.train_idx = copy.copy(train_idx)
self.test_idx = copy.copy(test_idx)
if isinstance(init_U, BaseCollection) and isinstance(init_L, BaseCollection):
self.init_U = copy.deepcopy(init_U)
self.init_L = copy.deepcopy(init_L)
else:
try:
check_index_multilabel(init_L)
check_index_multilabel(init_U)
self.init_U = copy.deepcopy(MultiLabelIndexCollection(init_U))
self.init_L = copy.deepcopy(MultiLabelIndexCollection(init_L))
except TypeError:
self.init_U = copy.deepcopy(IndexCollection(init_U))
self.init_L = copy.deepcopy(IndexCollection(init_L))
# self.init_U = copy.deepcopy(IndexCollection(init_U) if not isinstance(init_U, BaseCollection) else init_U)
# self.init_L = copy.deepcopy(IndexCollection(init_L) if not isinstance(init_L, BaseCollection) else init_L)
self.initial_point = initial_point
self.batch_size = 0
self.__state_list = []
self._first_print = True
self.cost_inall = 0
self._numqdata = 0
self._saving_file_name = 'AL_round_' + str(self.round) + '.pkl'
self._saving_dir = None
if saving_path is not None:
if not isinstance(saving_path, str):
raise TypeError("A string is expected, but received: %s" % str(type(saving_path)))
saving_path = os.path.abspath(saving_path)
if os.path.isdir(saving_path):
self._saving_dir = saving_path
else:
self._saving_dir, self._saving_file_name = os.path.split(saving_path)
@classmethod
def load(cls, path):
"""Load StateIO object from file.
Parameters
----------
path: str
The path should be a specific .pkl file.
Returns
-------
object: StateIO
The StateIO object in the file.
"""
f = open(os.path.abspath(path), 'rb')
saver_from_file = pickle.load(f)
f.close()
return saver_from_file
def set_initial_point(self, perf):
"""The initial point of performance before querying.
Parameters
----------
perf: float
The performance value.
"""
self.initial_point = perf
def save(self):
"""Saving intermediate results to file."""
if self._saving_dir is None:
return
f = open(os.path.join(self._saving_dir, self._saving_file_name), 'wb')
pickle.dump(self, f)
f.close()
def add_state(self, state):
"""Add a State object to the container.
Parameters
----------
state: {dict, State}
State object to be added. Or a dictionary with
the following keys: ['select_index', 'queried_info', 'performance']
"""
if not isinstance(state, State):
assert isinstance(state, dict), "state must be dict or State object."
assert 'select_index' in state and 'queried_info' in state and 'performance' in state, "The dict must contain the following keys: ['select_index', 'queried_info', 'performance']"
self.__state_list.append(copy.deepcopy(state))
self.__update_info()
if self.__verbose and len(self) % self.__print_interval == 0:
if self._first_print:
print('\n' + self.__repr__(), end='')
self._first_print = False
else:
print('\r' + self._refresh_dataline(), end='')
sys.stdout.flush()
def get_state(self, index):
"""Get a State object in the container.
Parameters
----------
index: int
The index of the State object. 0 <= index < len(self)
Returns
-------
st: State
The State object in the previous iteration.
"""
assert (0 <= index < len(self))
return copy.deepcopy(self.__state_list[index])
def check_batch_size(self):
"""Check if all queries have the same batch size.
Returns
-------
result: bool
Whether all the states have the same batch size.
"""
ind_uni = np.unique(
[self.__state_list[i].batch_size for i in range(len(self.__state_list) - 1)], axis=0)
if len(ind_uni) == 1:
self.batch_size = ind_uni[0]
return True
else:
return False
def pop(self, i=None):
"""remove and return item at index (default last)."""
return self.__state_list.pop(i)
def recover_workspace(self, iteration=None):
"""Recover workspace after $iteration$ querying.
For example, if 0 is given, the initial workspace without any querying will be recovered.
Note that, the object itself will be recovered, the information after the iteration will be discarded.
Parameters
----------
iteration: int, optional(default=None)
Number of iteration to recover, start from 0.
If nothing given, it will return the current workspace.
Returns
-------
train_idx: list
Index of training set, shape like [n_training_samples]
test_idx: list
Index of testing set, shape like [n_testing_samples]
label_idx: list
Index of labeling set, shape like [n_labeling_samples]
unlabel_idx: list
Index of unlabeling set, shape like [n_unlabeling_samples]
"""
if iteration is None:
iteration = len(self.__state_list)
assert (0 <= iteration <= len(self))
work_U = copy.deepcopy(self.init_U)
work_L = copy.deepcopy(self.init_L)
for i in range(iteration):
state = self.__state_list[i]
work_U.difference_update(state.get_value('select_index'))
work_L.update(state.get_value('select_index'))
self.__state_list = self.__state_list[0:iteration]
return copy.copy(self.train_idx), copy.copy(self.test_idx), copy.deepcopy(work_L), copy.deepcopy(work_U)
def get_workspace(self, iteration=None):
"""Get workspace after $iteration$ querying.
For example, if 0 is given, the initial workspace without any querying will be recovered.
Parameters
----------
iteration: int, optional(default=None)
Number of iteration, start from 0.
If nothing given, it will get the current workspace.
Returns
-------
train_idx: list
Index of training set, shape like [n_training_samples]
test_idx: list
Index of testing set, shape like [n_testing_samples]
label_idx: list
Index of labeling set, shape like [n_labeling_samples]
unlabel_idx: list
Index of unlabeling set, shape like [n_unlabeling_samples]
"""
if iteration is None:
iteration = len(self.__state_list)
assert (0 <= iteration <= len(self))
work_U = copy.deepcopy(self.init_U)
work_L = copy.deepcopy(self.init_L)
for i in range(iteration):
state = self.__state_list[i]
work_U.difference_update(state.get_value('select_index'))
work_L.update(state.get_value('select_index'))
return copy.copy(self.train_idx), copy.copy(self.test_idx), copy.deepcopy(work_L), copy.deepcopy(work_U)
def num_of_query(self):
"""Return the number of queries"""
return len(self.__state_list)
def get_current_performance(self):
"""Return the mean ± std performance of all existed states.
Only available when the performance of each state is a single float value.
Returns
-------
mean: float
Mean performance of the existing states.
std: float
Std performance of the existing states.
"""
if len(self) == 0:
return 0, 0
else:
tmp = [self[i].get_value('performance') for i in range(self.__len__())]
if isinstance(tmp[0], collections.Iterable):
return np.NaN, np.NaN
else:
return np.mean(tmp), np.std(tmp)
def __len__(self):
return len(self.__state_list)
def __getitem__(self, item):
return self.__state_list.__getitem__(item)
def __contains__(self, other):
return other in self.__state_list
def __iter__(self):
return iter(self.__state_list)
def refresh_info(self):
"""re-calculate current active learning progress."""
numqdata = 0
cost = 0.0
for state in self.__state_list:
numqdata += len(state.get_value('select_index'))
if 'cost' in state.keys():
cost += np.sum(state.get_value('cost'))
self.cost_inall = cost
self._numqdata = numqdata
return numqdata, cost
def __update_info(self):
"""Update current active learning progress"""
state = self.__state_list[len(self) - 1]
if 'cost' in state.keys():
self.cost_inall += np.sum(state.get_value('cost'))
self._numqdata += len(state.get_value('select_index'))
def __repr__(self):
numqdata = self._numqdata
cost = self.cost_inall
tb = pt.PrettyTable()
tb.set_style(pt.MSWORD_FRIENDLY)
tb.add_column('round', [self.round])
tb.add_column('initially labeled data', [
" %d (%.2f%% of all)" % (len(self.init_L), 100 * len(self.init_L) / (len(self.init_L) + len(self.init_U)))])
tb.add_column('number of queries', [len(self.__state_list)])
# tb.add_column('queried data', ["%d (%.2f%% of unlabeled data)" % (numqdata, self.queried_percentage)])
tb.add_column('cost', [cost])
# tb.add_column('saving path', [self._saving_dir])
tb.add_column('Performance:', ["%.3f ± %.2f" % self.get_current_performance()])
return str(tb)
def _refresh_dataline(self):
tb = self.__repr__()
return tb.splitlines()[1]
# class StateIO_all_labels(StateIO):
# """StateIO for all _labels querying"""
# def add_state(self, state):
# assert (isinstance(state, experiment_saver.state.State))
# self.__state_list.append(copy.deepcopy(state))
# if self.__check_flag:
# res, err_st, err_ind = self.check_select_index()
# if res == -1:
# warnings.warn(
# 'Checking validity fails, there is a queried instance not in set_U in '
# 'State:%d, index:%s.' % (err_st, str(err_ind)),
# category=ValidityWarning)
# if res == -2:
# warnings.warn('Checking validity fails, there are instances already queried '
# 'in previous iteration in State:%d, index:%s.' % (err_st, str(err_ind)),
# category=ValidityWarning)
# self.__update_info()
#
#
# if self.__verbose and len(self) % self.__print_interval == 0:
# if self._first_print:
# print('\n' + self.__repr__(), end='')
# self._first_print = False
# else:
# print('\r' + self._refresh_dataline(), end='')
# sys.stdout.flush()
#
# def check_select_index(self):
# """
# check:
# - Q has no repeating elements
# - Q in U
# Returns
# -------
# result: int
# check result
# - if -1 is returned, there is a queried instance not in U
# - if -2 is returned, there are repeated instances in Q
# - if 1 is returned, CHECK OK
#
# state_index: int
# the state index when checking fails (start from 0)
# if CHECK OK, None is returned.
#
# select_index: object
# the select_index when checking fails.
# if CHECK OK, None is returned.
# """
# repeat_dict = dict()
# ind = -1
# for st in self.__state_list:
# ind += 1
# for instance in st.get_value('select_index'):
# if instance not in self.init_U:
# return -1, ind, instance
# if instance not in repeat_dict.keys():
# repeat_dict[instance] = 1
# else:
# return -2, ind, instance
# return 1, None, None
#
# @property
# def queried_percentage(self):
# """return the queried percentage of unlabeled data"""
# return 100 * self._numqdata / len(self.init_U)
| [
"copy.deepcopy",
"pickle.dump",
"os.path.abspath",
"os.path.isdir",
"numpy.std",
"copy.copy",
"pickle.load",
"sys.stdout.flush",
"prettytable.PrettyTable",
"numpy.mean",
"os.path.split",
"os.path.join"
] | [((3241, 3261), 'copy.copy', 'copy.copy', (['train_idx'], {}), '(train_idx)\n', (3250, 3261), False, 'import copy\n'), ((3286, 3305), 'copy.copy', 'copy.copy', (['test_idx'], {}), '(test_idx)\n', (3295, 3305), False, 'import copy\n'), ((5280, 5294), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5291, 5294), False, 'import pickle\n'), ((5800, 5820), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (5811, 5820), False, 'import pickle\n'), ((7233, 7272), 'copy.deepcopy', 'copy.deepcopy', (['self.__state_list[index]'], {}), '(self.__state_list[index])\n', (7246, 7272), False, 'import copy\n'), ((8964, 8990), 'copy.deepcopy', 'copy.deepcopy', (['self.init_U'], {}), '(self.init_U)\n', (8977, 8990), False, 'import copy\n'), ((9008, 9034), 'copy.deepcopy', 'copy.deepcopy', (['self.init_L'], {}), '(self.init_L)\n', (9021, 9034), False, 'import copy\n'), ((10361, 10387), 'copy.deepcopy', 'copy.deepcopy', (['self.init_U'], {}), '(self.init_U)\n', (10374, 10387), False, 'import copy\n'), ((10405, 10431), 'copy.deepcopy', 'copy.deepcopy', (['self.init_L'], {}), '(self.init_L)\n', (10418, 10431), False, 'import copy\n'), ((12660, 12676), 'prettytable.PrettyTable', 'pt.PrettyTable', ([], {}), '()\n', (12674, 12676), True, 'import prettytable as pt\n'), ((3418, 3439), 'copy.deepcopy', 'copy.deepcopy', (['init_U'], {}), '(init_U)\n', (3431, 3439), False, 'import copy\n'), ((3466, 3487), 'copy.deepcopy', 'copy.deepcopy', (['init_L'], {}), '(init_L)\n', (3479, 3487), False, 'import copy\n'), ((4677, 4705), 'os.path.abspath', 'os.path.abspath', (['saving_path'], {}), '(saving_path)\n', (4692, 4705), False, 'import os\n'), ((4721, 4747), 'os.path.isdir', 'os.path.isdir', (['saving_path'], {}), '(saving_path)\n', (4734, 4747), False, 'import os\n'), ((5225, 5246), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (5240, 5246), False, 'import os\n'), ((5730, 5784), 'os.path.join', 'os.path.join', (['self._saving_dir', 'self._saving_file_name'], 
{}), '(self._saving_dir, self._saving_file_name)\n', (5742, 5784), False, 'import os\n'), ((6486, 6506), 'copy.deepcopy', 'copy.deepcopy', (['state'], {}), '(state)\n', (6499, 6506), False, 'import copy\n'), ((9314, 9339), 'copy.copy', 'copy.copy', (['self.train_idx'], {}), '(self.train_idx)\n', (9323, 9339), False, 'import copy\n'), ((9341, 9365), 'copy.copy', 'copy.copy', (['self.test_idx'], {}), '(self.test_idx)\n', (9350, 9365), False, 'import copy\n'), ((9367, 9388), 'copy.deepcopy', 'copy.deepcopy', (['work_L'], {}), '(work_L)\n', (9380, 9388), False, 'import copy\n'), ((9390, 9411), 'copy.deepcopy', 'copy.deepcopy', (['work_U'], {}), '(work_U)\n', (9403, 9411), False, 'import copy\n'), ((10652, 10677), 'copy.copy', 'copy.copy', (['self.train_idx'], {}), '(self.train_idx)\n', (10661, 10677), False, 'import copy\n'), ((10679, 10703), 'copy.copy', 'copy.copy', (['self.test_idx'], {}), '(self.test_idx)\n', (10688, 10703), False, 'import copy\n'), ((10705, 10726), 'copy.deepcopy', 'copy.deepcopy', (['work_L'], {}), '(work_L)\n', (10718, 10726), False, 'import copy\n'), ((10728, 10749), 'copy.deepcopy', 'copy.deepcopy', (['work_U'], {}), '(work_U)\n', (10741, 10749), False, 'import copy\n'), ((4873, 4899), 'os.path.split', 'os.path.split', (['saving_path'], {}), '(saving_path)\n', (4886, 4899), False, 'import os\n'), ((6835, 6853), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6851, 6853), False, 'import sys\n'), ((11527, 11539), 'numpy.mean', 'np.mean', (['tmp'], {}), '(tmp)\n', (11534, 11539), True, 'import numpy as np\n'), ((11541, 11552), 'numpy.std', 'np.std', (['tmp'], {}), '(tmp)\n', (11547, 11552), True, 'import numpy as np\n')] |
"""
Missile Defence Game
Cannon drawing and simulation module.
Copyright (C) 2011-2012 <NAME>.
See LICENSE (GNU GPL version 3 or later).
"""
from numpy import array
import pygame
from maths import normalize
import math
import projectiles
class CounterMissile(projectiles.Missile):
def __init__(self, centre, target):
super(CounterMissile, self).__init__(centre, target, 3.6)
self.size_increase_remaining = 30
self.draw_radius = 2
self.trail_length = 8
self.trail_radius = 4
self.blast_colour_a = (150, 180, 255)
self.blast_colour_b = (255, 0, 0)
self.radius = 0
self.invulnerable_ticks = 6
self.cannon_fire = 1
def draw_marker(self, screen):
rad = 2
pygame.draw.line(
screen,
(255, 255, 255),
(int(self.target[0]) - rad, int(self.target[1]) - rad),
(int(self.target[0]) + rad, int(self.target[1]) + rad)
)
pygame.draw.line(
screen,
(255, 255, 255),
(int(self.target[0]) + rad, int(self.target[1]) - rad),
(int(self.target[0]) - rad, int(self.target[1]) + rad)
)
class MissileLauncher(object):
def __init__(self, centre, game):
self.target = array([100, -100])
self.centre = array(centre)
self.length = 30
self.game = game
self.ticks_since_firing = 0
self.destroyed = False
def apply_physics(self):
self.ticks_since_firing += 1
# must have support
if not self.destroyed:
if not self.game.buildings.get(self.centre[0], self.centre[1]):
self.destroyed = True
explosion = projectiles.Missile(self.centre, (0, 0), 0.)
explosion.exploding = True
explosion.blast_colour_a = (100, 200, 150)
explosion.blast_colour_b = (20, 50, 20)
explosion.blast_radius = self.length
explosion.blast_ticks = 30
self.game.projectiles.append(explosion)
def draw(self, surface):
if not self.destroyed:
launcher_size = 3
pygame.draw.line(surface,
(100,200,100),
(self.centre[0] - launcher_size, self.centre[1]),
(self.centre[0] + launcher_size, self.centre[1]),
4)
def can_fire(self):
return self.ticks_since_firing > 8 and not self.destroyed
def create_missile(self, target):
new_missile = CounterMissile(self.centre, target)
self.game.projectiles.append(new_missile)
def fire(self, target):
if self.can_fire():
self.target = target
self.ticks_since_firing = 0
self.create_missile(target)
| [
"projectiles.Missile",
"pygame.draw.line",
"numpy.array"
] | [((1353, 1371), 'numpy.array', 'array', (['[100, -100]'], {}), '([100, -100])\n', (1358, 1371), False, 'from numpy import array\n'), ((1394, 1407), 'numpy.array', 'array', (['centre'], {}), '(centre)\n', (1399, 1407), False, 'from numpy import array\n'), ((2318, 2467), 'pygame.draw.line', 'pygame.draw.line', (['surface', '(100, 200, 100)', '(self.centre[0] - launcher_size, self.centre[1])', '(self.centre[0] + launcher_size, self.centre[1])', '(4)'], {}), '(surface, (100, 200, 100), (self.centre[0] - launcher_size,\n self.centre[1]), (self.centre[0] + launcher_size, self.centre[1]), 4)\n', (2334, 2467), False, 'import pygame\n'), ((1838, 1883), 'projectiles.Missile', 'projectiles.Missile', (['self.centre', '(0, 0)', '(0.0)'], {}), '(self.centre, (0, 0), 0.0)\n', (1857, 1883), False, 'import projectiles\n')] |
import os
import torch
import torchvision
import torch.nn as nn
from PIL import Image
from scipy import stats
import random
import numpy as np
import time
import scipy.io
from torch.optim import lr_scheduler
import torch.nn.functional as F
def pil_loader(path):
    """Load the image at *path* with PIL and return it as an RGB image.

    The file is opened through a context manager so the handle is closed
    promptly, avoiding a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as handle:
        image = Image.open(handle)
        return image.convert('RGB')
def accimage_loader(path):
    """Load the image at *path* with the fast ``accimage`` backend.

    Falls back to :func:`pil_loader` when accimage cannot decode the file.
    """
    import accimage
    try:
        image = accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        image = pil_loader(path)
    return image
def default_loader(path):
    """Load *path* with the loader matching torchvision's image backend.

    Dispatches to :func:`accimage_loader` when torchvision is configured
    to use accimage, otherwise to :func:`pil_loader`.
    """
    from torchvision import get_image_backend
    loader = accimage_loader if get_image_backend() == 'accimage' else pil_loader
    return loader(path)
class DBCNN(torch.nn.Module):
    """ResNet-50 backbone with three parallel linear heads.

    The backbone's convolutional stages produce a 2048-d feature that is
    globally average-pooled and fed to:

    * ``fc_D``  -- distortion-type classifier (25 classes),
    * ``fc_DL`` -- distortion-level regressor (1 output),
    * ``fc_Q``  -- quality-score regressor (1 output).
    """

    def __init__(self, options):
        """Declare all needed layers.

        Args:
            options (dict): must contain key ``'fc'``. When truthy, the
                backbone and both distortion heads are frozen and only
                ``fc_Q`` is (re-)initialized for training.
        """
        super(DBCNN, self).__init__()
        # ImageNet-pretrained ResNet-50 used as a feature extractor; its
        # own fc layer is bypassed in forward().
        self.features1 = torchvision.models.resnet50(pretrained=True)
        # Global pooling collapses the spatial map to a single vector.
        self.pooling = nn.AdaptiveAvgPool2d(1)
        # Linear classifier for distortion type.
        self.fc_D = torch.nn.Linear(2048, 25)
        # Linear regressor for distortion level.
        self.fc_DL = torch.nn.Linear(2048, 1)
        # Linear regressor for the final quality score.
        self.fc_Q = torch.nn.Linear(2048, 1)
        if options['fc']:
            # Fine-tune only the quality head: freeze the backbone and
            # the two distortion heads.
            for param in self.features1.parameters():
                param.requires_grad = False
            for param in self.fc_D.parameters():
                param.requires_grad = False
            for param in self.fc_DL.parameters():
                param.requires_grad = False
            # Re-initialize the quality head from scratch.
            nn.init.kaiming_normal_(self.fc_Q.weight.data)
            if self.fc_Q.bias is not None:
                nn.init.constant_(self.fc_Q.bias.data, val=0)

    def forward(self, X):
        """Forward pass of the network.

        Args:
            X (torch.Tensor): batch of images, shape ``(N, 3, H, W)``.

        Returns:
            tuple: ``(predict_D, predict_DL, predict_Q)`` with shapes
            ``(N, 25)``, ``(N, 1)`` and ``(N, 1)`` respectively.
        """
        N = X.size()[0]
        # Run the ResNet-50 stem and residual stages by hand so that the
        # backbone's final fc layer is skipped.
        X1 = self.features1.conv1(X)
        X1 = self.features1.bn1(X1)
        X1 = self.features1.relu(X1)
        X1 = self.features1.maxpool(X1)
        X1 = self.features1.layer1(X1)
        X1 = self.features1.layer2(X1)
        X1 = self.features1.layer3(X1)
        X1 = self.features1.layer4(X1)
        assert X1.size()[1] == 2048
        X1 = self.pooling(X1)
        assert X1.size() == (N, 2048, 1, 1)
        X1 = X1.view(N, 2048)
        predict_D = self.fc_D(X1)
        predict_DL = self.fc_DL(X1)
        predict_Q = self.fc_Q(X1)
        assert predict_D.size() == (N, 25)
        assert predict_DL.size() == (N, 1)
        assert predict_Q.size() == (N, 1)
        return predict_D, predict_DL, predict_Q
class DBCNNManager(object):
def __init__(self, options, path):
"""Prepare the network, criterion, solver, and data.
Args:
options, dict: Hyperparameters.
"""
print('Prepare the network and data.')
self._options = options
self._path = path
# Network.
self._net = torch.nn.DataParallel(DBCNN(self._options), device_ids=[0]).cuda()
if self._options['fc'] == True:
self._net.load_state_dict(torch.load(path['pretrainkadis700k_root']))
if self._options['fc'] == False:
self._net.load_state_dict(torch.load(path['fc_root']))
print(self._net)
# Criterion.
self._criterion_D = torch.nn.CrossEntropyLoss().cuda()
self._criterion_DL = self.loss_m
self._criterion_Q = torch.nn.MSELoss().cuda()
# Solver.
if self._options['fc'] == True:
self._solver = torch.optim.SGD(
self._net.module.fc_Q.parameters(), lr=self._options['base_lr'],
momentum=0.9, weight_decay=self._options['weight_decay'])
else:
self._solver = torch.optim.Adam(
self._net.module.parameters(), lr=self._options['base_lr'],
weight_decay=self._options['weight_decay'])
if self._options['dataset'] == 'kadid10k':
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
test_transforms = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
if self._options['dataset'] == 'kadid10k':
import Kadid10kFolder_DistortionNet_Finetune
train_data = Kadid10kFolder_DistortionNet_Finetune.Kadid10kFolder_DistortionNet_Finetune(
root=self._path['kadid10k'], loader=default_loader, index=self._options['train_index'],
transform=train_transforms)
test_data = Kadid10kFolder_DistortionNet_Finetune.Kadid10kFolder_DistortionNet_Finetune(
root=self._path['kadid10k'], loader=default_loader, index=self._options['test_index'],
transform=test_transforms)
else:
raise AttributeError('Only support KADID-10k right now!')
self._train_loader = torch.utils.data.DataLoader(
train_data, batch_size=self._options['batch_size'],
shuffle=True, num_workers=0, pin_memory=True)
self._test_loader = torch.utils.data.DataLoader(
test_data, batch_size=1,
shuffle=False, num_workers=0, pin_memory=True)
self.scheduler = lr_scheduler.StepLR(self._solver,
last_epoch=-1,
step_size=10,
gamma=0.1)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train(self):
"""Train the network."""
print('Training.')
best_srcc = 0.0
best_plcc = 0.0
best_acc = 0.0
print('Epoch\tLr\tTotal loss\tCross loss\tHinge loss\tMSE loss\tTrain_ACC\tTest_ACC\tTrain_SRCC\tTest_SRCC\tTest_PLCC')
for t in range(self._options['epochs']):
self._net.train(True) # Set the model to training phase
time_start = time.time()
epoch_loss = []
pscores = []
tscores = []
num_total = 0.0
num_correct = 0.0
cross_loss = []
hinge_loss = []
mse_loss = []
for sample in self._train_loader:
# Data.
I1, I2, I3, I4, I5, I1_D, I2_D, I3_D, I4_D, I5_D, I1_DL, I2_DL, I3_DL, I4_DL, I5_DL, I1_M, I2_M, I3_M, I4_M, I5_M = \
sample['I1'], sample['I2'], sample['I3'], sample['I4'], sample['I5'],\
sample['I1_D'], sample['I2_D'], sample['I3_D'], sample['I4_D'], sample['I5_D'],\
sample['I1_DL'], sample['I2_DL'], sample['I3_DL'], sample['I4_DL'], sample['I5_DL'],\
sample['I1_M'], sample['I2_M'], sample['I3_M'], sample['I4_M'], sample['I5_M']
I1 = I1.to(self.device)
I2 = I2.to(self.device)
I3 = I3.to(self.device)
I4 = I4.to(self.device)
I5 = I5.to(self.device)
I1_D = I1_D.to(self.device)
I2_D = I2_D.to(self.device)
I3_D = I3_D.to(self.device)
I4_D = I4_D.to(self.device)
I5_D = I5_D.to(self.device)
I1_DL = I1_DL.to(self.device)
I2_DL = I2_DL.to(self.device)
I3_DL = I3_DL.to(self.device)
I4_DL = I4_DL.to(self.device)
I5_DL = I5_DL.to(self.device)
I1_M = I1_M.to(self.device)
I2_M = I2_M.to(self.device)
I3_M = I3_M.to(self.device)
I4_M = I4_M.to(self.device)
I5_M = I5_M.to(self.device)
# Clear the existing gradients.
self._solver.zero_grad()
# Forward pass.
I = torch.Tensor().to(self.device)
y_D = torch.Tensor().to(self.device).long()
y_DL = torch.Tensor().to(self.device)
y_M = torch.Tensor().to(self.device)
for i in range(0, len(I1)):
I = torch.cat((I, I1[i].unsqueeze(0), I2[i].unsqueeze(0), I3[i].unsqueeze(0),
I4[i].unsqueeze(0), I5[i].unsqueeze(0)))
y_D = torch.cat((y_D, I1_D[i].unsqueeze(0), I2_D[i].unsqueeze(0), I3_D[i].unsqueeze(0),
I4_D[i].unsqueeze(0), I5_D[i].unsqueeze(0)))
y_DL = torch.cat((y_DL, I1_DL[i].unsqueeze(0), I2_DL[i].unsqueeze(0), I3_DL[i].unsqueeze(0),
I4_DL[i].unsqueeze(0), I5_DL[i].unsqueeze(0)))
y_M = torch.cat((y_M, I1_M[i].unsqueeze(0), I2_M[i].unsqueeze(0), I3_M[i].unsqueeze(0),
I4_M[i].unsqueeze(0), I5_M[i].unsqueeze(0)))
predict_D, predict_DL, predict_Q = self._net(I)
pscores = pscores + predict_Q.cpu().tolist()
tscores = tscores + y_M.cpu().tolist()
loss_D = self._criterion_D(predict_D, y_D.detach())
loss_DL = self._criterion_DL(predict_DL, y_DL.unsqueeze(1).detach())
loss_Q = self._criterion_Q(predict_Q, y_M.unsqueeze(1).detach())
loss = loss_D + 0.1*loss_DL + loss_Q
epoch_loss.append(loss.item())
cross_loss.append(loss_D.item())
hinge_loss.append(loss_DL.item())
mse_loss.append(loss_Q.item())
_, prediction = torch.max(F.softmax(predict_D.data, dim=1), 1)
num_total += y_D.size(0)
num_correct += torch.sum(prediction == y_D)
# Backward pass.
loss.backward()
self._solver.step()
self._net.eval()
train_acc = 100 * num_correct.float() / num_total
test_acc = self._accuracy(self._test_loader)
train_srcc, _ = stats.spearmanr(pscores, tscores)
test_srcc, test_plcc = self._consitency(self._test_loader)
time_end = time.time()
print('%d epoch done; total time = %f sec' % ((t + 1), (time_end - time_start)))
if test_srcc > best_srcc:
best_srcc = test_srcc
best_plcc = test_plcc
print('*', end='')
pwd = os.getcwd()
if self._options['fc'] == True:
modelpath = os.path.join(pwd, 'fc_models', ('net_params' + '_best_srcc' + '.pkl'))
else:
modelpath = os.path.join(pwd, 'db_models', ('net_params' + '_best_srcc' + '.pkl'))
torch.save(self._net.state_dict(), modelpath)
if test_acc > best_acc:
best_acc = test_acc
print('*', end='')
pwd = os.getcwd()
if self._options['fc'] == True:
modelpath = os.path.join(pwd, 'fc_models', ('net_params' + '_best_acc' + '.pkl'))
else:
modelpath = os.path.join(pwd, 'db_models', ('net_params' + '_best_acc' + '.pkl'))
torch.save(self._net.state_dict(), modelpath)
if (t+1) == self._options['epochs']:
pwd = os.getcwd()
if self._options['fc'] == True:
modelpath = os.path.join(pwd, 'fc_models', ('net_params' + '_latest' + '.pkl'))
else:
modelpath = os.path.join(pwd, 'db_models', ('net_params' + '_latest' + '.pkl'))
torch.save(self._net.state_dict(), modelpath)
print('%d\t\t%4.10f\t\t%4.3f\t\t%4.3f\t\t%4.3f\t\t%4.3f\t\t%4.4f\t\t%4.4f\t\t%4.4f\t\t%4.4f\t\t%4.4f' %
(t + 1, self._solver.param_groups[0]['lr'], sum(epoch_loss) / len(epoch_loss), sum(cross_loss) / len(cross_loss),
sum(hinge_loss) / len(hinge_loss), sum(mse_loss) / len(mse_loss), train_acc, test_acc, train_srcc, test_srcc, test_plcc))
self.scheduler.step()
# if self._options['fc'] != True:
# self.scheduler.step()
return best_srcc, best_plcc
def _consitency(self, data_loader):
pscores = []
tscores = []
for sample in data_loader:
# Data.
I1, I2, I3, I4, I5, I1_M, I2_M, I3_M, I4_M, I5_M = \
sample['I1'], sample['I2'], sample['I3'], sample['I4'], sample['I5'], \
sample['I1_M'], sample['I2_M'], sample['I3_M'], sample['I4_M'], sample['I5_M']
I1 = I1.to(self.device)
I2 = I2.to(self.device)
I3 = I3.to(self.device)
I4 = I4.to(self.device)
I5 = I5.to(self.device)
I1_M = I1_M.to(self.device)
I2_M = I2_M.to(self.device)
I3_M = I3_M.to(self.device)
I4_M = I4_M.to(self.device)
I5_M = I5_M.to(self.device)
# Prediction.
I = torch.Tensor().to(self.device)
y_M = torch.Tensor().to(self.device)
for i in range(0, len(I1)):
I = torch.cat((I, I1[i].unsqueeze(0), I2[i].unsqueeze(0), I3[i].unsqueeze(0),
I4[i].unsqueeze(0), I5[i].unsqueeze(0)))
y_M = torch.cat((y_M, I1_M[i].unsqueeze(0), I2_M[i].unsqueeze(0), I3_M[i].unsqueeze(0),
I4_M[i].unsqueeze(0), I5_M[i].unsqueeze(0)))
predict_D, predict_DL, predict_Q = self._net(I)
pscores = pscores + predict_Q[:, 0].cpu().tolist()
tscores = tscores + y_M.cpu().tolist()
test_srcc, _ = stats.spearmanr(pscores, tscores)
tscores = torch.Tensor(tscores).reshape(-1).tolist()
test_plcc, _ = stats.pearsonr(pscores, tscores)
return test_srcc, test_plcc
def _accuracy(self, data_loader):
"""Compute the train/test accuracy.
Args:
data_loader: Train/Test DataLoader.
Returns:
Train/Test accuracy in percentage.
"""
num_correct = 0.0
num_total = 0.0
for sample in data_loader:
# Data.
I1, I2, I3, I4, I5, I1_D, I2_D, I3_D, I4_D, I5_D = \
sample['I1'], sample['I2'], sample['I3'], sample['I4'], sample['I5'], \
sample['I1_D'], sample['I2_D'], sample['I3_D'], sample['I4_D'], sample['I5_D']
I1 = I1.to(self.device)
I2 = I2.to(self.device)
I3 = I3.to(self.device)
I4 = I4.to(self.device)
I5 = I5.to(self.device)
I1_D = I1_D.to(self.device)
I2_D = I2_D.to(self.device)
I3_D = I3_D.to(self.device)
I4_D = I4_D.to(self.device)
I5_D = I5_D.to(self.device)
# Prediction.
I = torch.Tensor().to(self.device)
y_D = torch.Tensor().to(self.device).long()
for i in range(0, len(I1)):
I = torch.cat((I, I1[i].unsqueeze(0), I2[i].unsqueeze(0), I3[i].unsqueeze(0),
I4[i].unsqueeze(0), I5[i].unsqueeze(0)))
y_D = torch.cat((y_D, I1_D[i].unsqueeze(0), I2_D[i].unsqueeze(0), I3_D[i].unsqueeze(0),
I4_D[i].unsqueeze(0), I5_D[i].unsqueeze(0)))
predict_D, predict_DL, predict_Q = self._net(I)
_, prediction = torch.max(predict_D.data, 1)
num_total += y_D.size(0)
num_correct += torch.sum(prediction == y_D.data)
return 100 * num_correct.float() / num_total
def loss_m(self, y_pred, y):
"""prediction monotonicity related loss"""
assert y_pred.size(0) > 1
loss = torch.Tensor().to(self.device)
#for i in range(0, self._options['batch_size']):
for i in range(0, (y_pred.size(0) // 5)):
y_pred_one = y_pred[i*5:(i+1)*5, :]
y_one = y[i*5:(i+1)*5, :]
tmp = F.relu((y_pred_one - (y_pred_one+10).t()) * torch.sign((y_one.t() - y_one)))
loss = torch.cat((loss, tmp.unsqueeze(0)), 0)
return torch.mean(loss.view(-1, 1))
def main():
"""The main function."""
import argparse
parser = argparse.ArgumentParser(
description='Train DB-CNN for BIQA.')
parser.add_argument("--seed", type=int, default=19901116)
parser.add_argument('--base_lr', dest='base_lr', type=float, default=1e-6,
help='Base learning rate for training.')
parser.add_argument('--batch_size', dest='batch_size', type=int,
default=8, help='Batch size.')
parser.add_argument('--epochs', dest='epochs', type=int,
default=20, help='Epochs for training.')
parser.add_argument('--weight_decay', dest='weight_decay', type=float,
default=5e-4, help='Weight decay.')
parser.add_argument('--dataset', dest='dataset', type=str, default='kadid10k',
help='dataset: kadid10k')
args = parser.parse_args()
if args.base_lr <= 0:
raise AttributeError('--base_lr parameter must >0.')
if args.batch_size <= 0:
raise AttributeError('--batch_size parameter must >0.')
if args.epochs < 0:
raise AttributeError('--epochs parameter must >=0.')
if args.weight_decay <= 0:
raise AttributeError('--weight_decay parameter must >0.')
options = {
'base_lr': args.base_lr,
'batch_size': args.batch_size,
'epochs': args.epochs,
'weight_decay': args.weight_decay,
'dataset': args.dataset,
'fc': [],
'train_index': [],
'test_index': []
}
path = {
'kadid10k': os.path.join('dataset', '/mnt/sda2/New/kadid10k'),
'pretrainkadis700k_root': os.path.join('db_models/kadis700k', 'net_params_best_srcc.pkl'),
'fc_model': os.path.join('fc_models'),
'fc_root': os.path.join('fc_models', 'net_params_latest.pkl'),
'db_model': os.path.join('db_models'),
'db_root': os.path.join('db_models', 'net_params_latest.pkl')
}
if options['dataset'] == 'kadid10k':
index = list(range(0, 81)) # 81
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
random.seed(args.seed)
lr_backup = options['base_lr']
iter_num = 1
srcc_all = np.zeros((1, iter_num + 1), dtype=np.float)
plcc_all = np.zeros((1, iter_num + 1), dtype=np.float)
for i in range(0, iter_num):
# randomly split train-test set
random.shuffle(index)
train_index = index[0:round(0.8 * len(index))]
test_index = index[round(0.8 * len(index)):len(index)]
d_type_num = 23
train_index_new = []
for train_index_idx in train_index:
train_index_new = train_index_new + np.arange(train_index_idx * d_type_num, (train_index_idx+1) * d_type_num).tolist()
test_index_new = []
for test_index_idx in test_index:
test_index_new = test_index_new + np.arange(test_index_idx * d_type_num, (test_index_idx + 1) * d_type_num).tolist()
train_index = train_index_new
test_index = test_index_new
options['train_index'] = train_index
options['test_index'] = test_index
pwd = os.getcwd()
# train FC layers only
options['fc'] = True
options['base_lr'] = 1e-3
options['epochs'] = 20
manager = DBCNNManager(options, path)
best_srcc, best_plcc = manager.train()
# fine-tune all model
options['fc'] = False
options['base_lr'] = lr_backup
options['epochs'] = 20
manager = DBCNNManager(options, path)
best_srcc, best_plcc = manager.train()
result_path = os.path.join(pwd, 'result', ('db_result_' + str(i + 1) + '.mat'))
scipy.io.savemat(result_path, mdict={'best_srcc': best_srcc, 'best_plcc': best_plcc})
srcc_all[0][i] = best_srcc
plcc_all[0][i] = best_plcc
srcc_mean = np.mean(srcc_all[0][0:iter_num])
plcc_mean = np.mean(plcc_all[0][0:iter_num])
print('\n Average srcc:%4.4f, Average plcc:%4.4f' % (srcc_mean, plcc_mean))
srcc_all[0][iter_num] = srcc_mean
plcc_all[0][iter_num] = plcc_mean
print(srcc_all)
print(plcc_all)
final_result_path = os.path.join(pwd, 'result', ('final_result' + '.mat'))
scipy.io.savemat(final_result_path, mdict={'srcc_all': srcc_all, 'plcc_all': plcc_all})
return best_srcc
if __name__ == '__main__':
main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.optim.lr_scheduler.StepLR",
"random.shuffle",
"accimage.Image",
"numpy.mean",
"torch.nn.init.constant_",
"numpy.arange",
"torchvision.get_image_backend",
"Kadid10kFolder_DistortionNet_Finetune.Kadid10kFolder_DistortionNet_Finetune",
"torchvi... | [((16769, 16830), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train DB-CNN for BIQA."""'}), "(description='Train DB-CNN for BIQA.')\n", (16792, 16830), False, 'import argparse\n'), ((18743, 18771), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (18760, 18771), False, 'import torch\n'), ((18865, 18890), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (18879, 18890), True, 'import numpy as np\n'), ((18895, 18917), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (18906, 18917), False, 'import random\n'), ((18986, 19029), 'numpy.zeros', 'np.zeros', (['(1, iter_num + 1)'], {'dtype': 'np.float'}), '((1, iter_num + 1), dtype=np.float)\n', (18994, 19029), True, 'import numpy as np\n'), ((19045, 19088), 'numpy.zeros', 'np.zeros', (['(1, iter_num + 1)'], {'dtype': 'np.float'}), '((1, iter_num + 1), dtype=np.float)\n', (19053, 19088), True, 'import numpy as np\n'), ((20640, 20672), 'numpy.mean', 'np.mean', (['srcc_all[0][0:iter_num]'], {}), '(srcc_all[0][0:iter_num])\n', (20647, 20672), True, 'import numpy as np\n'), ((20689, 20721), 'numpy.mean', 'np.mean', (['plcc_all[0][0:iter_num]'], {}), '(plcc_all[0][0:iter_num])\n', (20696, 20721), True, 'import numpy as np\n'), ((20943, 20995), 'os.path.join', 'os.path.join', (['pwd', '"""result"""', "('final_result' + '.mat')"], {}), "(pwd, 'result', 'final_result' + '.mat')\n", (20955, 20995), False, 'import os\n'), ((411, 424), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (421, 424), False, 'from PIL import Image\n'), ((531, 551), 'accimage.Image', 'accimage.Image', (['path'], {}), '(path)\n', (545, 551), False, 'import accimage\n'), ((750, 769), 'torchvision.get_image_backend', 'get_image_backend', ([], {}), '()\n', (767, 769), False, 'from torchvision import get_image_backend\n'), ((978, 1002), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (996, 1002), True, 
'import torch.nn as nn\n'), ((1028, 1072), 'torchvision.models.resnet50', 'torchvision.models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1055, 1072), False, 'import torchvision\n'), ((1122, 1145), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (1142, 1145), True, 'import torch.nn as nn\n'), ((1196, 1221), 'torch.nn.Linear', 'torch.nn.Linear', (['(2048)', '(25)'], {}), '(2048, 25)\n', (1211, 1221), False, 'import torch\n'), ((1273, 1297), 'torch.nn.Linear', 'torch.nn.Linear', (['(2048)', '(1)'], {}), '(2048, 1)\n', (1288, 1297), False, 'import torch\n'), ((1348, 1372), 'torch.nn.Linear', 'torch.nn.Linear', (['(2048)', '(1)'], {}), '(2048, 1)\n', (1363, 1372), False, 'import torch\n'), ((5527, 5657), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': "self._options['batch_size']", 'shuffle': '(True)', 'num_workers': '(0)', 'pin_memory': '(True)'}), "(train_data, batch_size=self._options[\n 'batch_size'], shuffle=True, num_workers=0, pin_memory=True)\n", (5554, 5657), False, 'import torch\n'), ((5706, 5809), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(0)', 'pin_memory': '(True)'}), '(test_data, batch_size=1, shuffle=False,\n num_workers=0, pin_memory=True)\n', (5733, 5809), False, 'import torch\n'), ((5856, 5929), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['self._solver'], {'last_epoch': '(-1)', 'step_size': '(10)', 'gamma': '(0.1)'}), '(self._solver, last_epoch=-1, step_size=10, gamma=0.1)\n', (5875, 5929), False, 'from torch.optim import lr_scheduler\n'), ((14202, 14235), 'scipy.stats.spearmanr', 'stats.spearmanr', (['pscores', 'tscores'], {}), '(pscores, tscores)\n', (14217, 14235), False, 'from scipy import stats\n'), ((14320, 14352), 'scipy.stats.pearsonr', 'stats.pearsonr', (['pscores', 'tscores'], {}), '(pscores, tscores)\n', (14334, 14352), False, 'from 
scipy import stats\n'), ((18264, 18313), 'os.path.join', 'os.path.join', (['"""dataset"""', '"""/mnt/sda2/New/kadid10k"""'], {}), "('dataset', '/mnt/sda2/New/kadid10k')\n", (18276, 18313), False, 'import os\n'), ((18349, 18412), 'os.path.join', 'os.path.join', (['"""db_models/kadis700k"""', '"""net_params_best_srcc.pkl"""'], {}), "('db_models/kadis700k', 'net_params_best_srcc.pkl')\n", (18361, 18412), False, 'import os\n'), ((18434, 18459), 'os.path.join', 'os.path.join', (['"""fc_models"""'], {}), "('fc_models')\n", (18446, 18459), False, 'import os\n'), ((18480, 18530), 'os.path.join', 'os.path.join', (['"""fc_models"""', '"""net_params_latest.pkl"""'], {}), "('fc_models', 'net_params_latest.pkl')\n", (18492, 18530), False, 'import os\n'), ((18552, 18577), 'os.path.join', 'os.path.join', (['"""db_models"""'], {}), "('db_models')\n", (18564, 18577), False, 'import os\n'), ((18598, 18648), 'os.path.join', 'os.path.join', (['"""db_models"""', '"""net_params_latest.pkl"""'], {}), "('db_models', 'net_params_latest.pkl')\n", (18610, 18648), False, 'import os\n'), ((19171, 19192), 'random.shuffle', 'random.shuffle', (['index'], {}), '(index)\n', (19185, 19192), False, 'import random\n'), ((19916, 19927), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (19925, 19927), False, 'import os\n'), ((1813, 1859), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.fc_Q.weight.data'], {}), '(self.fc_Q.weight.data)\n', (1836, 1859), True, 'import torch.nn as nn\n'), ((4942, 5143), 'Kadid10kFolder_DistortionNet_Finetune.Kadid10kFolder_DistortionNet_Finetune', 'Kadid10kFolder_DistortionNet_Finetune.Kadid10kFolder_DistortionNet_Finetune', ([], {'root': "self._path['kadid10k']", 'loader': 'default_loader', 'index': "self._options['train_index']", 'transform': 'train_transforms'}), "(\n root=self._path['kadid10k'], loader=default_loader, index=self._options\n ['train_index'], transform=train_transforms)\n", (5017, 5143), False, 'import 
Kadid10kFolder_DistortionNet_Finetune\n'), ((5191, 5390), 'Kadid10kFolder_DistortionNet_Finetune.Kadid10kFolder_DistortionNet_Finetune', 'Kadid10kFolder_DistortionNet_Finetune.Kadid10kFolder_DistortionNet_Finetune', ([], {'root': "self._path['kadid10k']", 'loader': 'default_loader', 'index': "self._options['test_index']", 'transform': 'test_transforms'}), "(\n root=self._path['kadid10k'], loader=default_loader, index=self._options\n ['test_index'], transform=test_transforms)\n", (5266, 5390), False, 'import Kadid10kFolder_DistortionNet_Finetune\n'), ((6573, 6584), 'time.time', 'time.time', ([], {}), '()\n', (6582, 6584), False, 'import time\n'), ((10528, 10561), 'scipy.stats.spearmanr', 'stats.spearmanr', (['pscores', 'tscores'], {}), '(pscores, tscores)\n', (10543, 10561), False, 'from scipy import stats\n'), ((10657, 10668), 'time.time', 'time.time', ([], {}), '()\n', (10666, 10668), False, 'import time\n'), ((15957, 15985), 'torch.max', 'torch.max', (['predict_D.data', '(1)'], {}), '(predict_D.data, 1)\n', (15966, 15985), False, 'import torch\n'), ((16050, 16083), 'torch.sum', 'torch.sum', (['(prediction == y_D.data)'], {}), '(prediction == y_D.data)\n', (16059, 16083), False, 'import torch\n'), ((1919, 1964), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.fc_Q.bias.data'], {'val': '(0)'}), '(self.fc_Q.bias.data, val=0)\n', (1936, 1964), True, 'import torch.nn as nn\n'), ((3338, 3380), 'torch.load', 'torch.load', (["path['pretrainkadis700k_root']"], {}), "(path['pretrainkadis700k_root'])\n", (3348, 3380), False, 'import torch\n'), ((3461, 3488), 'torch.load', 'torch.load', (["path['fc_root']"], {}), "(path['fc_root'])\n", (3471, 3488), False, 'import torch\n'), ((3565, 3592), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3590, 3592), False, 'import torch\n'), ((3669, 3687), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (3685, 3687), False, 'import torch\n'), ((4617, 4650), 'torchvision.transforms.ToTensor', 
'torchvision.transforms.ToTensor', ([], {}), '()\n', (4648, 4650), False, 'import torchvision\n'), ((4664, 4756), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', ([], {'mean': '(0.485, 0.456, 0.406)', 'std': '(0.229, 0.224, 0.225)'}), '(mean=(0.485, 0.456, 0.406), std=(0.229, \n 0.224, 0.225))\n', (4696, 4756), False, 'import torchvision\n'), ((6111, 6136), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6134, 6136), False, 'import torch\n'), ((10221, 10249), 'torch.sum', 'torch.sum', (['(prediction == y_D)'], {}), '(prediction == y_D)\n', (10230, 10249), False, 'import torch\n'), ((10934, 10945), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10943, 10945), False, 'import os\n'), ((11413, 11424), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11422, 11424), False, 'import os\n'), ((11833, 11844), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11842, 11844), False, 'import os\n'), ((16272, 16286), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (16284, 16286), False, 'import torch\n'), ((4279, 4324), 'torchvision.transforms.RandomHorizontalFlip', 'torchvision.transforms.RandomHorizontalFlip', ([], {}), '()\n', (4322, 4324), False, 'import torchvision\n'), ((4342, 4375), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (4373, 4375), False, 'import torchvision\n'), ((4393, 4485), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', ([], {'mean': '(0.485, 0.456, 0.406)', 'std': '(0.229, 0.224, 0.225)'}), '(mean=(0.485, 0.456, 0.406), std=(0.229, \n 0.224, 0.225))\n', (4425, 4485), False, 'import torchvision\n'), ((10112, 10144), 'torch.nn.functional.softmax', 'F.softmax', (['predict_D.data'], {'dim': '(1)'}), '(predict_D.data, dim=1)\n', (10121, 10144), True, 'import torch.nn.functional as F\n'), ((11026, 11094), 'os.path.join', 'os.path.join', (['pwd', '"""fc_models"""', "('net_params' + '_best_srcc' + '.pkl')"], {}), "(pwd, 'fc_models', 'net_params' + '_best_srcc' 
+ '.pkl')\n", (11038, 11094), False, 'import os\n'), ((11151, 11219), 'os.path.join', 'os.path.join', (['pwd', '"""db_models"""', "('net_params' + '_best_srcc' + '.pkl')"], {}), "(pwd, 'db_models', 'net_params' + '_best_srcc' + '.pkl')\n", (11163, 11219), False, 'import os\n'), ((11505, 11572), 'os.path.join', 'os.path.join', (['pwd', '"""fc_models"""', "('net_params' + '_best_acc' + '.pkl')"], {}), "(pwd, 'fc_models', 'net_params' + '_best_acc' + '.pkl')\n", (11517, 11572), False, 'import os\n'), ((11629, 11696), 'os.path.join', 'os.path.join', (['pwd', '"""db_models"""', "('net_params' + '_best_acc' + '.pkl')"], {}), "(pwd, 'db_models', 'net_params' + '_best_acc' + '.pkl')\n", (11641, 11696), False, 'import os\n'), ((11925, 11990), 'os.path.join', 'os.path.join', (['pwd', '"""fc_models"""', "('net_params' + '_latest' + '.pkl')"], {}), "(pwd, 'fc_models', 'net_params' + '_latest' + '.pkl')\n", (11937, 11990), False, 'import os\n'), ((12047, 12112), 'os.path.join', 'os.path.join', (['pwd', '"""db_models"""', "('net_params' + '_latest' + '.pkl')"], {}), "(pwd, 'db_models', 'net_params' + '_latest' + '.pkl')\n", (12059, 12112), False, 'import os\n'), ((13535, 13549), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (13547, 13549), False, 'import torch\n'), ((13584, 13598), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (13596, 13598), False, 'import torch\n'), ((15392, 15406), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (15404, 15406), False, 'import torch\n'), ((8418, 8432), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (8430, 8432), False, 'import torch\n'), ((8532, 8546), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (8544, 8546), False, 'import torch\n'), ((8585, 8599), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (8597, 8599), False, 'import torch\n'), ((14254, 14275), 'torch.Tensor', 'torch.Tensor', (['tscores'], {}), '(tscores)\n', (14266, 14275), False, 'import torch\n'), ((19457, 19532), 'numpy.arange', 'np.arange', (['(train_index_idx 
* d_type_num)', '((train_index_idx + 1) * d_type_num)'], {}), '(train_index_idx * d_type_num, (train_index_idx + 1) * d_type_num)\n', (19466, 19532), True, 'import numpy as np\n'), ((19656, 19729), 'numpy.arange', 'np.arange', (['(test_index_idx * d_type_num)', '((test_index_idx + 1) * d_type_num)'], {}), '(test_index_idx * d_type_num, (test_index_idx + 1) * d_type_num)\n', (19665, 19729), True, 'import numpy as np\n'), ((15441, 15455), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (15453, 15455), False, 'import torch\n'), ((8471, 8485), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (8483, 8485), False, 'import torch\n')] |
# %% [markdown]
# # Noise-free optimization with Expected Improvement
# %%
import numpy as np
import tensorflow as tf
# Fix both global RNG seeds so the sampled initial points (and hence the
# whole optimization run) are reproducible between executions.
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## Describe the problem
# In this example, we look to find the minimum value of the two-dimensional Branin function over the hypercube $[0, 1]^2$. We can represent the search space using a `Box`, and plot contours of the Branin over this space.
# %%
import trieste
from trieste.utils.objectives import branin
from util.plotting_plotly import plot_function_plotly
# Search over the unit square; the branin helper used here is the version
# rescaled to [0, 1]^2.
search_space = trieste.space.Box([0, 0], [1, 1])

# Render the objective as an interactive Plotly surface over the search space.
fig = plot_function_plotly(
    branin, search_space.lower, search_space.upper, grid_density=20
)
fig.update_layout(height=400, width=400)
fig.show()
# %% [markdown]
# ## Sample the observer over the search space
#
# Sometimes we don't have direct access to the objective function. We only have an observer that indirectly observes it. In _Trieste_, the observer outputs a number of datasets, each of which must be labelled so the optimization process knows which is which. In our case, we only have one dataset, the objective. We'll use _Trieste_'s default label for single-model setups, `OBJECTIVE`. We can convert a function with `branin`'s signature to a single-output observer using `mk_observer`.
#
# The optimization procedure will benefit from having some starting data from the objective function to base its search on. We sample five points from the search space and evaluate them on the observer.
# %%
from trieste.acquisition.rule import OBJECTIVE
# Wrap the branin function as an observer whose single output dataset is
# labelled with the default OBJECTIVE tag.
observer = trieste.utils.objectives.mk_observer(branin, OBJECTIVE)

# Seed the optimization with five uniformly sampled points and their
# observed objective values.
num_initial_points = 5
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Model the objective function
#
# The Bayesian optimization procedure estimates the next best points to query by using a probabilistic model of the objective. We'll use Gaussian process regression for this, provided by GPflow. The model will need to be trained on each step as more points are evaluated, so we'll package it with GPflow's Scipy optimizer.
#
# Just like the data output by the observer, the optimization process assumes multiple models, so we'll need to label the model in the same way.
# %%
import gpflow
def build_model(data):
    """Build a labelled GPR model specification from the initial dataset.

    Returns a mapping from the OBJECTIVE tag to a dict holding the GPflow
    model, a Scipy optimizer for training it, and the optimizer arguments
    (capped at 100 L-BFGS iterations per training call).
    """
    # Scale the kernel's signal variance to the empirical variance of the
    # observed objective values.
    empirical_variance = tf.math.reduce_variance(data.observations)
    matern = gpflow.kernels.Matern52(
        variance=empirical_variance, lengthscales=[0.2, 0.2]
    )
    regression = gpflow.models.GPR(data.astuple(), matern, noise_variance=1e-5)
    # The observer is noise-free, so the tiny likelihood variance stays fixed
    # rather than being trained.
    gpflow.set_trainable(regression.likelihood, False)

    optimizer_args = {
        "minimize_args": {"options": {"maxiter": 100}},
    }
    spec = {
        "model": regression,
        "optimizer": gpflow.optimizers.Scipy(),
        "optimizer_args": optimizer_args,
    }
    return {OBJECTIVE: spec}
model = build_model(initial_data[OBJECTIVE])
# %% [markdown]
# ## Run the optimization loop
#
# We can now run the Bayesian optimization loop by defining a `BayesianOptimizer` and calling its `optimize` method.
#
# The optimizer uses an acquisition rule to choose where in the search space to try on each optimization step. We'll use the default acquisition rule, which is Efficient Global Optimization with Expected Improvement.
#
# We'll run the optimizer for fifteen steps.
#
# The optimization loop catches errors so as not to lose progress, which means the optimization loop might not complete and the data from the last step may not exist. Here we'll handle this crudely by asking for the data regardless, using `.try_get_final_datasets()`, which will re-raise the error if one did occur. For a review of how to handle errors systematically, there is a [dedicated tutorial](recovering_from_errors.ipynb). Finally, like the observer, the optimizer outputs labelled datasets, so we'll get the (only) dataset here by indexing with tag `OBJECTIVE`.
# %%
# Run fifteen steps of Bayesian optimization with the default acquisition
# rule (EGO with Expected Improvement).
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)

result = bo.optimize(15, initial_data, model)
# try_get_final_datasets re-raises any error the loop caught, otherwise it
# returns the labelled datasets; we keep only the OBJECTIVE one.
dataset = result.try_get_final_datasets()[OBJECTIVE]
# %% [markdown]
# ## Explore the results
#
# We can now get the best point found by the optimizer. Note this isn't necessarily the point that was last evaluated.
# %%
# Convert the final dataset to NumPy arrays for inspection and plotting.
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()

# Index of the smallest observed value — the best point found so far (not
# necessarily the one evaluated last).
arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))

print(f"query point: {query_points[arg_min_idx, :]}")
print(f"observation: {observations[arg_min_idx, :]}")
# %% [markdown]
# We can visualise how the optimizer performed by plotting all the acquired observations, along with the true function values and optima, either in a two-dimensional contour plot ...
# %%
from util.plotting import plot_function_2d, plot_bo_points
_, ax = plot_function_2d(
branin, search_space.lower, search_space.upper, grid_density=30, contour=True
)
plot_bo_points(query_points, ax[0, 0], num_initial_points, arg_min_idx)
# %% [markdown]
# ... or as a three-dimensional plot
# %%
from util.plotting_plotly import add_bo_points_plotly
fig = plot_function_plotly(
branin, search_space.lower, search_space.upper, grid_density=20
)
fig.update_layout(height=500, width=500)
fig = add_bo_points_plotly(
x=query_points[:, 0],
y=query_points[:, 1],
z=observations[:, 0],
num_init=num_initial_points,
idx_best=arg_min_idx,
fig=fig,
)
fig.show()
# %% [markdown]
# We can also visualise the how each successive point compares the current best.
#
# We produce two plots. The left hand plot shows the observations (crosses and dots), the current best (orange line), and the start of the optimization loop (blue line). The right hand plot is the same as the previous two-dimensional contour plot, but without the resulting observations. The best point is shown in each (purple dot).
# %%
import matplotlib.pyplot as plt
from util.plotting import plot_regret
_, ax = plt.subplots(1, 2)
plot_regret(observations, ax[0], num_init=num_initial_points, idx_best=arg_min_idx)
plot_bo_points(
query_points, ax[1], num_init=num_initial_points, idx_best=arg_min_idx
)
# %% [markdown]
# We can visualise the model over the objective function by plotting the mean and 95% confidence intervals of its predictive distribution. Like with the data before, we can get the model with `.try_get_final_models()` and indexing with `OBJECTIVE`.
# %%
from util.plotting_plotly import plot_gp_plotly
fig = plot_gp_plotly(
result.try_get_final_models()[OBJECTIVE].model,
search_space.lower,
search_space.upper,
grid_density=30
)
fig = add_bo_points_plotly(
x=query_points[:, 0],
y=query_points[:, 1],
z=observations[:, 0],
num_init=num_initial_points,
idx_best=arg_min_idx,
fig=fig,
figrow=1,
figcol=1,
)
fig.show()
# %% [markdown]
# We can also inspect the model hyperparameters, and use the history to see how the length scales evolved over iterations. Note the history is saved at the *start* of each step, and as such never includes the final result, so we'll add that ourselves.
# %%
gpflow.utilities.print_summary(result.try_get_final_models()[OBJECTIVE].model)
ls_list = [
step.models[OBJECTIVE].model.kernel.lengthscales.numpy() # type: ignore
for step in result.history + [result.final_result.unwrap()]
]
ls = np.array(ls_list)
plt.plot(ls[:, 0])
plt.plot(ls[:, 1])
# %% [markdown]
# ## Run the optimizer for more steps
#
# If we need more iterations for better convergence, we can run the optimizer again using the data produced from the last run, as well as the model. We'll visualise the final data.
# %%
result = bo.optimize(
5, result.try_get_final_datasets(), result.try_get_final_models()
)
dataset = result.try_get_final_datasets()[OBJECTIVE]
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
_, ax = plot_function_2d(
branin, search_space.lower, search_space.upper, grid_density=40, contour=True
)
plot_bo_points(
dataset.query_points.numpy(),
ax=ax[0, 0],
num_init=len(dataset.query_points),
idx_best=arg_min_idx,
)
# %% [markdown]
# ## Batch-sequential strategy
#
# Sometimes it is practically convenient to query several points at a time. We can do this in `trieste` using a `BatchAcquisitionRule` and a `BatchAcquisitionFunctionBuilder`, that together recommend a number of query points `num_query_points` (instead of one as previously). The optimizer then queries the observer at all these points simultaneously.
# Here we use the `BatchMonteCarloExpectedImprovement` function. Note that this acquisition function is computed using a Monte-Carlo method (so it requires a `sample_size`), but with a reparametrisation trick, which makes it deterministic.
# %%
qei = trieste.acquisition.BatchMonteCarloExpectedImprovement(sample_size=1000)
batch_rule = trieste.acquisition.rule.BatchAcquisitionRule(
num_query_points=3, builder=qei.using(OBJECTIVE)
)
model = build_model(initial_data[OBJECTIVE])
batch_result = bo.optimize(5, initial_data, model, acquisition_rule=batch_rule)
# %% [markdown]
# We can again visualise the GP model and query points.
# %%
batch_dataset = batch_result.try_get_final_datasets()[OBJECTIVE]
batch_query_points = batch_dataset.query_points.numpy()
batch_observations = batch_dataset.observations.numpy()
fig = plot_gp_plotly(
batch_result.try_get_final_models()[OBJECTIVE].model,
search_space.lower,
search_space.upper,
grid_density=30
)
batch_arg_min_idx = tf.squeeze(tf.argmin(batch_dataset.observations, axis=0))
fig = add_bo_points_plotly(
x=batch_query_points[:, 0],
y=batch_query_points[:, 1],
z=batch_observations[:, 0],
num_init=num_initial_points,
idx_best=batch_arg_min_idx,
fig=fig,
figrow=1,
figcol=1,
)
fig.show()
# %% [markdown]
# We can also compare the regret between the purely sequential approach and the batch one.
# %%
_, ax = plt.subplots(1, 2)
plot_regret(observations, ax[0], num_init=num_initial_points, idx_best=arg_min_idx)
plot_regret(
batch_observations, ax[1], num_init=num_initial_points, idx_best=batch_arg_min_idx
)
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| [
"tensorflow.random.set_seed",
"util.plotting.plot_function_2d",
"trieste.acquisition.BatchMonteCarloExpectedImprovement",
"numpy.random.seed",
"trieste.utils.objectives.mk_observer",
"gpflow.optimizers.Scipy",
"util.plotting.plot_regret",
"tensorflow.argmin",
"util.plotting_plotly.plot_function_plot... | [((120, 140), 'numpy.random.seed', 'np.random.seed', (['(1793)'], {}), '(1793)\n', (134, 140), True, 'import numpy as np\n'), ((141, 165), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(1793)'], {}), '(1793)\n', (159, 165), True, 'import tensorflow as tf\n'), ((565, 598), 'trieste.space.Box', 'trieste.space.Box', (['[0, 0]', '[1, 1]'], {}), '([0, 0], [1, 1])\n', (582, 598), False, 'import trieste\n'), ((606, 695), 'util.plotting_plotly.plot_function_plotly', 'plot_function_plotly', (['branin', 'search_space.lower', 'search_space.upper'], {'grid_density': '(20)'}), '(branin, search_space.lower, search_space.upper,\n grid_density=20)\n', (626, 695), False, 'from util.plotting_plotly import plot_function_plotly\n'), ((1574, 1629), 'trieste.utils.objectives.mk_observer', 'trieste.utils.objectives.mk_observer', (['branin', 'OBJECTIVE'], {}), '(branin, OBJECTIVE)\n', (1610, 1629), False, 'import trieste\n'), ((3857, 3925), 'trieste.bayesian_optimizer.BayesianOptimizer', 'trieste.bayesian_optimizer.BayesianOptimizer', (['observer', 'search_space'], {}), '(observer, search_space)\n', (3901, 3925), False, 'import trieste\n'), ((4725, 4824), 'util.plotting.plot_function_2d', 'plot_function_2d', (['branin', 'search_space.lower', 'search_space.upper'], {'grid_density': '(30)', 'contour': '(True)'}), '(branin, search_space.lower, search_space.upper,\n grid_density=30, contour=True)\n', (4741, 4824), False, 'from util.plotting import plot_function_2d, plot_bo_points\n'), ((4827, 4898), 'util.plotting.plot_bo_points', 'plot_bo_points', (['query_points', 'ax[0, 0]', 'num_initial_points', 'arg_min_idx'], {}), '(query_points, ax[0, 0], num_initial_points, arg_min_idx)\n', (4841, 4898), False, 'from util.plotting import plot_function_2d, plot_bo_points\n'), ((5020, 5109), 'util.plotting_plotly.plot_function_plotly', 'plot_function_plotly', (['branin', 'search_space.lower', 'search_space.upper'], {'grid_density': '(20)'}), 
'(branin, search_space.lower, search_space.upper,\n grid_density=20)\n', (5040, 5109), False, 'from util.plotting_plotly import plot_function_plotly\n'), ((5160, 5315), 'util.plotting_plotly.add_bo_points_plotly', 'add_bo_points_plotly', ([], {'x': 'query_points[:, 0]', 'y': 'query_points[:, 1]', 'z': 'observations[:, 0]', 'num_init': 'num_initial_points', 'idx_best': 'arg_min_idx', 'fig': 'fig'}), '(x=query_points[:, 0], y=query_points[:, 1], z=\n observations[:, 0], num_init=num_initial_points, idx_best=arg_min_idx,\n fig=fig)\n', (5180, 5315), False, 'from util.plotting_plotly import add_bo_points_plotly\n'), ((5864, 5882), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (5876, 5882), True, 'import matplotlib.pyplot as plt\n'), ((5883, 5971), 'util.plotting.plot_regret', 'plot_regret', (['observations', 'ax[0]'], {'num_init': 'num_initial_points', 'idx_best': 'arg_min_idx'}), '(observations, ax[0], num_init=num_initial_points, idx_best=\n arg_min_idx)\n', (5894, 5971), False, 'from util.plotting import plot_regret\n'), ((5967, 6058), 'util.plotting.plot_bo_points', 'plot_bo_points', (['query_points', 'ax[1]'], {'num_init': 'num_initial_points', 'idx_best': 'arg_min_idx'}), '(query_points, ax[1], num_init=num_initial_points, idx_best=\n arg_min_idx)\n', (5981, 6058), False, 'from util.plotting import plot_function_2d, plot_bo_points\n'), ((6532, 6707), 'util.plotting_plotly.add_bo_points_plotly', 'add_bo_points_plotly', ([], {'x': 'query_points[:, 0]', 'y': 'query_points[:, 1]', 'z': 'observations[:, 0]', 'num_init': 'num_initial_points', 'idx_best': 'arg_min_idx', 'fig': 'fig', 'figrow': '(1)', 'figcol': '(1)'}), '(x=query_points[:, 0], y=query_points[:, 1], z=\n observations[:, 0], num_init=num_initial_points, idx_best=arg_min_idx,\n fig=fig, figrow=1, figcol=1)\n', (6552, 6707), False, 'from util.plotting_plotly import add_bo_points_plotly\n'), ((7262, 7279), 'numpy.array', 'np.array', (['ls_list'], {}), '(ls_list)\n', (7270, 
7279), True, 'import numpy as np\n'), ((7280, 7298), 'matplotlib.pyplot.plot', 'plt.plot', (['ls[:, 0]'], {}), '(ls[:, 0])\n', (7288, 7298), True, 'import matplotlib.pyplot as plt\n'), ((7299, 7317), 'matplotlib.pyplot.plot', 'plt.plot', (['ls[:, 1]'], {}), '(ls[:, 1])\n', (7307, 7317), True, 'import matplotlib.pyplot as plt\n'), ((7784, 7883), 'util.plotting.plot_function_2d', 'plot_function_2d', (['branin', 'search_space.lower', 'search_space.upper'], {'grid_density': '(40)', 'contour': '(True)'}), '(branin, search_space.lower, search_space.upper,\n grid_density=40, contour=True)\n', (7800, 7883), False, 'from util.plotting import plot_function_2d, plot_bo_points\n'), ((8676, 8748), 'trieste.acquisition.BatchMonteCarloExpectedImprovement', 'trieste.acquisition.BatchMonteCarloExpectedImprovement', ([], {'sample_size': '(1000)'}), '(sample_size=1000)\n', (8730, 8748), False, 'import trieste\n'), ((9482, 9681), 'util.plotting_plotly.add_bo_points_plotly', 'add_bo_points_plotly', ([], {'x': 'batch_query_points[:, 0]', 'y': 'batch_query_points[:, 1]', 'z': 'batch_observations[:, 0]', 'num_init': 'num_initial_points', 'idx_best': 'batch_arg_min_idx', 'fig': 'fig', 'figrow': '(1)', 'figcol': '(1)'}), '(x=batch_query_points[:, 0], y=batch_query_points[:, 1],\n z=batch_observations[:, 0], num_init=num_initial_points, idx_best=\n batch_arg_min_idx, fig=fig, figrow=1, figcol=1)\n', (9502, 9681), False, 'from util.plotting_plotly import add_bo_points_plotly\n'), ((9843, 9861), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (9855, 9861), True, 'import matplotlib.pyplot as plt\n'), ((9862, 9950), 'util.plotting.plot_regret', 'plot_regret', (['observations', 'ax[0]'], {'num_init': 'num_initial_points', 'idx_best': 'arg_min_idx'}), '(observations, ax[0], num_init=num_initial_points, idx_best=\n arg_min_idx)\n', (9873, 9950), False, 'from util.plotting import plot_regret\n'), ((9946, 10045), 'util.plotting.plot_regret', 'plot_regret', 
(['batch_observations', 'ax[1]'], {'num_init': 'num_initial_points', 'idx_best': 'batch_arg_min_idx'}), '(batch_observations, ax[1], num_init=num_initial_points,\n idx_best=batch_arg_min_idx)\n', (9957, 10045), False, 'from util.plotting import plot_regret\n'), ((2345, 2387), 'tensorflow.math.reduce_variance', 'tf.math.reduce_variance', (['data.observations'], {}), '(data.observations)\n', (2368, 2387), True, 'import tensorflow as tf\n'), ((2401, 2468), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {'variance': 'variance', 'lengthscales': '[0.2, 0.2]'}), '(variance=variance, lengthscales=[0.2, 0.2])\n', (2424, 2468), False, 'import gpflow\n'), ((2546, 2589), 'gpflow.set_trainable', 'gpflow.set_trainable', (['gpr.likelihood', '(False)'], {}), '(gpr.likelihood, False)\n', (2566, 2589), False, 'import gpflow\n'), ((4309, 4340), 'tensorflow.argmin', 'tf.argmin', (['observations'], {'axis': '(0)'}), '(observations, axis=0)\n', (4318, 4340), True, 'import tensorflow as tf\n'), ((7735, 7774), 'tensorflow.argmin', 'tf.argmin', (['dataset.observations'], {'axis': '(0)'}), '(dataset.observations, axis=0)\n', (7744, 7774), True, 'import tensorflow as tf\n'), ((9428, 9473), 'tensorflow.argmin', 'tf.argmin', (['batch_dataset.observations'], {'axis': '(0)'}), '(batch_dataset.observations, axis=0)\n', (9437, 9473), True, 'import tensorflow as tf\n'), ((2659, 2684), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (2682, 2684), False, 'import gpflow\n')] |
# <NAME> - 130401064
# -*- coding: utf-8 -*-
import numpy as np
def RMSE(pred, target):
    """Return the root-mean-square error between *target* and *pred*."""
    diff = np.subtract(target, pred)
    return np.sqrt(np.mean(diff * diff))
# Read the data file; drop *all* blank lines.
# (The original `data.remove("\n")` removed only the first blank line.)
with open("veriler.txt") as f:
    data = [line for line in f if line != "\n"]
# Observations go into y; x is simply the 0-based sample index.
y = np.array(data, dtype=int)
x = np.arange(len(y), dtype=int)
# Open the report file; all result strings are emitted unchanged (Turkish).
with open("sonuclar.txt", "w+") as f_sonuc:
    f_sonuc.write("Tum veri uzerine tek bir polinom tanimlandiginda:\n\n")
    ## A single polynomial fitted over the whole data set:
    RMSE_list = [0] * 6
    for i in range(6):
        # poly: fitted polynomial of degree i+1 over the full data set
        poly = np.poly1d(np.polyfit(x, y, i + 1))
        f_sonuc.write(f"Polinom derecesi: {i+1} \n")
        f_sonuc.write(f"Katsayilar: {poly.coeffs} \n")
        # RMSE of this fit
        RMSE_list[i] = RMSE(poly(x), y)
        f_sonuc.write(f"RMSE: {RMSE_list[i]:.3f} \n\n")
    # Degree (1-based) giving the lowest RMSE, reported together with its RMSE.
    eniyi_derece = np.argmin(RMSE_list) + 1
    f_sonuc.write(f"En dusuk hatayi {eniyi_derece}. dereceden polinom vermektedir.\n")
    f_sonuc.write(f"RMSE: {RMSE_list[eniyi_derece-1]:.3f} \n\n\n")
    ## A separate polynomial fitted to each block of ten samples:
    f_sonuc.write("Her bir onluk icin farkli polinomlar bulundugunda:\n\n")
    # Number of ten-sample blocks, via ceiling division. This fixes the original
    # `int(len(x)/10) + 1`, which produced an extra *empty* block (and a crash
    # in np.argmin) whenever len(x) was an exact multiple of ten.
    onluk_sayisi = (len(x) + 9) // 10
    for i in range(onluk_sayisi):
        # Index range of the current block and the corresponding x/y samples.
        i_min = i * 10
        i_max = min(i * 10 + 9, len(x) - 1)
        x_curr = x[i_min:i_max + 1]
        y_curr = y[i_min:i_max + 1]
        # Candidate polynomials per degree and their RMSEs for this block.
        poly_lst = []
        RMSE_list = []
        # If the block holds fewer than 7 samples, at most
        # (number of samples in block) - 1 degrees are tried.
        for j in range(min(i_max - i_min, 6)):
            poly_lst.append(np.poly1d(np.polyfit(x_curr, y_curr, j + 1)))
            RMSE_list.append(RMSE(poly_lst[j](x_curr), y_curr))
        # A block with a single trailing sample cannot be fitted; skip it
        # instead of crashing in np.argmin on an empty list.
        if not RMSE_list:
            continue
        # Best-fitting degree for this block, written together with its RMSE.
        eniyi_derece = np.argmin(RMSE_list) + 1
        f_sonuc.write(f"x : [ {x[i_min]} {x[i_max]} ]\n")
        f_sonuc.write(f"Polinom derecesi: {eniyi_derece}, ")
        f_sonuc.write(f"RMSE: {RMSE_list[eniyi_derece-1]:.3f} \n\n")
| [
"numpy.subtract",
"numpy.polyfit",
"numpy.argmin",
"numpy.mean",
"numpy.array"
] | [((402, 427), 'numpy.array', 'np.array', (['data'], {'dtype': 'int'}), '(data, dtype=int)\n', (410, 427), True, 'import numpy as np\n'), ((104, 129), 'numpy.subtract', 'np.subtract', (['target', 'pred'], {}), '(target, pred)\n', (115, 129), True, 'import numpy as np\n'), ((1120, 1140), 'numpy.argmin', 'np.argmin', (['RMSE_list'], {}), '(RMSE_list)\n', (1129, 1140), True, 'import numpy as np\n'), ((143, 160), 'numpy.mean', 'np.mean', (['(err ** 2)'], {}), '(err ** 2)\n', (150, 160), True, 'import numpy as np\n'), ((772, 795), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(i + 1)'], {}), '(x, y, i + 1)\n', (782, 795), True, 'import numpy as np\n'), ((2371, 2391), 'numpy.argmin', 'np.argmin', (['RMSE_list'], {}), '(RMSE_list)\n', (2380, 2391), True, 'import numpy as np\n'), ((2182, 2215), 'numpy.polyfit', 'np.polyfit', (['x_curr', 'y_curr', '(j + 1)'], {}), '(x_curr, y_curr, j + 1)\n', (2192, 2215), True, 'import numpy as np\n')] |
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import logging
import datetime
import numpy as np
import isceobj
from isceobj.Constants import SPEED_OF_LIGHT
from isceobj.Alos2Proc.Alos2ProcPublic import overlapFrequency
from contrib.alos2proc.alos2proc import rg_filter
from contrib.alos2proc.alos2proc import resamp
from contrib.alos2proc.alos2proc import mbf
logger = logging.getLogger('isce.alos2insar.runPrepareSlc')
def _replaceSlcFile(slcFile, tmpSlc):
    '''Overwrite slcFile (data plus its .vrt/.xml metadata) with tmpSlc.

    tmpSlc is the output of a processing step (its .xml/.vrt were created by
    that step); the original slcFile and metadata, if present, are deleted,
    tmpSlc is renamed to slcFile, and fresh metadata is rendered. This was
    previously duplicated five times inside runPrepareSlc.
    '''
    if os.path.isfile(slcFile):
        os.remove(slcFile)
        os.remove(slcFile+'.vrt')
        os.remove(slcFile+'.xml')

    img = isceobj.createSlcImage()
    img.load(tmpSlc + '.xml')
    #remove original metadata of the temporary file
    os.remove(tmpSlc + '.vrt')
    os.remove(tmpSlc + '.xml')
    os.rename(tmpSlc, slcFile)
    #create new metadata under the final name
    img.setFilename(slcFile)
    img.extraFilename = slcFile + '.vrt'
    img.setAccessMode('READ')
    img.renderHdr()


def runPrepareSlc(self):
    '''Prepare reference and secondary SLCs for interferometry.

    Four steps, each applied per frame/swath:
      1. crop both SLCs to their common ground coverage (always for
         ScanSAR-stripmap, otherwise only when requested);
      2. range-filter both SLCs to their common range band;
      3. resample the secondary SLC to the reference sample grid;
      4. azimuth-filter (MBF) for burst synchronization when needed.
    Track/swath parameters are updated in place and saved at the end.
    '''
    catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
    self.updateParamemetersFromUser()

    referenceTrack = self._insar.loadTrack(reference=True)
    secondaryTrack = self._insar.loadTrack(reference=False)

    ####################################################
    #1. crop slc
    ####################################################
    #for ScanSAR-stripmap interferometry, we always crop slcs
    #for other cases, up to users
    if ((self._insar.modeCombination == 31) or (self._insar.modeCombination == 32)) or (self.cropSlc):
        for i, frameNumber in enumerate(self._insar.referenceFrames):
            frameDir = 'f{}_{}'.format(i+1, frameNumber)
            os.chdir(frameDir)
            for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
                swathDir = 's{}'.format(swathNumber)
                os.chdir(swathDir)

                print('cropping frame {}, swath {}'.format(frameNumber, swathNumber))

                referenceSwath = referenceTrack.frames[i].swaths[j]
                secondarySwath = secondaryTrack.frames[i].swaths[j]

                #crop reference
                cropSlc(referenceTrack.orbit, referenceSwath, self._insar.referenceSlc, secondaryTrack.orbit, secondarySwath, edge=0, useVirtualFile=self.useVirtualFile)
                #crop secondary; since secondary may go through resampling, a small
                #edge (e.g. edge=9) could be kept, but edge=0 is used here
                cropSlc(secondaryTrack.orbit, secondarySwath, self._insar.secondarySlc, referenceTrack.orbit, referenceSwath, edge=0, useVirtualFile=self.useVirtualFile)

                os.chdir('../')
            os.chdir('../')

    ####################################################
    #2. range-filter slc
    ####################################################
    #compute filtering parameters, radarwavelength and range bandwidth should be the same across all swaths and frames
    centerfreq1 = SPEED_OF_LIGHT / referenceTrack.radarWavelength
    bandwidth1 = referenceTrack.frames[0].swaths[0].rangeBandwidth
    centerfreq2 = SPEED_OF_LIGHT / secondaryTrack.radarWavelength
    bandwidth2 = secondaryTrack.frames[0].swaths[0].rangeBandwidth
    overlapfreq = overlapFrequency(centerfreq1, bandwidth1, centerfreq2, bandwidth2)

    if overlapfreq is None:
        raise Exception('there is no overlap bandwidth in range')
    overlapbandwidth = overlapfreq[1] - overlapfreq[0]
    if overlapbandwidth < 3e6:
        print('overlap bandwidth: {}, percentage: {}%'.format(overlapbandwidth, 100.0*overlapbandwidth/bandwidth1))
        raise Exception('there is not enough overlap bandwidth in range')
    centerfreq = (overlapfreq[1] + overlapfreq[0]) / 2.0

    for i, frameNumber in enumerate(self._insar.referenceFrames):
        frameDir = 'f{}_{}'.format(i+1, frameNumber)
        os.chdir(frameDir)
        for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
            swathDir = 's{}'.format(swathNumber)
            os.chdir(swathDir)

            print('range filtering frame {}, swath {}'.format(frameNumber, swathNumber))

            referenceSwath = referenceTrack.frames[i].swaths[j]
            secondarySwath = secondaryTrack.frames[i].swaths[j]

            #filter reference: skip when its band already matches the common band
            if abs(centerfreq1 - centerfreq) < 1.0 and (bandwidth1 - 1.0) < overlapbandwidth:
                print('no need to range filter {}'.format(self._insar.referenceSlc))
            else:
                print('range filter {}'.format(self._insar.referenceSlc))
                tmpSlc = 'tmp.slc'
                rg_filter(self._insar.referenceSlc, 1, [tmpSlc], [overlapbandwidth / referenceSwath.rangeSamplingRate],
                    [(centerfreq - centerfreq1) / referenceSwath.rangeSamplingRate],
                    257, 2048, 0.1, 0, 0.0)
                _replaceSlcFile(self._insar.referenceSlc, tmpSlc)

                referenceTrack.radarWavelength = SPEED_OF_LIGHT/centerfreq
                referenceSwath.rangeBandwidth = overlapbandwidth

            #filter secondary: skip when its band already matches the common band
            if abs(centerfreq2 - centerfreq) < 1.0 and (bandwidth2 - 1.0) < overlapbandwidth:
                print('no need to range filter {}'.format(self._insar.secondarySlc))
            else:
                print('range filter {}'.format(self._insar.secondarySlc))
                tmpSlc = 'tmp.slc'
                rg_filter(self._insar.secondarySlc, 1, [tmpSlc], [overlapbandwidth / secondarySwath.rangeSamplingRate],
                    [(centerfreq - centerfreq2) / secondarySwath.rangeSamplingRate],
                    257, 2048, 0.1, 0, 0.0)
                _replaceSlcFile(self._insar.secondarySlc, tmpSlc)

                secondaryTrack.radarWavelength = SPEED_OF_LIGHT/centerfreq
                secondarySwath.rangeBandwidth = overlapbandwidth

            os.chdir('../')
        os.chdir('../')

    ####################################################
    #3. equalize sample size
    ####################################################
    for i, frameNumber in enumerate(self._insar.referenceFrames):
        frameDir = 'f{}_{}'.format(i+1, frameNumber)
        os.chdir(frameDir)
        for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
            swathDir = 's{}'.format(swathNumber)
            os.chdir(swathDir)

            print('equalize sample size frame {}, swath {}'.format(frameNumber, swathNumber))

            referenceSwath = referenceTrack.frames[i].swaths[j]
            secondarySwath = secondaryTrack.frames[i].swaths[j]

            if abs(referenceSwath.rangeSamplingRate - secondarySwath.rangeSamplingRate) < 1.0 and abs(referenceSwath.prf - secondarySwath.prf) < 1.0:
                print('no need to resample {}.'.format(self._insar.secondarySlc))
            else:
                #resample secondary onto the reference range/azimuth sample grid
                outWidth = round(secondarySwath.numberOfSamples / secondarySwath.rangeSamplingRate * referenceSwath.rangeSamplingRate)
                outLength = round(secondarySwath.numberOfLines / secondarySwath.prf * referenceSwath.prf)
                tmpSlc = 'tmp.slc'
                resamp(self._insar.secondarySlc, tmpSlc, 'fake', 'fake', outWidth, outLength, secondarySwath.prf, secondarySwath.dopplerVsPixel,
                    rgcoef=[0.0, (1.0/referenceSwath.rangeSamplingRate) / (1.0/secondarySwath.rangeSamplingRate) - 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                    azcoef=[0.0, 0.0, (1.0/referenceSwath.prf) / (1.0/secondarySwath.prf) - 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                    azpos_off=0.0)
                _replaceSlcFile(self._insar.secondarySlc, tmpSlc)

                #update parameters
                #update doppler and azfmrate first: re-fit the polynomials on the
                #new (reference-rate) pixel grid
                index2 = np.arange(outWidth)
                index = np.arange(outWidth) * (1.0/referenceSwath.rangeSamplingRate) / (1.0/secondarySwath.rangeSamplingRate)
                dop = np.polyval(secondarySwath.dopplerVsPixel[::-1], index)
                p = np.polyfit(index2, dop, 3)
                secondarySwath.dopplerVsPixel = [p[3], p[2], p[1], p[0]]

                azfmrate = np.polyval(secondarySwath.azimuthFmrateVsPixel[::-1], index)
                p = np.polyfit(index2, azfmrate, 3)
                secondarySwath.azimuthFmrateVsPixel = [p[3], p[2], p[1], p[0]]

                secondarySwath.numberOfSamples = outWidth
                secondarySwath.numberOfLines = outLength
                secondarySwath.prf = referenceSwath.prf
                secondarySwath.rangeSamplingRate = referenceSwath.rangeSamplingRate
                secondarySwath.rangePixelSize = referenceSwath.rangePixelSize
                secondarySwath.azimuthPixelSize = referenceSwath.azimuthPixelSize
                secondarySwath.azimuthLineInterval = referenceSwath.azimuthLineInterval
                secondarySwath.prfFraction = referenceSwath.prfFraction

            os.chdir('../')
        os.chdir('../')

    ####################################################
    #4. mbf
    ####################################################
    for i, frameNumber in enumerate(self._insar.referenceFrames):
        frameDir = 'f{}_{}'.format(i+1, frameNumber)
        os.chdir(frameDir)
        for j, swathNumber in enumerate(range(self._insar.startingSwath, self._insar.endingSwath + 1)):
            swathDir = 's{}'.format(swathNumber)
            os.chdir(swathDir)

            print('azimuth filter frame {}, swath {}'.format(frameNumber, swathNumber))

            referenceSwath = referenceTrack.frames[i].swaths[j]
            secondarySwath = secondaryTrack.frames[i].swaths[j]

            #using Piyush's code for computing range and azimuth offsets
            midRange = referenceSwath.startingRange + referenceSwath.rangePixelSize * referenceSwath.numberOfSamples * 0.5
            midSensingStart = referenceSwath.sensingStart + datetime.timedelta(seconds = referenceSwath.numberOfLines * 0.5 / referenceSwath.prf)
            llh = referenceTrack.orbit.rdr2geo(midSensingStart, midRange)
            slvaz, slvrng = secondaryTrack.orbit.geo2rdr(llh)
            ###Translate to offsets
            #at this point, secondary range pixel size and prf should be the same as those of reference
            rgoff = ((slvrng - secondarySwath.startingRange) / referenceSwath.rangePixelSize) - referenceSwath.numberOfSamples * 0.5
            #azoff is computed for completeness but only rgoff is used below
            azoff = ((slvaz - secondarySwath.sensingStart).total_seconds() * referenceSwath.prf) - referenceSwath.numberOfLines * 0.5

            #filter reference
            if not ((self._insar.modeCombination == 21) and (self._insar.burstSynchronization <= self.burstSynchronizationThreshold)):
                print('no need to azimuth filter {}.'.format(self._insar.referenceSlc))
            else:
                #secondary Doppler polynomial evaluated on the reference pixel grid
                index = np.arange(referenceSwath.numberOfSamples) + rgoff
                dop = np.polyval(secondarySwath.dopplerVsPixel[::-1], index)
                p = np.polyfit(index-rgoff, dop, 3)
                dopplerVsPixelSecondary = [p[3], p[2], p[1], p[0]]

                tmpSlc = 'tmp.slc'
                mbf(self._insar.referenceSlc, tmpSlc, referenceSwath.prf, 1.0,
                    referenceSwath.burstLength, referenceSwath.burstCycleLength-referenceSwath.burstLength,
                    self._insar.burstUnsynchronizedTime * referenceSwath.prf,
                    (referenceSwath.burstStartTime - referenceSwath.sensingStart).total_seconds() * referenceSwath.prf,
                    referenceSwath.azimuthFmrateVsPixel, referenceSwath.dopplerVsPixel, dopplerVsPixelSecondary)
                _replaceSlcFile(self._insar.referenceSlc, tmpSlc)

            #filter secondary
            if not(
                ((self._insar.modeCombination == 21) and (self._insar.burstSynchronization <= self.burstSynchronizationThreshold)) or \
                (self._insar.modeCombination == 31)
                ):
                print('no need to azimuth filter {}.'.format(self._insar.secondarySlc))
            else:
                #reference Doppler polynomial evaluated on the secondary pixel grid
                index = np.arange(secondarySwath.numberOfSamples) - rgoff
                dop = np.polyval(referenceSwath.dopplerVsPixel[::-1], index)
                p = np.polyfit(index+rgoff, dop, 3)
                dopplerVsPixelReference = [p[3], p[2], p[1], p[0]]

                tmpSlc = 'tmp.slc'
                mbf(self._insar.secondarySlc, tmpSlc, secondarySwath.prf, 1.0,
                    secondarySwath.burstLength, secondarySwath.burstCycleLength-secondarySwath.burstLength,
                    -self._insar.burstUnsynchronizedTime * secondarySwath.prf,
                    (secondarySwath.burstStartTime - secondarySwath.sensingStart).total_seconds() * secondarySwath.prf,
                    secondarySwath.azimuthFmrateVsPixel, secondarySwath.dopplerVsPixel, dopplerVsPixelReference)
                _replaceSlcFile(self._insar.secondarySlc, tmpSlc)

            os.chdir('../')
        os.chdir('../')

    #in case parameters changed
    self._insar.saveTrack(referenceTrack, reference=True)
    self._insar.saveTrack(secondaryTrack, reference=False)

    catalog.printToLog(logger, "runPrepareSlc")
    self._insar.procDoc.addAllFromCatalog(catalog)
def cropSlc(orbit, swath, slc, orbit2, swath2, edge=0, useVirtualFile=True):
    from isceobj.Alos2Proc.Alos2ProcPublic import find_vrt_keyword
    from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
    '''
    Crop an SLC file to (roughly) the area it shares with a second image.

    orbit:          orbit of the image to be cropped
    swath:          swath of the image to be cropped (updated in place)
    slc:            file name of the image to be cropped (rewritten in place)
    orbit2:         orbit of the other image
    swath2:         swath of the other image
    edge:           extra lines/samples kept around the common area
    useVirtualFile: if True only rewrite the .vrt/.xml so they window the
                    original binary data; if False physically rewrite the
                    binary SLC file

    Raises an exception when the overlap is under 1000 lines or 1000 samples;
    returns without doing anything when cropping would remove fewer than
    100 lines/samples on every side.
    '''
    #find topleft and lowerright corners of the other image in this image's
    #line/sample coordinates: project each corner of image 2 to the ground
    #with orbit2, then back into this image's radar geometry with orbit
    #all indices start with 0
    corner = []
    for x in [[0, 0], [swath2.numberOfLines -1, swath2.numberOfSamples-1]]:
        line2 = x[0]
        sample2 = x[1]
        rg2 = swath2.startingRange + swath2.rangePixelSize * sample2
        az2 = swath2.sensingStart + datetime.timedelta(seconds = line2 / swath2.prf)
        llh2 = orbit2.rdr2geo(az2, rg2)
        az, rg = orbit.geo2rdr(llh2)
        line = (az - swath.sensingStart).total_seconds() * swath.prf
        sample = (rg - swath.startingRange) / swath.rangePixelSize
        corner.append([line, sample])
    #image (to be cropped) bounds
    firstLine = 0
    lastLine = swath.numberOfLines-1
    firstSample = 0
    lastSample = swath.numberOfSamples-1
    #the other image's bounds inside image (to be cropped), padded by edge
    #edge = 9
    firstLine2 = int(corner[0][0] - edge)
    lastLine2 = int(corner[1][0] + edge)
    firstSample2 = int(corner[0][1] - edge)
    lastSample2 = int(corner[1][1] + edge)
    #image (to be cropped) output bounds: intersection of the two regions
    firstLine3 = max(firstLine, firstLine2)
    lastLine3 = min(lastLine, lastLine2)
    firstSample3 = max(firstSample, firstSample2)
    lastSample3 = min(lastSample, lastSample2)
    numberOfSamples3 = lastSample3-firstSample3+1
    numberOfLines3 = lastLine3-firstLine3+1
    #check if there is overlap
    if lastLine3 - firstLine3 +1 < 1000:
        raise Exception('azimuth overlap < 1000 lines, not enough area for InSAR\n')
    if lastSample3 - firstSample3 +1 < 1000:
        raise Exception('range overlap < 1000 samples, not enough area for InSAR\n')
    #check if there is a need to crop image: skip when the crop would shave
    #fewer than 100 lines/samples off every side
    if abs(firstLine3-firstLine) < 100 and abs(lastLine3-lastLine) < 100 and \
       abs(firstSample3-firstSample) < 100 and abs(lastSample3-lastSample) < 100:
        print('no need to crop {}. nothing is done by crop.'.format(slc))
        return
    #crop image
    if useVirtualFile:
        #read the windowing parameters of the existing vrt
        SourceFilename = find_vrt_keyword(slc+'.vrt', 'SourceFilename')
        ImageOffset = int(find_vrt_keyword(slc+'.vrt', 'ImageOffset'))
        PixelOffset = int(find_vrt_keyword(slc+'.vrt', 'PixelOffset'))
        LineOffset = int(find_vrt_keyword(slc+'.vrt', 'LineOffset'))
        #overwrite vrt and xml
        img = isceobj.createImage()
        img.load(slc+'.xml')
        img.width = numberOfSamples3
        img.length = numberOfLines3
        img.renderHdr()
        #overwrite vrt: advance ImageOffset to the new first line/sample
        #(8 bytes per complex64 pixel)
        with open(slc+'.vrt', 'w') as fid:
            fid.write('''<VRTDataset rasterXSize="{0}" rasterYSize="{1}">
    <VRTRasterBand band="1" dataType="CFloat32" subClass="VRTRawRasterBand">
        <SourceFilename relativeToVRT="0">{2}</SourceFilename>
        <ByteOrder>MSB</ByteOrder>
        <ImageOffset>{3}</ImageOffset>
        <PixelOffset>8</PixelOffset>
        <LineOffset>{4}</LineOffset>
    </VRTRasterBand>
</VRTDataset>'''.format(numberOfSamples3,
                        numberOfLines3,
                        SourceFilename,
                        ImageOffset + firstLine3*LineOffset + firstSample3*8,
                        LineOffset))
    else:
        #read and crop data
        with open(slc, 'rb') as f:
            f.seek(firstLine3 * swath.numberOfSamples * np.dtype(np.complex64).itemsize, 0)
            data = np.fromfile(f, dtype=np.complex64, count=numberOfLines3 * swath.numberOfSamples)\
                .reshape(numberOfLines3,swath.numberOfSamples)
            data2 = data[:, firstSample3:lastSample3+1]
        #overwrite original
        data2.astype(np.complex64).tofile(slc)
        #create new vrt and xml
        os.remove(slc + '.xml')
        os.remove(slc + '.vrt')
        create_xml(slc, numberOfSamples3, numberOfLines3, 'slc')
    #update parameters
    #update doppler and azfmrate first: re-fit the cubic polynomials so that
    #pixel index 0 corresponds to the new first sample
    dop = np.polyval(swath.dopplerVsPixel[::-1], np.arange(swath.numberOfSamples))
    dop3 = dop[firstSample3:lastSample3+1]
    p = np.polyfit(np.arange(numberOfSamples3), dop3, 3)
    swath.dopplerVsPixel = [p[3], p[2], p[1], p[0]]
    azfmrate = np.polyval(swath.azimuthFmrateVsPixel[::-1], np.arange(swath.numberOfSamples))
    azfmrate3 = azfmrate[firstSample3:lastSample3+1]
    p = np.polyfit(np.arange(numberOfSamples3), azfmrate3, 3)
    swath.azimuthFmrateVsPixel = [p[3], p[2], p[1], p[0]]
    swath.numberOfSamples = numberOfSamples3
    swath.numberOfLines = numberOfLines3
    swath.startingRange += firstSample3 * swath.rangePixelSize
    swath.sensingStart += datetime.timedelta(seconds = firstLine3 / swath.prf)
    #no need to update frame and track, as parameters requiring changes are determined
    #in swath and frame mosaicking, which is not yet done at this point.
| [
"contrib.alos2proc.alos2proc.rg_filter",
"os.remove",
"numpy.polyfit",
"isceobj.Alos2Proc.Alos2ProcPublic.overlapFrequency",
"os.path.isfile",
"numpy.arange",
"contrib.alos2proc.alos2proc.resamp",
"os.chdir",
"numpy.polyval",
"datetime.timedelta",
"os.rename",
"isceobj.Catalog.createCatalog",
... | [((400, 450), 'logging.getLogger', 'logging.getLogger', (['"""isce.alos2insar.runPrepareSlc"""'], {}), "('isce.alos2insar.runPrepareSlc')\n", (417, 450), False, 'import logging\n'), ((522, 577), 'isceobj.Catalog.createCatalog', 'isceobj.Catalog.createCatalog', (['self._insar.procDoc.name'], {}), '(self._insar.procDoc.name)\n', (551, 577), False, 'import isceobj\n'), ((2901, 2967), 'isceobj.Alos2Proc.Alos2ProcPublic.overlapFrequency', 'overlapFrequency', (['centerfreq1', 'bandwidth1', 'centerfreq2', 'bandwidth2'], {}), '(centerfreq1, bandwidth1, centerfreq2, bandwidth2)\n', (2917, 2967), False, 'from isceobj.Alos2Proc.Alos2ProcPublic import overlapFrequency\n'), ((22149, 22199), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(firstLine3 / swath.prf)'}), '(seconds=firstLine3 / swath.prf)\n', (22167, 22199), False, 'import datetime\n'), ((3524, 3542), 'os.chdir', 'os.chdir', (['frameDir'], {}), '(frameDir)\n', (3532, 3542), False, 'import os\n'), ((7813, 7828), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (7821, 7828), False, 'import os\n'), ((8105, 8123), 'os.chdir', 'os.chdir', (['frameDir'], {}), '(frameDir)\n', (8113, 8123), False, 'import os\n'), ((11578, 11593), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (11586, 11593), False, 'import os\n'), ((11849, 11867), 'os.chdir', 'os.chdir', (['frameDir'], {}), '(frameDir)\n', (11857, 11867), False, 'import os\n'), ((16914, 16929), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (16922, 16929), False, 'import os\n'), ((19611, 19659), 'isceobj.Alos2Proc.Alos2ProcPublic.find_vrt_keyword', 'find_vrt_keyword', (["(slc + '.vrt')", '"""SourceFilename"""'], {}), "(slc + '.vrt', 'SourceFilename')\n", (19627, 19659), False, 'from isceobj.Alos2Proc.Alos2ProcPublic import find_vrt_keyword\n'), ((19925, 19946), 'isceobj.createImage', 'isceobj.createImage', ([], {}), '()\n', (19944, 19946), False, 'import isceobj\n'), ((21284, 21307), 'os.remove', 'os.remove', (["(slc + 
'.xml')"], {}), "(slc + '.xml')\n", (21293, 21307), False, 'import os\n'), ((21316, 21339), 'os.remove', 'os.remove', (["(slc + '.vrt')"], {}), "(slc + '.vrt')\n", (21325, 21339), False, 'import os\n'), ((21348, 21404), 'isceobj.Alos2Proc.Alos2ProcPublic.create_xml', 'create_xml', (['slc', 'numberOfSamples3', 'numberOfLines3', '"""slc"""'], {}), "(slc, numberOfSamples3, numberOfLines3, 'slc')\n", (21358, 21404), False, 'from isceobj.Alos2Proc.Alos2ProcPublic import create_xml\n'), ((21517, 21549), 'numpy.arange', 'np.arange', (['swath.numberOfSamples'], {}), '(swath.numberOfSamples)\n', (21526, 21549), True, 'import numpy as np\n'), ((21613, 21640), 'numpy.arange', 'np.arange', (['numberOfSamples3'], {}), '(numberOfSamples3)\n', (21622, 21640), True, 'import numpy as np\n'), ((21764, 21796), 'numpy.arange', 'np.arange', (['swath.numberOfSamples'], {}), '(swath.numberOfSamples)\n', (21773, 21796), True, 'import numpy as np\n'), ((21870, 21897), 'numpy.arange', 'np.arange', (['numberOfSamples3'], {}), '(numberOfSamples3)\n', (21879, 21897), True, 'import numpy as np\n'), ((1207, 1225), 'os.chdir', 'os.chdir', (['frameDir'], {}), '(frameDir)\n', (1215, 1225), False, 'import os\n'), ((2341, 2356), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (2349, 2356), False, 'import os\n'), ((3708, 3726), 'os.chdir', 'os.chdir', (['swathDir'], {}), '(swathDir)\n', (3716, 3726), False, 'import os\n'), ((7789, 7804), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (7797, 7804), False, 'import os\n'), ((8289, 8307), 'os.chdir', 'os.chdir', (['swathDir'], {}), '(swathDir)\n', (8297, 8307), False, 'import os\n'), ((11554, 11569), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (11562, 11569), False, 'import os\n'), ((12033, 12051), 'os.chdir', 'os.chdir', (['swathDir'], {}), '(swathDir)\n', (12041, 12051), False, 'import os\n'), ((16890, 16905), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (16898, 16905), False, 'import os\n'), ((17912, 
17958), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(line2 / swath2.prf)'}), '(seconds=line2 / swath2.prf)\n', (17930, 17958), False, 'import datetime\n'), ((19687, 19732), 'isceobj.Alos2Proc.Alos2ProcPublic.find_vrt_keyword', 'find_vrt_keyword', (["(slc + '.vrt')", '"""ImageOffset"""'], {}), "(slc + '.vrt', 'ImageOffset')\n", (19703, 19732), False, 'from isceobj.Alos2Proc.Alos2ProcPublic import find_vrt_keyword\n'), ((19761, 19806), 'isceobj.Alos2Proc.Alos2ProcPublic.find_vrt_keyword', 'find_vrt_keyword', (["(slc + '.vrt')", '"""PixelOffset"""'], {}), "(slc + '.vrt', 'PixelOffset')\n", (19777, 19806), False, 'from isceobj.Alos2Proc.Alos2ProcPublic import find_vrt_keyword\n'), ((19835, 19879), 'isceobj.Alos2Proc.Alos2ProcPublic.find_vrt_keyword', 'find_vrt_keyword', (["(slc + '.vrt')", '"""LineOffset"""'], {}), "(slc + '.vrt', 'LineOffset')\n", (19851, 19879), False, 'from isceobj.Alos2Proc.Alos2ProcPublic import find_vrt_keyword\n'), ((1403, 1421), 'os.chdir', 'os.chdir', (['swathDir'], {}), '(swathDir)\n', (1411, 1421), False, 'import os\n'), ((2313, 2328), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (2321, 2328), False, 'import os\n'), ((5203, 5403), 'contrib.alos2proc.alos2proc.rg_filter', 'rg_filter', (['self._insar.referenceSlc', '(1)', '[tmpSlc]', '[overlapbandwidth / referenceSwath.rangeSamplingRate]', '[(centerfreq - centerfreq1) / referenceSwath.rangeSamplingRate]', '(257)', '(2048)', '(0.1)', '(0)', '(0.0)'], {}), '(self._insar.referenceSlc, 1, [tmpSlc], [overlapbandwidth /\n referenceSwath.rangeSamplingRate], [(centerfreq - centerfreq1) /\n referenceSwath.rangeSamplingRate], 257, 2048, 0.1, 0, 0.0)\n', (5212, 5403), False, 'from contrib.alos2proc.alos2proc import rg_filter\n'), ((5458, 5498), 'os.path.isfile', 'os.path.isfile', (['self._insar.referenceSlc'], {}), '(self._insar.referenceSlc)\n', (5472, 5498), False, 'import os\n'), ((5572, 5616), 'os.remove', 'os.remove', (["(self._insar.referenceSlc + '.vrt')"], {}), 
"(self._insar.referenceSlc + '.vrt')\n", (5581, 5616), False, 'import os\n'), ((5631, 5675), 'os.remove', 'os.remove', (["(self._insar.referenceSlc + '.xml')"], {}), "(self._insar.referenceSlc + '.xml')\n", (5640, 5675), False, 'import os\n'), ((5697, 5721), 'isceobj.createSlcImage', 'isceobj.createSlcImage', ([], {}), '()\n', (5719, 5721), False, 'import isceobj\n'), ((5813, 5839), 'os.remove', 'os.remove', (["(tmpSlc + '.vrt')"], {}), "(tmpSlc + '.vrt')\n", (5822, 5839), False, 'import os\n'), ((5856, 5882), 'os.remove', 'os.remove', (["(tmpSlc + '.xml')"], {}), "(tmpSlc + '.xml')\n", (5865, 5882), False, 'import os\n'), ((5899, 5942), 'os.rename', 'os.rename', (['tmpSlc', 'self._insar.referenceSlc'], {}), '(tmpSlc, self._insar.referenceSlc)\n', (5908, 5942), False, 'import os\n'), ((6666, 6866), 'contrib.alos2proc.alos2proc.rg_filter', 'rg_filter', (['self._insar.secondarySlc', '(1)', '[tmpSlc]', '[overlapbandwidth / secondarySwath.rangeSamplingRate]', '[(centerfreq - centerfreq2) / secondarySwath.rangeSamplingRate]', '(257)', '(2048)', '(0.1)', '(0)', '(0.0)'], {}), '(self._insar.secondarySlc, 1, [tmpSlc], [overlapbandwidth /\n secondarySwath.rangeSamplingRate], [(centerfreq - centerfreq2) /\n secondarySwath.rangeSamplingRate], 257, 2048, 0.1, 0, 0.0)\n', (6675, 6866), False, 'from contrib.alos2proc.alos2proc import rg_filter\n'), ((6921, 6961), 'os.path.isfile', 'os.path.isfile', (['self._insar.secondarySlc'], {}), '(self._insar.secondarySlc)\n', (6935, 6961), False, 'import os\n'), ((7035, 7079), 'os.remove', 'os.remove', (["(self._insar.secondarySlc + '.vrt')"], {}), "(self._insar.secondarySlc + '.vrt')\n", (7044, 7079), False, 'import os\n'), ((7094, 7138), 'os.remove', 'os.remove', (["(self._insar.secondarySlc + '.xml')"], {}), "(self._insar.secondarySlc + '.xml')\n", (7103, 7138), False, 'import os\n'), ((7160, 7184), 'isceobj.createSlcImage', 'isceobj.createSlcImage', ([], {}), '()\n', (7182, 7184), False, 'import isceobj\n'), ((7276, 7302), 'os.remove', 
'os.remove', (["(tmpSlc + '.vrt')"], {}), "(tmpSlc + '.vrt')\n", (7285, 7302), False, 'import os\n'), ((7319, 7345), 'os.remove', 'os.remove', (["(tmpSlc + '.xml')"], {}), "(tmpSlc + '.xml')\n", (7328, 7345), False, 'import os\n'), ((7362, 7405), 'os.rename', 'os.rename', (['tmpSlc', 'self._insar.secondarySlc'], {}), '(tmpSlc, self._insar.secondarySlc)\n', (7371, 7405), False, 'import os\n'), ((9093, 9516), 'contrib.alos2proc.alos2proc.resamp', 'resamp', (['self._insar.secondarySlc', 'tmpSlc', '"""fake"""', '"""fake"""', 'outWidth', 'outLength', 'secondarySwath.prf', 'secondarySwath.dopplerVsPixel'], {'rgcoef': '[0.0, 1.0 / referenceSwath.rangeSamplingRate / (1.0 / secondarySwath.\n rangeSamplingRate) - 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]', 'azcoef': '[0.0, 0.0, 1.0 / referenceSwath.prf / (1.0 / secondarySwath.prf) - 1.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]', 'azpos_off': '(0.0)'}), "(self._insar.secondarySlc, tmpSlc, 'fake', 'fake', outWidth,\n outLength, secondarySwath.prf, secondarySwath.dopplerVsPixel, rgcoef=[\n 0.0, 1.0 / referenceSwath.rangeSamplingRate / (1.0 / secondarySwath.\n rangeSamplingRate) - 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n azcoef=[0.0, 0.0, 1.0 / referenceSwath.prf / (1.0 / secondarySwath.prf) -\n 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], azpos_off=0.0)\n", (9099, 9516), False, 'from contrib.alos2proc.alos2proc import resamp\n'), ((9574, 9614), 'os.path.isfile', 'os.path.isfile', (['self._insar.secondarySlc'], {}), '(self._insar.secondarySlc)\n', (9588, 9614), False, 'import os\n'), ((9688, 9732), 'os.remove', 'os.remove', (["(self._insar.secondarySlc + '.vrt')"], {}), "(self._insar.secondarySlc + '.vrt')\n", (9697, 9732), False, 'import os\n'), ((9747, 9791), 'os.remove', 'os.remove', (["(self._insar.secondarySlc + '.xml')"], {}), "(self._insar.secondarySlc + '.xml')\n", (9756, 9791), False, 'import os\n'), ((9813, 9837), 'isceobj.createSlcImage', 'isceobj.createSlcImage', ([], {}), '()\n', (9835, 9837), False, 'import 
isceobj\n'), ((9929, 9955), 'os.remove', 'os.remove', (["(tmpSlc + '.vrt')"], {}), "(tmpSlc + '.vrt')\n", (9938, 9955), False, 'import os\n'), ((9972, 9998), 'os.remove', 'os.remove', (["(tmpSlc + '.xml')"], {}), "(tmpSlc + '.xml')\n", (9981, 9998), False, 'import os\n'), ((10015, 10058), 'os.rename', 'os.rename', (['tmpSlc', 'self._insar.secondarySlc'], {}), '(tmpSlc, self._insar.secondarySlc)\n', (10024, 10058), False, 'import os\n'), ((10401, 10420), 'numpy.arange', 'np.arange', (['outWidth'], {}), '(outWidth)\n', (10410, 10420), True, 'import numpy as np\n'), ((10569, 10623), 'numpy.polyval', 'np.polyval', (['secondarySwath.dopplerVsPixel[::-1]', 'index'], {}), '(secondarySwath.dopplerVsPixel[::-1], index)\n', (10579, 10623), True, 'import numpy as np\n'), ((10644, 10670), 'numpy.polyfit', 'np.polyfit', (['index2', 'dop', '(3)'], {}), '(index2, dop, 3)\n', (10654, 10670), True, 'import numpy as np\n'), ((10772, 10832), 'numpy.polyval', 'np.polyval', (['secondarySwath.azimuthFmrateVsPixel[::-1]', 'index'], {}), '(secondarySwath.azimuthFmrateVsPixel[::-1], index)\n', (10782, 10832), True, 'import numpy as np\n'), ((10853, 10884), 'numpy.polyfit', 'np.polyfit', (['index2', 'azfmrate', '(3)'], {}), '(index2, azfmrate, 3)\n', (10863, 10884), True, 'import numpy as np\n'), ((12527, 12614), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(referenceSwath.numberOfLines * 0.5 / referenceSwath.prf)'}), '(seconds=referenceSwath.numberOfLines * 0.5 /\n referenceSwath.prf)\n', (12545, 12614), False, 'import datetime\n'), ((13524, 13578), 'numpy.polyval', 'np.polyval', (['secondarySwath.dopplerVsPixel[::-1]', 'index'], {}), '(secondarySwath.dopplerVsPixel[::-1], index)\n', (13534, 13578), True, 'import numpy as np\n'), ((13599, 13632), 'numpy.polyfit', 'np.polyfit', (['(index - rgoff)', 'dop', '(3)'], {}), '(index - rgoff, dop, 3)\n', (13609, 13632), True, 'import numpy as np\n'), ((14256, 14296), 'os.path.isfile', 'os.path.isfile', 
(['self._insar.referenceSlc'], {}), '(self._insar.referenceSlc)\n', (14270, 14296), False, 'import os\n'), ((14370, 14414), 'os.remove', 'os.remove', (["(self._insar.referenceSlc + '.vrt')"], {}), "(self._insar.referenceSlc + '.vrt')\n", (14379, 14414), False, 'import os\n'), ((14429, 14473), 'os.remove', 'os.remove', (["(self._insar.referenceSlc + '.xml')"], {}), "(self._insar.referenceSlc + '.xml')\n", (14438, 14473), False, 'import os\n'), ((14495, 14519), 'isceobj.createSlcImage', 'isceobj.createSlcImage', ([], {}), '()\n', (14517, 14519), False, 'import isceobj\n'), ((14611, 14637), 'os.remove', 'os.remove', (["(tmpSlc + '.vrt')"], {}), "(tmpSlc + '.vrt')\n", (14620, 14637), False, 'import os\n'), ((14654, 14680), 'os.remove', 'os.remove', (["(tmpSlc + '.xml')"], {}), "(tmpSlc + '.xml')\n", (14663, 14680), False, 'import os\n'), ((14697, 14740), 'os.rename', 'os.rename', (['tmpSlc', 'self._insar.referenceSlc'], {}), '(tmpSlc, self._insar.referenceSlc)\n', (14706, 14740), False, 'import os\n'), ((15430, 15484), 'numpy.polyval', 'np.polyval', (['referenceSwath.dopplerVsPixel[::-1]', 'index'], {}), '(referenceSwath.dopplerVsPixel[::-1], index)\n', (15440, 15484), True, 'import numpy as np\n'), ((15505, 15538), 'numpy.polyfit', 'np.polyfit', (['(index + rgoff)', 'dop', '(3)'], {}), '(index + rgoff, dop, 3)\n', (15515, 15538), True, 'import numpy as np\n'), ((16163, 16203), 'os.path.isfile', 'os.path.isfile', (['self._insar.secondarySlc'], {}), '(self._insar.secondarySlc)\n', (16177, 16203), False, 'import os\n'), ((16277, 16321), 'os.remove', 'os.remove', (["(self._insar.secondarySlc + '.vrt')"], {}), "(self._insar.secondarySlc + '.vrt')\n", (16286, 16321), False, 'import os\n'), ((16336, 16380), 'os.remove', 'os.remove', (["(self._insar.secondarySlc + '.xml')"], {}), "(self._insar.secondarySlc + '.xml')\n", (16345, 16380), False, 'import os\n'), ((16402, 16426), 'isceobj.createSlcImage', 'isceobj.createSlcImage', ([], {}), '()\n', (16424, 16426), False, 'import 
isceobj\n'), ((16518, 16544), 'os.remove', 'os.remove', (["(tmpSlc + '.vrt')"], {}), "(tmpSlc + '.vrt')\n", (16527, 16544), False, 'import os\n'), ((16561, 16587), 'os.remove', 'os.remove', (["(tmpSlc + '.xml')"], {}), "(tmpSlc + '.xml')\n", (16570, 16587), False, 'import os\n'), ((16604, 16647), 'os.rename', 'os.rename', (['tmpSlc', 'self._insar.secondarySlc'], {}), '(tmpSlc, self._insar.secondarySlc)\n', (16613, 16647), False, 'import os\n'), ((5520, 5555), 'os.remove', 'os.remove', (['self._insar.referenceSlc'], {}), '(self._insar.referenceSlc)\n', (5529, 5555), False, 'import os\n'), ((6983, 7018), 'os.remove', 'os.remove', (['self._insar.secondarySlc'], {}), '(self._insar.secondarySlc)\n', (6992, 7018), False, 'import os\n'), ((9636, 9671), 'os.remove', 'os.remove', (['self._insar.secondarySlc'], {}), '(self._insar.secondarySlc)\n', (9645, 9671), False, 'import os\n'), ((13452, 13493), 'numpy.arange', 'np.arange', (['referenceSwath.numberOfSamples'], {}), '(referenceSwath.numberOfSamples)\n', (13461, 13493), True, 'import numpy as np\n'), ((14318, 14353), 'os.remove', 'os.remove', (['self._insar.referenceSlc'], {}), '(self._insar.referenceSlc)\n', (14327, 14353), False, 'import os\n'), ((15358, 15399), 'numpy.arange', 'np.arange', (['secondarySwath.numberOfSamples'], {}), '(secondarySwath.numberOfSamples)\n', (15367, 15399), True, 'import numpy as np\n'), ((16225, 16260), 'os.remove', 'os.remove', (['self._insar.secondarySlc'], {}), '(self._insar.secondarySlc)\n', (16234, 16260), False, 'import os\n'), ((20945, 21030), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.complex64', 'count': '(numberOfLines3 * swath.numberOfSamples)'}), '(f, dtype=np.complex64, count=numberOfLines3 * swath.numberOfSamples\n )\n', (20956, 21030), True, 'import numpy as np\n'), ((10445, 10464), 'numpy.arange', 'np.arange', (['outWidth'], {}), '(outWidth)\n', (10454, 10464), True, 'import numpy as np\n'), ((20890, 20912), 'numpy.dtype', 'np.dtype', (['np.complex64'], {}), 
'(np.complex64)\n', (20898, 20912), True, 'import numpy as np\n')] |
import numpy as np
from LinConGauss import LinearConstraints
from LinConGauss.multilevel_splitting import SubsetSimulation
# define some linear constraints: n_lc random hyperplanes in n_dim dimensions
n_lc = 5
n_dim = 3
np.random.seed(0) # because sometimes it is hard to find an initial point in the randomly drawn domain.
# LinearConstraints(A, b) with Gaussian A and b -- presumably the domain is
# the region where all constraints are satisfied; confirm against LinConGauss docs
lincon = LinearConstraints(2 * np.random.randn(n_lc, n_dim), np.random.randn(n_lc, 1))
# run subset simulation (16 samples per level, level fraction 0.5) once at
# module level so the test functions below can inspect the tracker
subset_simulator = SubsetSimulation(lincon, 16, 0.5)
subset_simulator.run(verbose=False)
def subset_finds_domain():
    """Check that subset simulation ends with an initial sample inside the target domain."""
    final_init = subset_simulator.tracker.x_inits()[:, -1]
    assert lincon.integration_domain(final_init) == 1.0
def shifts_larger_zero():
    """Check that every shift produced by subset simulation is non-negative."""
    shifts = subset_simulator.tracker.shift_sequence
    assert np.all(shifts >= 0.0)
| [
"numpy.random.seed",
"numpy.all",
"LinConGauss.multilevel_splitting.SubsetSimulation",
"numpy.random.randn"
] | [((177, 194), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (191, 194), True, 'import numpy as np\n'), ((388, 421), 'LinConGauss.multilevel_splitting.SubsetSimulation', 'SubsetSimulation', (['lincon', '(16)', '(0.5)'], {}), '(lincon, 16, 0.5)\n', (404, 421), False, 'from LinConGauss.multilevel_splitting import SubsetSimulation\n'), ((342, 366), 'numpy.random.randn', 'np.random.randn', (['n_lc', '(1)'], {}), '(n_lc, 1)\n', (357, 366), True, 'import numpy as np\n'), ((775, 829), 'numpy.all', 'np.all', (['(subset_simulator.tracker.shift_sequence >= 0.0)'], {}), '(subset_simulator.tracker.shift_sequence >= 0.0)\n', (781, 829), True, 'import numpy as np\n'), ((312, 340), 'numpy.random.randn', 'np.random.randn', (['n_lc', 'n_dim'], {}), '(n_lc, n_dim)\n', (327, 340), True, 'import numpy as np\n')] |
#!/usr/bin/env python3 -u
import chainer
import chainer.functions as F
import numpy as np
import scipy.linalg
from scipy.optimize import fmin_ncg
import os
import time
import sys
from tqdm import tqdm
is_print = True
def _print(*args, **kwargs):
if is_print:
print(*args, **kwargs, file=sys.stderr)
cashed = {}
class GeneralModel(chainer.Chain):
    # Influence-function style relevance scoring on top of a chainer model:
    # flattens parameter gradients, approximates (inverse) Hessian-vector
    # products via Newton-CG (scipy fmin_ncg) or the LiSSA recursion, and
    # ranks training examples by a similarity between test and train features.
    # Subclasses must provide __call__ returning a scalar loss Variable;
    # self.device is read throughout -- presumably set by chainer device
    # placement in the subclass/driver (TODO confirm).
    def __init__(self, out_dir):
        # out_dir: directory holding all cached .npy feature/matrix files
        super(GeneralModel, self).__init__()
        self.out_dir = out_dir
    def get_flat_param(self):
        """Return all model parameters concatenated into one flat Variable."""
        return F.hstack([F.flatten(p) for p in self.params()])
    def get_flat_param_grad(self):
        """Return all parameter gradient arrays (.grad) as one flat Variable."""
        return F.hstack([F.flatten(p.grad) for p in self.params()])
    def get_flat_param_grad_var(self):
        """Return all gradient Variables (.grad_var) flat, so they can be backpropped again."""
        return F.hstack([F.flatten(p.grad_var) for p in self.params()])
    def hessian_vector_val(self, v):
        """Return (H + damping*I) v averaged over self.train_data, minibatch by minibatch.

        Requires self.train_data, self.bathsize_for_hvp (sic) and self.damping
        to be set beforehand (see get_inverse_hvp_cg).
        """
        train_iter = chainer.iterators.SerialIterator(self.train_data, batch_size=self.bathsize_for_hvp, repeat=False)
        v_conv = chainer.dataset.to_device(self.device, v)
        hvp = 0
        for batch in train_iter:
            hvp_tmp = self.minibatch_hessian_vector_val(v_conv, batch)
            # weight each minibatch HVP by its share of the training set
            hvp += (hvp_tmp * len(batch) / len(self.train_data))
        return hvp + self.damping * v
    def minibatch_hessian_vector_val(self, v, batch):
        """Hessian-vector product of the loss on one minibatch with vector v."""
        v = chainer.dataset.to_device(self.device, v)
        loss = self.__call__(*self.convertor(batch, self.device), enable_double_backprop=True)
        hvp = self.get_hvp(loss, v)
        return hvp
    def get_hvp(self, y, v):
        """Compute H v for scalar loss y via double backprop; returns a float64 CPU array."""
        # First backprop: gradients of y w.r.t. parameters, kept differentiable
        self.cleargrads()
        y.backward(enable_double_backprop=True)
        grads = self.get_flat_param_grad_var()
        inner_product = F.sum(grads * v)
        # Second backprop: d(grad . v)/d(theta) = H v
        self.cleargrads()
        grads.cleargrad()
        inner_product.backward()
        hvp = self.get_flat_param_grad()
        return chainer.cuda.to_cpu(hvp.data).astype(np.float64)
    def get_fmin_loss_fn(self, v, scale):
        """Return f(x) = scale * (0.5 x^T H x - v^T x); its minimizer is H^-1 v."""
        def get_fmin_loss(x):
            hessian_vector_val = self.hessian_vector_val(x)
            return (0.5 * np.dot(hessian_vector_val, x) - np.dot(v, x)) * scale
        return get_fmin_loss
    def get_fmin_grad_fn(self, v, scale):
        """Return the gradient scale * (H x - v) of the CG objective."""
        def get_fmin_grad(x):
            hessian_vector_val = self.hessian_vector_val(x)
            return (hessian_vector_val - v) * scale
        return get_fmin_grad
    def get_fmin_hvp_fn(self, scale):
        """Return the Hessian-vector product scale * H p of the CG objective."""
        def get_fmin_hvp(x, p):
            hessian_vector_val = self.hessian_vector_val(p)
            return hessian_vector_val * scale
        return get_fmin_hvp
    def get_cg_callback(self, v, verbose, scale):
        """Return a per-iteration callback for fmin_ncg that logs the objective value."""
        fmin_loss_fn = self.get_fmin_loss_fn(v, scale)
        def cg_callback(x):
            # x is current params
            if verbose:
                # _print('Function value: %s' % fmin_loss_fn(x))
                # quad, lin = fmin_loss_split(x)
                # _print('Split function value: %s, %s' % (quad, lin))
                _print(fmin_loss_fn(x))
        return cg_callback
    def get_inverse_hvp_cg(self, v, verbose, tol=1e-8, maxiter=100, batchsize=100, damping=0., scale=1e20):
        """Approximate H^-1 v by Newton-CG on 0.5 x^T H x - v^T x.

        scale multiplies the whole objective (the argmin is unchanged);
        damping is stored on self and applied inside hessian_vector_val.
        """
        self.bathsize_for_hvp = batchsize
        self.damping = damping
        fmin_loss_fn = self.get_fmin_loss_fn(v, scale)
        fmin_grad_fn = self.get_fmin_grad_fn(v, scale)
        fmin_hvp_fn = self.get_fmin_hvp_fn(scale)
        cg_callback = self.get_cg_callback(v, verbose, scale)
        fmin_results = fmin_ncg(
            f=fmin_loss_fn,
            x0=v,
            fprime=fmin_grad_fn,
            fhess_p=fmin_hvp_fn,
            callback=cg_callback,
            avextol=tol,
            maxiter=maxiter)
        return fmin_results
    def get_hessian(self, y):
        """Materialize the full Hessian of scalar loss y (one backprop per row)."""
        # First backprop
        self.cleargrads()
        y.backward(enable_double_backprop=True)
        grads = self.get_flat_param_grad_var()
        hessian_list = []
        for g in tqdm(grads):
            # backprop one gradient component to get one Hessian row
            self.cleargrads()
            g.backward()
            hessian_list.append(chainer.cuda.to_cpu(self.get_flat_param_grad().data))
        hessian = np.vstack(hessian_list)
        return hessian
    def get_inverse_hvp_lissa(self, v,
                              batch_size=None,
                              scale=10, damping=0.0, num_samples=1, recursion_depth=1000):
        """Approximate H^-1 v with the LiSSA recursion, averaged over num_samples runs."""
        inverse_hvp = 0
        print_iter = 100
        for i in range(num_samples):
            cur_estimate = v
            train_iter = chainer.iterators.SerialIterator(self.train_data, batch_size=batch_size, repeat=True)
            for j, batch in enumerate(train_iter):
                hessian_vector_val = self.minibatch_hessian_vector_val(cur_estimate, batch)
                cur_estimate = v + (1 - damping) * cur_estimate - hessian_vector_val / scale
                # Update: v + (I - Hessian_at_x) * cur_estimate
                if (j % print_iter == 0) or (j == recursion_depth - 1):
                    _print(j, np.linalg.norm(cur_estimate))
                    # _print("Recursion at depth %s: norm is %.8lf" % (j, np.linalg.norm(np.concatenate(cur_estimate))))
                if j >= recursion_depth:
                    break
            inverse_hvp += (cur_estimate / scale)
        inverse_hvp = inverse_hvp / num_samples
        return inverse_hvp
    def get_inverse_hvp(self, v, approx_type='cg', approx_params=None, verbose=True):
        """Dispatch inverse-HVP computation to the 'cg' or 'lissa' backend."""
        # NOTE(review): leaving approx_params=None would raise on **approx_params
        # in either branch; callers are expected to pass a dict.
        assert approx_type in ['cg', 'lissa']
        if approx_type == 'lissa':
            return self.get_inverse_hvp_lissa(v, **approx_params)
        elif approx_type == 'cg':
            return self.get_inverse_hvp_cg(v, verbose, **approx_params)
    def get_grad_loss(self, test_data):
        """Gradient of the loss on one example, as a flat CPU array."""
        loss = self.__call__(*self.convertor([test_data], self.device))
        self.cleargrads()
        loss.backward()
        grad = self.get_flat_param_grad()
        return chainer.cuda.to_cpu(grad.data)
    def get_relevance_by_grad(self, test_data, train_data, convertor, matrix='none',
                              approx_type='cg', approx_params={}, force_refresh=False,
                              test_description='', sim_func=np.dot):
        """Score every training example by sim_func(test feature, train feature).

        The test feature is the test-loss gradient, optionally multiplied by an
        (approximate) inverse Hessian/Fisher chosen via `matrix`; train features
        are per-example loss gradients (transformed the same way for the
        'inv_sqrt-*' variants).  Intermediate matrices/features are cached both
        on disk (.npy under self.out_dir) and in the module-global `cashed`.
        Returns a list of scores, one per training example.
        """
        # NOTE(review): approx_params={} is a mutable default; benign here since
        # it is only read via .get().
        self.train_data = train_data
        self.convertor = convertor
        if matrix == 'approx_hessian':
            # classic influence function: s_test = H^-1 grad(test loss),
            # approximated iteratively (cg or lissa), cached on disk only
            start_time = time.time()
            test_grad = self.get_grad_loss(test_data)
            test_grad = test_grad.astype(np.float64)
            approx_filename = os.path.join(self.out_dir, '{}-{}.npy'.format(
                approx_type, test_description))
            if os.path.exists(approx_filename) and force_refresh == False:
                test_feature = np.load(approx_filename)
                _print('Loaded from {}'.format(approx_filename))
            else:
                test_feature = self.get_inverse_hvp(
                    test_grad,
                    approx_type,
                    approx_params)
                np.save(approx_filename, test_feature)
                _print('Saved to {}'.format(approx_filename))
            duration = time.time() - start_time
            _print('Inverse HVP took {} sec'.format(duration))
        elif matrix == 'inv-hessian':
            # exact (damped) Hessian inverse, built once and cached
            start_time = time.time()
            test_grad = self.get_grad_loss(test_data)
            test_grad = test_grad.astype(np.float64)
            hess_filename = os.path.join(self.out_dir, 'inv-hessian-matrix.npy')
            if hess_filename in cashed:
                inv_hessian = cashed[hess_filename]
            elif os.path.exists(hess_filename) and force_refresh == False:
                inv_hessian = np.load(hess_filename)
                _print('Loaded from {}'.format(hess_filename))
            else:
                # train_iter = chainer.iterators.SerialIterator(train_data, batch_size=len(train_data), repeat=False)
                loss = self.__call__(*convertor(train_data, self.device), enable_double_backprop=True)
                hessian = self.get_hessian(loss)
                # damping is scaled by the mean absolute Hessian entry
                damped_hessian = hessian + np.mean(np.abs(hessian)) * approx_params.get('damping', 0) * np.identity(hessian.shape[0])
                inv_hessian = np.linalg.inv(damped_hessian)
                np.save(hess_filename, inv_hessian)
                _print('Saved to {}'.format(hess_filename))
            if hess_filename not in cashed:
                cashed[hess_filename] = inv_hessian
            test_feature = np.matmul(inv_hessian, test_grad)
            duration = time.time() - start_time
            _print('took {} sec'.format(duration))
        elif matrix == 'inv_sqrt-hessian':
            # symmetric split: H^-1/2 applied to BOTH test and train gradients
            # (train side handled below); requires inv-hessian-matrix.npy to exist
            start_time = time.time()
            test_grad = self.get_grad_loss(test_data)
            test_grad = test_grad.astype(np.float64)
            hess_filename = os.path.join(self.out_dir, 'inv_sqrt-hessian-matrix.npy')
            if hess_filename in cashed:
                inv_sqrt_hessian = cashed[hess_filename]
            elif os.path.exists(hess_filename) and force_refresh == False:
                inv_sqrt_hessian = np.load(hess_filename)
                _print('Loaded from {}'.format(hess_filename))
            else:
                # train_iter = chainer.iterators.SerialIterator(train_data, batch_size=len(train_data), repeat=False)
                inv_hessian_fn = os.path.join(self.out_dir, 'inv-hessian-matrix.npy')
                inv_hessian = np.load(inv_hessian_fn)
                # sqrtm may return a complex array; keep the real part only
                inv_sqrt_hessian = scipy.linalg.sqrtm(inv_hessian).real
                np.save(hess_filename, inv_sqrt_hessian)
                inv_sqrt_hessian = np.load(hess_filename)
                _print('Saved to {}'.format(hess_filename))
            if hess_filename not in cashed:
                cashed[hess_filename] = inv_sqrt_hessian
            test_feature = np.matmul(inv_sqrt_hessian, test_grad)
            duration = time.time() - start_time
            _print('took {} sec'.format(duration))
        elif matrix == 'inv-fisher':
            # Fisher approximation built from the outer product of the average
            # train gradient; requires train-grad-on-loss-all.npy to exist
            start_time = time.time()
            test_grad = self.get_grad_loss(test_data)
            test_grad = test_grad.astype(np.float64)
            fisher_filename = os.path.join(self.out_dir, 'inv-fisher-matrix.npy')
            if fisher_filename in cashed:
                inv_fisher = cashed[fisher_filename]
            elif os.path.exists(fisher_filename) and force_refresh == False:
                inv_fisher = np.load(fisher_filename)
                _print('Loaded from {}'.format(fisher_filename))
            else:
                # train_iter = chainer.iterators.SerialIterator(train_data, batch_size=len(train_data), repeat=False)
                train_grad_filename = os.path.join(self.out_dir, 'train-grad-on-loss-all.npy')
                if not os.path.exists(train_grad_filename):
                    _print('must calculate train grads first')
                    sys.exit(1)
                train_grads = np.load(train_grad_filename)
                avg_grads = np.average(train_grads, axis=0)
                # outer product avg_grads avg_grads^T, built row by row
                fisher_matrix = []
                for g in avg_grads:
                    fisher_matrix.append(g * avg_grads)
                fisher = np.vstack(fisher_matrix)
                damped_fisher = fisher + np.mean(np.abs(fisher)) * approx_params.get('damping', 0) * np.identity(fisher.shape[0])
                inv_fisher = np.linalg.inv(damped_fisher)
                np.save(fisher_filename, inv_fisher)
                _print('Saved to {}'.format(fisher_filename))
            if fisher_filename not in cashed:
                cashed[fisher_filename] = inv_fisher
            test_feature = np.matmul(inv_fisher, test_grad)
            duration = time.time() - start_time
            _print('took {} sec'.format(duration))
        elif matrix == 'inv_sqrt-fisher':
            # symmetric Fisher split, analogous to 'inv_sqrt-hessian';
            # requires inv-fisher-matrix.npy to exist
            start_time = time.time()
            test_grad = self.get_grad_loss(test_data)
            test_grad = test_grad.astype(np.float64)
            hess_filename = os.path.join(self.out_dir, 'inv_sqrt-fisher-matrix.npy')
            if hess_filename in cashed:
                inv_sqrt_fisher = cashed[hess_filename]
            elif os.path.exists(hess_filename) and force_refresh == False:
                inv_sqrt_fisher = np.load(hess_filename)
                _print('Loaded from {}'.format(hess_filename))
            else:
                # train_iter = chainer.iterators.SerialIterator(train_data, batch_size=len(train_data), repeat=False)
                inv_fisher_fn = os.path.join(self.out_dir, 'inv-fisher-matrix.npy')
                inv_fisher = np.load(inv_fisher_fn)
                inv_sqrt_fisher = scipy.linalg.sqrtm(inv_fisher).real
                np.save(hess_filename, inv_sqrt_fisher)
                inv_sqrt_fisher = np.load(hess_filename)
                _print('Saved to {}'.format(hess_filename))
            if hess_filename not in cashed:
                cashed[hess_filename] = inv_sqrt_fisher
            test_feature = np.matmul(inv_sqrt_fisher, test_grad)
            duration = time.time() - start_time
            _print('took {} sec'.format(duration))
        elif matrix == 'none':
            # plain gradient similarity, no curvature correction
            test_grad = self.get_grad_loss(test_data)
            test_grad = test_grad.astype(np.float64)
            test_feature = test_grad
        else:
            sys.exit(1)
        start_time = time.time()
        predicted_loss_diffs = []
        # build (or load) the per-example train features matching the chosen matrix
        if matrix == 'inv_sqrt-hessian':
            train_feature_filename = os.path.join(self.out_dir, 'train-grad-on-loss-all_inv_sqrt_hessian.npy')
            if train_feature_filename in cashed:
                train_features = cashed[train_feature_filename]
            elif os.path.exists(train_feature_filename):
                train_features = np.load(train_feature_filename)
                _print('Loaded train grads from {}'.format(train_feature_filename))
            else:
                _train_feature_filename = os.path.join(self.out_dir, 'train-grad-on-loss-all.npy')
                train_features = np.load(_train_feature_filename)
                train_features = np.matmul(inv_sqrt_hessian, train_features.T).T
                np.save(train_feature_filename, train_features)
                _print('Saved train grads to {}'.format(train_feature_filename))
            if train_feature_filename not in cashed:
                cashed[train_feature_filename] = train_features
        elif matrix == 'inv_sqrt-fisher':
            train_feature_filename = os.path.join(self.out_dir, 'train-grad-on-loss-all_inv_sqrt_fisher.npy')
            if train_feature_filename in cashed:
                train_features = cashed[train_feature_filename]
            elif os.path.exists(train_feature_filename):
                train_features = np.load(train_feature_filename)
                _print('Loaded train grads from {}'.format(train_feature_filename))
            else:
                _train_feature_filename = os.path.join(self.out_dir, 'train-grad-on-loss-all.npy')
                train_features = np.load(_train_feature_filename)
                train_features = np.matmul(inv_sqrt_fisher, train_features.T).T
                np.save(train_feature_filename, train_features)
                _print('Saved train grads to {}'.format(train_feature_filename))
            if train_feature_filename not in cashed:
                cashed[train_feature_filename] = train_features
        else:
            train_feature_filename = os.path.join(self.out_dir, 'train-grad-on-loss-all.npy')
            if train_feature_filename in cashed:
                train_features = cashed[train_feature_filename]
            elif os.path.exists(train_feature_filename):
                train_features = np.load(train_feature_filename)
                _print('Loaded train grads from {}'.format(train_feature_filename))
            else:
                # compute one loss gradient per training example (slow path)
                train_feature_list = []
                for counter, remove_data in enumerate(tqdm(train_data)):
                    train_feature = self.get_grad_loss(remove_data)
                    train_feature_list.append(train_feature)
                train_features = np.vstack(train_feature_list)
                np.save(train_feature_filename, train_features)
                _print('Saved train grads to {}'.format(train_feature_filename))
            if train_feature_filename not in cashed:
                cashed[train_feature_filename] = train_features
        for counter, train_feature in enumerate(tqdm(train_features, leave=False)):
            predicted_loss_diffs.append(sim_func(test_feature, train_feature))
        duration = time.time() - start_time
        _print('Multiplying by all train examples took {} sec'.format(duration))
        return predicted_loss_diffs
    def get_relevance_by_hsim(self, test_data, train_data, convertor, h_fn, batch_size=100, sim_func=np.dot):
        """Score training examples by similarity of hidden representations.

        h_fn selects the feature extractor ('top_h', 'all_h' or 'x'), which the
        subclass must provide as get_top_h / get_all_h / get_x.  Train features
        are cached on disk and in the module-global `cashed`.  Returns a list
        of sim_func(test feature, train feature) scores.
        """
        if h_fn == 'top_h':
            h_func = self.get_top_h
        elif h_fn == 'all_h':
            h_func = self.get_all_h
        elif h_fn == 'x':
            h_func = self.get_x
        # elif h_fn == 'residual':
        #     h_func = self.get_residual
        else:
            sys.exit(1)
        test_feature = h_func(*convertor([test_data], self.device))[0]
        feature_sims = []
        train_feature_filename = os.path.join(self.out_dir, 'train-{}-all.npy'.format(h_fn))
        if train_feature_filename in cashed:
            train_features = cashed[train_feature_filename]
        elif os.path.exists(train_feature_filename):
            train_features = np.load(train_feature_filename)
            _print('Loaded train features from {}'.format(train_feature_filename))
        else:
            train_feature_list = []
            train_iter = chainer.iterators.SerialIterator(train_data, batch_size=batch_size, repeat=False, shuffle=False)
            for batch in tqdm(train_iter):
                features = h_func(*convertor(batch, self.device))
                train_feature_list.append(features)
            train_features = np.vstack(train_feature_list)
            np.save(train_feature_filename, train_features)
            _print('Saved train features to {}'.format(train_feature_filename))
        if train_feature_filename not in cashed:
            cashed[train_feature_filename] = train_features
        for counter, train_feature in enumerate(tqdm(train_features, leave=False)):
            feature_sims.append(sim_func(test_feature, train_feature))
        return feature_sims
| [
"numpy.load",
"numpy.abs",
"numpy.linalg.norm",
"chainer.iterators.SerialIterator",
"os.path.join",
"chainer.functions.flatten",
"os.path.exists",
"numpy.identity",
"chainer.cuda.to_cpu",
"chainer.dataset.to_device",
"tqdm.tqdm",
"numpy.save",
"numpy.average",
"chainer.functions.sum",
"n... | [((845, 947), 'chainer.iterators.SerialIterator', 'chainer.iterators.SerialIterator', (['self.train_data'], {'batch_size': 'self.bathsize_for_hvp', 'repeat': '(False)'}), '(self.train_data, batch_size=self.\n bathsize_for_hvp, repeat=False)\n', (877, 947), False, 'import chainer\n'), ((960, 1001), 'chainer.dataset.to_device', 'chainer.dataset.to_device', (['self.device', 'v'], {}), '(self.device, v)\n', (985, 1001), False, 'import chainer\n'), ((1294, 1335), 'chainer.dataset.to_device', 'chainer.dataset.to_device', (['self.device', 'v'], {}), '(self.device, v)\n', (1319, 1335), False, 'import chainer\n'), ((1688, 1704), 'chainer.functions.sum', 'F.sum', (['(grads * v)'], {}), '(grads * v)\n', (1693, 1704), True, 'import chainer.functions as F\n'), ((3467, 3595), 'scipy.optimize.fmin_ncg', 'fmin_ncg', ([], {'f': 'fmin_loss_fn', 'x0': 'v', 'fprime': 'fmin_grad_fn', 'fhess_p': 'fmin_hvp_fn', 'callback': 'cg_callback', 'avextol': 'tol', 'maxiter': 'maxiter'}), '(f=fmin_loss_fn, x0=v, fprime=fmin_grad_fn, fhess_p=fmin_hvp_fn,\n callback=cg_callback, avextol=tol, maxiter=maxiter)\n', (3475, 3595), False, 'from scipy.optimize import fmin_ncg\n'), ((3928, 3939), 'tqdm.tqdm', 'tqdm', (['grads'], {}), '(grads)\n', (3932, 3939), False, 'from tqdm import tqdm\n'), ((4100, 4123), 'numpy.vstack', 'np.vstack', (['hessian_list'], {}), '(hessian_list)\n', (4109, 4123), True, 'import numpy as np\n'), ((5865, 5895), 'chainer.cuda.to_cpu', 'chainer.cuda.to_cpu', (['grad.data'], {}), '(grad.data)\n', (5884, 5895), False, 'import chainer\n'), ((13214, 13225), 'time.time', 'time.time', ([], {}), '()\n', (13223, 13225), False, 'import time\n'), ((4467, 4556), 'chainer.iterators.SerialIterator', 'chainer.iterators.SerialIterator', (['self.train_data'], {'batch_size': 'batch_size', 'repeat': '(True)'}), '(self.train_data, batch_size=batch_size,\n repeat=True)\n', (4499, 4556), False, 'import chainer\n'), ((6232, 6243), 'time.time', 'time.time', ([], {}), '()\n', (6241, 6243), False, 
'import time\n'), ((13340, 13413), 'os.path.join', 'os.path.join', (['self.out_dir', '"""train-grad-on-loss-all_inv_sqrt_hessian.npy"""'], {}), "(self.out_dir, 'train-grad-on-loss-all_inv_sqrt_hessian.npy')\n", (13352, 13413), False, 'import os\n'), ((16324, 16357), 'tqdm.tqdm', 'tqdm', (['train_features'], {'leave': '(False)'}), '(train_features, leave=False)\n', (16328, 16357), False, 'from tqdm import tqdm\n'), ((16459, 16470), 'time.time', 'time.time', ([], {}), '()\n', (16468, 16470), False, 'import time\n'), ((17327, 17365), 'os.path.exists', 'os.path.exists', (['train_feature_filename'], {}), '(train_feature_filename)\n', (17341, 17365), False, 'import os\n'), ((18201, 18234), 'tqdm.tqdm', 'tqdm', (['train_features'], {'leave': '(False)'}), '(train_features, leave=False)\n', (18205, 18234), False, 'from tqdm import tqdm\n'), ((531, 543), 'chainer.functions.flatten', 'F.flatten', (['p'], {}), '(p)\n', (540, 543), True, 'import chainer.functions as F\n'), ((630, 647), 'chainer.functions.flatten', 'F.flatten', (['p.grad'], {}), '(p.grad)\n', (639, 647), True, 'import chainer.functions as F\n'), ((738, 759), 'chainer.functions.flatten', 'F.flatten', (['p.grad_var'], {}), '(p.grad_var)\n', (747, 759), True, 'import chainer.functions as F\n'), ((1874, 1903), 'chainer.cuda.to_cpu', 'chainer.cuda.to_cpu', (['hvp.data'], {}), '(hvp.data)\n', (1893, 1903), False, 'import chainer\n'), ((6489, 6520), 'os.path.exists', 'os.path.exists', (['approx_filename'], {}), '(approx_filename)\n', (6503, 6520), False, 'import os\n'), ((6580, 6604), 'numpy.load', 'np.load', (['approx_filename'], {}), '(approx_filename)\n', (6587, 6604), True, 'import numpy as np\n'), ((6856, 6894), 'numpy.save', 'np.save', (['approx_filename', 'test_feature'], {}), '(approx_filename, test_feature)\n', (6863, 6894), True, 'import numpy as np\n'), ((6981, 6992), 'time.time', 'time.time', ([], {}), '()\n', (6990, 6992), False, 'import time\n'), ((7133, 7144), 'time.time', 'time.time', ([], {}), '()\n', 
(7142, 7144), False, 'import time\n'), ((7282, 7334), 'os.path.join', 'os.path.join', (['self.out_dir', '"""inv-hessian-matrix.npy"""'], {}), "(self.out_dir, 'inv-hessian-matrix.npy')\n", (7294, 7334), False, 'import os\n'), ((8337, 8370), 'numpy.matmul', 'np.matmul', (['inv_hessian', 'test_grad'], {}), '(inv_hessian, test_grad)\n', (8346, 8370), True, 'import numpy as np\n'), ((13545, 13583), 'os.path.exists', 'os.path.exists', (['train_feature_filename'], {}), '(train_feature_filename)\n', (13559, 13583), False, 'import os\n'), ((14341, 14413), 'os.path.join', 'os.path.join', (['self.out_dir', '"""train-grad-on-loss-all_inv_sqrt_fisher.npy"""'], {}), "(self.out_dir, 'train-grad-on-loss-all_inv_sqrt_fisher.npy')\n", (14353, 14413), False, 'import os\n'), ((15312, 15368), 'os.path.join', 'os.path.join', (['self.out_dir', '"""train-grad-on-loss-all.npy"""'], {}), "(self.out_dir, 'train-grad-on-loss-all.npy')\n", (15324, 15368), False, 'import os\n'), ((17396, 17427), 'numpy.load', 'np.load', (['train_feature_filename'], {}), '(train_feature_filename)\n', (17403, 17427), True, 'import numpy as np\n'), ((17586, 17687), 'chainer.iterators.SerialIterator', 'chainer.iterators.SerialIterator', (['train_data'], {'batch_size': 'batch_size', 'repeat': '(False)', 'shuffle': '(False)'}), '(train_data, batch_size=batch_size, repeat=\n False, shuffle=False)\n', (17618, 17687), False, 'import chainer\n'), ((17708, 17724), 'tqdm.tqdm', 'tqdm', (['train_iter'], {}), '(train_iter)\n', (17712, 17724), False, 'from tqdm import tqdm\n'), ((17873, 17902), 'numpy.vstack', 'np.vstack', (['train_feature_list'], {}), '(train_feature_list)\n', (17882, 17902), True, 'import numpy as np\n'), ((17915, 17962), 'numpy.save', 'np.save', (['train_feature_filename', 'train_features'], {}), '(train_feature_filename, train_features)\n', (17922, 17962), True, 'import numpy as np\n'), ((2115, 2127), 'numpy.dot', 'np.dot', (['v', 'x'], {}), '(v, x)\n', (2121, 2127), True, 'import numpy as np\n'), ((8395, 
8406), 'time.time', 'time.time', ([], {}), '()\n', (8404, 8406), False, 'import time\n'), ((8539, 8550), 'time.time', 'time.time', ([], {}), '()\n', (8548, 8550), False, 'import time\n'), ((8688, 8745), 'os.path.join', 'os.path.join', (['self.out_dir', '"""inv_sqrt-hessian-matrix.npy"""'], {}), "(self.out_dir, 'inv_sqrt-hessian-matrix.npy')\n", (8700, 8745), False, 'import os\n'), ((9692, 9730), 'numpy.matmul', 'np.matmul', (['inv_sqrt_hessian', 'test_grad'], {}), '(inv_sqrt_hessian, test_grad)\n', (9701, 9730), True, 'import numpy as np\n'), ((13618, 13649), 'numpy.load', 'np.load', (['train_feature_filename'], {}), '(train_feature_filename)\n', (13625, 13649), True, 'import numpy as np\n'), ((13794, 13850), 'os.path.join', 'os.path.join', (['self.out_dir', '"""train-grad-on-loss-all.npy"""'], {}), "(self.out_dir, 'train-grad-on-loss-all.npy')\n", (13806, 13850), False, 'import os\n'), ((13884, 13916), 'numpy.load', 'np.load', (['_train_feature_filename'], {}), '(_train_feature_filename)\n', (13891, 13916), True, 'import numpy as np\n'), ((14014, 14061), 'numpy.save', 'np.save', (['train_feature_filename', 'train_features'], {}), '(train_feature_filename, train_features)\n', (14021, 14061), True, 'import numpy as np\n'), ((14545, 14583), 'os.path.exists', 'os.path.exists', (['train_feature_filename'], {}), '(train_feature_filename)\n', (14559, 14583), False, 'import os\n'), ((15500, 15538), 'os.path.exists', 'os.path.exists', (['train_feature_filename'], {}), '(train_feature_filename)\n', (15514, 15538), False, 'import os\n'), ((17004, 17015), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17012, 17015), False, 'import sys\n'), ((2083, 2112), 'numpy.dot', 'np.dot', (['hessian_vector_val', 'x'], {}), '(hessian_vector_val, x)\n', (2089, 2112), True, 'import numpy as np\n'), ((4957, 4985), 'numpy.linalg.norm', 'np.linalg.norm', (['cur_estimate'], {}), '(cur_estimate)\n', (4971, 4985), True, 'import numpy as np\n'), ((7444, 7473), 'os.path.exists', 'os.path.exists', 
(['hess_filename'], {}), '(hess_filename)\n', (7458, 7473), False, 'import os\n'), ((7532, 7554), 'numpy.load', 'np.load', (['hess_filename'], {}), '(hess_filename)\n', (7539, 7554), True, 'import numpy as np\n'), ((8070, 8099), 'numpy.linalg.inv', 'np.linalg.inv', (['damped_hessian'], {}), '(damped_hessian)\n', (8083, 8099), True, 'import numpy as np\n'), ((8116, 8151), 'numpy.save', 'np.save', (['hess_filename', 'inv_hessian'], {}), '(hess_filename, inv_hessian)\n', (8123, 8151), True, 'import numpy as np\n'), ((9755, 9766), 'time.time', 'time.time', ([], {}), '()\n', (9764, 9766), False, 'import time\n'), ((9893, 9904), 'time.time', 'time.time', ([], {}), '()\n', (9902, 9904), False, 'import time\n'), ((10044, 10095), 'os.path.join', 'os.path.join', (['self.out_dir', '"""inv-fisher-matrix.npy"""'], {}), "(self.out_dir, 'inv-fisher-matrix.npy')\n", (10056, 10095), False, 'import os\n'), ((11500, 11532), 'numpy.matmul', 'np.matmul', (['inv_fisher', 'test_grad'], {}), '(inv_fisher, test_grad)\n', (11509, 11532), True, 'import numpy as np\n'), ((13950, 13995), 'numpy.matmul', 'np.matmul', (['inv_sqrt_hessian', 'train_features.T'], {}), '(inv_sqrt_hessian, train_features.T)\n', (13959, 13995), True, 'import numpy as np\n'), ((14618, 14649), 'numpy.load', 'np.load', (['train_feature_filename'], {}), '(train_feature_filename)\n', (14625, 14649), True, 'import numpy as np\n'), ((14794, 14850), 'os.path.join', 'os.path.join', (['self.out_dir', '"""train-grad-on-loss-all.npy"""'], {}), "(self.out_dir, 'train-grad-on-loss-all.npy')\n", (14806, 14850), False, 'import os\n'), ((14884, 14916), 'numpy.load', 'np.load', (['_train_feature_filename'], {}), '(_train_feature_filename)\n', (14891, 14916), True, 'import numpy as np\n'), ((15013, 15060), 'numpy.save', 'np.save', (['train_feature_filename', 'train_features'], {}), '(train_feature_filename, train_features)\n', (15020, 15060), True, 'import numpy as np\n'), ((15573, 15604), 'numpy.load', 'np.load', 
(['train_feature_filename'], {}), '(train_feature_filename)\n', (15580, 15604), True, 'import numpy as np\n'), ((15982, 16011), 'numpy.vstack', 'np.vstack', (['train_feature_list'], {}), '(train_feature_list)\n', (15991, 16011), True, 'import numpy as np\n'), ((16028, 16075), 'numpy.save', 'np.save', (['train_feature_filename', 'train_features'], {}), '(train_feature_filename, train_features)\n', (16035, 16075), True, 'import numpy as np\n'), ((8860, 8889), 'os.path.exists', 'os.path.exists', (['hess_filename'], {}), '(hess_filename)\n', (8874, 8889), False, 'import os\n'), ((8953, 8975), 'numpy.load', 'np.load', (['hess_filename'], {}), '(hess_filename)\n', (8960, 8975), True, 'import numpy as np\n'), ((9208, 9260), 'os.path.join', 'os.path.join', (['self.out_dir', '"""inv-hessian-matrix.npy"""'], {}), "(self.out_dir, 'inv-hessian-matrix.npy')\n", (9220, 9260), False, 'import os\n'), ((9291, 9314), 'numpy.load', 'np.load', (['inv_hessian_fn'], {}), '(inv_hessian_fn)\n', (9298, 9314), True, 'import numpy as np\n'), ((9403, 9443), 'numpy.save', 'np.save', (['hess_filename', 'inv_sqrt_hessian'], {}), '(hess_filename, inv_sqrt_hessian)\n', (9410, 9443), True, 'import numpy as np\n'), ((9479, 9501), 'numpy.load', 'np.load', (['hess_filename'], {}), '(hess_filename)\n', (9486, 9501), True, 'import numpy as np\n'), ((11557, 11568), 'time.time', 'time.time', ([], {}), '()\n', (11566, 11568), False, 'import time\n'), ((11700, 11711), 'time.time', 'time.time', ([], {}), '()\n', (11709, 11711), False, 'import time\n'), ((11849, 11905), 'os.path.join', 'os.path.join', (['self.out_dir', '"""inv_sqrt-fisher-matrix.npy"""'], {}), "(self.out_dir, 'inv_sqrt-fisher-matrix.npy')\n", (11861, 11905), False, 'import os\n'), ((12841, 12878), 'numpy.matmul', 'np.matmul', (['inv_sqrt_fisher', 'test_grad'], {}), '(inv_sqrt_fisher, test_grad)\n', (12850, 12878), True, 'import numpy as np\n'), ((14950, 14994), 'numpy.matmul', 'np.matmul', (['inv_sqrt_fisher', 'train_features.T'], {}), 
'(inv_sqrt_fisher, train_features.T)\n', (14959, 14994), True, 'import numpy as np\n'), ((15801, 15817), 'tqdm.tqdm', 'tqdm', (['train_data'], {}), '(train_data)\n', (15805, 15817), False, 'from tqdm import tqdm\n'), ((8010, 8039), 'numpy.identity', 'np.identity', (['hessian.shape[0]'], {}), '(hessian.shape[0])\n', (8021, 8039), True, 'import numpy as np\n'), ((10208, 10239), 'os.path.exists', 'os.path.exists', (['fisher_filename'], {}), '(fisher_filename)\n', (10222, 10239), False, 'import os\n'), ((10297, 10321), 'numpy.load', 'np.load', (['fisher_filename'], {}), '(fisher_filename)\n', (10304, 10321), True, 'import numpy as np\n'), ((10561, 10617), 'os.path.join', 'os.path.join', (['self.out_dir', '"""train-grad-on-loss-all.npy"""'], {}), "(self.out_dir, 'train-grad-on-loss-all.npy')\n", (10573, 10617), False, 'import os\n'), ((10803, 10831), 'numpy.load', 'np.load', (['train_grad_filename'], {}), '(train_grad_filename)\n', (10810, 10831), True, 'import numpy as np\n'), ((10860, 10891), 'numpy.average', 'np.average', (['train_grads'], {'axis': '(0)'}), '(train_grads, axis=0)\n', (10870, 10891), True, 'import numpy as np\n'), ((11044, 11068), 'numpy.vstack', 'np.vstack', (['fisher_matrix'], {}), '(fisher_matrix)\n', (11053, 11068), True, 'import numpy as np\n'), ((11228, 11256), 'numpy.linalg.inv', 'np.linalg.inv', (['damped_fisher'], {}), '(damped_fisher)\n', (11241, 11256), True, 'import numpy as np\n'), ((11273, 11309), 'numpy.save', 'np.save', (['fisher_filename', 'inv_fisher'], {}), '(fisher_filename, inv_fisher)\n', (11280, 11309), True, 'import numpy as np\n'), ((12903, 12914), 'time.time', 'time.time', ([], {}), '()\n', (12912, 12914), False, 'import time\n'), ((13180, 13191), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13188, 13191), False, 'import sys\n'), ((10641, 10676), 'os.path.exists', 'os.path.exists', (['train_grad_filename'], {}), '(train_grad_filename)\n', (10655, 10676), False, 'import os\n'), ((10761, 10772), 'sys.exit', 'sys.exit', 
(['(1)'], {}), '(1)\n', (10769, 10772), False, 'import sys\n'), ((12019, 12048), 'os.path.exists', 'os.path.exists', (['hess_filename'], {}), '(hess_filename)\n', (12033, 12048), False, 'import os\n'), ((12111, 12133), 'numpy.load', 'np.load', (['hess_filename'], {}), '(hess_filename)\n', (12118, 12133), True, 'import numpy as np\n'), ((12365, 12416), 'os.path.join', 'os.path.join', (['self.out_dir', '"""inv-fisher-matrix.npy"""'], {}), "(self.out_dir, 'inv-fisher-matrix.npy')\n", (12377, 12416), False, 'import os\n'), ((12446, 12468), 'numpy.load', 'np.load', (['inv_fisher_fn'], {}), '(inv_fisher_fn)\n', (12453, 12468), True, 'import numpy as np\n'), ((12555, 12594), 'numpy.save', 'np.save', (['hess_filename', 'inv_sqrt_fisher'], {}), '(hess_filename, inv_sqrt_fisher)\n', (12562, 12594), True, 'import numpy as np\n'), ((12629, 12651), 'numpy.load', 'np.load', (['hess_filename'], {}), '(hess_filename)\n', (12636, 12651), True, 'import numpy as np\n'), ((7957, 7972), 'numpy.abs', 'np.abs', (['hessian'], {}), '(hessian)\n', (7963, 7972), True, 'import numpy as np\n'), ((11170, 11198), 'numpy.identity', 'np.identity', (['fisher.shape[0]'], {}), '(fisher.shape[0])\n', (11181, 11198), True, 'import numpy as np\n'), ((11118, 11132), 'numpy.abs', 'np.abs', (['fisher'], {}), '(fisher)\n', (11124, 11132), True, 'import numpy as np\n')] |
# Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Gaussian operations vectorizing commonly used operation on covariance matrices and vectors of means"""
import numpy as np
def chop_in_blocks_multi(m, id_to_delete):
    r"""
    Split each (symmetric) matrix in an array into blocks ``A``, ``B``, ``C``.

    ``A`` is the diagonal block on the kept indices, ``C`` the diagonal block
    on the deleted indices, and ``B`` the off-diagonal coupling block.

    Args:
        m (ndarray): array of matrices, shape ``(num, dim, dim)``
        id_to_delete (ndarray): indices that form the ``C`` block

    Returns:
        tuple: the ``(A, B, C)`` block arrays
    """
    # Kept indices are the sorted complement of the deleted ones.
    id_to_keep = np.delete(np.arange(m.shape[1]), id_to_delete)
    A = m[:, id_to_keep, :][:, :, id_to_keep]
    B = m[:, id_to_keep, :][:, :, id_to_delete]
    C = m[:, id_to_delete, :][:, :, id_to_delete]
    return (A, B, C)
def chop_in_blocks_vector_multi(v, id_to_delete):
    r"""
    Partition an array of vectors into kept and deleted components.

    Args:
        v (ndarray): array of vectors, shape ``(num, dim)``
        id_to_delete (ndarray): indices routed into ``vb``

    Returns:
        tuple: ``(va, vb)`` where ``va`` holds the remaining components (in
        their original order) and ``vb`` the components selected by
        ``id_to_delete``
    """
    # np.delete keeps the complement of id_to_delete in ascending order,
    # matching the explicit set-difference of the original implementation.
    va = np.delete(v, id_to_delete, axis=1)
    vb = v[:, id_to_delete]
    return (va, vb)
def reassemble_multi(A, id_to_delete):
    r"""
    Embed each matrix of ``A`` into a larger identity matrix.

    Each output matrix has dimension ``dim(A) + len(id_to_delete)``; the
    subspace addressed by ``id_to_delete`` is the identity, and the
    complementary subspace carries the corresponding matrix from ``A``.

    Args:
        A (ndarray): array of matrices, shape ``(num, dim, dim)``
        id_to_delete (ndarray): indices set to the identity in the output

    Returns:
        ndarray: array of enlarged complex matrices
    """
    num_weights = A.shape[0]
    full_dim = A.shape[1] + len(id_to_delete)
    kept = np.delete(np.arange(full_dim), id_to_delete)
    # Start from stacked identities, then scatter A into the kept subspace.
    new_mat = np.broadcast_to(np.eye(full_dim, dtype=complex), (num_weights, full_dim, full_dim)).copy()
    new_mat[np.ix_(np.arange(num_weights), kept, kept)] = A
    return new_mat
def reassemble_vector_multi(va, id_to_delete):
    r"""
    Embed each vector of ``va`` into a larger zero vector.

    Each output vector has dimension ``dim(va) + len(id_to_delete)``; the
    entries addressed by ``id_to_delete`` are 0, and the complementary
    entries carry the corresponding vector from ``va``.

    Args:
        va (ndarray): array of vectors, shape ``(num, dim)``
        id_to_delete (ndarray): indices set to 0 in the output

    Returns:
        ndarray: array of enlarged complex vectors
    """
    num_weights, kept_dim = va.shape
    full_dim = kept_dim + len(id_to_delete)
    kept = np.delete(np.arange(full_dim), id_to_delete)
    new_vec = np.zeros((num_weights, full_dim), dtype=complex)
    new_vec[:, kept] = va
    return new_vec
| [
"numpy.arange",
"numpy.eye",
"numpy.zeros",
"numpy.delete"
] | [((1157, 1191), 'numpy.delete', 'np.delete', (['m', 'id_to_delete'], {'axis': '(1)'}), '(m, id_to_delete, axis=1)\n', (1166, 1191), True, 'import numpy as np\n'), ((1200, 1234), 'numpy.delete', 'np.delete', (['A', 'id_to_delete'], {'axis': '(2)'}), '(A, id_to_delete, axis=2)\n', (1209, 1234), True, 'import numpy as np\n'), ((1243, 1297), 'numpy.delete', 'np.delete', (['m[:, :, id_to_delete]', 'id_to_delete'], {'axis': '(1)'}), '(m[:, :, id_to_delete], id_to_delete, axis=1)\n', (1252, 1297), True, 'import numpy as np\n'), ((3685, 3736), 'numpy.zeros', 'np.zeros', (['(num_weights, new_vec_dim)'], {'dtype': 'complex'}), '((num_weights, new_vec_dim), dtype=complex)\n', (3693, 3736), True, 'import numpy as np\n'), ((2780, 2814), 'numpy.eye', 'np.eye', (['new_mat_dim'], {'dtype': 'complex'}), '(new_mat_dim, dtype=complex)\n', (2786, 2814), True, 'import numpy as np\n'), ((2856, 2894), 'numpy.arange', 'np.arange', (['new_mat.shape[0]'], {'dtype': 'int'}), '(new_mat.shape[0], dtype=int)\n', (2865, 2894), True, 'import numpy as np\n'), ((2712, 2734), 'numpy.arange', 'np.arange', (['new_mat_dim'], {}), '(new_mat_dim)\n', (2721, 2734), True, 'import numpy as np\n'), ((3625, 3647), 'numpy.arange', 'np.arange', (['new_vec_dim'], {}), '(new_vec_dim)\n', (3634, 3647), True, 'import numpy as np\n')] |
import numpy
from srxraylib.metrology.dabam import dabam
from srxraylib.plot.gol import plot
dm = dabam()
def load_dabam_profile(entry_number,
                       mirror_length = 2 * 0.07,
                       mirror_points = 100,
                       mirror_rms = 25e-9,
                       do_plot = True
                       ):
    """Fetch a DABAM surface-error profile and adapt it to a target mirror.

    The raw profile for ``entry_number`` is centred, its abscissas are
    rescaled to span ``mirror_length``, its heights are renormalised to the
    requested ``mirror_rms``, and the result is resampled onto a uniform
    grid of ``mirror_points`` points.

    Returns:
        tuple: ``(x, y)`` arrays — uniform abscissas and rescaled heights.
    """
    dm.inputs['entryNumber'] = entry_number
    dm.load()  # access data from the DABAM repository
    raw_x = dm.y.copy()
    raw_z = dm.zHeights.copy()
    # Centre the abscissas on the middle sample of the profile.
    scaled_x = raw_x - raw_x[raw_x.size // 2]
    scaled_z = raw_z.copy()
    # Stretch/shrink the abscissas so the profile covers the mirror length.
    scaled_x = scaled_x / numpy.abs(scaled_x).max() * 0.5 * mirror_length
    # Renormalise the heights to the requested RMS value.
    print("RMS mirror: %5.3f nm "%(1e9*scaled_z.std()))
    scaled_z = scaled_z * mirror_rms / scaled_z.std()
    # Resample onto a uniform grid centred on zero.
    grid_x = numpy.linspace(-0.5 * mirror_length, 0.5 * mirror_length, mirror_points)
    grid_z = numpy.interp(grid_x, scaled_x, scaled_z)
    if do_plot:
        plot(raw_x, raw_z,
             scaled_x, scaled_z,
             grid_x, grid_z,
             legend=["Original","Transformed","Interpolated"])
    return grid_x, grid_z
if __name__ == "__main__":
    # Export every DABAM profile (entries 1..82) as a two-column text file.
    for entry_number in range(1,83):
        print(">>>> entry: ",entry_number)
        x, y = load_dabam_profile(entry_number, do_plot=False)
        filename = "deformation%02d.dat" % entry_number
        # "with" guarantees the handle is closed even if a write raises
        # (the original called close() manually and leaked on error).
        with open(filename, "w") as f:
            for i in range(x.size):
                f.write("%g %g\n" % (x[i], y[i]))
        print("File written to disk: %s" % filename)
| [
"numpy.abs",
"srxraylib.metrology.dabam.dabam",
"srxraylib.plot.gol.plot",
"numpy.linspace",
"numpy.interp"
] | [((100, 107), 'srxraylib.metrology.dabam.dabam', 'dabam', ([], {}), '()\n', (105, 107), False, 'from srxraylib.metrology.dabam import dabam\n'), ((827, 899), 'numpy.linspace', 'numpy.linspace', (['(-0.5 * mirror_length)', '(0.5 * mirror_length)', 'mirror_points'], {}), '(-0.5 * mirror_length, 0.5 * mirror_length, mirror_points)\n', (841, 899), False, 'import numpy\n'), ((908, 931), 'numpy.interp', 'numpy.interp', (['x', 'x0', 'y0'], {}), '(x, x0, y0)\n', (920, 931), False, 'import numpy\n'), ((956, 1041), 'srxraylib.plot.gol.plot', 'plot', (['x00', 'y00', 'x0', 'y0', 'x', 'y'], {'legend': "['Original', 'Transformed', 'Interpolated']"}), "(x00, y00, x0, y0, x, y, legend=['Original', 'Transformed', 'Interpolated']\n )\n", (960, 1041), False, 'from srxraylib.plot.gol import plot\n'), ((651, 664), 'numpy.abs', 'numpy.abs', (['x0'], {}), '(x0)\n', (660, 664), False, 'import numpy\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import math as mt
import numpy as np
from pandas.tools.plotting import scatter_matrix
# Ordinal encoding for the rental 'interest_level' labels.
# NOTE(review): the name `dict` shadows the builtin `dict` for the rest of
# this module — consider renaming (kept as-is because the __main__ block
# references it by this name).
dict = {'low' : 1,
        'medium' : 2,
        'high': 3}
def open_file(fileName):
    """Load a JSON records file into a pandas DataFrame."""
    return pd.read_json(fileName)
def show_data_info(data):
    """Print a quick overview of a DataFrame: size, first rows, statistics."""
    n_rows, n_cols = data.shape
    print("Number of instance:" + str(n_rows))
    print("Number of features:" + str(n_cols))
    print("------------------------------------------")
    print("Initial instance:\n")
    print(data.head(10))
    print("Numerical info:\n")
    # Full-width column slice, kept from the original implementation.
    subset = data.iloc[:, :n_cols]
    print(subset.describe())
def count_array_elements(data, column):
    """Replace ``column`` (list-valued cells) with each cell's element count.

    Mutates ``data`` in place and also returns it, matching the original
    contract.

    Args:
        data (pd.DataFrame): frame whose ``column`` holds sized sequences.
        column (str): name of the column to convert.

    Returns:
        pd.DataFrame: the same frame with ``column`` replaced by lengths.
    """
    # Vectorised map(len) replaces the original per-row iloc loop.
    data[column] = data[column].map(len)
    return data
def fill_missing_values_with_mean(data, column):
    """Impute NaNs in ``column`` with the column mean (in place); return the frame."""
    column_mean = data[column].mean()
    data[column] = data[column].fillna(column_mean)
    return data
def fill_missing_values_with_mode(data, column):
    """Impute NaNs in ``column`` with the most frequent value (in place); return the frame."""
    most_frequent = data[column].mode()[0]
    data[column] = data[column].fillna(most_frequent)
    return data
def number_photos_influence_interest_level(data):
    """Plot the mean interest level as a function of the photo count.

    Expects ``data['photos']`` to already hold per-listing photo counts and
    ``data['interest_level']`` to be ordinally encoded (1..3).
    """
    photo_counts = data['photos'].value_counts().keys()
    mean_interest = []
    for count in photo_counts:
        matching = data.loc[data['photos'] == count]
        print('Numero de fotos:' + str(count))
        print(matching['interest_level'])
        mean_interest.append(matching["interest_level"].mean())
    print(photo_counts)
    print(mean_interest)
    plt.bar(photo_counts, mean_interest, .2, color="blue")
    plt.ylabel('nivel de interes')
    plt.xlabel('#fotos')
    plt.title('Numero de fotos influye nivel de interes')
    plt.xticks(np.arange(0, max(photo_counts), 3))
    plt.yticks(np.arange(0, 3, 1))
    plt.show()
def len_description_influence_interest_level(data):
    """Plot the mean interest level versus description length (in words).

    Expects ``data['description']`` to already hold word counts and
    ``data['interest_level']`` to be ordinally encoded (1..3).
    """
    length_values = data['description'].value_counts().keys()
    mean_interest = []
    for length in length_values:
        matching = data.loc[data['description'] == length]
        print('Longitud de la descripcion:' + str(length))
        print(matching['interest_level'])
        mean_interest.append(matching["interest_level"].mean())
    print(length_values)
    print(mean_interest)
    plt.bar(length_values, mean_interest, .2, color="blue")
    plt.ylabel('nivel de interes')
    plt.xlabel('Longitud de la descripcion')
    plt.title('Longitud de la descripcion influye nivel de interes')
    plt.xticks(np.arange(0, max(length_values), 40))
    plt.yticks(np.arange(0, 3, 1))
    plt.show()
def number_features_influence_price(data):
    """Plot the mean listing price as a function of the feature count.

    Expects ``data['features']`` to already hold per-listing feature counts.
    """
    feature_counts = data['features'].value_counts().keys()
    mean_price = []
    for count in feature_counts:
        matching = data.loc[data['features'] == count]
        print('Numero de caracteristicas:' + str(count))
        print(matching['price'])
        mean_price.append(matching["price"].mean())
    print(feature_counts)
    print(mean_price)
    plt.bar(feature_counts, mean_price, .2, color="blue")
    plt.ylabel('Precio')
    plt.xlabel('#caracteristicas')
    plt.title('Numero de caracteristicas influye precio')
    plt.xticks(np.arange(0, max(feature_counts), 2))
    plt.yticks(np.arange(0, 15000, 1000))
    plt.show()
def count_words(data, column):
    """Replace a text ``column`` with per-row word counts (in place).

    A cell consisting of the single string ``' '`` counts as 0 words; any
    other string is split on single spaces and the number of resulting
    tokens is used (empty tokens from repeated spaces are counted, exactly
    as in the original implementation).

    Args:
        data (pd.DataFrame): frame whose ``column`` holds strings.
        column (str): name of the column to convert.

    Returns:
        pd.DataFrame: the same frame with ``column`` replaced by counts.
    """
    def _word_count(text):
        # Preserve the original special case for a lone-space cell.
        return 0 if text == ' ' else len(text.split(' '))

    # Vectorised map() replaces the original per-row iloc loop.
    data[column] = data[column].map(_word_count)
    return data
def save(data, path='clean_dataset.csv'):
    """Write the DataFrame to CSV without the index.

    Args:
        data (pd.DataFrame): frame to persist.
        path (str): destination file; defaults to the original hard-coded
            'clean_dataset.csv' for backward compatibility.
    """
    data.to_csv(path, index = False)
if __name__ == '__main__':
    # Load the raw rental-listings dataset (JSON records).
    data = open_file('train.json')
    # Encode the categorical target ordinally via the module-level `dict` mapping.
    data['interest_level'] = data['interest_level'].replace(dict)
    # Collapse list-valued columns into simple counts.
    data = count_array_elements(data, 'photos')
    data = count_array_elements(data, 'features')
    # Collapse the free-text description into a word count.
    data = count_words(data, 'description')
    # Only the description-length analysis is currently enabled; the
    # commented alternatives below are kept for manual experimentation.
    len_description_influence_interest_level(data)
    #number_photos_influence_interest_level(data)
    #number_features_influence_price(data)
    #show_data_info(data)
    #save(data[:500]);
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"pandas.read_json",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((236, 258), 'pandas.read_json', 'pd.read_json', (['fileName'], {}), '(fileName)\n', (248, 258), True, 'import pandas as pd\n'), ((1574, 1633), 'matplotlib.pyplot.bar', 'plt.bar', (['numbPhotosKeys', 'interestArray', 'width'], {'color': '"""blue"""'}), "(numbPhotosKeys, interestArray, width, color='blue')\n", (1581, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1639, 1669), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""nivel de interes"""'], {}), "('nivel de interes')\n", (1649, 1669), True, 'import matplotlib.pyplot as plt\n'), ((1674, 1694), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""#fotos"""'], {}), "('#fotos')\n", (1684, 1694), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1752), 'matplotlib.pyplot.title', 'plt.title', (['"""Numero de fotos influye nivel de interes"""'], {}), "('Numero de fotos influye nivel de interes')\n", (1708, 1752), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1854, 1856), True, 'import matplotlib.pyplot as plt\n'), ((2350, 2409), 'matplotlib.pyplot.bar', 'plt.bar', (['numbPhotosKeys', 'interestArray', 'width'], {'color': '"""blue"""'}), "(numbPhotosKeys, interestArray, width, color='blue')\n", (2357, 2409), True, 'import matplotlib.pyplot as plt\n'), ((2415, 2445), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""nivel de interes"""'], {}), "('nivel de interes')\n", (2425, 2445), True, 'import matplotlib.pyplot as plt\n'), ((2450, 2490), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitud de la descripcion"""'], {}), "('Longitud de la descripcion')\n", (2460, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2559), 'matplotlib.pyplot.title', 'plt.title', (['"""Longitud de la descripcion influye nivel de interes"""'], {}), "('Longitud de la descripcion influye nivel de interes')\n", (2504, 2559), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2664), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2662, 2664), True, 
'import matplotlib.pyplot as plt\n'), ((3125, 3183), 'matplotlib.pyplot.bar', 'plt.bar', (['numbFeaturesKeys', 'priceArray', 'width'], {'color': '"""blue"""'}), "(numbFeaturesKeys, priceArray, width, color='blue')\n", (3132, 3183), True, 'import matplotlib.pyplot as plt\n'), ((3189, 3209), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precio"""'], {}), "('Precio')\n", (3199, 3209), True, 'import matplotlib.pyplot as plt\n'), ((3214, 3244), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""#caracteristicas"""'], {}), "('#caracteristicas')\n", (3224, 3244), True, 'import matplotlib.pyplot as plt\n'), ((3249, 3302), 'matplotlib.pyplot.title', 'plt.title', (['"""Numero de caracteristicas influye precio"""'], {}), "('Numero de caracteristicas influye precio')\n", (3258, 3302), True, 'import matplotlib.pyplot as plt\n'), ((3405, 3415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3413, 3415), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1839), 'numpy.arange', 'np.arange', (['(0)', '(3)', '(1)'], {}), '(0, 3, 1)\n', (1830, 1839), True, 'import numpy as np\n'), ((2629, 2647), 'numpy.arange', 'np.arange', (['(0)', '(3)', '(1)'], {}), '(0, 3, 1)\n', (2638, 2647), True, 'import numpy as np\n'), ((3373, 3398), 'numpy.arange', 'np.arange', (['(0)', '(15000)', '(1000)'], {}), '(0, 15000, 1000)\n', (3382, 3398), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from pydantic import validator
from tqdm import tqdm
from src.core import RESHAPE
from src.core.base_model import BaseModel
from src.core.bio_process.calcification import Calcification
from src.core.bio_process.dislodgment import Dislodgement
from src.core.bio_process.flow import Flow
from src.core.bio_process.light import Light
from src.core.bio_process.morphology import Morphology
from src.core.bio_process.photosynthesis import Photosynthesis
from src.core.bio_process.population_states import PopulationStates
from src.core.bio_process.recruitment import Recruitment
from src.core.bio_process.temperature import Temperature
from src.core.common.constants import Constants
from src.core.common.environment import Environment
from src.core.common.space_time import time_series_year
from src.core.coral.coral_model import Coral
from src.core.hydrodynamics.factory import HydrodynamicsFactory
from src.core.hydrodynamics.hydrodynamic_protocol import HydrodynamicProtocol
from src.core.output.output_wrapper import OutputWrapper
class BaseSimulation(BaseModel, ABC):
    """
    Implements the `SimulationProtocol`.
    Facade class that can be implemented through an Adapter pattern.
    CoralModel simulation.
    """
    # Simulation mode string; also used by the hydrodynamics validator to pick
    # the concrete hydrodynamics implementation when none is supplied.
    mode: str
    # Directories related to working dir
    # NOTE(review): Path.cwd() is evaluated once at class-definition time; the
    # three sub-directories below derive from that same default.
    working_dir: Optional[Path] = Path.cwd()
    figures_dir: Path = working_dir / "figures"
    output_dir: Path = working_dir / "output"
    input_dir: Path = working_dir / "input"
    # Other fields.
    hydrodynamics: Optional[HydrodynamicProtocol]
    environment: Environment = Environment()
    constants: Constants = Constants()
    output: Optional[OutputWrapper]
    coral: Optional[Coral]
    @validator("constants", pre=True)
    @classmethod
    def validate_constants(cls, field_value: Union[str, Path, Constants]) -> Constants:
        """
        Validates the user-input constants value and transforms in case it's a filepath (str, Path).
        Args:
            field_value (Union[str, Path, Constants]): Value given by the user representing Constants.
        Raises:
            NotImplementedError: When the input value does not have any converter.
        Returns:
            Constants: Validated constants value.
        """
        # Accept a ready-made Constants object as-is; a str is first promoted
        # to a Path, then loaded from file.
        if isinstance(field_value, Constants):
            return field_value
        if isinstance(field_value, str):
            field_value = Path(field_value)
        if isinstance(field_value, Path):
            return Constants.from_input_file(field_value)
        raise NotImplementedError(f"Validator not available for {type(field_value)}")
    @validator("coral", pre=True)
    @classmethod
    def validate_coral(cls, field_value: Union[dict, Coral], values: dict) -> Coral:
        """
        Initializes coral in case a dictionary is provided. Ensuring the constants are also
        given to the object.
        Args:
            field_value (Union[dict, Coral]): Value given by the user for the Coral field.
            values (dict): Dictionary of remaining user-given field values.
        Returns:
            Coral: Validated instance of 'Coral'.
        """
        if isinstance(field_value, Coral):
            return field_value
        if isinstance(field_value, dict):
            # Check if constants present in the dictionary:
            if "constants" in field_value.keys():
                # It will be generated automatically.
                # in case parameters are missing an error will also be displayed.
                return Coral(**field_value)
            # Fall back to the simulation-level constants already validated above.
            if "constants" in values.keys():
                field_value["constants"] = values["constants"]
                return Coral(**field_value)
            raise ValueError(
                "Constants should be provided to initialize a Coral Model."
            )
        raise NotImplementedError(f"Validator not available for {type(field_value)}")
    @validator("hydrodynamics", pre=True, always=True)
    @classmethod
    def validate_hydrodynamics_present(
        cls, field_values: Union[dict, HydrodynamicProtocol], values: dict
    ) -> HydrodynamicProtocol:
        """
        Validator to transform the given dictionary into the corresponding hydrodynamic model.
        Args:
            field_values (Union[dict, HydrodynamicProtocol]): Value assigned to `hydrodynamics`.
            values (dict): Dictionary of values given by the user.
        Raises:
            ValueError: When no hydrodynamics model can be built with the given values.
        Returns:
            dict: Validated dictionary of values given by the user.
        """
        # always=True: runs even when the field is omitted, so a model is
        # constructed from the simulation `mode` by default.
        if field_values is None:
            field_values = dict()
        if isinstance(field_values, dict):
            return HydrodynamicsFactory.create(
                field_values.get("mode", values["mode"]), **field_values
            )
        return field_values
    @abstractmethod
    def configure_hydrodynamics(self):
        """
        Configures the parameters for the `HydrodynamicsProtocol`.
        Raises:
            NotImplementedError: When abstract method not defined in concrete class.
        """
        raise NotImplementedError
    @abstractmethod
    def configure_output(self):
        """
        Configures the parameters for the `OutputWrapper`.
        """
        raise NotImplementedError
    def validate_simulation_directories(self):
        """
        Generates the required directories if they do not exist already.
        """
        # Attribute *names* are looped so getattr picks up any user overrides.
        loop_dirs: List[Path] = [
            "working_dir",
            "output_dir",
            "input_dir",
            "figures_dir",
        ]
        for loop_dir in loop_dirs:
            value_dir: Path = getattr(self, loop_dir)
            if not value_dir.is_dir():
                value_dir.mkdir(parents=True)
    def validate_environment(self):
        """Check input; if all required data is provided."""
        # Light and temperature are mandatory; attenuation and aragonite fall
        # back to defaults from `constants` with a console notice.
        if self.environment.light is None:
            msg = "CoralModel simulation cannot run without data on light conditions."
            raise ValueError(msg)
        if self.environment.temperature is None:
            msg = "CoralModel simulation cannot run without data on temperature conditions."
            raise ValueError(msg)
        if self.environment.light_attenuation is None:
            self.environment.set_parameter_values(
                "light_attenuation", self.constants.Kd0
            )
            print(
                f"Light attenuation coefficient set to default: Kd = {self.constants.Kd0} [m-1]"
            )
        if self.environment.aragonite is None:
            self.environment.set_parameter_values("aragonite", self.constants.omegaA0)
            print(
                f"Aragonite saturation state set to default: omega_a0 = {self.constants.omegaA0} [-]"
            )
        # TODO: add other dependencies based on process switches in self.constants if required
    def initiate(
        self,
        x_range: Optional[tuple] = None,
        y_range: Optional[tuple] = None,
        value: Optional[float] = None,
    ) -> Coral:
        """Initiate the coral distribution. The default coral distribution is a full coral cover over the whole domain.
        More complex initial conditions of the coral cover cannot be realised with this method. See the documentation on
        workarounds to achieve this anyway.
        :param x_range: minimum and maximum x-coordinate, defaults to None
        :param y_range: minimum and maximum y-coordinate, defaults to None
        :param value: coral cover, defaults to None
        :type coral: Coral
        :type x_range: tuple, optional
        :type y_range: tuple, optional
        :type value: float, optional
        :return: coral animal initiated
        :rtype: Coral
        """
        self.configure_hydrodynamics()
        self.configure_output()
        # Load constants and validate environment.
        self.validate_simulation_directories()
        self.validate_environment()
        # Propagate the spatial dimension to the global reshape singleton.
        RESHAPE().space = self.hydrodynamics.space
        if self.output.defined:
            self.output.initialize(self.coral)
        else:
            print("WARNING: No output defined, so none exported.")
        xy = self.hydrodynamics.xy_coordinates
        if value is None:
            value = 1
        # Uniform cover everywhere, then zero out cells outside the requested
        # x/y ranges (open bounds fall back to the domain extremes).
        cover = value * np.ones(RESHAPE().space)
        if x_range is not None:
            x_min = x_range[0] if x_range[0] is not None else min(xy[:][0])
            x_max = x_range[1] if x_range[1] is not None else max(xy[:][0])
            cover[np.logical_or(xy[:][0] <= x_min, xy[:][0] >= x_max)] = 0
        if y_range is not None:
            y_min = y_range[0] if y_range[0] is not None else min(xy[:][1])
            y_max = y_range[1] if y_range[1] is not None else max(xy[:][1])
            cover[np.logical_or(xy[:][1] <= y_min, xy[:][1] >= y_max)] = 0
        self.coral.initiate_coral_morphology(cover)
        # NOTE(review): output.initialize may already have run above when
        # output.defined is True — confirm re-initialization is intended.
        self.output.initialize(self.coral)
    def run(self, duration: Optional[int] = None):
        """Run simulation.
        :param coral: coral animal
        :param duration: simulation duration [yrs], defaults to None
        :type coral: Coral
        :type duration: int, optional
        """
        # auto-set duration based on environmental time-series
        environment_dates: pd.core.series.Series = self.environment.get_dates()
        if duration is None:
            duration = int(
                environment_dates.iloc[-1].year - environment_dates.iloc[0].year
            )
        years = range(
            int(environment_dates.iloc[0].year),
            int(environment_dates.iloc[0].year + duration),
        )
        # One outer iteration per simulated year; tqdm provides the progress bar.
        with tqdm(range((int(duration)))) as progress:
            for i in progress:
                # set dimensions (i.e. update time-dimension)
                RESHAPE().time = len(
                    environment_dates.dt.year[environment_dates.dt.year == years[i]]
                )
                # if-statement that encompasses all for which the hydrodynamic should be used
                progress.set_postfix(inner_loop=f"update {self.hydrodynamics}")
                current_vel, wave_vel, wave_per = self.hydrodynamics.update(
                    self.coral, stormcat=0
                )
                # # environment
                progress.set_postfix(inner_loop="coral environment")
                # light micro-environment
                lme = Light(
                    light_in=time_series_year(self.environment.light, years[i]),
                    lac=time_series_year(self.environment.light_attenuation, years[i]),
                    depth=self.hydrodynamics.water_depth,
                )
                lme.rep_light(self.coral)
                # flow micro-environment
                fme = Flow(
                    u_current=current_vel,
                    u_wave=wave_vel,
                    h=self.hydrodynamics.water_depth,
                    peak_period=wave_per,
                    constants=self.constants,
                )
                fme.velocities(self.coral, in_canopy=self.constants.fme)
                fme.thermal_boundary_layer(self.coral)
                # thermal micro-environment
                tme = Temperature(
                    constants=self.constants,
                    temperature=time_series_year(
                        self.environment.temp_kelvin, years[i]
                    ),
                )
                tme.coral_temperature(self.coral)
                # # physiology
                progress.set_postfix(inner_loop="coral physiology")
                # photosynthetic dependencies
                phd = Photosynthesis(
                    constants=self.constants,
                    light_in=time_series_year(self.environment.light, years[i]),
                    first_year=True if i == 0 else False,
                )
                phd.photo_rate(self.coral, self.environment, years[i])
                # population states
                ps = PopulationStates(constants=self.constants)
                ps.pop_states_t(self.coral)
                # calcification
                cr = Calcification(constants=self.constants)
                cr.calcification_rate(
                    self.coral, time_series_year(self.environment.aragonite, years[i])
                )
                # # morphology
                progress.set_postfix(inner_loop="coral morphology")
                # morphological development
                mor = Morphology(
                    constants=self.constants,
                    calc_sum=self.coral.calc.sum(axis=1),
                    light_in=time_series_year(self.environment.light, years[i]),
                )
                mor.update(self.coral)
                # # storm damage
                # Only years with a positive storm category trigger the storm branch.
                if self.environment.storm_category is not None:
                    tt = self.environment.storm_category
                    yr = years[i]
                    stormcat = int(tt["stormcat"].values[tt.index == yr])
                    if stormcat > 0:
                        progress.set_postfix(inner_loop="storm damage")
                        # update hydrodynamic model
                        current_vel, wave_vel, wave_per = self.hydrodynamics.update(
                            self.coral, stormcat
                        )
                        # storm flow environment
                        sfe = Flow(
                            constants=self.constants,
                            u_current=current_vel,
                            u_wave=wave_vel,
                            h=self.hydrodynamics.water_depth,
                            peak_period=wave_per,
                        )
                        sfe.velocities(self.coral, in_canopy=self.constants.fme)
                        # storm dislodgement criterion
                        sdc = Dislodgement(constants=self.constants)
                        sdc.update(self.coral)
                # # recruitment
                progress.set_postfix(inner_loop="coral recruitment")
                # recruitment
                rec = Recruitment(constants=self.constants)
                rec.update(self.coral)
                # # export results
                progress.set_postfix(inner_loop="export results")
                # map-file
                self.output.map_output.update(self.coral, years[i])
                # his-file
                self.output.his_output.update(
                    self.coral,
                    environment_dates[environment_dates.dt.year == years[i]],
                )
    def finalise(self):
        """Finalise simulation."""
        self.hydrodynamics.finalise()
class Simulation(BaseSimulation):
    """
    Plain, fully manual variant of `BaseSimulation`.

    Nothing is wired up automatically: the user is expected to assemble every
    part of the simulation (hydrodynamics, output, coral, ...) by hand before
    calling `initiate` / `run`.
    """

    def configure_hydrodynamics(self):
        """No-op: this simulation type performs no automatic hydrodynamics setup."""

    def configure_output(self):
        """No-op: this simulation type performs no automatic output setup."""
# TODO: Define folder structure
# > working directory
# > figures directory
# > input directory
# > output directory
# > etc.
# TODO: Model initiation IV: OutputFiles
# > specify output files (i.e. define file names and directories)
# > specify model data to be included in output files
# TODO: Model initiation V: initial conditions
# > specify initial morphology
# > specify initial coral cover
# > specify carrying capacity
# TODO: Model simulation I: specify SpaceTime
# TODO: Model simulation II: hydrodynamic module
# > update hydrodynamics
# > extract variables
# TODO: Model simulation III: coral environment
# > light micro-environment
# > flow micro-environment
# > temperature micro-environment
# TODO: Model simulation IV: coral physiology
# > photosynthesis
# > population states
# > calcification
# TODO: Model simulation V: coral morphology
# > morphological development
# TODO: Model simulation VI: storm damage
# > set variables to hydrodynamic module
# > update hydrodynamics and extract variables
# > update coral storm survival
# TODO: Model simulation VII: coral recruitment
# > update recruitment's contribution
# TODO: Model simulation VIII: return morphology
# > set variables to hydrodynamic module
# TODO: Model simulation IX: export output
# > write map-file
# > write his-file
# TODO: Model finalisation
| [
"src.core.common.environment.Environment",
"src.core.RESHAPE",
"src.core.bio_process.flow.Flow",
"src.core.common.space_time.time_series_year",
"src.core.common.constants.Constants",
"src.core.bio_process.calcification.Calcification",
"src.core.bio_process.population_states.PopulationStates",
"src.cor... | [((1458, 1468), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1466, 1468), False, 'from pathlib import Path\n'), ((1709, 1722), 'src.core.common.environment.Environment', 'Environment', ([], {}), '()\n', (1720, 1722), False, 'from src.core.common.environment import Environment\n'), ((1750, 1761), 'src.core.common.constants.Constants', 'Constants', ([], {}), '()\n', (1759, 1761), False, 'from src.core.common.constants import Constants\n'), ((1831, 1863), 'pydantic.validator', 'validator', (['"""constants"""'], {'pre': '(True)'}), "('constants', pre=True)\n", (1840, 1863), False, 'from pydantic import validator\n'), ((2735, 2763), 'pydantic.validator', 'validator', (['"""coral"""'], {'pre': '(True)'}), "('coral', pre=True)\n", (2744, 2763), False, 'from pydantic import validator\n'), ((4031, 4080), 'pydantic.validator', 'validator', (['"""hydrodynamics"""'], {'pre': '(True)', 'always': '(True)'}), "('hydrodynamics', pre=True, always=True)\n", (4040, 4080), False, 'from pydantic import validator\n'), ((2525, 2542), 'pathlib.Path', 'Path', (['field_value'], {}), '(field_value)\n', (2529, 2542), False, 'from pathlib import Path\n'), ((2604, 2642), 'src.core.common.constants.Constants.from_input_file', 'Constants.from_input_file', (['field_value'], {}), '(field_value)\n', (2629, 2642), False, 'from src.core.common.constants import Constants\n'), ((8133, 8142), 'src.core.RESHAPE', 'RESHAPE', ([], {}), '()\n', (8140, 8142), False, 'from src.core import RESHAPE\n'), ((3646, 3666), 'src.core.coral.coral_model.Coral', 'Coral', ([], {}), '(**field_value)\n', (3651, 3666), False, 'from src.core.coral.coral_model import Coral\n'), ((3798, 3818), 'src.core.coral.coral_model.Coral', 'Coral', ([], {}), '(**field_value)\n', (3803, 3818), False, 'from src.core.coral.coral_model import Coral\n'), ((8687, 8738), 'numpy.logical_or', 'np.logical_or', (['(xy[:][0] <= x_min)', '(xy[:][0] >= x_max)'], {}), '(xy[:][0] <= x_min, xy[:][0] >= x_max)\n', (8700, 8738), True, 
'import numpy as np\n'), ((8947, 8998), 'numpy.logical_or', 'np.logical_or', (['(xy[:][1] <= y_min)', '(xy[:][1] >= y_max)'], {}), '(xy[:][1] <= y_min, xy[:][1] >= y_max)\n', (8960, 8998), True, 'import numpy as np\n'), ((10926, 11057), 'src.core.bio_process.flow.Flow', 'Flow', ([], {'u_current': 'current_vel', 'u_wave': 'wave_vel', 'h': 'self.hydrodynamics.water_depth', 'peak_period': 'wave_per', 'constants': 'self.constants'}), '(u_current=current_vel, u_wave=wave_vel, h=self.hydrodynamics.\n water_depth, peak_period=wave_per, constants=self.constants)\n', (10930, 11057), False, 'from src.core.bio_process.flow import Flow\n'), ((12144, 12186), 'src.core.bio_process.population_states.PopulationStates', 'PopulationStates', ([], {'constants': 'self.constants'}), '(constants=self.constants)\n', (12160, 12186), False, 'from src.core.bio_process.population_states import PopulationStates\n'), ((12284, 12323), 'src.core.bio_process.calcification.Calcification', 'Calcification', ([], {'constants': 'self.constants'}), '(constants=self.constants)\n', (12297, 12323), False, 'from src.core.bio_process.calcification import Calcification\n'), ((14250, 14287), 'src.core.bio_process.recruitment.Recruitment', 'Recruitment', ([], {'constants': 'self.constants'}), '(constants=self.constants)\n', (14261, 14287), False, 'from src.core.bio_process.recruitment import Recruitment\n'), ((8467, 8476), 'src.core.RESHAPE', 'RESHAPE', ([], {}), '()\n', (8474, 8476), False, 'from src.core import RESHAPE\n'), ((9965, 9974), 'src.core.RESHAPE', 'RESHAPE', ([], {}), '()\n', (9972, 9974), False, 'from src.core import RESHAPE\n'), ((12395, 12449), 'src.core.common.space_time.time_series_year', 'time_series_year', (['self.environment.aragonite', 'years[i]'], {}), '(self.environment.aragonite, years[i])\n', (12411, 12449), False, 'from src.core.common.space_time import time_series_year\n'), ((10605, 10655), 'src.core.common.space_time.time_series_year', 'time_series_year', (['self.environment.light', 
'years[i]'], {}), '(self.environment.light, years[i])\n', (10621, 10655), False, 'from src.core.common.space_time import time_series_year\n'), ((10681, 10743), 'src.core.common.space_time.time_series_year', 'time_series_year', (['self.environment.light_attenuation', 'years[i]'], {}), '(self.environment.light_attenuation, years[i])\n', (10697, 10743), False, 'from src.core.common.space_time import time_series_year\n'), ((11457, 11513), 'src.core.common.space_time.time_series_year', 'time_series_year', (['self.environment.temp_kelvin', 'years[i]'], {}), '(self.environment.temp_kelvin, years[i])\n', (11473, 11513), False, 'from src.core.common.space_time import time_series_year\n'), ((11888, 11938), 'src.core.common.space_time.time_series_year', 'time_series_year', (['self.environment.light', 'years[i]'], {}), '(self.environment.light, years[i])\n', (11904, 11938), False, 'from src.core.common.space_time import time_series_year\n'), ((12778, 12828), 'src.core.common.space_time.time_series_year', 'time_series_year', (['self.environment.light', 'years[i]'], {}), '(self.environment.light, years[i])\n', (12794, 12828), False, 'from src.core.common.space_time import time_series_year\n'), ((13550, 13681), 'src.core.bio_process.flow.Flow', 'Flow', ([], {'constants': 'self.constants', 'u_current': 'current_vel', 'u_wave': 'wave_vel', 'h': 'self.hydrodynamics.water_depth', 'peak_period': 'wave_per'}), '(constants=self.constants, u_current=current_vel, u_wave=wave_vel, h=\n self.hydrodynamics.water_depth, peak_period=wave_per)\n', (13554, 13681), False, 'from src.core.bio_process.flow import Flow\n'), ((14010, 14048), 'src.core.bio_process.dislodgment.Dislodgement', 'Dislodgement', ([], {'constants': 'self.constants'}), '(constants=self.constants)\n', (14022, 14048), False, 'from src.core.bio_process.dislodgment import Dislodgement\n')] |
import time
import pickle
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pystorm.PyDriver import bddriver as bd
from pystorm.hal import HAL
from pystorm.hal.neuromorph import graph # to describe HAL/neuromorph network
# NOTE(review): rebinds the imported HAL *class* to a module-level singleton
# instance; after this line the class itself is no longer reachable by name.
HAL = HAL()
def create_decode_encode_network(width, height, d_val, stim_mode='both'):
    """Build a 1-in / 1-out decode-encode network over a width x height pool.

    Tap points are placed on every other row and column of the pool, and the
    full tap set is listed twice: once with weight ``_w1`` and once with
    weight ``_w2`` (so each tapped synapse is hit twice).

    Args:
        width, height: pool dimensions in neurons (N = width * height).
        d_val: decode weight applied uniformly to all N neurons.
        stim_mode: 'both' (+1/-1 taps), 'exc' (+1/+1) or 'inh' (-1/-1).

    Returns:
        The constructed (unmapped) ``graph.Network``.

    Raises:
        ValueError: if ``stim_mode`` is not one of the three known modes.
            (Previously an unknown mode fell through silently and crashed
            later with a NameError.)
    """
    N = width * height
    net = graph.Network("net")
    decoders = np.ones((1, N)) * d_val
    if stim_mode == 'both':
        _w1, _w2 = 1, -1
    elif stim_mode == 'exc':
        _w1, _w2 = 1, 1
    elif stim_mode == 'inh':
        _w1, _w2 = -1, -1
    else:
        raise ValueError(
            "stim_mode must be 'both', 'exc' or 'inh', got %r" % (stim_mode,)
        )
    # Tap every other row/column; list all taps with _w1 first, then all with
    # _w2 (same order as the original duplicated loops).
    tap_idx = [y * width + x
               for y in range(0, height, 2)
               for x in range(0, width, 2)]
    tap_list = [(idx, _w1) for idx in tap_idx] + [(idx, _w2) for idx in tap_idx]
    i1 = net.create_input("i1", 1)
    p1 = net.create_pool("p1", (N, [tap_list]))
    b1 = net.create_bucket("b1", 1)
    o1 = net.create_output("o1", 1)
    net.create_connection("i1_to_p1", i1, p1, None)
    net.create_connection("p1_to_b1", p1, b1, decoders)
    net.create_connection("b1_to_o1", b1, o1, None)
    return net
# TAT has only 1024 entries. Since we are hitting the same synapse twice, we can only use 1024 somas or 512 synapses.
# Hence use maximum 32x32 somas
WIDTH = 16  # pool width in somas
HEIGHT = 16  # pool height in somas
DECIMATION = 100  # decode weights are set to 1/DECIMATION in the network
def setup_exp(stim_type='both'):
    # Build and map the decode/encode network, then configure the chip:
    # open all diffusor cuts, disable somas that are not under a synapse,
    # normalize soma gain/offset, and set explicit DAC bias values.
    # Returns the mapped network with traffic started and output recording on.
    net = create_decode_encode_network(WIDTH, HEIGHT, 1./DECIMATION, stim_type)
    HAL.map(net)
    bddriver = HAL.driver
    CORE = 0
    for addr in range(256):
        bddriver.OpenDiffusorAllCuts(CORE, addr)
    # Disable all soma that are not under a synapse
    # Enumerate the full 64x64 soma grid.
    xaddr, yaddr = [_x.flatten() for _x in np.meshgrid(np.arange(0, 64), np.arange(0, 64), indexing='xy')]
    for _x, _y in zip(xaddr, yaddr):
        soma_addr = bddriver.GetSomaAERAddr(_x, _y)
        # Synapses span 2x2 somas; syn_row is the synapse row this soma falls in.
        syn_row = int(np.floor(_y / 2))
        # Odd row somas are not under a synapse
        if (_y % 2) == 1:
            bddriver.DisableSoma(CORE, soma_addr)
        # Odd row synapses have odd column somas under
        if (syn_row % 2) == 0:
            if (_x % 2) == 1:
                bddriver.DisableSoma(CORE, soma_addr)
        # Even row synapses have even column somas under
        else:
            if (_x % 2) == 0:
                bddriver.DisableSoma(CORE, soma_addr)
        # Uniform gain/offset for every soma (applied to disabled ones too).
        bddriver.SetSomaGain(CORE, soma_addr, bd.bdpars.SomaGainId.ONE)
        bddriver.SetSomaOffsetSign(CORE, soma_addr, bd.bdpars.SomaOffsetSignId.POSITIVE)
        bddriver.SetSomaOffsetMultiplier(CORE, soma_addr, bd.bdpars.SomaOffsetMultiplierId.ONE)
    print("[INFO] Explicitly setting DAC values")
    # EXC and INH strengths are set symmetric (both 512) for the balance test.
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_EXC , 512)
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_DC  , 638)
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_INH , 512)
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_LK  , 10)
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PD  , 50)
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SYN_PU  , 1024)
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_DIFF_G  , 512)
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_DIFF_R  , 1)
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SOMA_OFFSET, 10)
    bddriver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SOMA_REF   , 512)
    HAL.flush()
    HAL.enable_output_recording()
    HAL.start_traffic()
    return net
def measure_rate(net, input_rate):
    """Drive the network input at ``input_rate`` and return the decoded
    output rate in Hz, averaged over a 5 s measurement window."""
    spike_gen = net.get_inputs()[0]
    HAL.set_input_rate(spike_gen, 0, input_rate, 0)
    # Let the network settle: the first two seconds of output are discarded.
    time.sleep(2)
    HAL.get_outputs()
    # if rate of accumulator tripping / decode rate < 20M, else TX probably saturated
    if HAL.get_overflow_counts() > 0:
        print("WARNING: Overflow detected")
    # Collect the actual measurement window.
    time.sleep(5)
    samples = HAL.get_outputs()  # rows: [t_ns, id_op(o1), dim(0), count]
    if HAL.get_overflow_counts() > 0:
        print("WARNING: Overflow detected")
    window_s = (samples[-1, 0] - samples[0, 0]) * 1e-9  # ns -> s
    return np.sum(samples[:, 3]) / window_s  # Hz
def run_exp(FREQ_LIST, stim_type='both'):
    """Sweep the input frequencies in FREQ_LIST and measure the output rate.

    Args:
        FREQ_LIST: iterable of input frequencies (Hz) to sweep.
        stim_type: tap-weight mode forwarded to the network builder
            ('both', 'exc' or 'inh').

    Returns:
        (results, res_error): output rates in Hz, and the percentage change of
        each measurement relative to the first one (res_error[0] is always 0).
    """
    net = setup_exp(stim_type)
    # dtype=float: the np.float alias was removed in NumPy 1.24 and would
    # raise AttributeError on modern NumPy.
    results = np.zeros_like(FREQ_LIST, dtype=float)
    res_error = np.zeros_like(FREQ_LIST, dtype=float)
    for _idx, _freq in enumerate(FREQ_LIST):
        print("[INFO] freq = %d" % _freq)
        _res = measure_rate(net, _freq)
        results[_idx] = _res
        # Percentage deviation from the first (baseline) measurement.
        _err = _res / results[0] - 1
        res_error[_idx] = 100 * _err
    return (results, res_error)
#FREQ_LIST = np.array([int(_x) for _x in 10**np.linspace(0, 4, 10)], dtype=np.int)
# dtype=int: the np.int alias was removed in NumPy 1.24 and would raise
# AttributeError on modern NumPy.
FREQ_LIST = np.linspace(1, 1000, 11, dtype=int)
res = dict()
err = dict()
# Run the sweep once per stimulation mode.
for _stype in ['both', 'exc', 'inh']:
    print("[INFO] Running %s" % _stype)
    res[_stype], err[_stype] = run_exp(FREQ_LIST, _stype)
# NOTE(review): the message says "error" but this prints the output *rates*
# (res['both']), not the percentage errors (err['both']) — confirm intent.
print("[INFO] max common-mode error = %g" % np.amax(np.abs(res['both'])))
# Persist raw results; `with` guarantees the file is closed on error.
with open("syn_balance_test.pickle", "wb") as OF:
    pickle.dump((FREQ_LIST, res, err), OF)
# Top panel: exc/inh responses relative to the common-mode baseline.
plt.subplot(211)
plt.plot(FREQ_LIST, res['exc'] - res['both'][0], 'd-', label='exc')
plt.plot(-FREQ_LIST, res['inh'] - res['both'][0], 'o-', label='inh')
plt.ylabel("Output Frequency (Hz)")
plt.xlabel("Input Frequency (Hz)")
plt.grid(True)
plt.legend(loc='best')
# Bottom-left: common-mode response.
plt.subplot(223)
plt.plot(FREQ_LIST, res['both'], '.-', label='both')
plt.ylabel("Output Frequency (Hz)")
plt.xlabel("Input Frequency (Hz)")
plt.grid(True)
plt.legend(loc='best')
# Bottom-right: percentage errors for all three modes.
plt.subplot(224)
plt.plot(FREQ_LIST, err['both'], '.-', label='both')
plt.plot(FREQ_LIST, err['exc'], 'd-', label='exc')
plt.plot(FREQ_LIST, err['inh'], 'o-', label='inh')
plt.ylabel("% Error")
plt.xlabel("Input Frequency (Hz)")
plt.grid(True)
plt.tight_layout()
plt.savefig("syn_balance_test.pdf")
| [
"pickle.dump",
"numpy.sum",
"numpy.abs",
"pystorm.hal.HAL",
"numpy.floor",
"numpy.ones",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"numpy.zeros_like",
"numpy.linspace",
"pystorm.hal.HAL.get_overflow_counts",
"pystorm.hal.HAL.get_outputs",
"pystorm.hal.HAL.enable_output_recording",
... | [((70, 84), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (77, 84), True, 'import matplotlib as mpl\n'), ((274, 279), 'pystorm.hal.HAL', 'HAL', ([], {}), '()\n', (277, 279), False, 'from pystorm.hal import HAL\n'), ((4945, 4983), 'numpy.linspace', 'np.linspace', (['(1)', '(1000)', '(11)'], {'dtype': 'np.int'}), '(1, 1000, 11, dtype=np.int)\n', (4956, 4983), True, 'import numpy as np\n'), ((5266, 5304), 'pickle.dump', 'pickle.dump', (['(FREQ_LIST, res, err)', 'OF'], {}), '((FREQ_LIST, res, err), OF)\n', (5277, 5304), False, 'import pickle\n'), ((5321, 5337), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (5332, 5337), True, 'import matplotlib.pyplot as plt\n'), ((5338, 5405), 'matplotlib.pyplot.plot', 'plt.plot', (['FREQ_LIST', "(res['exc'] - res['both'][0])", '"""d-"""'], {'label': '"""exc"""'}), "(FREQ_LIST, res['exc'] - res['both'][0], 'd-', label='exc')\n", (5346, 5405), True, 'import matplotlib.pyplot as plt\n'), ((5407, 5475), 'matplotlib.pyplot.plot', 'plt.plot', (['(-FREQ_LIST)', "(res['inh'] - res['both'][0])", '"""o-"""'], {'label': '"""inh"""'}), "(-FREQ_LIST, res['inh'] - res['both'][0], 'o-', label='inh')\n", (5415, 5475), True, 'import matplotlib.pyplot as plt\n'), ((5476, 5511), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Output Frequency (Hz)"""'], {}), "('Output Frequency (Hz)')\n", (5486, 5511), True, 'import matplotlib.pyplot as plt\n'), ((5512, 5546), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Input Frequency (Hz)"""'], {}), "('Input Frequency (Hz)')\n", (5522, 5546), True, 'import matplotlib.pyplot as plt\n'), ((5547, 5561), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5555, 5561), True, 'import matplotlib.pyplot as plt\n'), ((5562, 5584), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5572, 5584), True, 'import matplotlib.pyplot as plt\n'), ((5586, 5602), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), 
'(223)\n', (5597, 5602), True, 'import matplotlib.pyplot as plt\n'), ((5603, 5655), 'matplotlib.pyplot.plot', 'plt.plot', (['FREQ_LIST', "res['both']", '""".-"""'], {'label': '"""both"""'}), "(FREQ_LIST, res['both'], '.-', label='both')\n", (5611, 5655), True, 'import matplotlib.pyplot as plt\n'), ((5656, 5691), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Output Frequency (Hz)"""'], {}), "('Output Frequency (Hz)')\n", (5666, 5691), True, 'import matplotlib.pyplot as plt\n'), ((5692, 5726), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Input Frequency (Hz)"""'], {}), "('Input Frequency (Hz)')\n", (5702, 5726), True, 'import matplotlib.pyplot as plt\n'), ((5727, 5741), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5735, 5741), True, 'import matplotlib.pyplot as plt\n'), ((5742, 5764), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5752, 5764), True, 'import matplotlib.pyplot as plt\n'), ((5766, 5782), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (5777, 5782), True, 'import matplotlib.pyplot as plt\n'), ((5783, 5835), 'matplotlib.pyplot.plot', 'plt.plot', (['FREQ_LIST', "err['both']", '""".-"""'], {'label': '"""both"""'}), "(FREQ_LIST, err['both'], '.-', label='both')\n", (5791, 5835), True, 'import matplotlib.pyplot as plt\n'), ((5836, 5886), 'matplotlib.pyplot.plot', 'plt.plot', (['FREQ_LIST', "err['exc']", '"""d-"""'], {'label': '"""exc"""'}), "(FREQ_LIST, err['exc'], 'd-', label='exc')\n", (5844, 5886), True, 'import matplotlib.pyplot as plt\n'), ((5887, 5937), 'matplotlib.pyplot.plot', 'plt.plot', (['FREQ_LIST', "err['inh']", '"""o-"""'], {'label': '"""inh"""'}), "(FREQ_LIST, err['inh'], 'o-', label='inh')\n", (5895, 5937), True, 'import matplotlib.pyplot as plt\n'), ((5938, 5959), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% Error"""'], {}), "('% Error')\n", (5948, 5959), True, 'import matplotlib.pyplot as plt\n'), ((5960, 5994), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""Input Frequency (Hz)"""'], {}), "('Input Frequency (Hz)')\n", (5970, 5994), True, 'import matplotlib.pyplot as plt\n'), ((5995, 6009), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6003, 6009), True, 'import matplotlib.pyplot as plt\n'), ((6011, 6029), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6027, 6029), True, 'import matplotlib.pyplot as plt\n'), ((6031, 6066), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""syn_balance_test.pdf"""'], {}), "('syn_balance_test.pdf')\n", (6042, 6066), True, 'import matplotlib.pyplot as plt\n'), ((388, 408), 'pystorm.hal.neuromorph.graph.Network', 'graph.Network', (['"""net"""'], {}), "('net')\n", (401, 408), False, 'from pystorm.hal.neuromorph import graph\n'), ((1654, 1666), 'pystorm.hal.HAL.map', 'HAL.map', (['net'], {}), '(net)\n', (1661, 1666), False, 'from pystorm.hal import HAL\n'), ((3577, 3588), 'pystorm.hal.HAL.flush', 'HAL.flush', ([], {}), '()\n', (3586, 3588), False, 'from pystorm.hal import HAL\n'), ((3594, 3623), 'pystorm.hal.HAL.enable_output_recording', 'HAL.enable_output_recording', ([], {}), '()\n', (3621, 3623), False, 'from pystorm.hal import HAL\n'), ((3628, 3647), 'pystorm.hal.HAL.start_traffic', 'HAL.start_traffic', ([], {}), '()\n', (3645, 3647), False, 'from pystorm.hal import HAL\n'), ((3826, 3839), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3836, 3839), False, 'import time\n'), ((3844, 3861), 'pystorm.hal.HAL.get_outputs', 'HAL.get_outputs', ([], {}), '()\n', (3859, 3861), False, 'from pystorm.hal import HAL\n'), ((3958, 3983), 'pystorm.hal.HAL.get_overflow_counts', 'HAL.get_overflow_counts', ([], {}), '()\n', (3981, 3983), False, 'from pystorm.hal import HAL\n'), ((4081, 4094), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4091, 4094), False, 'import time\n'), ((4105, 4122), 'pystorm.hal.HAL.get_outputs', 'HAL.get_outputs', ([], {}), '()\n', (4120, 4122), False, 'from pystorm.hal import HAL\n'), ((4176, 4201), 
'pystorm.hal.HAL.get_overflow_counts', 'HAL.get_overflow_counts', ([], {}), '()\n', (4199, 4201), False, 'from pystorm.hal import HAL\n'), ((4485, 4525), 'numpy.zeros_like', 'np.zeros_like', (['FREQ_LIST'], {'dtype': 'np.float'}), '(FREQ_LIST, dtype=np.float)\n', (4498, 4525), True, 'import numpy as np\n'), ((4542, 4582), 'numpy.zeros_like', 'np.zeros_like', (['FREQ_LIST'], {'dtype': 'np.float'}), '(FREQ_LIST, dtype=np.float)\n', (4555, 4582), True, 'import numpy as np\n'), ((424, 439), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (431, 439), True, 'import numpy as np\n'), ((4336, 4353), 'numpy.sum', 'np.sum', (['res[:, 3]'], {}), '(res[:, 3])\n', (4342, 4353), True, 'import numpy as np\n'), ((2064, 2080), 'numpy.floor', 'np.floor', (['(_y / 2)'], {}), '(_y / 2)\n', (2072, 2080), True, 'import numpy as np\n'), ((5200, 5219), 'numpy.abs', 'np.abs', (["res['both']"], {}), "(res['both'])\n", (5206, 5219), True, 'import numpy as np\n'), ((1901, 1917), 'numpy.arange', 'np.arange', (['(0)', '(64)'], {}), '(0, 64)\n', (1910, 1917), True, 'import numpy as np\n'), ((1919, 1935), 'numpy.arange', 'np.arange', (['(0)', '(64)'], {}), '(0, 64)\n', (1928, 1935), True, 'import numpy as np\n')] |
"""
This program computes the logistic map equation.
The logistic map equation is a second degree polynomial equation often used as an
example in the discussions of chaos
More information:
wiki: https://en.wikipedia.org/wiki/Logistic_map#Finding_cycles_of_any_length_when_r_=_4
Author: <NAME>
github: https://github.com/vharivinay
"""
# IMPORTS
import numpy as np
import matplotlib.pyplot as plt
from numba import jit
import time
# @jit(nopython=True)  # uncomment to JIT-compile the sweep with numba
def lmap_compute(xn=4, r=0.0015):
    """
    Compute the bifurcation diagram of the logistic map x_{n+1} = rate*(x - x^2).

    Sweeps the growth parameter from 0 (inclusive) to ``xn`` (exclusive) in
    steps of ``r``, iterates each map to (near) equilibrium, and records the
    attractor values.

    Args:
        xn: upper bound of the growth-parameter sweep (default 4, the
            classical chaotic limit of the logistic map).
        r: step size of the sweep. NOTE: despite the name, this is the sweep
            resolution, not the growth rate itself.

    Returns:
        (rvals, xvals): parallel lists of growth-rate values and the
        corresponding attractor points.
    """
    rvals = []
    xvals = []
    # ``rate`` instead of ``r``: the original loop variable shadowed the
    # step-size parameter, which was confusing and bug-prone.
    for rate in np.arange(0, xn, r):
        xold = 0.5
        # Burn-in: 2000 iterations so transients die out.
        for _ in range(2000):
            xold = (xold - xold ** 2) * rate
        # Reference point on the (near-)attractor.
        xsteady = xold
        # Record up to 1001 post-transient points; stop early once the orbit
        # returns within 1e-3 of the reference point (cycle closed).
        for _ in range(1001):
            xnew = (xold - xold ** 2) * rate
            xold = xnew
            rvals.append(rate)
            xvals.append(xnew)
            if abs(xnew - xsteady) < 0.001:
                break
    return rvals, xvals
# Run the main function
# Define Inputs
xn = 4        # upper bound of the growth-rate sweep
r = 0.0025    # step size between successive growth rates
tic = time.perf_counter()
rvals, xvals = lmap_compute(xn, r)
toc = time.perf_counter()
print("computation time: ", abs(toc - tic))
# Visualization
f = plt.figure(figsize=(16, 12))
plt.subplot(111)
# One tiny dot per recorded iterate keeps the dense bands readable.
ax1 = plt.scatter(rvals, xvals, s=0.05)
# Zoom to the upper part of the sweep (3.447 <= r <= 4.0), where the
# branching structure of the bifurcation diagram lives.
plt.xlim(3.447, 4.0)
plt.ylim(0, 1)
plt.axis("off")
# NOTE(review): savefig runs after plt.show(); with a blocking backend the
# PNG is only written once the window is closed -- confirm this is intended.
# ("bifircation" is a typo in the artifact name, kept to preserve output.)
plt.show()
f.savefig("bifircation-plot_r{}.png".format(r), bbox_inches="tight", dpi=400)
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"time.perf_counter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.arange"
] | [((1341, 1360), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1358, 1360), False, 'import time\n'), ((1404, 1423), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1421, 1423), False, 'import time\n'), ((1495, 1523), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (1505, 1523), True, 'import matplotlib.pyplot as plt\n'), ((1525, 1541), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1536, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1549, 1582), 'matplotlib.pyplot.scatter', 'plt.scatter', (['rvals', 'xvals'], {'s': '(0.05)'}), '(rvals, xvals, s=0.05)\n', (1560, 1582), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1604), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(3.447)', '(4.0)'], {}), '(3.447, 4.0)\n', (1592, 1604), True, 'import matplotlib.pyplot as plt\n'), ((1606, 1620), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1614, 1620), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1637), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1630, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1639, 1649), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1647, 1649), True, 'import matplotlib.pyplot as plt\n'), ((695, 714), 'numpy.arange', 'np.arange', (['(0)', 'xn', 'r'], {}), '(0, xn, r)\n', (704, 714), True, 'import numpy as np\n')] |
import os
from typing import Optional, Dict, List
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import numpy as np
import seaborn as sns
from tqdm import tqdm
from models.polyfit_model import PolyfitModel
from models.ridge_polyfit_model import RidgePolyfitModel
import evaluate_model
from models.model import Model
import data_generator
import constants as C
# Apply seaborn's default styling to all matplotlib figures in this module.
sns.set()
# Number of training points drawn per generated dataset.
N_POINTS = 8
def raw_data_intro():
    """Produce the three intro figures under images/raw/intro2/.

    1. scatter.png  - the raw training points alone.
    2. poly.png     - the same points with polynomial fits of degree 0..4.
    3. combined.png - 100 degree-2 model fits, their average, and the truth.
    """
    savedir = os.path.join("images", "raw", "intro2")
    os.makedirs(savedir, exist_ok=True)
    n = N_POINTS  # number of training points
    m = 5         # number of polynomial degrees to overlay (0..m-1)
    s=50          # marker size for scatter points
    points = data_generator.generate_data(n, "fixed")
    ylim = (-105, 205)
    xlim = (-0.5, 10.5)
    cmap = get_cmap("gnuplot2")
    # Figure 1: just points
    plt.figure()
    plt.scatter(
        points[:, 0],
        points[:, 1],
        s=s,
        edgecolor='k',
        facecolor="none",
    )
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.ylim(*ylim)
    plt.xlim(*xlim)
    plt.tight_layout()
    plt.savefig(os.path.join(savedir, f"scatter.png"))
    plt.close()
    # Figure 2: the points plus polynomial fits of increasing degree.
    predictions = []
    plt.figure()
    plt.scatter(
        points[:, 0],
        points[:, 1],
        s=s,
        edgecolor='k',
        facecolor="none",
        zorder=60,
    )
    for deg in range(m):
        model = PolyfitModel(
            x_vals=points[:, 0],
            y_vals=points[:, 1],
            deg=deg
        )
        model.fit()
        _predictions = model.predict(evaluate_model.X_TEST)
        predictions.append(_predictions)
        # NOTE(review): `mse` is computed but never used below.
        mse = np.mean((model.predict(points[:, 0]) - points[:, 1])**2)
        plt.plot(
            evaluate_model.X_TEST,
            predictions[-1],
            label=f"Deg {deg}",
            c=cmap((1+deg)/(1+m)),  # distinct color per degree
            zorder=50,
        )
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.ylim(*ylim)
    plt.xlim(*xlim)
    legend = plt.legend(loc='upper center', framealpha=1.0)
    legend.get_frame().set_alpha(0.8)
    legend.get_frame().set_facecolor((1, 1, 1, 0.8))
    legend.set_zorder(100)
    plt.tight_layout()
    plt.savefig(os.path.join(savedir, f"poly.png"))
    plt.close()
    # Figure 3: an ensemble of degree-2 fits, their average, and the truth.
    # NOTE(review): this makedirs call is redundant (already done above) and
    # num_data = 8 duplicates the module constant N_POINTS.
    os.makedirs(savedir, exist_ok=True)
    model_type = PolyfitModel
    num_data = 8
    num_models = 100
    model_kwargs = {"deg": 2}
    predictions = evaluate_model.get_model_predictions(
        model_type=model_type,
        num_data=num_data,
        num_models=num_models,
        model_kwargs=model_kwargs,
    )
    ## Upper Plot: Many Models
    for i in range(predictions.shape[0]):
        label = "Models" if i == 0 else None  # label the family only once
        plt.plot(
            evaluate_model.X_TEST,
            predictions[i, :],
            c='blue',
            alpha=0.8,
            linewidth=0.1,
            zorder=50,
            label=label,
        )
    plt.plot(
        evaluate_model.X_TEST,
        np.mean(predictions, axis=0),  # pointwise mean across all models
        c='red',
        alpha=1,
        zorder=55,
        label="Average Model"
    )
    plt.plot(
        evaluate_model.X_TEST,
        evaluate_model.Y_TEST,
        c='k',
        alpha=1,
        zorder=60,
        label="Truth",
    )
    legend = plt.legend(loc='upper left', framealpha=1.0)
    legend.get_frame().set_alpha(0.8)
    legend.get_frame().set_facecolor((1, 1, 1, 0.8))
    legend.set_zorder(100)
    plt.ylim(-55, 205)
    plt.xlim(-0.5, 10.5)
    plt.suptitle(f'Polynomial (Deg={2})', fontsize=16)
    plt.tight_layout()
    plt.savefig(os.path.join(savedir, f"combined.png"))
    plt.close()
def raw_data_scatterplot():
    """Render one scatter frame per sample size, from 8 up to 100000 points.

    Sample sizes are geometrically spaced (200 frames); each frame is
    written to images/raw/scatter/ as scatter.<size>.<frame>.png.
    """
    out_dir = os.path.join("images", "raw", "scatter")
    os.makedirs(out_dir, exist_ok=True)
    print("Raw Data")
    sizes = np.geomspace(8, 100000, 200)
    for frame, size in tqdm(enumerate(sizes), total=len(sizes)):
        size = int(size)
        data = data_generator.generate_data(size, "fixed")
        plt.scatter(data[:, 0], data[:, 1], s=10, edgecolor='k', facecolor="none")
        plt.xlabel("X")
        plt.ylabel("Y")
        plt.ylim(-105, 205)
        plt.xlim(-0.5, 10.5)
        plt.savefig(os.path.join(out_dir, f"scatter.{size:06d}.{frame}.png"))
        plt.close()
def make_single_model_plot(
    model_type: type(Model),
    num_data: int,
    num_models: int,
    model_kwargs: Optional[Dict] = None,
) -> None:
    """Draw a two-panel bias/variance diagnostic on a new figure.

    Top panel:    every fitted model's prediction curve, their pointwise
                  average, and the ground truth.
    Bottom panel: pointwise bias-squared and variance of the ensemble, plus
                  their sum ("error"), with averages shown in the legend.

    Args:
        model_type: model class to instantiate (see models.model.Model).
        num_data: number of training points per generated dataset.
        num_models: number of model fits requested from evaluate_model.
        model_kwargs: extra keyword arguments forwarded to the model.

    The caller is expected to title/save/close the figure afterwards.
    """
    # Collect Data
    predictions = evaluate_model.get_model_predictions(
        model_type=model_type,
        num_data=num_data,
        num_models=num_models,
        model_kwargs=model_kwargs,
    )
    bias, variance = evaluate_model.get_bias_and_variance(
        model_type=model_type,
        num_data=num_data,
        model_kwargs=model_kwargs
    )
    avg_squared_bias = np.mean(bias**2)
    avg_variance = np.mean(variance)
    # Make Plots
    # NOTE(review): `fig` handle is unused; callers save via pyplot state.
    fig, ax = plt.subplots(2, sharex=True, figsize=(8,6))
    ## Upper Plot: Many Models
    for i in range(predictions.shape[0]):
        label = "Models" if i == 0 else None  # label the family only once
        ax[0].plot(
            evaluate_model.X_TEST,
            predictions[i, :],
            c='blue',
            alpha=0.8,
            linewidth=0.1,
            zorder=50,
            label=label,
        )
    ax[0].plot(
        evaluate_model.X_TEST,
        np.mean(predictions, axis=0),  # pointwise ensemble average
        c='red',
        alpha=1,
        zorder=55,
        label="Average Model"
    )
    ax[0].plot(
        evaluate_model.X_TEST,
        evaluate_model.Y_TEST,
        c='k',
        alpha=1,
        zorder=60,
        label="Truth",
    )
    legend = ax[0].legend(loc='upper left', framealpha=1.0)
    legend.get_frame().set_alpha(0.8)
    legend.get_frame().set_facecolor((1, 1, 1, 0.8))
    legend.set_zorder(100)
    ax[0].set_ylim(-55, 205)
    ax[0].set_xlim(-0.5, 10.5)
    # Bottom Plot: Bias and Variance
    ax[1].plot(
        evaluate_model.X_TEST,
        bias**2,
        label=f"bias² (avg={int(avg_squared_bias)})",
        c='red',
        zorder=50,
    )
    ax[1].plot(
        evaluate_model.X_TEST,
        variance,
        label=f"variance (avg={int(avg_variance)})",
        c='green',
        zorder=50,
    )
    ax[1].plot(
        evaluate_model.X_TEST,
        bias**2 + variance,
        label=f"error (avg={int(avg_squared_bias + avg_variance)})",
        c='blue',
        linestyle=":",
        linewidth=3,
        zorder=60,
    )
    ax[1].set_ylim(-50, 1200)
    legend = ax[1].legend(loc='upper left', framealpha=1.0)
    legend.set_zorder(100)
    legend.get_frame().set_alpha(0.8)
    legend.get_frame().set_facecolor((1, 1, 1, 0.8))
def make_model_complexity_plot(
    model_type: type(Model),
    num_data: int,
    model_kwargs_list: List[Dict],
) -> None:
    """Plot total squared bias, variance, and their sum versus model index.

    One bias/variance estimate is computed per entry of *model_kwargs_list*
    (each entry configures one model variant); the three resulting curves
    are drawn on the current pyplot figure. The caller saves/closes it.
    """
    sq_biases = []
    var_totals = []
    for kwargs in model_kwargs_list:
        bias, variance = evaluate_model.get_bias_and_variance(
            model_type=model_type,
            num_data=num_data,
            model_kwargs=kwargs,
        )
        sq_biases.append(np.sum(bias ** 2))
        var_totals.append(np.sum(variance))
    # Total error per model = squared bias + variance.
    totals = [v + b for v, b in zip(var_totals, sq_biases)]
    plt.plot(np.arange(len(sq_biases)), sq_biases, c="red", label="bias²", zorder=50)
    plt.plot(np.arange(len(var_totals)), var_totals, c="green", label="variance", zorder=50)
    plt.plot(
        np.arange(len(totals)),
        totals,
        c="blue",
        label="error",
        zorder=60,
        linestyle=":",
        linewidth=3,
    )
    legend = plt.legend(loc='upper center', framealpha=1.0)
    legend.set_zorder(100)
    legend.get_frame().set_alpha(0.8)
    legend.get_frame().set_facecolor((1, 1, 1, 0.8))
def polyfit_plots() -> None:
    """Generate all plain-polyfit figures under images/performance/polyfit.

    Writes one two-panel bias/variance figure per polynomial degree
    (0..N_POINTS-1) plus a summary curve of error versus degree.
    """
    out_dir = os.path.join("images", "performance", "polyfit")
    os.makedirs(out_dir, exist_ok=True)
    n_points = N_POINTS
    print("Polyfit:")
    # One figure per polynomial degree.
    for degree in tqdm(range(n_points), total=n_points):
        make_single_model_plot(
            model_type=PolyfitModel,
            num_data=n_points,
            num_models=100,
            model_kwargs={"deg": degree},
        )
        plt.suptitle(f'Polynomial (Deg={degree})', fontsize=16)
        plt.tight_layout()
        plt.savefig(os.path.join(out_dir, f"error.d{degree:02d}.png"))
        plt.close()
    # Summary: error as a function of model complexity (degree).
    make_model_complexity_plot(
        model_type=PolyfitModel,
        num_data=n_points,
        model_kwargs_list=[{"deg": d} for d in range(n_points)],
    )
    plt.xticks(np.arange(n_points), np.arange(n_points))
    plt.xlabel("Polynomial Degree")
    plt.suptitle('Polynomial Degree Vs. Error', fontsize=16)
    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, "error.png"))
    plt.close()
def ridge_plots():
    """Generate all ridge-polyfit figures under images/performance/ridge.

    Mirrors polyfit_plots() but uses RidgePolyfitModel with a fixed
    regularization strength (lambda = 1).
    """
    out_dir = os.path.join("images", "performance", "ridge")
    os.makedirs(out_dir, exist_ok=True)
    n_points = N_POINTS
    lam = 1
    print("ridge:")
    # One figure per polynomial degree, at fixed regularization.
    for degree in tqdm(range(n_points), total=n_points):
        make_single_model_plot(
            model_type=RidgePolyfitModel,
            num_data=n_points,
            num_models=100,
            model_kwargs={"deg": degree, "lam": lam},
        )
        plt.suptitle(f'Ridge Polynomial (Deg={degree}, lambda={lam})', fontsize=16)
        plt.tight_layout()
        plt.savefig(os.path.join(out_dir, f"error.d{degree:02d}.png"))
        plt.close()
    # Summary: error as a function of model complexity (degree).
    make_model_complexity_plot(
        model_type=RidgePolyfitModel,
        num_data=n_points,
        model_kwargs_list=[{"deg": d, "lam": lam} for d in range(n_points)],
    )
    plt.xticks(np.arange(n_points), np.arange(n_points))
    plt.xlabel("Polynomial Degree")
    plt.suptitle('Ridge Polynomial Degree Vs. Error', fontsize=16)
    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, "error.png"))
    plt.close()
def main():
    """Produce the intro figures and the plain-polyfit performance sweep.

    NOTE(review): raw_data_scatterplot() and ridge_plots() exist in this
    module but are not invoked here.
    """
    raw_data_intro()
    polyfit_plots()
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"data_generator.generate_data",
"numpy.sum",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.geomspace",
"models.polyfit_model.PolyfitModel",
... | [((384, 393), 'seaborn.set', 'sns.set', ([], {}), '()\n', (391, 393), True, 'import seaborn as sns\n'), ((445, 484), 'os.path.join', 'os.path.join', (['"""images"""', '"""raw"""', '"""intro2"""'], {}), "('images', 'raw', 'intro2')\n", (457, 484), False, 'import os\n'), ((489, 524), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (500, 524), False, 'import os\n'), ((574, 614), 'data_generator.generate_data', 'data_generator.generate_data', (['n', '"""fixed"""'], {}), "(n, 'fixed')\n", (602, 614), False, 'import data_generator\n'), ((673, 693), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""gnuplot2"""'], {}), "('gnuplot2')\n", (681, 693), False, 'from matplotlib.cm import get_cmap\n'), ((717, 729), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (727, 729), True, 'import matplotlib.pyplot as plt\n'), ((734, 811), 'matplotlib.pyplot.scatter', 'plt.scatter', (['points[:, 0]', 'points[:, 1]'], {'s': 's', 'edgecolor': '"""k"""', 'facecolor': '"""none"""'}), "(points[:, 0], points[:, 1], s=s, edgecolor='k', facecolor='none')\n", (745, 811), True, 'import matplotlib.pyplot as plt\n'), ((863, 878), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (873, 878), True, 'import matplotlib.pyplot as plt\n'), ((883, 898), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (893, 898), True, 'import matplotlib.pyplot as plt\n'), ((903, 918), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (911, 918), True, 'import matplotlib.pyplot as plt\n'), ((923, 938), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*xlim'], {}), '(*xlim)\n', (931, 938), True, 'import matplotlib.pyplot as plt\n'), ((943, 961), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (959, 961), True, 'import matplotlib.pyplot as plt\n'), ((1021, 1032), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1030, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1059, 
1071), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1069, 1071), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1169), 'matplotlib.pyplot.scatter', 'plt.scatter', (['points[:, 0]', 'points[:, 1]'], {'s': 's', 'edgecolor': '"""k"""', 'facecolor': '"""none"""', 'zorder': '(60)'}), "(points[:, 0], points[:, 1], s=s, edgecolor='k', facecolor=\n 'none', zorder=60)\n", (1087, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1757, 1772), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (1767, 1772), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (1787, 1792), True, 'import matplotlib.pyplot as plt\n'), ((1797, 1812), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (1805, 1812), True, 'import matplotlib.pyplot as plt\n'), ((1817, 1832), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*xlim'], {}), '(*xlim)\n', (1825, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1892), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'framealpha': '(1.0)'}), "(loc='upper center', framealpha=1.0)\n", (1856, 1892), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2033), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2031, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2101), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2099, 2101), True, 'import matplotlib.pyplot as plt\n'), ((2107, 2142), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (2118, 2142), False, 'import os\n'), ((2259, 2392), 'evaluate_model.get_model_predictions', 'evaluate_model.get_model_predictions', ([], {'model_type': 'model_type', 'num_data': 'num_data', 'num_models': 'num_models', 'model_kwargs': 'model_kwargs'}), '(model_type=model_type, num_data=\n num_data, num_models=num_models, model_kwargs=model_kwargs)\n', (2295, 2392), False, 
'import evaluate_model\n'), ((2936, 3036), 'matplotlib.pyplot.plot', 'plt.plot', (['evaluate_model.X_TEST', 'evaluate_model.Y_TEST'], {'c': '"""k"""', 'alpha': '(1)', 'zorder': '(60)', 'label': '"""Truth"""'}), "(evaluate_model.X_TEST, evaluate_model.Y_TEST, c='k', alpha=1,\n zorder=60, label='Truth')\n", (2944, 3036), True, 'import matplotlib.pyplot as plt\n'), ((3101, 3145), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'framealpha': '(1.0)'}), "(loc='upper left', framealpha=1.0)\n", (3111, 3145), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3286), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-55)', '(205)'], {}), '(-55, 205)\n', (3276, 3286), True, 'import matplotlib.pyplot as plt\n'), ((3291, 3311), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.5)', '(10.5)'], {}), '(-0.5, 10.5)\n', (3299, 3311), True, 'import matplotlib.pyplot as plt\n'), ((3316, 3366), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Polynomial (Deg={2})"""'], {'fontsize': '(16)'}), "(f'Polynomial (Deg={2})', fontsize=16)\n", (3328, 3366), True, 'import matplotlib.pyplot as plt\n'), ((3371, 3389), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3387, 3389), True, 'import matplotlib.pyplot as plt\n'), ((3450, 3461), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3459, 3461), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3550), 'os.path.join', 'os.path.join', (['"""images"""', '"""raw"""', '"""scatter"""'], {}), "('images', 'raw', 'scatter')\n", (3522, 3550), False, 'import os\n'), ((3555, 3590), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (3566, 3590), False, 'import os\n'), ((3626, 3654), 'numpy.geomspace', 'np.geomspace', (['(8)', '(100000)', '(200)'], {}), '(8, 100000, 200)\n', (3638, 3654), True, 'import numpy as np\n'), ((4371, 4504), 'evaluate_model.get_model_predictions', 'evaluate_model.get_model_predictions', ([], {'model_type': 'model_type', 
'num_data': 'num_data', 'num_models': 'num_models', 'model_kwargs': 'model_kwargs'}), '(model_type=model_type, num_data=\n num_data, num_models=num_models, model_kwargs=model_kwargs)\n', (4407, 4504), False, 'import evaluate_model\n'), ((4564, 4674), 'evaluate_model.get_bias_and_variance', 'evaluate_model.get_bias_and_variance', ([], {'model_type': 'model_type', 'num_data': 'num_data', 'model_kwargs': 'model_kwargs'}), '(model_type=model_type, num_data=\n num_data, model_kwargs=model_kwargs)\n', (4600, 4674), False, 'import evaluate_model\n'), ((4727, 4745), 'numpy.mean', 'np.mean', (['(bias ** 2)'], {}), '(bias ** 2)\n', (4734, 4745), True, 'import numpy as np\n'), ((4763, 4780), 'numpy.mean', 'np.mean', (['variance'], {}), '(variance)\n', (4770, 4780), True, 'import numpy as np\n'), ((4813, 4857), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)', 'figsize': '(8, 6)'}), '(2, sharex=True, figsize=(8, 6))\n', (4825, 4857), True, 'import matplotlib.pyplot as plt\n'), ((7625, 7671), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'framealpha': '(1.0)'}), "(loc='upper center', framealpha=1.0)\n", (7635, 7671), True, 'import matplotlib.pyplot as plt\n'), ((7885, 7933), 'os.path.join', 'os.path.join', (['"""images"""', '"""performance"""', '"""polyfit"""'], {}), "('images', 'performance', 'polyfit')\n", (7897, 7933), False, 'import os\n'), ((7938, 7973), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (7949, 7973), False, 'import os\n'), ((8778, 8809), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Polynomial Degree"""'], {}), "('Polynomial Degree')\n", (8788, 8809), True, 'import matplotlib.pyplot as plt\n'), ((8814, 8871), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Polynomial Degree Vs. Error"""'], {'fontsize': '(16)'}), "(f'Polynomial Degree Vs. 
Error', fontsize=16)\n", (8826, 8871), True, 'import matplotlib.pyplot as plt\n'), ((8876, 8894), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8892, 8894), True, 'import matplotlib.pyplot as plt\n'), ((8952, 8963), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8961, 8963), True, 'import matplotlib.pyplot as plt\n'), ((9055, 9101), 'os.path.join', 'os.path.join', (['"""images"""', '"""performance"""', '"""ridge"""'], {}), "('images', 'performance', 'ridge')\n", (9067, 9101), False, 'import os\n'), ((9106, 9141), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (9117, 9141), False, 'import os\n'), ((10005, 10036), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Polynomial Degree"""'], {}), "('Polynomial Degree')\n", (10015, 10036), True, 'import matplotlib.pyplot as plt\n'), ((10041, 10104), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Ridge Polynomial Degree Vs. Error"""'], {'fontsize': '(16)'}), "(f'Ridge Polynomial Degree Vs. 
Error', fontsize=16)\n", (10053, 10104), True, 'import matplotlib.pyplot as plt\n'), ((10109, 10127), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10125, 10127), True, 'import matplotlib.pyplot as plt\n'), ((10185, 10196), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10194, 10196), True, 'import matplotlib.pyplot as plt\n'), ((978, 1015), 'os.path.join', 'os.path.join', (['savedir', 'f"""scatter.png"""'], {}), "(savedir, f'scatter.png')\n", (990, 1015), False, 'import os\n'), ((1261, 1324), 'models.polyfit_model.PolyfitModel', 'PolyfitModel', ([], {'x_vals': 'points[:, 0]', 'y_vals': 'points[:, 1]', 'deg': 'deg'}), '(x_vals=points[:, 0], y_vals=points[:, 1], deg=deg)\n', (1273, 1324), False, 'from models.polyfit_model import PolyfitModel\n'), ((2050, 2084), 'os.path.join', 'os.path.join', (['savedir', 'f"""poly.png"""'], {}), "(savedir, f'poly.png')\n", (2062, 2084), False, 'import os\n'), ((2554, 2668), 'matplotlib.pyplot.plot', 'plt.plot', (['evaluate_model.X_TEST', 'predictions[i, :]'], {'c': '"""blue"""', 'alpha': '(0.8)', 'linewidth': '(0.1)', 'zorder': '(50)', 'label': 'label'}), "(evaluate_model.X_TEST, predictions[i, :], c='blue', alpha=0.8,\n linewidth=0.1, zorder=50, label=label)\n", (2562, 2668), True, 'import matplotlib.pyplot as plt\n'), ((2813, 2841), 'numpy.mean', 'np.mean', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (2820, 2841), True, 'import numpy as np\n'), ((3406, 3444), 'os.path.join', 'os.path.join', (['savedir', 'f"""combined.png"""'], {}), "(savedir, f'combined.png')\n", (3418, 3444), False, 'import os\n'), ((3755, 3795), 'data_generator.generate_data', 'data_generator.generate_data', (['i', '"""fixed"""'], {}), "(i, 'fixed')\n", (3783, 3795), False, 'import data_generator\n'), ((3804, 3882), 'matplotlib.pyplot.scatter', 'plt.scatter', (['points[:, 0]', 'points[:, 1]'], {'s': '(10)', 'edgecolor': '"""k"""', 'facecolor': '"""none"""'}), "(points[:, 0], points[:, 1], s=10, 
edgecolor='k', facecolor='none')\n", (3815, 3882), True, 'import matplotlib.pyplot as plt\n'), ((3966, 3981), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (3976, 3981), True, 'import matplotlib.pyplot as plt\n'), ((3990, 4005), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (4000, 4005), True, 'import matplotlib.pyplot as plt\n'), ((4014, 4033), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-105)', '(205)'], {}), '(-105, 205)\n', (4022, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4042, 4062), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.5)', '(10.5)'], {}), '(-0.5, 10.5)\n', (4050, 4062), True, 'import matplotlib.pyplot as plt\n'), ((4146, 4157), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4155, 4157), True, 'import matplotlib.pyplot as plt\n'), ((5251, 5279), 'numpy.mean', 'np.mean', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (5258, 5279), True, 'import numpy as np\n'), ((6843, 6953), 'evaluate_model.get_bias_and_variance', 'evaluate_model.get_bias_and_variance', ([], {'model_type': 'model_type', 'num_data': 'num_data', 'model_kwargs': 'model_kwargs'}), '(model_type=model_type, num_data=\n num_data, model_kwargs=model_kwargs)\n', (6879, 6953), False, 'import evaluate_model\n'), ((8351, 8403), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Polynomial (Deg={deg})"""'], {'fontsize': '(16)'}), "(f'Polynomial (Deg={deg})', fontsize=16)\n", (8363, 8403), True, 'import matplotlib.pyplot as plt\n'), ((8412, 8430), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8428, 8430), True, 'import matplotlib.pyplot as plt\n'), ((8507, 8518), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8516, 8518), True, 'import matplotlib.pyplot as plt\n'), ((8732, 8751), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (8741, 8751), True, 'import numpy as np\n'), ((8753, 8772), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', 
(8762, 8772), True, 'import numpy as np\n'), ((8911, 8946), 'os.path.join', 'os.path.join', (['savedir', 'f"""error.png"""'], {}), "(savedir, f'error.png')\n", (8923, 8946), False, 'import os\n'), ((9546, 9618), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Ridge Polynomial (Deg={deg}, lambda={lam})"""'], {'fontsize': '(16)'}), "(f'Ridge Polynomial (Deg={deg}, lambda={lam})', fontsize=16)\n", (9558, 9618), True, 'import matplotlib.pyplot as plt\n'), ((9627, 9645), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9643, 9645), True, 'import matplotlib.pyplot as plt\n'), ((9722, 9733), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9731, 9733), True, 'import matplotlib.pyplot as plt\n'), ((9959, 9978), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (9968, 9978), True, 'import numpy as np\n'), ((9980, 9999), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (9989, 9999), True, 'import numpy as np\n'), ((10144, 10179), 'os.path.join', 'os.path.join', (['savedir', 'f"""error.png"""'], {}), "(savedir, f'error.png')\n", (10156, 10179), False, 'import os\n'), ((4083, 4136), 'os.path.join', 'os.path.join', (['savedir', 'f"""scatter.{i:06d}.{index}.png"""'], {}), "(savedir, f'scatter.{i:06d}.{index}.png')\n", (4095, 4136), False, 'import os\n'), ((7028, 7045), 'numpy.sum', 'np.sum', (['(bias ** 2)'], {}), '(bias ** 2)\n', (7034, 7045), True, 'import numpy as np\n'), ((7070, 7086), 'numpy.sum', 'np.sum', (['variance'], {}), '(variance)\n', (7076, 7086), True, 'import numpy as np\n'), ((8451, 8497), 'os.path.join', 'os.path.join', (['savedir', 'f"""error.d{deg:02d}.png"""'], {}), "(savedir, f'error.d{deg:02d}.png')\n", (8463, 8497), False, 'import os\n'), ((9666, 9712), 'os.path.join', 'os.path.join', (['savedir', 'f"""error.d{deg:02d}.png"""'], {}), "(savedir, f'error.d{deg:02d}.png')\n", (9678, 9712), False, 'import os\n')] |
import csv
import math
import os
import random
from collections import Counter

import numpy as np
class DataGenerator:
    """Generates synthetic linear-regression data with correlated features.

    Each sample row is [1, x_1..x_20, y] (22 values): a leading intercept
    column, 15 i.i.d. standard-normal base features, five derived features
    (linear combinations of base features plus N(0, 0.1) noise) inserted at
    positions 11-15, and a response y built from the first 10 columns with
    weights 0.6^(k+1), an intercept of 10, and N(0, 0.1) noise.
    """

    def __init__(self, question=-1, iterations=1):
        # question: tag embedded in the output CSV filename.
        # iterations: stored for callers; not used by data_Generator itself.
        self.question = question
        self.iterations = iterations

    def data_Generator(self, m):
        """Generate m samples, write them to a CSV file, and return them.

        Returns a list of m rows of length 22 each. For m == 0 an empty
        list is returned and no file is written.
        """
        data = []
        if m == 0:
            return data
        for _ in range(m):
            xy = [1]  # leading 1 = intercept column
            # 15 base features (positions 11-15 of the row are filled below).
            base = np.random.standard_normal(15)
            xy.extend(base)
            # Derived features: combinations of base features + N(0, 0.1) noise
            # (std = sqrt(0.1), i.e. variance 0.1).
            x_11 = base[0] + base[1] + np.random.normal(0, math.sqrt(0.1))
            x_12 = base[2] + base[3] + np.random.normal(0, math.sqrt(0.1))
            x_13 = base[3] + base[4] + np.random.normal(0, math.sqrt(0.1))
            x_14 = 0.1 * base[6] + np.random.normal(0, math.sqrt(0.1))
            x_15 = 2 * base[1] - 10 + np.random.normal(0, math.sqrt(0.1))
            for pos, val in enumerate((x_11, x_12, x_13, x_14, x_15), start=11):
                xy.insert(pos, val)
            # y depends on the first 10 columns of the row at this point:
            # the intercept and the first nine base features.
            y = 10 + sum(xy[k] * pow(0.6, k + 1) for k in range(0, 10)) + np.random.normal(0, math.sqrt(0.1))
            xy.append(y)
            data.append(xy)
        filename = 'LinearRegression/data/question' + str(self.question) + '_m_' + str(m) + '.csv'
        # Robustness fix: ensure the target directory exists before writing
        # (previously raised FileNotFoundError when it was missing).
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',',
                               quotechar='|', quoting=csv.QUOTE_MINIMAL)
            for row in data:
                writer.writerow(row)
        return data
return data
'''
for test
if __name__ == "__main__":
dg = DataGenerator(1)
print(dg.data_Generator(1000))
''' | [
"numpy.random.standard_normal",
"csv.writer",
"math.sqrt"
] | [((442, 471), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(15)'], {}), '(15)\n', (467, 471), True, 'import numpy as np\n'), ((1501, 1577), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (1511, 1577), False, 'import csv\n'), ((612, 626), 'math.sqrt', 'math.sqrt', (['(0.1)'], {}), '(0.1)\n', (621, 626), False, 'import math\n'), ((703, 717), 'math.sqrt', 'math.sqrt', (['(0.1)'], {}), '(0.1)\n', (712, 717), False, 'import math\n'), ((794, 808), 'math.sqrt', 'math.sqrt', (['(0.1)'], {}), '(0.1)\n', (803, 808), False, 'import math\n'), ((873, 887), 'math.sqrt', 'math.sqrt', (['(0.1)'], {}), '(0.1)\n', (882, 887), False, 'import math\n'), ((955, 969), 'math.sqrt', 'math.sqrt', (['(0.1)'], {}), '(0.1)\n', (964, 969), False, 'import math\n'), ((1237, 1251), 'math.sqrt', 'math.sqrt', (['(0.1)'], {}), '(0.1)\n', (1246, 1251), False, 'import math\n')] |
# Plot Nunerical Ray-FEM Solution Error
import numpy as np
import math
omega = np.array([31.4159265358979,
62.8318530717959,
94.2477796076938,
125.663706143592,
188.495559215388,
251.327412287183,
376.991118430775,
502.654824574367])
NRayFEM_solu_err1 = np.array([ 0.000263551282082452,
0.000254140256937518,
0.000101494076854674,
0.000124117503972135,
6.64798443047013e-05,
6.66127507918252e-05,
6.73550207225830e-05,
5.83311590181842e-05])
NRayFEM_solu_err2 = np.array([9.59168285391563e-05,
6.87601603333895e-05,
4.91275290610534e-05,
4.98225006405050e-05,
3.76452744600659e-05,
4.11255002113623e-05,
2.87953174477915e-05,
2.49374764198247e-05])
import matplotlib.pyplot as plt
golden = 1.61803398875
width = 6
height = width/golden
fig = plt.figure(figsize=(width, height))
p1, = plt.loglog(omega, NRayFEM_solu_err1, label=r'$\Vert u_{\mathbf{d}_{\widetilde{\omega}}} - u_{ex}\Vert_{L^2(\Omega)} $',
color='b', linewidth=2, linestyle='--', marker='o', markersize=8.0, zorder=2)
p2, = plt.loglog(omega, NRayFEM_solu_err2, label=r'$\Vert u_{\mathbf{d}_{\omega}} - u_{ex}\Vert_{L^2(\Omega)}$',
color='g', linewidth=2, linestyle='--', marker='o', markersize=8.0, zorder=2)
# plt.loglog(omega, ERayFEM_solu_err, label=r'$\Vert u_{\mathbf{d}_{ex}} - u_{ex}\Vert_{L^2(\Omega)}$',
# color='r', linewidth=2, linestyle='--', marker='.', markersize=8.0, zorder=2)
p3, = plt.loglog(omega, NRayFEM_solu_err2[0]*1.02/(omega/(omega[0]))**0.5, label=r'$\mathcal{O}(\omega^{-1/2})$',
color='r', linewidth=2, linestyle='solid', markersize=8.0, zorder=2)
# plt.loglog(N_x**2, N_x**2 / 4.0e4, label=r' ', color='white', linewidth=0.0)
first_legend = plt.legend(handles=[p1, p2], loc=1, ncol=1, frameon=False, fontsize=22)
ax = plt.gca().add_artist(first_legend)
plt.legend(handles=[p3], loc=3, ncol=1, frameon=False, fontsize=22)
# plt.title('Numerical Ray-FEM',fontsize=20)
plt.xlabel(r'$\omega$', fontsize=18)
plt.ylabel(r'Error', fontsize=18)
plt.gca().tick_params(labelsize=14)
plt.autoscale(True, 'both', True)
plt.ylim(1.5*1e-5, .75*1e-3)
plt.tight_layout(pad=0.1)
fig.savefig('ex2_Num_Ray_FEM_solu_err_plot.pdf')
plt.show()
plt.close('all')
| [
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"mat... | [((83, 246), 'numpy.array', 'np.array', (['[31.4159265358979, 62.8318530717959, 94.2477796076938, 125.663706143592, \n 188.495559215388, 251.327412287183, 376.991118430775, 502.654824574367]'], {}), '([31.4159265358979, 62.8318530717959, 94.2477796076938, \n 125.663706143592, 188.495559215388, 251.327412287183, 376.991118430775,\n 502.654824574367])\n', (91, 246), True, 'import numpy as np\n'), ((259, 453), 'numpy.array', 'np.array', (['[0.000263551282082452, 0.000254140256937518, 0.000101494076854674, \n 0.000124117503972135, 6.64798443047013e-05, 6.66127507918252e-05, \n 6.7355020722583e-05, 5.83311590181842e-05]'], {}), '([0.000263551282082452, 0.000254140256937518, 0.000101494076854674,\n 0.000124117503972135, 6.64798443047013e-05, 6.66127507918252e-05, \n 6.7355020722583e-05, 5.83311590181842e-05])\n', (267, 453), True, 'import numpy as np\n'), ((469, 663), 'numpy.array', 'np.array', (['[9.59168285391563e-05, 6.87601603333895e-05, 4.91275290610534e-05, \n 4.9822500640505e-05, 3.76452744600659e-05, 4.11255002113623e-05, \n 2.87953174477915e-05, 2.49374764198247e-05]'], {}), '([9.59168285391563e-05, 6.87601603333895e-05, 4.91275290610534e-05,\n 4.9822500640505e-05, 3.76452744600659e-05, 4.11255002113623e-05, \n 2.87953174477915e-05, 2.49374764198247e-05])\n', (477, 663), True, 'import numpy as np\n'), ((755, 790), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (765, 790), True, 'import matplotlib.pyplot as plt\n'), ((799, 1015), 'matplotlib.pyplot.loglog', 'plt.loglog', (['omega', 'NRayFEM_solu_err1'], {'label': '"""$\\\\Vert u_{\\\\mathbf{d}_{\\\\widetilde{\\\\omega}}} - u_{ex}\\\\Vert_{L^2(\\\\Omega)} $"""', 'color': '"""b"""', 'linewidth': '(2)', 'linestyle': '"""--"""', 'marker': '"""o"""', 'markersize': '(8.0)', 'zorder': '(2)'}), "(omega, NRayFEM_solu_err1, label=\n '$\\\\Vert u_{\\\\mathbf{d}_{\\\\widetilde{\\\\omega}}} - u_{ex}\\\\Vert_{L^2(\\\\Omega)} $'\n , color='b', 
linewidth=2, linestyle='--', marker='o', markersize=8.0,\n zorder=2)\n", (809, 1015), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1216), 'matplotlib.pyplot.loglog', 'plt.loglog', (['omega', 'NRayFEM_solu_err2'], {'label': '"""$\\\\Vert u_{\\\\mathbf{d}_{\\\\omega}} - u_{ex}\\\\Vert_{L^2(\\\\Omega)}$"""', 'color': '"""g"""', 'linewidth': '(2)', 'linestyle': '"""--"""', 'marker': '"""o"""', 'markersize': '(8.0)', 'zorder': '(2)'}), "(omega, NRayFEM_solu_err2, label=\n '$\\\\Vert u_{\\\\mathbf{d}_{\\\\omega}} - u_{ex}\\\\Vert_{L^2(\\\\Omega)}$',\n color='g', linewidth=2, linestyle='--', marker='o', markersize=8.0,\n zorder=2)\n", (1025, 1216), True, 'import matplotlib.pyplot as plt\n'), ((1413, 1604), 'matplotlib.pyplot.loglog', 'plt.loglog', (['omega', '(NRayFEM_solu_err2[0] * 1.02 / (omega / omega[0]) ** 0.5)'], {'label': '"""$\\\\mathcal{O}(\\\\omega^{-1/2})$"""', 'color': '"""r"""', 'linewidth': '(2)', 'linestyle': '"""solid"""', 'markersize': '(8.0)', 'zorder': '(2)'}), "(omega, NRayFEM_solu_err2[0] * 1.02 / (omega / omega[0]) ** 0.5,\n label='$\\\\mathcal{O}(\\\\omega^{-1/2})$', color='r', linewidth=2,\n linestyle='solid', markersize=8.0, zorder=2)\n", (1423, 1604), True, 'import matplotlib.pyplot as plt\n'), ((1698, 1769), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[p1, p2]', 'loc': '(1)', 'ncol': '(1)', 'frameon': '(False)', 'fontsize': '(22)'}), '(handles=[p1, p2], loc=1, ncol=1, frameon=False, fontsize=22)\n', (1708, 1769), True, 'import matplotlib.pyplot as plt\n'), ((1810, 1877), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[p3]', 'loc': '(3)', 'ncol': '(1)', 'frameon': '(False)', 'fontsize': '(22)'}), '(handles=[p3], loc=3, ncol=1, frameon=False, fontsize=22)\n', (1820, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1925, 1961), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega$"""'], {'fontsize': '(18)'}), "('$\\\\omega$', fontsize=18)\n", (1935, 1961), True, 'import matplotlib.pyplot as plt\n'), 
((1962, 1994), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {'fontsize': '(18)'}), "('Error', fontsize=18)\n", (1972, 1994), True, 'import matplotlib.pyplot as plt\n'), ((2034, 2067), 'matplotlib.pyplot.autoscale', 'plt.autoscale', (['(True)', '"""both"""', '(True)'], {}), "(True, 'both', True)\n", (2047, 2067), True, 'import matplotlib.pyplot as plt\n'), ((2068, 2103), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1.5 * 1e-05)', '(0.75 * 0.001)'], {}), '(1.5 * 1e-05, 0.75 * 0.001)\n', (2076, 2103), True, 'import matplotlib.pyplot as plt\n'), ((2097, 2122), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.1)'}), '(pad=0.1)\n', (2113, 2122), True, 'import matplotlib.pyplot as plt\n'), ((2174, 2184), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2182, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2186, 2202), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2195, 2202), True, 'import matplotlib.pyplot as plt\n'), ((1775, 1784), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1782, 1784), True, 'import matplotlib.pyplot as plt\n'), ((1997, 2006), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2004, 2006), True, 'import matplotlib.pyplot as plt\n')] |
# set directory
import os
# NOTE(review): placeholder working directory -- fill in the full path
# before running (os.chdir below will otherwise land in C:/Users/).
directory = "C:/Users/" # complete directory
os.chdir(directory)
# load libraries
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate
from tensorflow.keras.layers import LeakyReLU, Dropout, Embedding, multiply
from tensorflow.keras.layers import BatchNormalization, Activation, Flatten
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.constraints import Constraint
from tensorflow.keras.datasets.mnist import load_data
import pandas as pd
import numpy as np
import math
from numpy.random import randint, rand, randn, random, choice
from numpy import ones, zeros, vstack
import sklearn
from sklearn.preprocessing import LabelEncoder
import time
import matplotlib
from matplotlib import pyplot as plt
# Print package versions and ensure they have loaded correctly
print("Versions:")
print("Tensorflow %s" % tf.__version__)
print("Keras %s" % keras.__version__)
print("Numpy %s" % np.__version__)
print("Matplotlib %s" % matplotlib.__version__)
print("Pandas %s" % pd.__version__)
print("SciKit Learn %s" % sklearn.__version__)
# Set random number generator
# NOTE(review): this seeds NumPy only; TensorFlow has its own RNG
# (tf.random.set_seed), which is not seeded in this setup block.
seed_value = 100
np.random.seed(seed_value)
# If you want to alter the number of rows shown in pandas:
# pd.set_option("display.max_rows", 200)
# Matplotlib style = ggplot
plt.style.use("ggplot")
# Define functions and models
# Define custom loss
def wasserstein_loss(y_true, y_pred):
    """Wasserstein (critic) loss: the mean of label-weighted critic scores.

    With targets of -1 for real and +1 for fake samples, minimising this
    pushes the critic to separate the two score distributions.
    """
    weighted_scores = y_true * y_pred
    return backend.mean(weighted_scores)
# Define discriminator or "critic"
def define_critic_gp(dataset, label_key=None):
    """Build and compile the conditional WGAN critic ("discriminator").

    Parameters
    ----------
    dataset : array-like, 2-D
        Real feature matrix; only its column count is used to size layers.
    label_key : pandas.DataFrame, optional
        Class-key table whose row count is the number of classes.  Defaults
        to the module-level ``key`` so existing ``define_critic_gp(X)``
        calls keep working (the original read the global implicitly).

    Returns
    -------
    keras.Model
        Critic compiled with the Wasserstein loss and an RMSprop optimiser.
    """
    # NOTE(review): the "_gp" suffix suggests a gradient penalty, but none
    # is implemented here — confirm whether that was intended.
    if label_key is None:
        label_key = key  # backward compatibility with the implicit global
    init = RandomNormal(stddev=0.1)
    feature_data = Input(shape=(dataset.shape[1],))
    label_data = Input(shape=(1,))
    # Embed the integer class label, then project it to the feature width so
    # it can be merged with the features by element-wise multiplication.
    label_embedding = Flatten()(
        Embedding(label_key.shape[0],
                  math.ceil((1 / 4) * dataset.shape[1]))(label_data))
    label_dense = Dense(dataset.shape[1])(label_embedding)
    inputs = multiply([feature_data, label_dense])
    main_disc = Dense(math.ceil((1 / 2) * dataset.shape[1]),
                      kernel_initializer=init)(inputs)
    main_disc = BatchNormalization()(main_disc)
    main_disc = Activation("tanh")(main_disc)
    main_disc = Dense(math.ceil((1 / 4) * dataset.shape[1]),
                      kernel_initializer=init)(main_disc)
    main_disc = BatchNormalization()(main_disc)
    main_disc = Activation("tanh")(main_disc)
    main_disc = Dropout(0.4)(main_disc)
    # Linear output: a WGAN critic emits an unbounded score, not a probability.
    disc_out = Dense(1, activation="linear")(main_disc)
    discrim = Model([feature_data, label_data], disc_out)
    opt = RMSprop(lr=0.00005)  # `lr` kept for the TF versions this script targets
    discrim.compile(loss=wasserstein_loss, optimizer=opt, metrics=["accuracy"])
    return discrim
# Define generator
def define_generator(dataset, latent_dim, key):
    """Build the conditional generator: (noise, class label) -> synthetic row.

    Mirrors the critic's conditioning scheme: the integer label is embedded,
    projected to the latent width, and multiplied element-wise into the
    noise vector before the dense stack.
    """
    weight_init = RandomNormal(stddev=0.7)
    noise = Input(shape=(latent_dim,))
    label = Input(shape=(1,))
    quarter_width = math.ceil((1 / 4) * dataset.shape[1])
    half_width = math.ceil((1 / 2) * dataset.shape[1])
    embedded = Flatten()(Embedding(key.shape[0], quarter_width)(label))
    conditioned = multiply([noise, Dense(latent_dim)(embedded)])
    hidden = Dense(quarter_width, kernel_initializer=weight_init)(conditioned)
    hidden = BatchNormalization()(hidden)
    hidden = Activation("tanh")(hidden)
    hidden = Dense(half_width, kernel_initializer=weight_init)(hidden)
    hidden = BatchNormalization()(hidden)
    hidden = Activation("tanh")(hidden)
    hidden = Dense(dataset.shape[1] + quarter_width,
                   kernel_initializer=weight_init)(hidden)
    hidden = BatchNormalization()(hidden)
    hidden = Activation("tanh")(hidden)
    # tanh output matches the [-1, 1] scaling applied to the real data.
    gen_out = Dense(dataset.shape[1], activation="tanh")(hidden)
    return Model([noise, label], gen_out)
# Define GAN
def define_gan(generator, critic, latent_dim):
    """Stack generator and critic into the combined model used to train G.

    The critic is frozen inside this composite so that its weights only
    change during the separate critic-training steps.
    """
    noise = Input(shape=(latent_dim,))
    label = Input(shape=(1,))
    critic_valid = critic([generator([noise, label]), label])
    critic.trainable = False
    gan_model = Model([noise, label], critic_valid)
    gan_model.compile(loss=wasserstein_loss,
                      optimizer=RMSprop(lr=0.000005),
                      metrics=["accuracy"])
    return gan_model
# Define functions for generation of real and fake samples
def generate_real_samples(dataset, n_samples, y_values):
    """Draw a random batch of real rows with their class labels.

    Returns ([features, labels], targets) where targets are -1, the
    Wasserstein target for real samples.
    """
    picks = randint(0, dataset.shape[0], n_samples)
    features = dataset[picks]
    picked_labels = y_values[picks]
    targets = -ones((n_samples, 1))
    return [features, picked_labels], targets
def generate_fake_samples(generator, latent_dim, n, key):
    """Generate n fake rows from random latent points and random labels.

    Returns ([features, labels], targets) where targets are +1, the
    Wasserstein target for fake samples.
    """
    latent, fake_labels = generate_latent_points(latent_dim, n, key)
    fake_features = generator.predict([latent, fake_labels])
    return [fake_features, fake_labels], ones((n, 1))
# Define functions for generation of latent point inputs
def generate_latent_points(latent_dim, n, key):
    """Sample n latent vectors (uniform [0, 1)) and n random class labels.

    Labels are drawn from [0, key.shape[0]) — one per row of the class key.
    """
    noise = rand(latent_dim * n).reshape(n, latent_dim)
    class_ids = randint(0, key.shape[0], n)
    return [noise, class_ids]
# Define function to summarize, save and checkpoint GAN performance
def summarize_performance(step, generator):
    """Checkpoint the generator to disk as generator_model_eNNN.h5."""
    generator.save(f"generator_model_e{step + 1:03d}.h5")
# Define function to plot training history
def plot_history(d1_hist, d2_hist, g_hist):
    """Plot the three loss histories and save the figure to disk."""
    series = ((d1_hist, "critic-real"),
              (d2_hist, "critic-fake"),
              (g_hist, "gen"))
    for history, name in series:
        plt.plot(history, label=name)
    plt.legend()
    plt.savefig("plot_line_plot_loss.png")
# Define final function for training
def wass_train(g_model, c_model, gan_model, dataset, latent_dim, key, y_values,
               n_epoch, n_batch, n_critic = 5, n_eval = 100):
    """Train the conditional WGAN.

    g_model / c_model / gan_model: generator, critic and combined models.
    dataset, y_values: real features and their encoded class labels.
    key: class-key table (row count = number of classes).
    n_critic: critic updates per generator update (WGAN convention).
    n_eval: checkpoint interval, in generator steps.
    """
    bat_per_epo = int(dataset.shape[0]/n_batch)
    n_steps = bat_per_epo * n_epoch
    half_batch = int(n_batch/2)
    c1_hist, c2_hist, g_hist = list(), list(), list()
    for i in range(n_steps):
        c1_tmp, c2_tmp = list(), list()
        # Train the critic n_critic times per generator step.
        for _ in range(n_critic):
            # Real rows carry target -1, fake rows +1 (Wasserstein targets).
            [x_real, labels_real], y_real = generate_real_samples(dataset,
                                                                  half_batch,
                                                                  y_values)
            c_loss1, _ = c_model.train_on_batch([x_real, labels_real],
                                                y_real)
            c1_tmp.append(c_loss1)
            [x_fake, dif_labels], y_fake = generate_fake_samples(g_model,
                                                                 latent_dim,
                                                                 half_batch,
                                                                 key)
            c_loss2, _ = c_model.train_on_batch([x_fake, dif_labels],
                                                y_fake)
            c2_tmp.append(c_loss2)
        c1_hist.append(np.mean(c1_tmp))
        c2_hist.append(np.mean(c2_tmp))
        # Generator step: push the critic to score fakes like reals (-1).
        [x_gan, labels_input] = generate_latent_points(latent_dim,
                                                       n_batch, key)
        y_gan = -ones((n_batch, 1))
        g_loss = gan_model.train_on_batch([x_gan, labels_input], y_gan)
        # NOTE(review): with metrics compiled in, train_on_batch returns
        # [loss, metric]; g_hist entries may be 2-element lists — confirm.
        g_hist.append(g_loss)
        if (i+1) % n_eval == 0:
            summarize_performance(i, g_model)
    plot_history(c1_hist, c2_hist, g_hist)
# Load data to augment.
# The first column of data.txt must contain the sample label.  No prior
# scaling is required: it is performed right below.
dataset = pd.read_csv("data.txt", header = None)
X = pd.DataFrame.to_numpy(dataset.loc[:,1:dataset.shape[1]], dtype = "float")
# Min-max scale every feature column into [-1, 1], matching the generator's
# tanh output range.
X_std = (X - X.min(axis = 0)) / (X.max(axis = 0) - X.min(axis = 0))
X = X_std * (1 - -1) + -1
y = dataset.loc[:,0].astype("category")
# Create a key mapping each categorical label to an integer code.
key = pd.DataFrame(index = range(y.cat.categories.size),
              columns = ["key_value", "sample"])
for i in range(y.cat.categories.size):
    key.loc[i, "key_value"] = i
    key.loc[i, "sample"] = y.cat.categories[i]
# Encode every sample's label as its integer key value.
y_values = pd.DataFrame(index = range(y.shape[0]), columns = range(1))
for i in range(y.shape[0]):
    for i2 in range(key.shape[0]):
        if y[i] == key.loc[i2, "sample"]:
            y_values.loc[i] = key.loc[i2, "key_value"]
y_values = pd.DataFrame.to_numpy(y_values, dtype = "float")
# Print the key (only displays in an interactive session; otherwise a no-op).
key
# Set hyperparameters
n_epochs = 400
n_batch = 16
latent_dim = 50
# Build the models and train the GAN to augment the data.
c_model = define_critic_gp(X)
g_model = define_generator(X, latent_dim, key)
gan_model = define_gan(g_model, c_model, latent_dim)
start_time = time.time()
wass_train(g_model, c_model, gan_model, X,
           latent_dim = latent_dim, key = key,
           y_values = y_values, n_epoch = n_epochs, n_batch = n_batch)
end_time = time.time()
print("train_discriminator time: %s" % (end_time - start_time))
| [
"numpy.random.seed",
"tensorflow.keras.layers.multiply",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"numpy.ones",
"matplotlib.pyplot.style.use",
"numpy.random.randint",
"numpy.mean",
"tensorflow.keras.optimizers.RMSprop",
"os.chdir",
"pandas.DataFrame.to_numpy",
"tensorflow.keras.laye... | [((74, 93), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (82, 93), False, 'import os\n'), ((1385, 1411), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (1399, 1411), True, 'import numpy as np\n'), ((1548, 1571), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1561, 1571), True, 'from matplotlib import pyplot as plt\n'), ((8004, 8040), 'pandas.read_csv', 'pd.read_csv', (['"""data.txt"""'], {'header': 'None'}), "('data.txt', header=None)\n", (8015, 8040), True, 'import pandas as pd\n'), ((8048, 8120), 'pandas.DataFrame.to_numpy', 'pd.DataFrame.to_numpy', (['dataset.loc[:, 1:dataset.shape[1]]'], {'dtype': '"""float"""'}), "(dataset.loc[:, 1:dataset.shape[1]], dtype='float')\n", (8069, 8120), True, 'import pandas as pd\n'), ((8822, 8868), 'pandas.DataFrame.to_numpy', 'pd.DataFrame.to_numpy', (['y_values'], {'dtype': '"""float"""'}), "(y_values, dtype='float')\n", (8843, 8868), True, 'import pandas as pd\n'), ((9151, 9162), 'time.time', 'time.time', ([], {}), '()\n', (9160, 9162), False, 'import time\n'), ((9339, 9350), 'time.time', 'time.time', ([], {}), '()\n', (9348, 9350), False, 'import time\n'), ((1682, 1711), 'tensorflow.keras.backend.mean', 'backend.mean', (['(y_true * y_pred)'], {}), '(y_true * y_pred)\n', (1694, 1711), False, 'from tensorflow.keras import backend\n'), ((1802, 1826), 'tensorflow.keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (1814, 1826), False, 'from tensorflow.keras.initializers import RandomNormal\n'), ((1855, 1887), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(dataset.shape[1],)'}), '(shape=(dataset.shape[1],))\n', (1860, 1887), False, 'from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate\n'), ((1908, 1925), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (1913, 1925), False, 'from tensorflow.keras.layers 
import Dense, Reshape, Input, Concatenate\n'), ((2197, 2234), 'tensorflow.keras.layers.multiply', 'multiply', (['[feature_data, label_dense]'], {}), '([feature_data, label_dense])\n', (2205, 2234), False, 'from tensorflow.keras.layers import LeakyReLU, Dropout, Embedding, multiply\n'), ((2795, 2838), 'tensorflow.keras.models.Model', 'Model', (['[feature_data, label_data]', 'disc_out'], {}), '([feature_data, label_data], disc_out)\n', (2800, 2838), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((2856, 2873), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(5e-05)'}), '(lr=5e-05)\n', (2863, 2873), False, 'from tensorflow.keras.optimizers import Adam, RMSprop\n'), ((3076, 3100), 'tensorflow.keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.7)'}), '(stddev=0.7)\n', (3088, 3100), False, 'from tensorflow.keras.initializers import RandomNormal\n'), ((3122, 3148), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(latent_dim,)'}), '(shape=(latent_dim,))\n', (3127, 3148), False, 'from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate\n'), ((3164, 3181), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (3169, 3181), False, 'from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate\n'), ((3442, 3472), 'tensorflow.keras.layers.multiply', 'multiply', (['[noise, label_dense]'], {}), '([noise, label_dense])\n', (3450, 3472), False, 'from tensorflow.keras.layers import LeakyReLU, Dropout, Embedding, multiply\n'), ((4212, 4242), 'tensorflow.keras.models.Model', 'Model', (['[noise, label]', 'gen_out'], {}), '([noise, label], gen_out)\n', (4217, 4242), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((4338, 4364), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(latent_dim,)'}), '(shape=(latent_dim,))\n', (4343, 4364), False, 'from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate\n'), ((4380, 4397), 
'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (4385, 4397), False, 'from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate\n'), ((4535, 4570), 'tensorflow.keras.models.Model', 'Model', (['[noise, label]', 'critic_valid'], {}), '([noise, label], critic_valid)\n', (4540, 4570), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((4582, 4599), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(5e-06)'}), '(lr=5e-06)\n', (4589, 4599), False, 'from tensorflow.keras.optimizers import Adam, RMSprop\n'), ((4848, 4887), 'numpy.random.randint', 'randint', (['(0)', 'dataset.shape[0]', 'n_samples'], {}), '(0, dataset.shape[0], n_samples)\n', (4855, 4887), False, 'from numpy.random import randint, rand, randn, random, choice\n'), ((5186, 5198), 'numpy.ones', 'ones', (['(n, 1)'], {}), '((n, 1))\n', (5190, 5198), False, 'from numpy import ones, zeros, vstack\n'), ((5358, 5378), 'numpy.random.rand', 'rand', (['(latent_dim * n)'], {}), '(latent_dim * n)\n', (5362, 5378), False, 'from numpy.random import randint, rand, randn, random, choice\n'), ((5439, 5466), 'numpy.random.randint', 'randint', (['(0)', 'key.shape[0]', 'n'], {}), '(0, key.shape[0], n)\n', (5446, 5466), False, 'from numpy.random import randint, rand, randn, random, choice\n'), ((5797, 5835), 'matplotlib.pyplot.plot', 'plt.plot', (['d1_hist'], {'label': '"""critic-real"""'}), "(d1_hist, label='critic-real')\n", (5805, 5835), True, 'from matplotlib import pyplot as plt\n'), ((5843, 5881), 'matplotlib.pyplot.plot', 'plt.plot', (['d2_hist'], {'label': '"""critic-fake"""'}), "(d2_hist, label='critic-fake')\n", (5851, 5881), True, 'from matplotlib import pyplot as plt\n'), ((5889, 5918), 'matplotlib.pyplot.plot', 'plt.plot', (['g_hist'], {'label': '"""gen"""'}), "(g_hist, label='gen')\n", (5897, 5918), True, 'from matplotlib import pyplot as plt\n'), ((5926, 5938), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5936, 5938), 
True, 'from matplotlib import pyplot as plt\n'), ((5944, 5982), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_line_plot_loss.png"""'], {}), "('plot_line_plot_loss.png')\n", (5955, 5982), True, 'from matplotlib import pyplot as plt\n'), ((1957, 1966), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1964, 1966), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((2135, 2158), 'tensorflow.keras.layers.Dense', 'Dense', (['dataset.shape[1]'], {}), '(dataset.shape[1])\n', (2140, 2158), False, 'from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate\n'), ((2374, 2394), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2392, 2394), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((2424, 2442), 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (2434, 2442), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((2591, 2611), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2609, 2611), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((2641, 2659), 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (2651, 2659), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((2689, 2701), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2696, 2701), False, 'from tensorflow.keras.layers import LeakyReLU, Dropout, Embedding, multiply\n'), ((2730, 2759), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (2735, 2759), False, 'from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate\n'), ((3213, 3222), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3220, 3222), False, 'from 
tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((3386, 3403), 'tensorflow.keras.layers.Dense', 'Dense', (['latent_dim'], {}), '(latent_dim)\n', (3391, 3403), False, 'from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate\n'), ((3610, 3630), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3628, 3630), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((3658, 3676), 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (3668, 3676), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((3821, 3841), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3839, 3841), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((3869, 3887), 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (3879, 3887), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((4052, 4072), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4070, 4072), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((4100, 4118), 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (4110, 4118), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, Flatten\n'), ((4145, 4187), 'tensorflow.keras.layers.Dense', 'Dense', (['dataset.shape[1]'], {'activation': '"""tanh"""'}), "(dataset.shape[1], activation='tanh')\n", (4150, 4187), False, 'from tensorflow.keras.layers import Dense, Reshape, Input, Concatenate\n'), ((4946, 4966), 'numpy.ones', 'ones', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (4950, 4966), False, 'from numpy import ones, zeros, vstack\n'), ((2264, 2299), 'math.ceil', 'math.ceil', (['(1 / 2 * dataset.shape[1])'], {}), '(1 / 2 * 
dataset.shape[1])\n', (2273, 2299), False, 'import math\n'), ((2478, 2513), 'math.ceil', 'math.ceil', (['(1 / 4 * dataset.shape[1])'], {}), '(1 / 4 * dataset.shape[1])\n', (2487, 2513), False, 'import math\n'), ((3501, 3536), 'math.ceil', 'math.ceil', (['(1 / 4 * dataset.shape[1])'], {}), '(1 / 4 * dataset.shape[1])\n', (3510, 3536), False, 'import math\n'), ((3710, 3745), 'math.ceil', 'math.ceil', (['(1 / 2 * dataset.shape[1])'], {}), '(1 / 2 * dataset.shape[1])\n', (3719, 3745), False, 'import math\n'), ((7335, 7350), 'numpy.mean', 'np.mean', (['c1_tmp'], {}), '(c1_tmp)\n', (7342, 7350), True, 'import numpy as np\n'), ((7376, 7391), 'numpy.mean', 'np.mean', (['c2_tmp'], {}), '(c2_tmp)\n', (7383, 7391), True, 'import numpy as np\n'), ((7549, 7567), 'numpy.ones', 'ones', (['(n_batch, 1)'], {}), '((n_batch, 1))\n', (7553, 7567), False, 'from numpy import ones, zeros, vstack\n'), ((2034, 2069), 'math.ceil', 'math.ceil', (['(1 / 4 * dataset.shape[1])'], {}), '(1 / 4 * dataset.shape[1])\n', (2043, 2069), False, 'import math\n'), ((3290, 3325), 'math.ceil', 'math.ceil', (['(1 / 4 * dataset.shape[1])'], {}), '(1 / 4 * dataset.shape[1])\n', (3299, 3325), False, 'import math\n'), ((3939, 3974), 'math.ceil', 'math.ceil', (['(1 / 4 * dataset.shape[1])'], {}), '(1 / 4 * dataset.shape[1])\n', (3948, 3974), False, 'import math\n')] |
import os
import sys
import numpy as np
from PIL import Image
height_trim = 16
width_trim = 20


def trim(image, height_trim=height_trim, width_trim=width_trim):
    """Crop a fixed margin from every side of *image*.

    Parameters
    ----------
    image : PIL.Image.Image
        Source image.
    height_trim, width_trim : int
        Rows/columns removed from each edge; default to the module-level
        constants so existing ``trim(image)`` calls are unchanged.

    Returns
    -------
    PIL.Image.Image
        The cropped image.
    """
    # Slice the array directly instead of round-tripping through Python
    # lists (the original used .tolist() + list comprehensions).
    arr = np.asarray(image)
    arr = arr[height_trim:-height_trim]
    # 215-px-wide images get one extra column removed on the left so the
    # output width matches the other images (original special case).
    if arr.shape[1] == 215:
        arr = arr[:, width_trim - 1:-width_trim]
    else:
        arr = arr[:, width_trim:-width_trim]
    return Image.fromarray(arr.astype(np.uint8))
def main():
    """Trim every image inside the directory named on the command line.

    Each file is overwritten in place with its trimmed version.
    """
    folder = './' + sys.argv[1]
    for file_name in os.listdir(folder):
        path = folder + '/' + file_name
        print(path)
        source = Image.open(path)
        trimmed = trim(source)
        source.close()
        trimmed.save(path)


if __name__ == '__main__':
    main()
| [
"numpy.asarray",
"os.listdir",
"PIL.Image.open"
] | [((360, 375), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (370, 375), True, 'import numpy as np\n'), ((526, 553), 'os.listdir', 'os.listdir', (["('./' + dir_name)"], {}), "('./' + dir_name)\n", (536, 553), False, 'import os\n'), ((641, 657), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (651, 657), False, 'from PIL import Image\n'), ((124, 141), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (134, 141), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""
Module with utility functions to the nested sampling for parameter estimation.
"""
__author__ = '<NAME>'
__all__ = ['un_burning']
import numpy as np
def un_burning(res, logger=None):
    """
    Automatic burning of an UltraNest chain based on the cumulative sum of
    the sample weights (same criterion as UltraNest's cornerplot).

    Parameters
    ----------
    res : UltraNest result object
        The UltraNest result (dict-like with 'paramnames' and
        'weighted_samples').
    logger : logging.Logger, optional
        If given, used to warn when the posterior collapses to one point.

    Returns
    -------
    tuple of 2 numpy nd array, or None
        The burned chain and its weights; None when the posterior is still
        concentrated in a single point.
    """
    names = res['paramnames']
    points = np.array(res['weighted_samples']['points'])
    wts = np.array(res['weighted_samples']['weights'])
    keep = np.cumsum(wts) > 1e-4
    if keep.sum() == 1:
        if logger is not None:
            warn = 'Posterior is still concentrated in a single point:'
            for i, p in enumerate(names):
                v = res['samples'][keep, i]
                warn += "\n" + ' %-20s: %s' % (p, v)
            logger.warning(warn)
            logger.info('Try running longer.')
        return
    return points[keep, :], wts[keep]
"numpy.cumsum",
"numpy.array"
] | [((851, 894), 'numpy.array', 'np.array', (["res['weighted_samples']['points']"], {}), "(res['weighted_samples']['points'])\n", (859, 894), True, 'import numpy as np\n'), ((909, 953), 'numpy.array', 'np.array', (["res['weighted_samples']['weights']"], {}), "(res['weighted_samples']['weights'])\n", (917, 953), True, 'import numpy as np\n'), ((974, 992), 'numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (983, 992), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
la = np.linalg

words = ['I', 'like', 'enjoy', 'deep', 'learning', 'NLP', 'flying', '.']

# Word-word co-occurrence counts for the toy corpus above (symmetric).
X = np.array([[0, 2, 1, 0, 0, 0, 0, 0],
           [2, 0, 0, 1, 0, 1, 0, 0],
           [1, 0, 0, 0, 0, 0, 1, 0],
           [0, 1, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0, 1],
           [0, 1, 0, 0, 0, 0, 0, 1],
           [0, 0, 1, 0, 0, 0, 0, 1],
           [0, 0, 0, 0, 1, 1, 1, 0]])

# U holds the left singular vectors, s the singular values.
U, s, Vh = la.svd(X, full_matrices=False)

# Place each word at its first two SVD coordinates (a 2-D embedding).
for idx, word in enumerate(words):
    plt.text(U[idx, 0], U[idx, 1], word)
plt.show()
| [
"matplotlib.pyplot.text",
"numpy.array",
"matplotlib.pyplot.show"
] | [((144, 371), 'numpy.array', 'np.array', (['[[0, 2, 1, 0, 0, 0, 0, 0], [2, 0, 0, 1, 0, 1, 0, 0], [1, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 0,\n 0, 0, 1], [0, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 1, 1, 0]]'], {}), '([[0, 2, 1, 0, 0, 0, 0, 0], [2, 0, 0, 1, 0, 1, 0, 0], [1, 0, 0, 0, \n 0, 0, 1, 0], [0, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 1], [0, 1,\n 0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 1, 1, 0]])\n', (152, 371), True, 'import numpy as np\n'), ((560, 570), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (568, 570), True, 'import matplotlib.pyplot as plt\n'), ((525, 561), 'matplotlib.pyplot.text', 'plt.text', (['U[i, 0]', 'U[i, 1]', 'words[i]'], {}), '(U[i, 0], U[i, 1], words[i])\n', (533, 561), True, 'import matplotlib.pyplot as plt\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.