code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import matplotlib.pyplot as plt
import sys
def display_image(img):
    """Display a channel-last BGR image with matplotlib, reordered to RGB.

    The input is split into per-channel planes, reassembled in RGB order,
    and restored to H x W x 3 layout before being shown.
    """
    plt.clf()
    # Move channels first so the three colour planes can be unpacked.
    channels = np.transpose(img, (2, 0, 1))
    blue, green, red = channels[0, ...], channels[1, ...], channels[2, ...]
    # Reassemble in RGB order and put channels last again for imshow.
    rgb_image = np.transpose(np.asarray([red, green, blue]), (1, 2, 0))
    plt.imshow(rgb_image)
    plt.show()
if __name__ == "__main__":
    # Script entry point: show one training image selected by a CLI index.
    assert len(sys.argv) > 1, "Indicate an index!"
    folder = "different_targets"
    npy_file = "{}/train.large.input.npy".format(folder)
    index = int(sys.argv[1])
    img = np.load(npy_file)[index]
    print(img.shape)
    display_image(img)
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.clf",
"numpy.asarray",
"numpy.load",
"numpy.transpose",
"matplotlib.pyplot.show"
] | [((92, 101), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (99, 101), True, 'import matplotlib.pyplot as plt\n'), ((112, 140), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (124, 140), True, 'import numpy as np\n'), ((203, 224), 'numpy.asarray', 'np.asarray', (['[r, g, b]'], {}), '([r, g, b])\n', (213, 224), True, 'import numpy as np\n'), ((238, 266), 'numpy.transpose', 'np.transpose', (['rgb', '(1, 2, 0)'], {}), '(rgb, (1, 2, 0))\n', (250, 266), True, 'import numpy as np\n'), ((271, 289), 'matplotlib.pyplot.imshow', 'plt.imshow', (['transp'], {}), '(transp)\n', (281, 289), True, 'import matplotlib.pyplot as plt\n'), ((294, 304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (302, 304), True, 'import matplotlib.pyplot as plt\n'), ((488, 505), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (495, 505), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import cv2
import time
import dataset
from PIL import Image, ImageDraw
from keras.models import load_model
# Parse the CLI (a single positional model path) and load the trained model.
parser = argparse.ArgumentParser()
parser.add_argument("model_path")
args = parser.parse_args()

model = load_model(args.model_path)
print(model.inputs)
# Square input assumed: side length read from the model's input tensor.
dim = int(model.input.shape[1])
# dim = 160
def draw_landmarks(image, landmarks, r=1, fill_color=(255,0,0,100)):
draw = ImageDraw.Draw(image)
for row in landmarks:
x, y = row
draw.ellipse((x-r, y-r, x+r, y+r), fill=fill_color)
# Webcam setup. NOTE(review): the capture is configured but unused below —
# cap.read() is commented out and a fixed test image is loaded instead.
cap = cv2.VideoCapture(0)
cap.set(3, 640)  # property 3 = frame width
cap.set(4, 480)  # property 4 = frame height
face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_alt.xml')
while(True):
    #_, img = cap.read()
    # Load a fixed test image (BGR, per IMREAD_COLOR) each iteration.
    img = cv2.imread(r"C:\Users\admin\Desktop\random\fn059t2afunaff001.png",cv2.IMREAD_COLOR)
    print(img.shape)
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # NOTE(review): imread yields BGR but the conversion flag says RGB2GRAY —
    # grayscale weights are applied to swapped channels; confirm intent.
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    faces = face_cascade.detectMultiScale(img_gray, 1.3, 5)
    for (x,y,w,h) in faces:
        # Crop the detected face and feed it through the landmark model.
        img = Image.fromarray(img[y:y+h, x:x+w])
        dp = {
            'image': img,
            # dummy landmarks — only the image resize result is used
            'landmarks': np.zeros((66,2))
        }
        dp = dataset.resize_mirror_datapoint(dp, dim, False)
        img = dp['image']
        # NOTE(review): this hard-coded array is dead code — it is
        # overwritten by the model prediction immediately below.
        landmark = np.array([[
            [0,0],
            [50,50],
            [100,50]
        ]])
        # Grayscale, add batch and channel axes, scale to [0, 1], predict.
        landmark = model.predict(np.array(img.convert("L"))[None, :, :, None] / 255.0)
        landmark = np.squeeze(landmark, axis=0)  # drop the batch axis
        # NOTE(review): PIL .size is (width, height), so these names are
        # swapped; the subsequent resize still doubles both sides.
        h, w = img.size
        img = img.resize((2*h, 2*w)).convert("L")
        landmark *= 2  # landmarks scale with the 2x upsized image
        draw_landmarks(img, landmark, r=2)
    # Convert RGB to BGR
    img = np.array(img.convert("RGB"))
    img = img[:, :, ::-1].copy()
    cv2.imshow('frame', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
| [
"PIL.Image.fromarray",
"keras.models.load_model",
"argparse.ArgumentParser",
"dataset.resize_mirror_datapoint",
"numpy.squeeze",
"cv2.imshow",
"numpy.array",
"PIL.ImageDraw.Draw",
"numpy.zeros",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.imread"
] | [((150, 175), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (173, 175), False, 'import argparse\n'), ((242, 269), 'keras.models.load_model', 'load_model', (['args.model_path'], {}), '(args.model_path)\n', (252, 269), False, 'from keras.models import load_model\n'), ((549, 568), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (565, 568), False, 'import cv2\n'), ((617, 675), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./haarcascade_frontalface_alt.xml"""'], {}), "('./haarcascade_frontalface_alt.xml')\n", (638, 675), False, 'import cv2\n'), ((415, 436), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (429, 436), False, 'from PIL import Image, ImageDraw\n'), ((727, 820), 'cv2.imread', 'cv2.imread', (['"""C:\\\\Users\\\\admin\\\\Desktop\\\\random\\\\fn059t2afunaff001.png"""', 'cv2.IMREAD_COLOR'], {}), "('C:\\\\Users\\\\admin\\\\Desktop\\\\random\\\\fn059t2afunaff001.png', cv2.\n IMREAD_COLOR)\n", (737, 820), False, 'import cv2\n'), ((895, 932), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (907, 932), False, 'import cv2\n'), ((1035, 1073), 'PIL.Image.fromarray', 'Image.fromarray', (['img[y:y + h, x:x + w]'], {}), '(img[y:y + h, x:x + w])\n', (1050, 1073), False, 'from PIL import Image, ImageDraw\n'), ((1177, 1224), 'dataset.resize_mirror_datapoint', 'dataset.resize_mirror_datapoint', (['dp', 'dim', '(False)'], {}), '(dp, dim, False)\n', (1208, 1224), False, 'import dataset\n'), ((1271, 1312), 'numpy.array', 'np.array', (['[[[0, 0], [50, 50], [100, 50]]]'], {}), '([[[0, 0], [50, 50], [100, 50]]])\n', (1279, 1312), True, 'import numpy as np\n'), ((1462, 1490), 'numpy.squeeze', 'np.squeeze', (['landmark'], {'axis': '(0)'}), '(landmark, axis=0)\n', (1472, 1490), True, 'import numpy as np\n'), ((1776, 1800), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'img'], {}), "('frame', img)\n", (1786, 1800), False, 'import cv2\n'), ((1137, 1154), 
'numpy.zeros', 'np.zeros', (['(66, 2)'], {}), '((66, 2))\n', (1145, 1154), True, 'import numpy as np\n'), ((1812, 1826), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1823, 1826), False, 'import cv2\n')] |
'''
# This is an 80 character line #
Purpose: read data from multiple text files and compare (plot) them in one
convenient image.
'''
import sys
import numpy as np
import matplotlib.pyplot as plt
# Use this to back-calculate time in units of Brownian tau
kT = 1.0                        # temperature
threeEtaPiSigma = 1.0           # drag coefficient
sigma = 1.0                     # particle diameter
D_t = kT / threeEtaPiSigma      # translational diffusion constant
D_r = (3.0 * D_t) / (sigma**2)  # rotational diffusion constant
tauBrown = (sigma**2) / D_t     # Brownian time scale (invariant)

def getFromTxt(fname, first, last):
    """Return, as a float, the text in ``fname`` between ``first`` and ``last``.

    Raises ValueError if either delimiter is absent or the span is not numeric.
    """
    # Adapted from user "cji" on Stack Overflow:
    # https://stackoverflow.com/questions/3368969/find-string-between-two-substrings
    start = fname.index(first) + len(first)
    stop = fname.index(last, start)
    return float(fname[start:stop])

def computeVel(activity):
    """Intrinsic swim speed implied by a particle activity (Peclet number)."""
    return (activity * sigma) / (3 * (1 / D_r))

def computeActiveForce(velocity):
    """Active (propulsion) force corresponding to a swim speed."""
    return velocity * threeEtaPiSigma

def computeEps(activeForce):
    """Lennard-Jones well depth matched to an active force."""
    return activeForce * sigma / 24.0

def computeTauLJ(epsilon):
    """Lennard-Jones time unit for a given well depth."""
    return ((sigma**2) * threeEtaPiSigma) / epsilon
# Grab the textfiles to run on
txtFiles = sys.argv[2:]  # pass starting at 2 to avoid script path
numFiles = len(txtFiles)

# Parse the activities and particle fraction out of each filename.
peA = np.zeros(numFiles, dtype=np.float64)
peB = np.zeros(numFiles, dtype=np.float64)
xA = np.zeros(numFiles, dtype=np.float64)
for i, fname in enumerate(txtFiles):
    peA[i] = getFromTxt(fname, "pa", "_pb")
    peB[i] = getFromTxt(fname, "pb", "_xa")
    xA[i] = getFromTxt(fname, "xa", ".txt")
partFracA = xA / 100.0

# One object array per measured quantity: element i holds the full time
# series loaded from txtFiles[i] (lengths may differ between files).
tsteps, gasA, gasB, gasAll, denseA, denseB, denseAll, lgClust, \
    dpDensity, gpDensity, dpArea, gpArea = \
    (np.zeros(numFiles, dtype=np.ndarray) for _ in range(12))
dt = np.zeros(numFiles, dtype=np.float64)  # per-file timestep size
# Pull data from text files and normalize counts to fractions.
for i, fname in enumerate(txtFiles):
    (tsteps[i], gasA[i], gasB[i], gasAll[i],
     denseA[i], denseB[i], denseAll[i], lgClust[i],
     dpDensity[i], gpDensity[i], dpArea[i], gpArea[i]) = \
        np.loadtxt(fname, skiprows=1, unpack=True)
    # presumably the total particle count (first gas-phase entry) — verify
    partNum = gasAll[i][0]
    nA = partNum * partFracA[i]        # number of A particles
    nB = partNum * (1 - partFracA[i])  # number of B particles
    gasA[i] /= nA
    gasB[i] /= nB
    gasAll[i] /= partNum
    denseA[i] /= nA
    denseB[i] /= nB
    denseAll[i] /= partNum
    lgClust[i] /= partNum
# Compute Brownian time: convert each file's timestep counter into tau_Brownian.
for i in range(0, len(txtFiles)):
    # A species: active particles get LJ parameters from their activity;
    # Brownian particles (Pe = 0) fall back to epsilon = kT.
    if peA[i] != 0:
        vA = computeVel(peA[i])
        FpA = computeActiveForce(vA)
        epsA = computeEps(FpA)
    else:
        vA = 0.0
        FpA = 0.0
        epsA = kT
    tauA = computeTauLJ(epsA)
    # B species, same rules.
    if peB[i] != 0:
        vB = computeVel(peB[i])
        FpB = computeActiveForce(vB)
        epsB = computeEps(FpB)
    else:
        vB = 0.0
        FpB = 0.0
        epsB = kT
    tauB = computeTauLJ(epsB)
    # Get adjusted time units.
    tauLJ = min(tauA, tauB)    # use the smaller tauLJ
    ratio = tauLJ / tauBrown   # get rid of LJ units
    dt[i] = ratio * 0.00001    # tstep size
    tsteps[i] *= dt[i]
# Now plot everything: one time-series figure per measured quantity.
def _plotTimeSeries(series, ylabel, outFile, ylim=None):
    """Plot series[i] vs tsteps[i] for every input file, save, and close.

    series  : per-file object array of y-values (indexed like txtFiles)
    ylabel  : y-axis label text
    outFile : output .png filename
    ylim    : optional (lo, hi) y-axis limits; omitted for unbounded data
    """
    for i in range(0, len(txtFiles)):
        plt.plot(tsteps[i], series[i], label=str(peA[i]))
    plt.legend()
    plt.xlabel(r'Brownian Time $(\tau_{Brownian})$')
    plt.ylabel(ylabel)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.savefig(outFile, dpi=1000)
    plt.close()

# Gas phase (fractions, so bounded to [0, 1])
_plotTimeSeries(gasA, 'Fraction in Gas Phase', 'gasPhaseA.png', ylim=(0, 1))
_plotTimeSeries(gasB, 'Fraction in Gas Phase', 'gasPhaseB.png', ylim=(0, 1))
_plotTimeSeries(gasAll, 'Fraction in Gas Phase', 'gasPhaseAll.png', ylim=(0, 1))
# Dense phase
_plotTimeSeries(denseA, 'Fraction in Dense Phase', 'densePhaseA.png', ylim=(0, 1))
_plotTimeSeries(denseB, 'Fraction in Dense Phase', 'densePhaseB.png', ylim=(0, 1))
_plotTimeSeries(denseAll, 'Fraction in Dense Phase', 'densePhaseAll.png', ylim=(0, 1))
# Largest cluster
_plotTimeSeries(lgClust, 'Fraction in Largest Cluster', 'largestCluster.png', ylim=(0, 1))
# Phase densities (unbounded)
_plotTimeSeries(gpDensity, 'Gas Phase Density', 'gpDensity.png')
_plotTimeSeries(dpDensity, 'Dense Phase Density', 'dpDensity.png')
# Phase areas (unbounded)
_plotTimeSeries(gpArea, 'Gas Phase Area', 'gpArea.png')
_plotTimeSeries(dpArea, 'Dense Phase Area', 'dpArea.png')
################################################################################
# Steady state values
################################################################################
# One scalar per input file: the mean over the last ~100 dumps of each series
# (the final point is excluded by the -1 in the slice, as before).
(ssGasA, ssGasB, ssGasAll, ssDenseA, ssDenseB, ssDenseAll, ssLgClust,
 ssDPDensity, ssGPDensity, ssDPArea, ssGPArea) = \
    (np.zeros(len(txtFiles), dtype=np.float64) for _ in range(11))

steadyPairs = [
    (ssGasA, gasA), (ssGasB, gasB), (ssGasAll, gasAll),
    (ssDenseA, denseA), (ssDenseB, denseB), (ssDenseAll, denseAll),
    (ssLgClust, lgClust), (ssDPDensity, dpDensity), (ssGPDensity, gpDensity),
    (ssDPArea, dpArea), (ssGPArea, gpArea),
]
# Get steady state values (last 100 dumps)
for i in range(0, len(txtFiles)):
    for out, series in steadyPairs:
        out[i] = np.mean(series[i][-100:-1])
# Steady-state scatter plots: one figure per quantity. The x-axis depends on
# which simulation parameter varies across the input files.
def _scatterSteadyState(values, ylabel, outFile):
    """Scatter values[i] against the varying parameter, then save and close.

    Per file i the x-value is chosen as in the original per-section logic:
    monodisperse B (xA == 0) or A (xA == 100) -> activity; binary runs with
    varying xA -> particle fraction; otherwise -> activity ratio peA/peB.
    """
    for i in range(0, len(txtFiles)):
        if xA[i] == 0:
            # Monodisperse B, use activity
            plt.scatter(peB[i], values[i], label=str(peA[i]), c='k')
            plt.xlabel(r'Activity')
        elif xA[i] == 100:
            # Monodisperse A, use activity
            plt.scatter(peA[i], values[i], label=str(peA[i]), c='k')
            plt.xlabel(r'Activity')
        elif xA[i] != xA[i-1]:
            # Binary, varying xA
            plt.scatter(xA[i], values[i], c='k')
            plt.xlabel(r'Particle Fraction $x_{A}$')
            plt.xlim(0, 1)
        else:
            # Binary, use activity ratio
            ratio = float(peA[i] / peB[i])
            plt.scatter(ratio, values[i], label=str(ratio), c='k')
            plt.xlabel(r'Activity Ratio')
    plt.ylabel(ylabel)
    plt.savefig(outFile, dpi=1000)
    plt.close()

# Gas Phase #####################
_scatterSteadyState(ssGasA, 'Steady-State Fraction of A-Particles in Gas',
                    'SteadyState_gasA.png')
_scatterSteadyState(ssGasB, 'Steady-State Fraction of B-Particles in Gas',
                    'SteadyState_gasB.png')
_scatterSteadyState(ssGasAll, 'Steady-State Fraction of All Particles in Gas',
                    'SteadyState_gasAll.png')
# Dense Phase ###################
_scatterSteadyState(ssDenseA, 'Steady-State Fraction of A-Particles in Dense Phase',
                    'SteadyState_denseA.png')
_scatterSteadyState(ssDenseB, 'Steady-State Fraction of B-Particles in Dense Phase',
                    'SteadyState_denseB.png')
_scatterSteadyState(ssDenseAll, 'Steady-State Fraction of All Particles in Dense Phase',
                    'SteadyState_denseAll.png')
# Largest Cluster ###############
# BUG FIX: this figure was previously saved as 'SteadyState_dpDensity.png'
# and silently overwritten by the dense-phase-density figure below.
_scatterSteadyState(ssLgClust, 'Steady-State Largest Cluster Size',
                    'SteadyState_lgClust.png')
# Cluster Density ###############
_scatterSteadyState(ssDPDensity, 'Steady-State Dense Phase Density',
                    'SteadyState_dpDensity.png')
_scatterSteadyState(ssGPDensity, 'Steady-State Gas Phase Density',
                    'SteadyState_gpDensity.png')
# Cluster Area ##################
_scatterSteadyState(ssDPArea, 'Steady-State Dense Phase Area',
                    'SteadyState_dpArea.png')
_scatterSteadyState(ssGPArea, 'Steady-State Gas Phase Area',
                    'SteadyState_gpArea.png')
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.loadtxt",
"numpy.zeros_like",
"matplotlib.pyplot.legend"
] | [((1879, 1920), 'numpy.zeros_like', 'np.zeros_like', (['txtFiles'], {'dtype': 'np.float64'}), '(txtFiles, dtype=np.float64)\n', (1892, 1920), True, 'import numpy as np\n'), ((1927, 1968), 'numpy.zeros_like', 'np.zeros_like', (['txtFiles'], {'dtype': 'np.float64'}), '(txtFiles, dtype=np.float64)\n', (1940, 1968), True, 'import numpy as np\n'), ((1974, 2015), 'numpy.zeros_like', 'np.zeros_like', (['txtFiles'], {'dtype': 'np.float64'}), '(txtFiles, dtype=np.float64)\n', (1987, 2015), True, 'import numpy as np\n'), ((4845, 4857), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4855, 4857), True, 'import matplotlib.pyplot as plt\n'), ((4858, 4906), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (4868, 4906), True, 'import matplotlib.pyplot as plt\n'), ((4907, 4942), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction in Gas Phase"""'], {}), "('Fraction in Gas Phase')\n", (4917, 4942), True, 'import matplotlib.pyplot as plt\n'), ((4943, 4957), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4951, 4957), True, 'import matplotlib.pyplot as plt\n'), ((4957, 4995), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gasPhaseA.png"""'], {'dpi': '(1000)'}), "('gasPhaseA.png', dpi=1000)\n", (4968, 4995), True, 'import matplotlib.pyplot as plt\n'), ((4996, 5007), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5005, 5007), True, 'import matplotlib.pyplot as plt\n'), ((5095, 5107), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5105, 5107), True, 'import matplotlib.pyplot as plt\n'), ((5108, 5156), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (5118, 5156), True, 'import matplotlib.pyplot as plt\n'), ((5157, 5192), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction in Gas Phase"""'], {}), "('Fraction in Gas 
Phase')\n", (5167, 5192), True, 'import matplotlib.pyplot as plt\n'), ((5193, 5207), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (5201, 5207), True, 'import matplotlib.pyplot as plt\n'), ((5207, 5245), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gasPhaseB.png"""'], {'dpi': '(1000)'}), "('gasPhaseB.png', dpi=1000)\n", (5218, 5245), True, 'import matplotlib.pyplot as plt\n'), ((5246, 5257), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5255, 5257), True, 'import matplotlib.pyplot as plt\n'), ((5347, 5359), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5357, 5359), True, 'import matplotlib.pyplot as plt\n'), ((5360, 5408), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (5370, 5408), True, 'import matplotlib.pyplot as plt\n'), ((5409, 5444), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction in Gas Phase"""'], {}), "('Fraction in Gas Phase')\n", (5419, 5444), True, 'import matplotlib.pyplot as plt\n'), ((5445, 5459), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (5453, 5459), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5499), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gasPhaseAll.png"""'], {'dpi': '(1000)'}), "('gasPhaseAll.png', dpi=1000)\n", (5470, 5499), True, 'import matplotlib.pyplot as plt\n'), ((5500, 5511), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5509, 5511), True, 'import matplotlib.pyplot as plt\n'), ((5615, 5627), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5625, 5627), True, 'import matplotlib.pyplot as plt\n'), ((5628, 5676), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (5638, 5676), True, 'import matplotlib.pyplot as plt\n'), ((5677, 5714), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction in Dense Phase"""'], {}), 
"('Fraction in Dense Phase')\n", (5687, 5714), True, 'import matplotlib.pyplot as plt\n'), ((5715, 5729), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (5723, 5729), True, 'import matplotlib.pyplot as plt\n'), ((5729, 5769), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""densePhaseA.png"""'], {'dpi': '(1000)'}), "('densePhaseA.png', dpi=1000)\n", (5740, 5769), True, 'import matplotlib.pyplot as plt\n'), ((5770, 5781), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5779, 5781), True, 'import matplotlib.pyplot as plt\n'), ((5871, 5883), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5881, 5883), True, 'import matplotlib.pyplot as plt\n'), ((5884, 5932), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (5894, 5932), True, 'import matplotlib.pyplot as plt\n'), ((5933, 5970), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction in Dense Phase"""'], {}), "('Fraction in Dense Phase')\n", (5943, 5970), True, 'import matplotlib.pyplot as plt\n'), ((5971, 5985), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (5979, 5985), True, 'import matplotlib.pyplot as plt\n'), ((5985, 6025), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""densePhaseB.png"""'], {'dpi': '(1000)'}), "('densePhaseB.png', dpi=1000)\n", (5996, 6025), True, 'import matplotlib.pyplot as plt\n'), ((6026, 6037), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6035, 6037), True, 'import matplotlib.pyplot as plt\n'), ((6129, 6141), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6139, 6141), True, 'import matplotlib.pyplot as plt\n'), ((6142, 6190), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (6152, 6190), True, 'import matplotlib.pyplot as plt\n'), ((6191, 6228), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction 
in Dense Phase"""'], {}), "('Fraction in Dense Phase')\n", (6201, 6228), True, 'import matplotlib.pyplot as plt\n'), ((6229, 6243), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (6237, 6243), True, 'import matplotlib.pyplot as plt\n'), ((6243, 6285), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""densePhaseAll.png"""'], {'dpi': '(1000)'}), "('densePhaseAll.png', dpi=1000)\n", (6254, 6285), True, 'import matplotlib.pyplot as plt\n'), ((6286, 6297), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6295, 6297), True, 'import matplotlib.pyplot as plt\n'), ((6406, 6418), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6416, 6418), True, 'import matplotlib.pyplot as plt\n'), ((6419, 6467), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (6429, 6467), True, 'import matplotlib.pyplot as plt\n'), ((6468, 6509), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction in Largest Cluster"""'], {}), "('Fraction in Largest Cluster')\n", (6478, 6509), True, 'import matplotlib.pyplot as plt\n'), ((6510, 6524), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (6518, 6524), True, 'import matplotlib.pyplot as plt\n'), ((6524, 6567), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""largestCluster.png"""'], {'dpi': '(1000)'}), "('largestCluster.png', dpi=1000)\n", (6535, 6567), True, 'import matplotlib.pyplot as plt\n'), ((6568, 6579), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6577, 6579), True, 'import matplotlib.pyplot as plt\n'), ((6690, 6702), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6700, 6702), True, 'import matplotlib.pyplot as plt\n'), ((6703, 6751), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (6713, 6751), True, 'import matplotlib.pyplot as plt\n'), ((6752, 6783), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gas Phase Density"""'], {}), "('Gas Phase Density')\n", (6762, 6783), True, 'import matplotlib.pyplot as plt\n'), ((6799, 6837), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gpDensity.png"""'], {'dpi': '(1000)'}), "('gpDensity.png', dpi=1000)\n", (6810, 6837), True, 'import matplotlib.pyplot as plt\n'), ((6838, 6849), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6847, 6849), True, 'import matplotlib.pyplot as plt\n'), ((6942, 6954), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6952, 6954), True, 'import matplotlib.pyplot as plt\n'), ((6955, 7003), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (6965, 7003), True, 'import matplotlib.pyplot as plt\n'), ((7004, 7037), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dense Phase Density"""'], {}), "('Dense Phase Density')\n", (7014, 7037), True, 'import matplotlib.pyplot as plt\n'), ((7053, 7091), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""dpDensity.png"""'], {'dpi': '(1000)'}), "('dpDensity.png', dpi=1000)\n", (7064, 7091), True, 'import matplotlib.pyplot as plt\n'), ((7092, 7103), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7101, 7103), True, 'import matplotlib.pyplot as plt\n'), ((7208, 7220), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7218, 7220), True, 'import matplotlib.pyplot as plt\n'), ((7221, 7269), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (7231, 7269), True, 'import matplotlib.pyplot as plt\n'), ((7270, 7298), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gas Phase Area"""'], {}), "('Gas Phase Area')\n", (7280, 7298), True, 'import matplotlib.pyplot as plt\n'), ((7314, 7349), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gpArea.png"""'], {'dpi': '(1000)'}), "('gpArea.png', dpi=1000)\n", 
(7325, 7349), True, 'import matplotlib.pyplot as plt\n'), ((7350, 7361), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7359, 7361), True, 'import matplotlib.pyplot as plt\n'), ((7451, 7463), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7461, 7463), True, 'import matplotlib.pyplot as plt\n'), ((7464, 7512), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brownian Time $(\\\\tau_{Brownian})$"""'], {}), "('Brownian Time $(\\\\tau_{Brownian})$')\n", (7474, 7512), True, 'import matplotlib.pyplot as plt\n'), ((7513, 7543), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dense Phase Area"""'], {}), "('Dense Phase Area')\n", (7523, 7543), True, 'import matplotlib.pyplot as plt\n'), ((7559, 7594), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""dpArea.png"""'], {'dpi': '(1000)'}), "('dpArea.png', dpi=1000)\n", (7570, 7594), True, 'import matplotlib.pyplot as plt\n'), ((7595, 7606), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7604, 7606), True, 'import matplotlib.pyplot as plt\n'), ((10216, 10273), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Fraction of A-Particles in Gas"""'], {}), "('Steady-State Fraction of A-Particles in Gas')\n", (10226, 10273), True, 'import matplotlib.pyplot as plt\n'), ((10274, 10319), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_gasA.png"""'], {'dpi': '(1000)'}), "('SteadyState_gasA.png', dpi=1000)\n", (10285, 10319), True, 'import matplotlib.pyplot as plt\n'), ((10320, 10331), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10329, 10331), True, 'import matplotlib.pyplot as plt\n'), ((11025, 11082), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Fraction of B-Particles in Gas"""'], {}), "('Steady-State Fraction of B-Particles in Gas')\n", (11035, 11082), True, 'import matplotlib.pyplot as plt\n'), ((11083, 11128), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_gasB.png"""'], {'dpi': '(1000)'}), "('SteadyState_gasB.png', 
dpi=1000)\n", (11094, 11128), True, 'import matplotlib.pyplot as plt\n'), ((11129, 11140), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11138, 11140), True, 'import matplotlib.pyplot as plt\n'), ((11842, 11901), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Fraction of All Particles in Gas"""'], {}), "('Steady-State Fraction of All Particles in Gas')\n", (11852, 11901), True, 'import matplotlib.pyplot as plt\n'), ((11902, 11949), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_gasAll.png"""'], {'dpi': '(1000)'}), "('SteadyState_gasAll.png', dpi=1000)\n", (11913, 11949), True, 'import matplotlib.pyplot as plt\n'), ((11950, 11961), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11959, 11961), True, 'import matplotlib.pyplot as plt\n'), ((12697, 12762), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Fraction of A-Particles in Dense Phase"""'], {}), "('Steady-State Fraction of A-Particles in Dense Phase')\n", (12707, 12762), True, 'import matplotlib.pyplot as plt\n'), ((12763, 12810), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_denseA.png"""'], {'dpi': '(1000)'}), "('SteadyState_denseA.png', dpi=1000)\n", (12774, 12810), True, 'import matplotlib.pyplot as plt\n'), ((12811, 12822), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12820, 12822), True, 'import matplotlib.pyplot as plt\n'), ((13524, 13589), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Fraction of B-Particles in Dense Phase"""'], {}), "('Steady-State Fraction of B-Particles in Dense Phase')\n", (13534, 13589), True, 'import matplotlib.pyplot as plt\n'), ((13590, 13637), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_denseB.png"""'], {'dpi': '(1000)'}), "('SteadyState_denseB.png', dpi=1000)\n", (13601, 13637), True, 'import matplotlib.pyplot as plt\n'), ((13638, 13649), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13647, 13649), True, 'import matplotlib.pyplot as 
plt\n'), ((14359, 14426), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Fraction of All Particles in Dense Phase"""'], {}), "('Steady-State Fraction of All Particles in Dense Phase')\n", (14369, 14426), True, 'import matplotlib.pyplot as plt\n'), ((14427, 14476), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_denseAll.png"""'], {'dpi': '(1000)'}), "('SteadyState_denseAll.png', dpi=1000)\n", (14438, 14476), True, 'import matplotlib.pyplot as plt\n'), ((14477, 14488), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14486, 14488), True, 'import matplotlib.pyplot as plt\n'), ((15228, 15275), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Largest Cluster Size"""'], {}), "('Steady-State Largest Cluster Size')\n", (15238, 15275), True, 'import matplotlib.pyplot as plt\n'), ((15276, 15326), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_dpDensity.png"""'], {'dpi': '(1000)'}), "('SteadyState_dpDensity.png', dpi=1000)\n", (15287, 15326), True, 'import matplotlib.pyplot as plt\n'), ((15327, 15338), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15336, 15338), True, 'import matplotlib.pyplot as plt\n'), ((16086, 16132), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Dense Phase Density"""'], {}), "('Steady-State Dense Phase Density')\n", (16096, 16132), True, 'import matplotlib.pyplot as plt\n'), ((16133, 16183), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_dpDensity.png"""'], {'dpi': '(1000)'}), "('SteadyState_dpDensity.png', dpi=1000)\n", (16144, 16183), True, 'import matplotlib.pyplot as plt\n'), ((16184, 16195), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16193, 16195), True, 'import matplotlib.pyplot as plt\n'), ((16909, 16953), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Gas Phase Density"""'], {}), "('Steady-State Gas Phase Density')\n", (16919, 16953), True, 'import matplotlib.pyplot as plt\n'), ((16954, 17004), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_gpDensity.png"""'], {'dpi': '(1000)'}), "('SteadyState_gpDensity.png', dpi=1000)\n", (16965, 17004), True, 'import matplotlib.pyplot as plt\n'), ((17005, 17016), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17014, 17016), True, 'import matplotlib.pyplot as plt\n'), ((17752, 17795), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Dense Phase Area"""'], {}), "('Steady-State Dense Phase Area')\n", (17762, 17795), True, 'import matplotlib.pyplot as plt\n'), ((17796, 17843), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_dpArea.png"""'], {'dpi': '(1000)'}), "('SteadyState_dpArea.png', dpi=1000)\n", (17807, 17843), True, 'import matplotlib.pyplot as plt\n'), ((17844, 17855), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17853, 17855), True, 'import matplotlib.pyplot as plt\n'), ((18557, 18598), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steady-State Gas Phase Area"""'], {}), "('Steady-State Gas Phase Area')\n", (18567, 18598), True, 'import matplotlib.pyplot as plt\n'), ((18599, 18646), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SteadyState_gpArea.png"""'], {'dpi': '(1000)'}), "('SteadyState_gpArea.png', dpi=1000)\n", (18610, 18646), True, 'import matplotlib.pyplot as plt\n'), ((18647, 18658), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18656, 18658), True, 'import matplotlib.pyplot as plt\n'), ((3328, 3376), 'numpy.loadtxt', 'np.loadtxt', (['txtFiles[i]'], {'skiprows': '(1)', 'unpack': '(True)'}), '(txtFiles[i], skiprows=1, unpack=True)\n', (3338, 3376), True, 'import numpy as np\n'), ((8496, 8521), 'numpy.mean', 'np.mean', (['gasA[i][-100:-1]'], {}), '(gasA[i][-100:-1])\n', (8503, 8521), True, 'import numpy as np\n'), ((8538, 8563), 'numpy.mean', 'np.mean', (['gasB[i][-100:-1]'], {}), '(gasB[i][-100:-1])\n', (8545, 8563), True, 'import numpy as np\n'), ((8582, 8609), 'numpy.mean', 'np.mean', (['gasAll[i][-100:-1]'], {}), 
'(gasAll[i][-100:-1])\n', (8589, 8609), True, 'import numpy as np\n'), ((8628, 8655), 'numpy.mean', 'np.mean', (['denseA[i][-100:-1]'], {}), '(denseA[i][-100:-1])\n', (8635, 8655), True, 'import numpy as np\n'), ((8674, 8701), 'numpy.mean', 'np.mean', (['denseB[i][-100:-1]'], {}), '(denseB[i][-100:-1])\n', (8681, 8701), True, 'import numpy as np\n'), ((8722, 8751), 'numpy.mean', 'np.mean', (['denseAll[i][-100:-1]'], {}), '(denseAll[i][-100:-1])\n', (8729, 8751), True, 'import numpy as np\n'), ((8771, 8799), 'numpy.mean', 'np.mean', (['lgClust[i][-100:-1]'], {}), '(lgClust[i][-100:-1])\n', (8778, 8799), True, 'import numpy as np\n'), ((8821, 8851), 'numpy.mean', 'np.mean', (['dpDensity[i][-100:-1]'], {}), '(dpDensity[i][-100:-1])\n', (8828, 8851), True, 'import numpy as np\n'), ((8873, 8903), 'numpy.mean', 'np.mean', (['gpDensity[i][-100:-1]'], {}), '(gpDensity[i][-100:-1])\n', (8880, 8903), True, 'import numpy as np\n'), ((8922, 8949), 'numpy.mean', 'np.mean', (['dpArea[i][-100:-1]'], {}), '(dpArea[i][-100:-1])\n', (8929, 8949), True, 'import numpy as np\n'), ((8968, 8995), 'numpy.mean', 'np.mean', (['gpArea[i][-100:-1]'], {}), '(gpArea[i][-100:-1])\n', (8975, 8995), True, 'import numpy as np\n'), ((9668, 9705), 'matplotlib.pyplot.scatter', 'plt.scatter', (['peB[i]', 'ssGasA[i]'], {'c': '"""k"""'}), "(peB[i], ssGasA[i], c='k')\n", (9679, 9705), True, 'import matplotlib.pyplot as plt\n'), ((9714, 9736), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (9724, 9736), True, 'import matplotlib.pyplot as plt\n'), ((10494, 10516), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (10504, 10516), True, 'import matplotlib.pyplot as plt\n'), ((11305, 11327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (11315, 11327), True, 'import matplotlib.pyplot as plt\n'), ((12160, 12182), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", 
(12170, 12182), True, 'import matplotlib.pyplot as plt\n'), ((12987, 13009), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (12997, 13009), True, 'import matplotlib.pyplot as plt\n'), ((13816, 13838), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (13826, 13838), True, 'import matplotlib.pyplot as plt\n'), ((14688, 14710), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (14698, 14710), True, 'import matplotlib.pyplot as plt\n'), ((15540, 15562), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (15550, 15562), True, 'import matplotlib.pyplot as plt\n'), ((16363, 16385), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (16373, 16385), True, 'import matplotlib.pyplot as plt\n'), ((17215, 17237), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (17225, 17237), True, 'import matplotlib.pyplot as plt\n'), ((18020, 18042), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (18030, 18042), True, 'import matplotlib.pyplot as plt\n'), ((9819, 9856), 'matplotlib.pyplot.scatter', 'plt.scatter', (['peA[i]', 'ssGasA[i]'], {'c': '"""k"""'}), "(peA[i], ssGasA[i], c='k')\n", (9830, 9856), True, 'import matplotlib.pyplot as plt\n'), ((9865, 9887), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (9875, 9887), True, 'import matplotlib.pyplot as plt\n'), ((10649, 10671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (10659, 10671), True, 'import matplotlib.pyplot as plt\n'), ((11462, 11484), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (11472, 11484), True, 'import matplotlib.pyplot as plt\n'), ((12317, 12339), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (12327, 12339), 
True, 'import matplotlib.pyplot as plt\n'), ((13144, 13166), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (13154, 13166), True, 'import matplotlib.pyplot as plt\n'), ((13975, 13997), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (13985, 13997), True, 'import matplotlib.pyplot as plt\n'), ((14846, 14868), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (14856, 14868), True, 'import matplotlib.pyplot as plt\n'), ((15700, 15722), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (15710, 15722), True, 'import matplotlib.pyplot as plt\n'), ((16523, 16545), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (16533, 16545), True, 'import matplotlib.pyplot as plt\n'), ((17372, 17394), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (17382, 17394), True, 'import matplotlib.pyplot as plt\n'), ((18177, 18199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity"""'], {}), "('Activity')\n", (18187, 18199), True, 'import matplotlib.pyplot as plt\n'), ((9949, 9985), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssGasA[i]'], {'c': '"""k"""'}), "(xA[i], ssGasA[i], c='k')\n", (9960, 9985), True, 'import matplotlib.pyplot as plt\n'), ((9994, 10033), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (10004, 10033), True, 'import matplotlib.pyplot as plt\n'), ((10043, 10057), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (10051, 10057), True, 'import matplotlib.pyplot as plt\n'), ((10140, 10176), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ratio', 'ssGasA[i]'], {'c': '"""k"""'}), "(ratio, ssGasA[i], c='k')\n", (10151, 10176), True, 'import matplotlib.pyplot as plt\n'), ((10185, 10213), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity 
Ratio"""'], {}), "('Activity Ratio')\n", (10195, 10213), True, 'import matplotlib.pyplot as plt\n'), ((10733, 10769), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssGasB[i]'], {'c': '"""k"""'}), "(xA[i], ssGasB[i], c='k')\n", (10744, 10769), True, 'import matplotlib.pyplot as plt\n'), ((10778, 10817), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (10788, 10817), True, 'import matplotlib.pyplot as plt\n'), ((10827, 10841), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (10835, 10841), True, 'import matplotlib.pyplot as plt\n'), ((10994, 11022), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (11004, 11022), True, 'import matplotlib.pyplot as plt\n'), ((11546, 11584), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssGasAll[i]'], {'c': '"""k"""'}), "(xA[i], ssGasAll[i], c='k')\n", (11557, 11584), True, 'import matplotlib.pyplot as plt\n'), ((11593, 11632), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (11603, 11632), True, 'import matplotlib.pyplot as plt\n'), ((11642, 11656), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (11650, 11656), True, 'import matplotlib.pyplot as plt\n'), ((11811, 11839), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (11821, 11839), True, 'import matplotlib.pyplot as plt\n'), ((12401, 12439), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssDenseA[i]'], {'c': '"""k"""'}), "(xA[i], ssDenseA[i], c='k')\n", (12412, 12439), True, 'import matplotlib.pyplot as plt\n'), ((12448, 12487), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (12458, 12487), True, 'import matplotlib.pyplot as plt\n'), ((12497, 12511), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 
'(1)'], {}), '(0, 1)\n', (12505, 12511), True, 'import matplotlib.pyplot as plt\n'), ((12666, 12694), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (12676, 12694), True, 'import matplotlib.pyplot as plt\n'), ((13228, 13266), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssDenseB[i]'], {'c': '"""k"""'}), "(xA[i], ssDenseB[i], c='k')\n", (13239, 13266), True, 'import matplotlib.pyplot as plt\n'), ((13275, 13314), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (13285, 13314), True, 'import matplotlib.pyplot as plt\n'), ((13324, 13338), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (13332, 13338), True, 'import matplotlib.pyplot as plt\n'), ((13493, 13521), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (13503, 13521), True, 'import matplotlib.pyplot as plt\n'), ((14059, 14099), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssDenseAll[i]'], {'c': '"""k"""'}), "(xA[i], ssDenseAll[i], c='k')\n", (14070, 14099), True, 'import matplotlib.pyplot as plt\n'), ((14108, 14147), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (14118, 14147), True, 'import matplotlib.pyplot as plt\n'), ((14157, 14171), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (14165, 14171), True, 'import matplotlib.pyplot as plt\n'), ((14328, 14356), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (14338, 14356), True, 'import matplotlib.pyplot as plt\n'), ((14930, 14969), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssLgClust[i]'], {'c': '"""k"""'}), "(xA[i], ssLgClust[i], c='k')\n", (14941, 14969), True, 'import matplotlib.pyplot as plt\n'), ((14978, 15017), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction 
$x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (14988, 15017), True, 'import matplotlib.pyplot as plt\n'), ((15027, 15041), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (15035, 15041), True, 'import matplotlib.pyplot as plt\n'), ((15197, 15225), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (15207, 15225), True, 'import matplotlib.pyplot as plt\n'), ((15784, 15825), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssDPDensity[i]'], {'c': '"""k"""'}), "(xA[i], ssDPDensity[i], c='k')\n", (15795, 15825), True, 'import matplotlib.pyplot as plt\n'), ((15834, 15873), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (15844, 15873), True, 'import matplotlib.pyplot as plt\n'), ((15883, 15897), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (15891, 15897), True, 'import matplotlib.pyplot as plt\n'), ((16055, 16083), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (16065, 16083), True, 'import matplotlib.pyplot as plt\n'), ((16607, 16648), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssGPDensity[i]'], {'c': '"""k"""'}), "(xA[i], ssGPDensity[i], c='k')\n", (16618, 16648), True, 'import matplotlib.pyplot as plt\n'), ((16657, 16696), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (16667, 16696), True, 'import matplotlib.pyplot as plt\n'), ((16706, 16720), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (16714, 16720), True, 'import matplotlib.pyplot as plt\n'), ((16878, 16906), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (16888, 16906), True, 'import matplotlib.pyplot as plt\n'), ((17456, 17494), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssDPArea[i]'], {'c': '"""k"""'}), 
"(xA[i], ssDPArea[i], c='k')\n", (17467, 17494), True, 'import matplotlib.pyplot as plt\n'), ((17503, 17542), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (17513, 17542), True, 'import matplotlib.pyplot as plt\n'), ((17552, 17566), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (17560, 17566), True, 'import matplotlib.pyplot as plt\n'), ((17721, 17749), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (17731, 17749), True, 'import matplotlib.pyplot as plt\n'), ((18261, 18299), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xA[i]', 'ssGPArea[i]'], {'c': '"""k"""'}), "(xA[i], ssGPArea[i], c='k')\n", (18272, 18299), True, 'import matplotlib.pyplot as plt\n'), ((18308, 18347), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Particle Fraction $x_{A}$"""'], {}), "('Particle Fraction $x_{A}$')\n", (18318, 18347), True, 'import matplotlib.pyplot as plt\n'), ((18357, 18371), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (18365, 18371), True, 'import matplotlib.pyplot as plt\n'), ((18526, 18554), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Activity Ratio"""'], {}), "('Activity Ratio')\n", (18536, 18554), True, 'import matplotlib.pyplot as plt\n')] |
# test for returning to position of SHO
# test for amplitude insensitivity of SHO
import unittest
import numpy
import random
import utils
class AdaptiveRK4Test(unittest.TestCase):
    """Sanity checks for the adaptive RK4 integrator on a unit SHO."""

    def test_meta(self):
        # Smoke test: the harness itself runs.
        self.assertTrue(True)

    def test_period_check(self):
        # Integrate one full period of the simple harmonic oscillator and
        # verify the trajectory lands back at the starting phase-space point.
        initial = numpy.array([0., 0., 1.])
        print('pre starting')
        trajectory = utils.RK4_adapt(
            base_SHO, initial, 2 * numpy.pi,
            max_steps=500, precision=10 ** -6)
        final = trajectory[-1]
        print(final[0] - 2 * numpy.pi, final[1])
        self.assertTrue((final[0] - 2 * numpy.pi) < 10 ** -5)
        self.assertTrue(abs(final[1]) < 10 ** -5)
class ReturnTimeTest(unittest.TestCase):
    """Exercise utils.return_time on a randomized unit-energy SHO orbit."""

    def setUp(self):
        # Random starting position on the unit energy circle, so every run
        # probes a different amplitude; p0 follows from energy conservation.
        x0 = random.random()
        p0 = (1 - x0 ** 2) ** .5
        self.stateI = numpy.array([0., x0, p0])
        # Integrate just past one full period so the final trajectory steps
        # sandwich the return to the initial state.
        self.path_out = utils.RK4_adapt(
            base_SHO, self.stateI, 2 * numpy.pi * (1. + 10 ** -7),
            max_steps=2000, precision=10 ** -8)

    def test_meta(self):
        self.assertTrue(True)

    def test_returns_1_time_per_degree_of_freedom(self):
        gam0 = self.stateI
        gam1 = self.path_out[1]
        gam2 = self.path_out[2]
        return_times = utils.return_time(gam0, gam1, gam2)
        # One return time per phase-space coordinate (time itself excluded).
        self.assertTrue(len(return_times) == (len(gam0) - 1))

    def test_finds_future_times(self):  # accounting for cyclic flips previous behavior
        gam0 = self.stateI
        gam1 = self.path_out[1]
        gam2 = self.path_out[2]
        return_times = utils.return_time(gam0, gam1, gam2)
        self.assertTrue(return_times[0] > gam1[0])
        self.assertTrue(return_times[1] > gam1[0])

    # @unittest.skip("cyclic coordinate fudging makes non-sandwiching times behave unexpectedly")
    def test_finds_previous_times(self):  # accounting for cyclic flips previous behavior
        gam0 = self.stateI
        gam1 = self.path_out[-3]
        gam2 = self.path_out[-2]
        return_times = utils.return_time(gam0, gam1, gam2)
        self.assertTrue(return_times[0] < gam1[0])
        self.assertTrue(return_times[1] < gam1[0])

    def test_depends_on_more_than_spatial(self):
        gam0 = self.stateI
        # BUG FIX: integer division. In Python 3, `/` yields a float, and
        # indexing a list with a float raises TypeError.
        mid_point = len(self.path_out) // 2
        gam1 = self.path_out[mid_point - 1]
        gam2 = self.path_out[mid_point]
        return_times = utils.return_time(gam0, gam1, gam2)
        time_diff = abs(return_times[0] - return_times[1])
        end_time = self.path_out[-1][0]
        self.assertTrue(
            time_diff > end_time / 100.
        )

    def test_agrees_on_period(self):
        gam0 = self.stateI
        gam1 = self.path_out[-2]
        gam2 = self.path_out[-1]
        return_times = utils.return_time(gam0, gam1, gam2)
        time_diff = abs(return_times[0] - return_times[1])
        avg_time = (return_times[0] + return_times[1]) / 2.
        print(return_times, avg_time)
        print(gam1)
        print(gam0)
        print(gam2)
        # print(gam2[0]-2*numpy.pi, 'time difference')
        # Both coordinates should agree on the return time...
        self.assertTrue(
            time_diff < 10 ** -6
        )
        # ...and that time should be one SHO period.
        self.assertAlmostEqual(
            avg_time, 2 * numpy.pi
        )
class PeriodFinderTest(unittest.TestCase):
    """Checks that utils.return_times_finder reports successive SHO periods."""

    def test_meta(self):
        # Smoke test: the harness itself runs.
        self.assertTrue(True)

    def test_return_times_finder(self):
        # Random unit-energy initial condition.
        x0 = random.random()
        p0 = (1. - x0 ** 2) ** .5
        stateI = numpy.array([0., x0, p0])
        return_times = utils.return_times_finder(
            base_SHO, stateI,
            precision=10 ** -10, max_time=100.0, max_steps=10 ** 6)
        print(return_times)
        print([val[0] / (2 * numpy.pi) for val in return_times])
        print(len(return_times))
        # Each detected return time should match an integer number of periods,
        # within the reported tolerance.
        for i, entry in enumerate(return_times):
            self.assertAlmostEqual(
                (i + 1) * 2 * numpy.pi, entry[0],
                delta=entry[1]
            )
        # Enough returns were found to cover the full integration window.
        self.assertTrue(len(return_times) > 10.0 / 2.1 * numpy.pi)
def base_SHO(state):
    """Phase-space derivative of a unit simple harmonic oscillator.

    The state vector is (t, x, p); the derivative is (1, p, -x), with any
    trailing components left at zero.
    """
    rate = numpy.zeros(len(state))
    rate[0], rate[1], rate[2] = 1., state[2], -state[1]
    return rate
| [
"utils.return_time",
"numpy.array",
"utils.RK4_adapt",
"random.random",
"utils.return_times_finder"
] | [((287, 315), 'numpy.array', 'numpy.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (298, 315), False, 'import numpy\n'), ((360, 446), 'utils.RK4_adapt', 'utils.RK4_adapt', (['base_SHO', 'stateI', '(2 * numpy.pi)'], {'max_steps': '(500)', 'precision': '(10 ** -6)'}), '(base_SHO, stateI, 2 * numpy.pi, max_steps=500, precision=10 **\n -6)\n', (375, 446), False, 'import utils\n'), ((692, 707), 'random.random', 'random.random', ([], {}), '()\n', (705, 707), False, 'import random\n'), ((757, 783), 'numpy.array', 'numpy.array', (['[0.0, x0, p0]'], {}), '([0.0, x0, p0])\n', (768, 783), False, 'import numpy\n'), ((805, 916), 'utils.RK4_adapt', 'utils.RK4_adapt', (['base_SHO', 'self.stateI', '(2 * numpy.pi * (1.0 + 10 ** -7))'], {'max_steps': '(2000)', 'precision': '(10 ** -8)'}), '(base_SHO, self.stateI, 2 * numpy.pi * (1.0 + 10 ** -7),\n max_steps=2000, precision=10 ** -8)\n', (820, 916), False, 'import utils\n'), ((1156, 1191), 'utils.return_time', 'utils.return_time', (['gam0', 'gam1', 'gam2'], {}), '(gam0, gam1, gam2)\n', (1173, 1191), False, 'import utils\n'), ((1451, 1486), 'utils.return_time', 'utils.return_time', (['gam0', 'gam1', 'gam2'], {}), '(gam0, gam1, gam2)\n', (1468, 1486), False, 'import utils\n'), ((1888, 1923), 'utils.return_time', 'utils.return_time', (['gam0', 'gam1', 'gam2'], {}), '(gam0, gam1, gam2)\n', (1905, 1923), False, 'import utils\n'), ((2245, 2280), 'utils.return_time', 'utils.return_time', (['gam0', 'gam1', 'gam2'], {}), '(gam0, gam1, gam2)\n', (2262, 2280), False, 'import utils\n'), ((2608, 2643), 'utils.return_time', 'utils.return_time', (['gam0', 'gam1', 'gam2'], {}), '(gam0, gam1, gam2)\n', (2625, 2643), False, 'import utils\n'), ((3247, 3262), 'random.random', 'random.random', ([], {}), '()\n', (3260, 3262), False, 'import random\n'), ((3308, 3334), 'numpy.array', 'numpy.array', (['[0.0, x0, p0]'], {}), '([0.0, x0, p0])\n', (3319, 3334), False, 'import numpy\n'), ((3355, 3459), 'utils.return_times_finder', 
'utils.return_times_finder', (['base_SHO', 'stateI'], {'precision': '(10 ** -10)', 'max_time': '(100.0)', 'max_steps': '(10 ** 6)'}), '(base_SHO, stateI, precision=10 ** -10, max_time=\n 100.0, max_steps=10 ** 6)\n', (3380, 3459), False, 'import utils\n')] |
from keras.optimizers import Adam
from keras.layers import Dense, Input, Activation
import random
import keras.backend as K
from keras.models import Sequential, Model, load_model
import numpy as np
import tensorflow as tf
from time import time
from keras.callbacks import History
from collections import deque
class PG():
    """REINFORCE (Monte-Carlo policy gradient) agent.

    Buffers (state, action, reward) transitions for one episode, then
    updates a Keras policy network using discounted, baseline-normalized
    returns.
    """

    def __init__(self, load_network = False, load_weight = False, load_file = None):
        self.learning_rate = 0.0001
        self.discount_factor = 0.99
        # Per-episode transition buffers, cleared after each policy update.
        self.state_memory = []
        self.reward_memory = []
        self.action_memory = []
        self.num_actions = 4
        self.curr_disc_rewards = None
        # policy_n carries the custom loss and is trained; predict_n shares
        # its layers and is used for action selection and persistence.
        self.policy_n, self.predict_n = self.create_network(load_network, load_weight, load_file)

    def create_network(self, load_network = False, load_weight = False, load_file = None):
        """Build (or load) the policy network.

        Returns a (trainable, predict-only) model pair; the trainable half
        is None when a saved model or weight file is loaded instead.
        """
        if load_network is True:
            model = load_model(load_file)
            return (None, model)
        # `state_input` avoids shadowing the builtin `input`.
        state_input = Input(shape = (363,))
        disc_rewards = Input(shape = [1])
        dense1 = Dense(128, activation = 'relu')(state_input)
        dense2 = Dense(64, activation = 'relu')(dense1)
        prob_output = Dense(self.num_actions, activation = 'softmax')(dense2)
        opt = Adam(self.learning_rate)

        def custom_loss(y_true, y_pred):
            # Policy-gradient loss: -log pi(a|s) weighted by the discounted
            # return (closed over the disc_rewards input). Clipping keeps
            # log() away from 0 and 1.
            clip_out = K.clip(y_pred, 1e-8, 1 - 1e-8)
            log_lik = y_true * K.log(clip_out)
            return K.sum(-log_lik * disc_rewards)

        policy_n = Model(inputs = [state_input, disc_rewards], outputs = [prob_output])
        policy_n.compile(loss = custom_loss, optimizer=opt)
        predict_n = Model(inputs = [state_input], outputs = [prob_output])
        if load_weight is True:
            predict_n.load_weights(load_file)
            return (None, predict_n)
        return policy_n, predict_n

    def predict_action(self, state):
        """Sample an action index from the current policy distribution."""
        predicted_probs = self.predict_n.predict(state)[0]
        pred_action = np.random.choice(range(self.num_actions), p = predicted_probs)
        return pred_action

    def remember(self, state, action, reward):
        """Append one transition to the episode buffers."""
        self.state_memory.append(state.reshape((363,)))
        self.action_memory.append(action)
        self.reward_memory.append(reward)

    def update_policy(self):
        """Run one REINFORCE update over the buffered episode.

        Returns the training loss; the buffers are cleared and the loss is
        appended to ./logs_pg/model_metrics_pg.csv.
        """
        state_mem = np.array(self.state_memory)
        action_mem = np.array(self.action_memory)
        reward_mem = np.array(self.reward_memory)
        # One-hot encode the taken actions.
        actions = np.zeros((len(action_mem), self.num_actions))
        actions[np.arange(len(action_mem)), action_mem] = 1
        # Discounted returns via the backward recurrence
        # G_t = r_t + gamma * G_{t+1}: O(n) instead of the previous O(n^2)
        # forward double loop, same values.
        disc_rewards = np.zeros_like(reward_mem)
        running = 0.0
        for t in reversed(range(len(reward_mem))):
            running = reward_mem[t] + self.discount_factor * running
            disc_rewards[t] = running
        # Scale rewards for numerical stability - baseline.
        mean = disc_rewards.mean()
        std_dev = disc_rewards.std()
        norm_disc_rewards = (disc_rewards - mean) / std_dev
        # Train on the full episode batch.
        cost = self.policy_n.train_on_batch([state_mem, norm_disc_rewards], actions)
        # Reset the memory.
        self.state_memory = []
        self.action_memory = []
        self.reward_memory = []
        # Log the loss; `with` guarantees the handle is closed even on error.
        with open("./logs_pg/model_metrics_pg.csv", 'a+') as f:
            f.write(str(cost) + "\n")
        return cost

    def save_model(self, iteration='1'):
        """Persist predict-network weights and the full model, tagged by iteration."""
        self.predict_n.save_weights("./weight_store"+"/pg_weight_"+iteration+".h5")
        self.predict_n.save("./model_store"+"/pg_model_"+iteration+".h5")
'''
def get_random_state():
return(np.random.rand(1,300))
network_obj = PG()
s= get_random_state()
network_obj.remember(s,network_obj.predict_action(s),1)
s= get_random_state()
network_obj.remember(s,network_obj.predict_action(s),1)
s= get_random_state()
network_obj.remember(s,network_obj.predict_action(s),1)
s= get_random_state()
network_obj.remember(s,network_obj.predict_action(s),1)
s= get_random_state()
network_obj.remember(s,network_obj.predict_action(s),1)
s= get_random_state()
network_obj.remember(s,network_obj.predict_action(s),1)
print(network_obj.update_policy())
Refered to: https://github.com/philtabor/Deep-Q-Learning-Paper-To-Code
''' | [
"keras.optimizers.Adam",
"keras.models.load_model",
"keras.backend.sum",
"keras.backend.clip",
"numpy.array",
"keras.layers.Input",
"keras.backend.log",
"keras.models.Model",
"keras.layers.Dense",
"numpy.zeros_like"
] | [((963, 982), 'keras.layers.Input', 'Input', ([], {'shape': '(363,)'}), '(shape=(363,))\n', (968, 982), False, 'from keras.layers import Dense, Input, Activation\n'), ((1008, 1024), 'keras.layers.Input', 'Input', ([], {'shape': '[1]'}), '(shape=[1])\n', (1013, 1024), False, 'from keras.layers import Dense, Input, Activation\n'), ((1231, 1255), 'keras.optimizers.Adam', 'Adam', (['self.learning_rate'], {}), '(self.learning_rate)\n', (1235, 1255), False, 'from keras.optimizers import Adam\n'), ((1466, 1524), 'keras.models.Model', 'Model', ([], {'inputs': '[input, disc_rewards]', 'outputs': '[prob_output]'}), '(inputs=[input, disc_rewards], outputs=[prob_output])\n', (1471, 1524), False, 'from keras.models import Sequential, Model, load_model\n'), ((1609, 1653), 'keras.models.Model', 'Model', ([], {'inputs': '[input]', 'outputs': '[prob_output]'}), '(inputs=[input], outputs=[prob_output])\n', (1614, 1653), False, 'from keras.models import Sequential, Model, load_model\n'), ((2258, 2285), 'numpy.array', 'np.array', (['self.state_memory'], {}), '(self.state_memory)\n', (2266, 2285), True, 'import numpy as np\n'), ((2307, 2335), 'numpy.array', 'np.array', (['self.action_memory'], {}), '(self.action_memory)\n', (2315, 2335), True, 'import numpy as np\n'), ((2357, 2385), 'numpy.array', 'np.array', (['self.reward_memory'], {}), '(self.reward_memory)\n', (2365, 2385), True, 'import numpy as np\n'), ((2535, 2560), 'numpy.zeros_like', 'np.zeros_like', (['reward_mem'], {}), '(reward_mem)\n', (2548, 2560), True, 'import numpy as np\n'), ((891, 912), 'keras.models.load_model', 'load_model', (['load_file'], {}), '(load_file)\n', (901, 912), False, 'from keras.models import Sequential, Model, load_model\n'), ((1044, 1073), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1049, 1073), False, 'from keras.layers import Dense, Input, Activation\n'), ((1100, 1128), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': 
'"""relu"""'}), "(64, activation='relu')\n", (1105, 1128), False, 'from keras.layers import Dense, Input, Activation\n'), ((1161, 1206), 'keras.layers.Dense', 'Dense', (['self.num_actions'], {'activation': '"""softmax"""'}), "(self.num_actions, activation='softmax')\n", (1166, 1206), False, 'from keras.layers import Dense, Input, Activation\n'), ((1321, 1353), 'keras.backend.clip', 'K.clip', (['y_pred', '(1e-08)', '(1 - 1e-08)'], {}), '(y_pred, 1e-08, 1 - 1e-08)\n', (1327, 1353), True, 'import keras.backend as K\n'), ((1415, 1445), 'keras.backend.sum', 'K.sum', (['(-log_lik * disc_rewards)'], {}), '(-log_lik * disc_rewards)\n', (1420, 1445), True, 'import keras.backend as K\n'), ((1380, 1395), 'keras.backend.log', 'K.log', (['clip_out'], {}), '(clip_out)\n', (1385, 1395), True, 'import keras.backend as K\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cv2
import numpy as np
def is_entry_me(entry_img):
# ヒストグラムから、入力エントリが自分かを判断
me_img = entry_img[:, 0:43]
me_score = np.sum(me_img)
me_score_normalized = 0
try:
me_score_normalized = me_score / (43 * 45 * 255 / 10)
except ZeroDivisionError as e:
me_score_normalized = 0
return (me_score_normalized > 1)
def anonymize(img,
anonWinTeam=False,
anonLoseTeam=False,
anonMyTeam=False,
anonCounterTeam=False,
anonMe=False,
anonOthers=False,
anonAll=False):
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh1 = cv2.threshold(img_gray, 230, 255, cv2.THRESH_BINARY)
# 各プレイヤー情報のスタート左位置
entry_left = 610
# 各プレイヤー情報の横幅
entry_width = 610
# 各プレイヤー情報の高さ
entry_height = 45
# 各エントリ内での名前スタート位置と長さ
entry_xoffset_name = 809 - entry_left
entry_xoffset_name_me = 770 - entry_left
entry_width_name = 180
entry_xoffset_nawabari_score = 995 - entry_left
entry_width_nawabari_score = 115
entry_top = [101, 167, 231, 296, 432, 496, 562, 627]
entry_xoffset_kd = 1187 - entry_left
entry_width_kd = 30
entry_height_kd = 20
entry_id = 0
# 自分を探す
me = 0
myteam = 0
for top in entry_top:
entry_id = entry_id + 1
img_entry = thresh1[top:top + entry_height,
entry_left:entry_left + entry_width]
if (is_entry_me(img_entry)):
me = entry_id
myteam = 1 if entry_id < 5 else 2
entry_id = 0
entries = []
anonymized = img.copy()
for top in entry_top:
entry_id = entry_id + 1
team = 1 if entry_id < 5 else 2
anon = anonAll or \
(anonWinTeam and (team == 1)) or \
(anonLoseTeam and (team == 2)) or \
(anonMyTeam and (team == myteam)) or \
(anonCounterTeam and (team != myteam)) or \
(anonMe and (entry_id == me)) or \
(anonOthers and (entry_id != me))
if anon:
img_entry = thresh1[top:top + entry_height,
entry_left:entry_left + entry_width]
name_left = entry_xoffset_name_me if entry_id == me else entry_xoffset_name
anon_top = top
anon_bottom = top + entry_height
anon_left = entry_left + name_left
anon_right = entry_left + name_left + entry_width_name
img_smallName = cv2.resize(anonymized[anon_top:anon_bottom, anon_left:anon_right], (int(
entry_width_name / 4), int(entry_height / 4)))
anonymized[anon_top:anon_bottom, anon_left:anon_right] = cv2.resize(
img_smallName, (entry_width_name, entry_height), interpolation=cv2.INTER_NEAREST)
return anonymized
| [
"cv2.threshold",
"numpy.sum",
"cv2.resize",
"cv2.cvtColor"
] | [((793, 807), 'numpy.sum', 'np.sum', (['me_img'], {}), '(me_img)\n', (799, 807), True, 'import numpy as np\n'), ((1275, 1312), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1287, 1312), False, 'import cv2\n'), ((1332, 1384), 'cv2.threshold', 'cv2.threshold', (['img_gray', '(230)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img_gray, 230, 255, cv2.THRESH_BINARY)\n', (1345, 1384), False, 'import cv2\n'), ((3367, 3464), 'cv2.resize', 'cv2.resize', (['img_smallName', '(entry_width_name, entry_height)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img_smallName, (entry_width_name, entry_height), interpolation=\n cv2.INTER_NEAREST)\n', (3377, 3464), False, 'import cv2\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sRGB Colourspace
================
Defines the *sRGB* colourspace:
- :attr:`sRGB_COLOURSPACE`.
See Also
--------
`RGB Colourspaces IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/rgb.ipynb>`_ # noqa
References
----------
.. [1] `Recommendation ITU-R BT.709-5 - Parameter values for the HDTV
standards for production and international programme exchange
<http://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.709-5-200204-I!!PDF-E.pdf>`_ # noqa
(Last accessed 24 February 2014)
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import ILLUMINANTS
from colour.models import RGB_Colourspace
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['sRGB_PRIMARIES',
'sRGB_WHITEPOINT',
'sRGB_TO_XYZ_MATRIX',
'XYZ_TO_sRGB_MATRIX',
'sRGB_TRANSFER_FUNCTION',
'sRGB_INVERSE_TRANSFER_FUNCTION',
'sRGB_COLOURSPACE']
sRGB_PRIMARIES = np.array(
[[0.6400, 0.3300],
[0.3000, 0.6000],
[0.1500, 0.0600]])
"""
*sRGB* colourspace primaries.
sRGB_PRIMARIES : ndarray, (3, 2)
"""
sRGB_WHITEPOINT = ILLUMINANTS.get(
'CIE 1931 2 Degree Standard Observer').get('D65')
"""
*sRGB* colourspace whitepoint.
sRGB_WHITEPOINT : tuple
"""
sRGB_TO_XYZ_MATRIX = np.array(
[[0.41238656, 0.35759149, 0.18045049],
[0.21263682, 0.71518298, 0.0721802],
[0.01933062, 0.11919716, 0.95037259]])
"""
*sRGB* colourspace to *CIE XYZ* colourspace matrix.
sRGB_TO_XYZ_MATRIX : array_like, (3, 3)
"""
XYZ_TO_sRGB_MATRIX = np.linalg.inv(sRGB_TO_XYZ_MATRIX)
"""
*CIE XYZ* colourspace to *sRGB* colourspace matrix.
XYZ_TO_sRGB_MATRIX : array_like, (3, 3)
"""
sRGB_TRANSFER_FUNCTION = lambda x: (
x * 12.92 if x <= 0.0031308 else 1.055 * (x ** (1 / 2.4)) - 0.055)
"""
Transfer function from linear to *sRGB* colourspace.
sRGB_TRANSFER_FUNCTION : object
"""
sRGB_INVERSE_TRANSFER_FUNCTION = lambda x: (
x / 12.92 if x <= 0.0031308 else ((x + 0.055) / 1.055) ** 2.4)
"""
Inverse transfer function from *sRGB* colourspace to linear.
sRGB_INVERSE_TRANSFER_FUNCTION : object
"""
sRGB_COLOURSPACE = RGB_Colourspace(
'sRGB',
sRGB_PRIMARIES,
sRGB_WHITEPOINT,
sRGB_TO_XYZ_MATRIX,
XYZ_TO_sRGB_MATRIX,
sRGB_TRANSFER_FUNCTION,
sRGB_INVERSE_TRANSFER_FUNCTION)
"""
*sRGB* colourspace.
sRGB_COLOURSPACE : RGB_Colourspace
"""
| [
"colour.colorimetry.ILLUMINANTS.get",
"numpy.array",
"numpy.linalg.inv",
"colour.models.RGB_Colourspace"
] | [((1301, 1351), 'numpy.array', 'np.array', (['[[0.64, 0.33], [0.3, 0.6], [0.15, 0.06]]'], {}), '([[0.64, 0.33], [0.3, 0.6], [0.15, 0.06]])\n', (1309, 1351), True, 'import numpy as np\n'), ((1629, 1757), 'numpy.array', 'np.array', (['[[0.41238656, 0.35759149, 0.18045049], [0.21263682, 0.71518298, 0.0721802],\n [0.01933062, 0.11919716, 0.95037259]]'], {}), '([[0.41238656, 0.35759149, 0.18045049], [0.21263682, 0.71518298, \n 0.0721802], [0.01933062, 0.11919716, 0.95037259]])\n', (1637, 1757), True, 'import numpy as np\n'), ((1891, 1924), 'numpy.linalg.inv', 'np.linalg.inv', (['sRGB_TO_XYZ_MATRIX'], {}), '(sRGB_TO_XYZ_MATRIX)\n', (1904, 1924), True, 'import numpy as np\n'), ((2472, 2628), 'colour.models.RGB_Colourspace', 'RGB_Colourspace', (['"""sRGB"""', 'sRGB_PRIMARIES', 'sRGB_WHITEPOINT', 'sRGB_TO_XYZ_MATRIX', 'XYZ_TO_sRGB_MATRIX', 'sRGB_TRANSFER_FUNCTION', 'sRGB_INVERSE_TRANSFER_FUNCTION'], {}), "('sRGB', sRGB_PRIMARIES, sRGB_WHITEPOINT, sRGB_TO_XYZ_MATRIX,\n XYZ_TO_sRGB_MATRIX, sRGB_TRANSFER_FUNCTION, sRGB_INVERSE_TRANSFER_FUNCTION)\n", (2487, 2628), False, 'from colour.models import RGB_Colourspace\n'), ((1472, 1526), 'colour.colorimetry.ILLUMINANTS.get', 'ILLUMINANTS.get', (['"""CIE 1931 2 Degree Standard Observer"""'], {}), "('CIE 1931 2 Degree Standard Observer')\n", (1487, 1526), False, 'from colour.colorimetry import ILLUMINANTS\n')] |
import os
import pytest
from astropy.modeling import models
from gempy.library.astromodels import get_named_submodel
from numpy.testing import assert_allclose
def assert_have_same_distortion(ad, ad_ref, atol=0, rtol=1e-7):
"""
Checks if two :class:`~astrodata.AstroData` (or any subclass) have the
same distortion.
Parameters
----------
ad : :class:`astrodata.AstroData`
AstroData object to be checked.
ad_ref : :class:`astrodata.AstroData`
AstroData object used as reference
atol, rtol : float
absolute and relative tolerances
"""
for ext, ext_ref in zip(ad, ad_ref):
m = ext.wcs.get_transform(ext.wcs.input_frame, "distortion_corrected")
m_ref = ext_ref.wcs.get_transform(ext_ref.wcs.input_frame,
"distortion_corrected")
# The [1] index is because the transform is always
# Mapping | Chebyshev2D & Identity(1)
assert m[1].__class__.__name__ == m_ref[1].__class__.__name__ == "Chebyshev2D"
assert_allclose(m[1].parameters, m_ref[1].parameters,
atol=atol, rtol=rtol)
assert_allclose(m.inverse[1].parameters, m_ref.inverse[1].parameters,
atol=atol, rtol=rtol)
def assert_wavelength_solutions_are_close(ad, ad_ref, atol=0, rtol=1e-7):
"""
Checks if two :class:`~astrodata.AstroData` (or any subclass) have the
wavelength solution.
Parameters
----------
ad : :class:`astrodata.AstroData` or any subclass
AstroData object to be checked.
ad_ref : :class:`astrodata.AstroData` or any subclass
AstroData object used as reference
atol, rtol : float
absolute and relative tolerances
"""
for ext, ext_ref in zip(ad, ad_ref):
wcal = get_named_submodel(ext.wcs.forward_transform, 'WAVE')
wcal_ref = get_named_submodel(ext_ref.wcs.forward_transform, 'WAVE')
assert_allclose(wcal.parameters, wcal_ref.parameters,
atol=atol, rtol=rtol)
| [
"numpy.testing.assert_allclose",
"gempy.library.astromodels.get_named_submodel"
] | [((1051, 1126), 'numpy.testing.assert_allclose', 'assert_allclose', (['m[1].parameters', 'm_ref[1].parameters'], {'atol': 'atol', 'rtol': 'rtol'}), '(m[1].parameters, m_ref[1].parameters, atol=atol, rtol=rtol)\n', (1066, 1126), False, 'from numpy.testing import assert_allclose\n'), ((1159, 1255), 'numpy.testing.assert_allclose', 'assert_allclose', (['m.inverse[1].parameters', 'm_ref.inverse[1].parameters'], {'atol': 'atol', 'rtol': 'rtol'}), '(m.inverse[1].parameters, m_ref.inverse[1].parameters, atol=\n atol, rtol=rtol)\n', (1174, 1255), False, 'from numpy.testing import assert_allclose\n'), ((1813, 1866), 'gempy.library.astromodels.get_named_submodel', 'get_named_submodel', (['ext.wcs.forward_transform', '"""WAVE"""'], {}), "(ext.wcs.forward_transform, 'WAVE')\n", (1831, 1866), False, 'from gempy.library.astromodels import get_named_submodel\n'), ((1886, 1943), 'gempy.library.astromodels.get_named_submodel', 'get_named_submodel', (['ext_ref.wcs.forward_transform', '"""WAVE"""'], {}), "(ext_ref.wcs.forward_transform, 'WAVE')\n", (1904, 1943), False, 'from gempy.library.astromodels import get_named_submodel\n'), ((1952, 2027), 'numpy.testing.assert_allclose', 'assert_allclose', (['wcal.parameters', 'wcal_ref.parameters'], {'atol': 'atol', 'rtol': 'rtol'}), '(wcal.parameters, wcal_ref.parameters, atol=atol, rtol=rtol)\n', (1967, 2027), False, 'from numpy.testing import assert_allclose\n')] |
# -*- coding: utf-8 -*-
"""
A few tool classes to solve an optimisation problem
@author: daniel, <NAME>
"""
import numpy as np
from matplotlib import pyplot as plt
import scipy.optimize as opt
class Solver:
data = []
func = 0
optParams = []
residuals = []
xvals = []
totalResidual = 0.0
def __init__(self, d, f):
self.data = d
self.func = f
self.optParams = []
def err(self, parameters):
out_first = self.data.iat[0, 0]
e = 0.0
for row in self.data.values:
if row[0] == out_first:
x = row[1:-1]
y = self.func(parameters, x)
e += (y - row[0]) * (y - row[0]) * row[2]
return e
def calc_residuals(self):
out_first = self.data.iat[0, 0]
e = -1.0
for row in self.data.values:
if row[0] == out_first:
x = row[1:-1]
y = self.func(self.optParams, x)
if e >= 0.0:
self.residuals.append(e)
self.totalResidual += e
self.xvals.extend(x)
e = 0.0
e += (y - row[0]) * (y - row[0]) * row[2]
def optimize(self, first_guess_params):
self.optParams = opt.fmin(self.err, first_guess_params)
self.calc_residuals()
return self.optParams
# Plots the 2D pdf as a scatter plot where points' colour represents the probability
def plot_pdf(self):
plt.scatter(self.data.iloc[:, 1].values, self.data.iloc[:, 0].values, c=self.data.iloc[:, 2].values)
xlocs, xlabels = plt.xticks()
plt.xticks(xlocs[:-1], ['%.2E' % np.exp(x) for x in xlocs[:-1]])
ylocs, ylabels = plt.yticks()
plt.yticks(ylocs[1:-1], ['%.2E' % np.exp(x) for x in ylocs[1:-1]])
plt.xlabel("Weekly rent")
plt.ylabel("Annual net household income")
plt.tight_layout()
# Plots the fitted function
def plot_func(self, parameters):
xvals = [x / 10.0 for x in range(80, 115)]
yvals = [self.func(parameters, x) for x in xvals]
plt.plot(xvals, yvals)
# Plots the residuals only
def plot_residuals(self):
plt.plot(self.xvals, np.array(self.residuals) * 50.0)
| [
"matplotlib.pyplot.xticks",
"scipy.optimize.fmin",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.tight_layout"
] | [((1272, 1310), 'scipy.optimize.fmin', 'opt.fmin', (['self.err', 'first_guess_params'], {}), '(self.err, first_guess_params)\n', (1280, 1310), True, 'import scipy.optimize as opt\n'), ((1493, 1598), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.data.iloc[:, 1].values', 'self.data.iloc[:, 0].values'], {'c': 'self.data.iloc[:, 2].values'}), '(self.data.iloc[:, 1].values, self.data.iloc[:, 0].values, c=\n self.data.iloc[:, 2].values)\n', (1504, 1598), True, 'from matplotlib import pyplot as plt\n'), ((1619, 1631), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (1629, 1631), True, 'from matplotlib import pyplot as plt\n'), ((1730, 1742), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (1740, 1742), True, 'from matplotlib import pyplot as plt\n'), ((1826, 1851), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Weekly rent"""'], {}), "('Weekly rent')\n", (1836, 1851), True, 'from matplotlib import pyplot as plt\n'), ((1860, 1901), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Annual net household income"""'], {}), "('Annual net household income')\n", (1870, 1901), True, 'from matplotlib import pyplot as plt\n'), ((1910, 1928), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1926, 1928), True, 'from matplotlib import pyplot as plt\n'), ((2116, 2138), 'matplotlib.pyplot.plot', 'plt.plot', (['xvals', 'yvals'], {}), '(xvals, yvals)\n', (2124, 2138), True, 'from matplotlib import pyplot as plt\n'), ((2230, 2254), 'numpy.array', 'np.array', (['self.residuals'], {}), '(self.residuals)\n', (2238, 2254), True, 'import numpy as np\n'), ((1673, 1682), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1679, 1682), True, 'import numpy as np\n'), ((1785, 1794), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1791, 1794), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import xarray as xr
import typing as tp
NdType = tp.Union[np.ndarray, pd.DataFrame, xr.DataArray, pd.Series]
NdTupleType = tp.Union[
tp.Tuple[NdType],
tp.Tuple[NdType, NdType],
tp.Tuple[NdType, NdType, NdType],
tp.Tuple[NdType, NdType, NdType, NdType],
]
XR_TIME_DIMENSION = "time"
def nd_universal_adapter(d1_function, nd_args: NdTupleType, plain_args: tuple) -> NdType:
if isinstance(nd_args[0], np.ndarray):
return nd_np_adapter(d1_function, nd_args, plain_args)
if isinstance(nd_args[0], pd.DataFrame):
return nd_pd_df_adapter(d1_function, nd_args, plain_args)
if isinstance(nd_args[0], pd.Series):
return nd_pd_s_adapter(d1_function, nd_args, plain_args)
if isinstance(nd_args[0], xr.DataArray):
return nd_xr_da_adapter(d1_function, nd_args, plain_args)
raise Exception("unsupported")
def nd_np_adapter(d1_function, nd_args: tp.Tuple[np.ndarray], plain_args: tuple) -> np.ndarray:
shape = nd_args[0].shape
if len(shape) == 1:
args = nd_args + plain_args
return d1_function(*args)
nd_args_2d = tuple(a.reshape(-1, shape[-1]) for a in nd_args)
result2d = np.empty_like(nd_args_2d[0], )
for i in range(nd_args_2d[0].shape[0]):
slices = tuple(a[i] for a in nd_args_2d)
args = slices + plain_args
result2d[i] = d1_function(*args)
return result2d.reshape(shape)
def nd_pd_df_adapter(d1_function, nd_args: tp.Tuple[pd.DataFrame], plain_args: tuple) -> pd.DataFrame:
np_nd_args = tuple(a.to_numpy().transpose() for a in nd_args)
np_result = nd_np_adapter(d1_function, np_nd_args, plain_args)
np_result = np_result.transpose()
return pd.DataFrame(np_result, columns=nd_args[0].columns, index=nd_args[0].index)
def nd_pd_s_adapter(d1_function, nd_args: tp.Tuple[pd.Series], plain_args: tuple) -> pd.Series:
np_nd_args = tuple(a.to_numpy() for a in nd_args)
np_result = nd_np_adapter(d1_function, np_nd_args, plain_args)
np_result = np_result.transpose()
return pd.Series(np_result, nd_args[0].index)
def nd_xr_da_adapter(d1_function, nd_args: tp.Tuple[xr.DataArray], plain_args: tuple) -> xr.DataArray:
origin_dims = nd_args[0].dims
transpose_dims = tuple(i for i in origin_dims if i != XR_TIME_DIMENSION) + (XR_TIME_DIMENSION,)
np_nd_args = tuple(a.transpose(*transpose_dims).values for a in nd_args)
np_result = nd_np_adapter(d1_function, np_nd_args, plain_args)
return xr.DataArray(np_result, dims=transpose_dims, coords=nd_args[0].coords).transpose(*origin_dims)
def nd_to_1d_universal_adapter(np_function, nd_args: NdTupleType, plain_args: tuple) -> NdType:
if isinstance(nd_args[0], np.ndarray):
return nd_to_1d_np_adapter(nd_args, plain_args)
if isinstance(nd_args[0], pd.DataFrame):
return nd_to_1d_pd_df_adapter(np_function, nd_args, plain_args)
if isinstance(nd_args[0], xr.DataArray):
return nd_to_1d_xr_da_adapter(np_function, nd_args, plain_args)
raise Exception("unsupported")
def nd_to_1d_np_adapter(np_function, nd_args: tp.Tuple[np.ndarray], plain_args: tuple) -> np.ndarray:
args = nd_args + plain_args
return np_function(*args)
def nd_to_1d_pd_df_adapter(np_function, nd_args: tp.Tuple[pd.DataFrame], plain_args: tuple) -> pd.Series:
np_nd_args = tuple(a.to_numpy().transpose() for a in nd_args)
np_result = nd_to_1d_np_adapter(np_function, np_nd_args, plain_args)
np_result = np_result.transpose()
return pd.Series(np_result, index=nd_args[0].index)
def nd_to_1d_xr_da_adapter(np_function, nd_args: tp.Tuple[xr.DataArray], plain_args: tuple) -> xr.DataArray:
origin_dims = nd_args[0].dims
transpose_dims = tuple(i for i in origin_dims if i != XR_TIME_DIMENSION) + (XR_TIME_DIMENSION,)
np_nd_args = tuple(a.transpose(*transpose_dims).values for a in nd_args)
np_result = nd_to_1d_np_adapter(np_function, np_nd_args, plain_args)
return xr.DataArray(
np_result,
dims=[XR_TIME_DIMENSION],
coords=[nd_args[0].coords[XR_TIME_DIMENSION]]
)
| [
"pandas.DataFrame",
"pandas.Series",
"numpy.empty_like",
"xarray.DataArray"
] | [((1203, 1231), 'numpy.empty_like', 'np.empty_like', (['nd_args_2d[0]'], {}), '(nd_args_2d[0])\n', (1216, 1231), True, 'import numpy as np\n'), ((1725, 1800), 'pandas.DataFrame', 'pd.DataFrame', (['np_result'], {'columns': 'nd_args[0].columns', 'index': 'nd_args[0].index'}), '(np_result, columns=nd_args[0].columns, index=nd_args[0].index)\n', (1737, 1800), True, 'import pandas as pd\n'), ((2069, 2107), 'pandas.Series', 'pd.Series', (['np_result', 'nd_args[0].index'], {}), '(np_result, nd_args[0].index)\n', (2078, 2107), True, 'import pandas as pd\n'), ((3525, 3569), 'pandas.Series', 'pd.Series', (['np_result'], {'index': 'nd_args[0].index'}), '(np_result, index=nd_args[0].index)\n', (3534, 3569), True, 'import pandas as pd\n'), ((3976, 4077), 'xarray.DataArray', 'xr.DataArray', (['np_result'], {'dims': '[XR_TIME_DIMENSION]', 'coords': '[nd_args[0].coords[XR_TIME_DIMENSION]]'}), '(np_result, dims=[XR_TIME_DIMENSION], coords=[nd_args[0].coords\n [XR_TIME_DIMENSION]])\n', (3988, 4077), True, 'import xarray as xr\n'), ((2502, 2572), 'xarray.DataArray', 'xr.DataArray', (['np_result'], {'dims': 'transpose_dims', 'coords': 'nd_args[0].coords'}), '(np_result, dims=transpose_dims, coords=nd_args[0].coords)\n', (2514, 2572), True, 'import xarray as xr\n')] |
"""Wrapper code for using commonly-used layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from basenji import ops
def exp_function(length, decay_constant=0.05):
"""
"""
X = np.zeros((length, length), dtype=np.float32)
for i in range(length):
X[i, :] = np.exp(-1*decay_constant*np.abs(i-(np.arange(length))))
X -= np.eye(length)
return tf.convert_to_tensor(X)
def inv_exp_function(length, decay=0.01, right=False):
X = np.zeros((length, length), dtype=np.float32)
for i in range(length):
if right:
x = np.exp(-1*decay*np.maximum(i - np.arange(length) + (length/2), 0))
else:
x = np.exp(-1*decay*np.maximum(-i + (np.arange(length) + (length/2)), 0))
X[i,:] = x
X[i,i] = 0.0
return X
def exp_block(seqs_repr, is_training,
decay_constants, name=''):
H = seqs_repr
length = H.get_shape().as_list()[1]
batch_size = tf.shape(H)[0]
seqs_repr_next = H
for decay_constant in decay_constants:
A = exp_function(length, decay_constant)
A = tf.expand_dims(A, axis=0)
C = tf.matmul(tf.tile(A, multiples=[batch_size, 1, 1]), H)
seqs_repr_next = tf.concat([seqs_repr_next, C], axis=2)
tf.logging.info('Exp layer with decay constants {}.'.format(decay_constants))
return seqs_repr_next
def exp_block_variable(seqs_repr, is_training,
decay_variable, name=''):
H = seqs_repr
length = H.get_shape().as_list()[1]
batch_size = tf.shape(H)[0]
contexts = [H]
for i in range(decay_variable):
with tf.variable_scope('learned_exponential{}'.format(i), reuse=tf.AUTO_REUSE):
exp_fn = exp_function(length, 1)
decay_factor = tf.get_variable(f"decay_factor", [1],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(0, 1),
constraint=lambda x: tf.clip_by_value(x, 0, np.infty))
decay_factor = tf.Print(decay_factor, [decay_factor])
A = tf.pow(exp_fn, decay_factor)
A = tf.nn.softmax(A, axis=2)
A = tf.expand_dims(A, axis=0)
C = tf.matmul(tf.tile(A, multiples=[batch_size, 1, 1]), H)
contexts.append(C)
seqs_repr_next = tf.concat(contexts, axis=2)
tf.logging.info(f'Exp layer with {decay_variable} decay variables.')
return seqs_repr_next
def multi_head_attention_block(seqs_repr, is_training, num_heads, num_units, n_query_layers,
decay_variable, decay_constant,
dropout, query_dropout,
l2_scale, name=''):
contexts = [seqs_repr]
for i in range(num_heads):
with tf.variable_scope('multi_attention{}'.format(i), reuse=tf.AUTO_REUSE):
context = attention_block(seqs_repr=seqs_repr,
is_training=is_training,
n_query_layers=n_query_layers,
decay_variable=decay_variable,
decay_constant=decay_constant,
dropout=dropout,
query_dropout=query_dropout,
l2_scale=l2_scale,
dense=False)
contexts.append(context)
tf.logging.info("Adding attention head.")
seqs_repr = tf.concat(contexts, axis=2)
tf.logging.info("Concatentating contexts.")
#with tf.variable_scope('multi_attention_final', reuse=tf.AUTO_REUSE):
#seqs_repr = tf.layers.dense(inputs=seqs_repr,
# units=num_units,
# activation=tf.nn.relu,
# kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_in'),
# kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_scale))
#seqs_repr = tf.layers.conv1d(
# seqs_repr,
# filters=2048,
# kernel_size=[1],
# strides=1,
# padding='same',
# use_bias=True,
# activation=tf.nn.relu,
# kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_in'),
# kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_scale))
#tf.logging.info("Adding multi-head final dense.")
return seqs_repr
def dense_attention_block(seqs_repr, is_training, num_layers,
decay_variable, decay_constant,
units, dropout, query_dropout,
l2_scale, name=''):
"""
"""
for i in range(num_layers):
with tf.variable_scope('dense_attention{}'.format(i), reuse=tf.AUTO_REUSE):
#seqs_repr = tf.Print(seqs_repr, [tf.shape(seqs_repr)], "{}".format(i))
seqs_repr = attention_block(seqs_repr,
is_training,
decay_variable,
decay_constant,
dropout,
query_dropout,
l2_scale)
layer_reprs.append(seqs_repr)
return seqs_repr
def attention_block(seqs_repr, is_training, n_query_layers,
decay_variable, decay_constant,
dropout, query_dropout,
l2_scale, name='', dense=True):
"""
Args:
seqs_repr: [batchsize, length, num_channels] input sequence
is_training: whether is a training graph or not
batch_norm: whether to use batchnorm
bn_momentum: batch norm momentum
batch_renorm: whether to use batch renormalization in batchnorm
l2_scale: L2 weight regularization scale
name: optional name for the block
"""
H = seqs_repr
length = H.get_shape().as_list()[1]
num_channels = H.get_shape().as_list()[2]
Q = H
for i in range(n_query_layers):
Q = tf.layers.dense(Q, num_channels,
activation=tf.nn.tanh,
use_bias=True,
kernel_initializer= tf.variance_scaling_initializer(scale=2.0, mode='fan_in'),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None)
if query_dropout > 0:
Q = tf.layers.dropout(inputs=Q,
rate=query_dropout,
training=is_training)
tf.logging.info('Query Dropout w/ probability %.3f' % query_dropout)
A = tf.matmul(Q, H, transpose_b=True)
tf.logging.info("Adding A ReLU")
A = tf.nn.relu(A)
if decay_variable:
#tf.logging.info("Adding decay variable.")
#exp_fn = exp_function(length, 1)
#decay_factor = tf.get_variable("decay_factor", [1],
# dtype=tf.float32,
# initializer=tf.random_uniform_initializer(0, 1),
# constraint=lambda x: tf.clip_by_value(x, 0, np.infty))
#exp_fn = tf.pow(exp_fn, decay_factor)
#A = tf.multiply(A, exp_fn)
tf.logging.info("Adding flex focus.")
left_exp_fn = inv_exp_function(length, right=False)
right_exp_fn = inv_exp_function(length, right=True)
left_decay_factor = tf.get_variable("left_decay_factor", [1],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(0, 1),
constraint=lambda x: tf.clip_by_value(x, 0, np.infty))
right_decay_factor = tf.get_variable("right_decay_factor", [1],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(0, 1),
constraint=lambda x: tf.clip_by_value(x, 0, np.infty))
left_exp_fn = tf.pow(left_exp_fn, left_decay_factor)
left_exp_fn = tf.Print(left_exp_fn, [left_decay_factor, right_decay_factor], "Left_exp_fn, right_exp_fn:")
right_exp_fn = tf.pow(right_exp_fn, right_decay_factor)
exp_fn = tf.math.minimum(left_exp_fn, right_exp_fn)
A = tf.multiply(A, exp_fn)
elif decay_constant > 0:
tf.logging.info("Adding decay constant of {}".format(decay_constant))
exp_fn = exp_function(length, decay_constant)
A = tf.multiply(A, exp_fn)
A = tf.nn.softmax(A, axis=2)
C = tf.matmul(A, H)
if dense:
seqs_repr_next = tf.concat([H, C], axis=2)
else:
seqs_repr_next = C
if dropout > 0:
seqs_repr_next = tf.layers.dropout(
inputs=seqs_repr_next,
rate=dropout,
training=is_training)
tf.logging.info('Dropout w/ probability %.3f' % dropout)
tf.logging.info('Attention Layer.')
return seqs_repr_next
def conv_block(seqs_repr, conv_params, is_training,
batch_norm, batch_norm_momentum,
batch_renorm, batch_renorm_momentum,
l2_scale, layer_reprs, name=''):
"""Construct a single (dilated) CNN block.
Args:
seqs_repr: [batchsize, length, num_channels] input sequence
conv_params: convolution parameters
is_training: whether is a training graph or not
batch_norm: whether to use batchnorm
bn_momentum: batch norm momentum
batch_renorm: whether to use batch renormalization in batchnorm
l2_scale: L2 weight regularization scale
name: optional name for the block
Returns:
updated representation for the sequence
"""
# ReLU
seqs_repr_next = tf.nn.relu(seqs_repr)
tf.logging.info('ReLU')
# Convolution
seqs_repr_next = tf.layers.conv1d(
seqs_repr_next,
filters=conv_params.filters,
kernel_size=[conv_params.filter_size],
strides=conv_params.stride,
padding='same',
dilation_rate=[conv_params.dilation],
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_in'),
kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_scale))
tf.logging.info('Convolution w/ %d %dx%d filters strided %d, dilated %d' %
(conv_params.filters, seqs_repr.shape[2],
conv_params.filter_size, conv_params.stride,
conv_params.dilation))
# Batch norm
if batch_norm:
seqs_repr_next = tf.layers.batch_normalization(
seqs_repr_next,
momentum=batch_norm_momentum,
training=is_training,
renorm=batch_renorm,
renorm_clipping={'rmin': 1./4, 'rmax':4., 'dmax':6.},
renorm_momentum=batch_renorm_momentum,
fused=True)
tf.logging.info('Batch normalization')
# Dropout
if conv_params.dropout > 0:
seqs_repr_next = tf.layers.dropout(
inputs=seqs_repr_next,
rate=conv_params.dropout,
training=is_training)
tf.logging.info('Dropout w/ probability %.3f' % conv_params.dropout)
# Skip
if conv_params.skip_layers > 0:
if conv_params.skip_layers > len(layer_reprs):
raise ValueError('Skip connection reaches back too far.')
# Add
seqs_repr_next += layer_reprs[-conv_params.skip_layers]
# Dense
elif conv_params.dense:
seqs_repr_next = tf.concat(values=[seqs_repr, seqs_repr_next], axis=2)
# Pool
if conv_params.pool > 1:
seqs_repr_next = tf.layers.max_pooling1d(
inputs=seqs_repr_next,
pool_size=conv_params.pool,
strides=conv_params.pool,
padding='same')
tf.logging.info('Max pool %d' % conv_params.pool)
return seqs_repr_next
| [
"tensorflow.tile",
"tensorflow.shape",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.multiply",
"tensorflow.nn.softmax",
"tensorflow.zeros_initializer",
"numpy.arange",
"tensorflow.math.minimum",
"tensorflow.Print",
"tensorflow.pow",
"tensorflow.concat",
"tensorflow.matmul",
"tenso... | [((296, 340), 'numpy.zeros', 'np.zeros', (['(length, length)'], {'dtype': 'np.float32'}), '((length, length), dtype=np.float32)\n', (304, 340), True, 'import numpy as np\n'), ((446, 460), 'numpy.eye', 'np.eye', (['length'], {}), '(length)\n', (452, 460), True, 'import numpy as np\n'), ((470, 493), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['X'], {}), '(X)\n', (490, 493), True, 'import tensorflow as tf\n'), ((557, 601), 'numpy.zeros', 'np.zeros', (['(length, length)'], {'dtype': 'np.float32'}), '((length, length), dtype=np.float32)\n', (565, 601), True, 'import numpy as np\n'), ((2305, 2332), 'tensorflow.concat', 'tf.concat', (['contexts'], {'axis': '(2)'}), '(contexts, axis=2)\n', (2314, 2332), True, 'import tensorflow as tf\n'), ((2336, 2404), 'tensorflow.logging.info', 'tf.logging.info', (['f"""Exp layer with {decay_variable} decay variables."""'], {}), "(f'Exp layer with {decay_variable} decay variables.')\n", (2351, 2404), True, 'import tensorflow as tf\n'), ((3486, 3513), 'tensorflow.concat', 'tf.concat', (['contexts'], {'axis': '(2)'}), '(contexts, axis=2)\n', (3495, 3513), True, 'import tensorflow as tf\n'), ((3516, 3559), 'tensorflow.logging.info', 'tf.logging.info', (['"""Concatentating contexts."""'], {}), "('Concatentating contexts.')\n", (3531, 3559), True, 'import tensorflow as tf\n'), ((6693, 6726), 'tensorflow.matmul', 'tf.matmul', (['Q', 'H'], {'transpose_b': '(True)'}), '(Q, H, transpose_b=True)\n', (6702, 6726), True, 'import tensorflow as tf\n'), ((6729, 6761), 'tensorflow.logging.info', 'tf.logging.info', (['"""Adding A ReLU"""'], {}), "('Adding A ReLU')\n", (6744, 6761), True, 'import tensorflow as tf\n'), ((6768, 6781), 'tensorflow.nn.relu', 'tf.nn.relu', (['A'], {}), '(A)\n', (6778, 6781), True, 'import tensorflow as tf\n'), ((8509, 8533), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['A'], {'axis': '(2)'}), '(A, axis=2)\n', (8522, 8533), True, 'import tensorflow as tf\n'), ((8540, 8555), 'tensorflow.matmul', 
'tf.matmul', (['A', 'H'], {}), '(A, H)\n', (8549, 8555), True, 'import tensorflow as tf\n'), ((8885, 8920), 'tensorflow.logging.info', 'tf.logging.info', (['"""Attention Layer."""'], {}), "('Attention Layer.')\n", (8900, 8920), True, 'import tensorflow as tf\n'), ((9676, 9697), 'tensorflow.nn.relu', 'tf.nn.relu', (['seqs_repr'], {}), '(seqs_repr)\n', (9686, 9697), True, 'import tensorflow as tf\n'), ((9700, 9723), 'tensorflow.logging.info', 'tf.logging.info', (['"""ReLU"""'], {}), "('ReLU')\n", (9715, 9723), True, 'import tensorflow as tf\n'), ((10157, 10349), 'tensorflow.logging.info', 'tf.logging.info', (["('Convolution w/ %d %dx%d filters strided %d, dilated %d' % (conv_params.\n filters, seqs_repr.shape[2], conv_params.filter_size, conv_params.\n stride, conv_params.dilation))"], {}), "('Convolution w/ %d %dx%d filters strided %d, dilated %d' %\n (conv_params.filters, seqs_repr.shape[2], conv_params.filter_size,\n conv_params.stride, conv_params.dilation))\n", (10172, 10349), True, 'import tensorflow as tf\n'), ((1001, 1012), 'tensorflow.shape', 'tf.shape', (['H'], {}), '(H)\n', (1009, 1012), True, 'import tensorflow as tf\n'), ((1131, 1156), 'tensorflow.expand_dims', 'tf.expand_dims', (['A'], {'axis': '(0)'}), '(A, axis=0)\n', (1145, 1156), True, 'import tensorflow as tf\n'), ((1241, 1279), 'tensorflow.concat', 'tf.concat', (['[seqs_repr_next, C]'], {'axis': '(2)'}), '([seqs_repr_next, C], axis=2)\n', (1250, 1279), True, 'import tensorflow as tf\n'), ((1543, 1554), 'tensorflow.shape', 'tf.shape', (['H'], {}), '(H)\n', (1551, 1554), True, 'import tensorflow as tf\n'), ((6490, 6559), 'tensorflow.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'Q', 'rate': 'query_dropout', 'training': 'is_training'}), '(inputs=Q, rate=query_dropout, training=is_training)\n', (6507, 6559), True, 'import tensorflow as tf\n'), ((6617, 6685), 'tensorflow.logging.info', 'tf.logging.info', (["('Query Dropout w/ probability %.3f' % query_dropout)"], {}), "('Query Dropout w/ 
probability %.3f' % query_dropout)\n", (6632, 6685), True, 'import tensorflow as tf\n'), ((7259, 7296), 'tensorflow.logging.info', 'tf.logging.info', (['"""Adding flex focus."""'], {}), "('Adding flex focus.')\n", (7274, 7296), True, 'import tensorflow as tf\n'), ((8019, 8057), 'tensorflow.pow', 'tf.pow', (['left_exp_fn', 'left_decay_factor'], {}), '(left_exp_fn, left_decay_factor)\n', (8025, 8057), True, 'import tensorflow as tf\n'), ((8076, 8172), 'tensorflow.Print', 'tf.Print', (['left_exp_fn', '[left_decay_factor, right_decay_factor]', '"""Left_exp_fn, right_exp_fn:"""'], {}), "(left_exp_fn, [left_decay_factor, right_decay_factor],\n 'Left_exp_fn, right_exp_fn:')\n", (8084, 8172), True, 'import tensorflow as tf\n'), ((8188, 8228), 'tensorflow.pow', 'tf.pow', (['right_exp_fn', 'right_decay_factor'], {}), '(right_exp_fn, right_decay_factor)\n', (8194, 8228), True, 'import tensorflow as tf\n'), ((8243, 8285), 'tensorflow.math.minimum', 'tf.math.minimum', (['left_exp_fn', 'right_exp_fn'], {}), '(left_exp_fn, right_exp_fn)\n', (8258, 8285), True, 'import tensorflow as tf\n'), ((8294, 8316), 'tensorflow.multiply', 'tf.multiply', (['A', 'exp_fn'], {}), '(A, exp_fn)\n', (8305, 8316), True, 'import tensorflow as tf\n'), ((8590, 8615), 'tensorflow.concat', 'tf.concat', (['[H, C]'], {'axis': '(2)'}), '([H, C], axis=2)\n', (8599, 8615), True, 'import tensorflow as tf\n'), ((8689, 8765), 'tensorflow.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'seqs_repr_next', 'rate': 'dropout', 'training': 'is_training'}), '(inputs=seqs_repr_next, rate=dropout, training=is_training)\n', (8706, 8765), True, 'import tensorflow as tf\n'), ((8825, 8881), 'tensorflow.logging.info', 'tf.logging.info', (["('Dropout w/ probability %.3f' % dropout)"], {}), "('Dropout w/ probability %.3f' % dropout)\n", (8840, 8881), True, 'import tensorflow as tf\n'), ((10452, 10694), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['seqs_repr_next'], {'momentum': 
'batch_norm_momentum', 'training': 'is_training', 'renorm': 'batch_renorm', 'renorm_clipping': "{'rmin': 1.0 / 4, 'rmax': 4.0, 'dmax': 6.0}", 'renorm_momentum': 'batch_renorm_momentum', 'fused': '(True)'}), "(seqs_repr_next, momentum=batch_norm_momentum,\n training=is_training, renorm=batch_renorm, renorm_clipping={'rmin': 1.0 /\n 4, 'rmax': 4.0, 'dmax': 6.0}, renorm_momentum=batch_renorm_momentum,\n fused=True)\n", (10481, 10694), True, 'import tensorflow as tf\n'), ((10737, 10775), 'tensorflow.logging.info', 'tf.logging.info', (['"""Batch normalization"""'], {}), "('Batch normalization')\n", (10752, 10775), True, 'import tensorflow as tf\n'), ((10840, 10933), 'tensorflow.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'seqs_repr_next', 'rate': 'conv_params.dropout', 'training': 'is_training'}), '(inputs=seqs_repr_next, rate=conv_params.dropout, training\n =is_training)\n', (10857, 10933), True, 'import tensorflow as tf\n'), ((10958, 11026), 'tensorflow.logging.info', 'tf.logging.info', (["('Dropout w/ probability %.3f' % conv_params.dropout)"], {}), "('Dropout w/ probability %.3f' % conv_params.dropout)\n", (10973, 11026), True, 'import tensorflow as tf\n'), ((11427, 11547), 'tensorflow.layers.max_pooling1d', 'tf.layers.max_pooling1d', ([], {'inputs': 'seqs_repr_next', 'pool_size': 'conv_params.pool', 'strides': 'conv_params.pool', 'padding': '"""same"""'}), "(inputs=seqs_repr_next, pool_size=conv_params.pool,\n strides=conv_params.pool, padding='same')\n", (11450, 11547), True, 'import tensorflow as tf\n'), ((11581, 11630), 'tensorflow.logging.info', 'tf.logging.info', (["('Max pool %d' % conv_params.pool)"], {}), "('Max pool %d' % conv_params.pool)\n", (11596, 11630), True, 'import tensorflow as tf\n'), ((1175, 1215), 'tensorflow.tile', 'tf.tile', (['A'], {'multiples': '[batch_size, 1, 1]'}), '(A, multiples=[batch_size, 1, 1])\n', (1182, 1215), True, 'import tensorflow as tf\n'), ((2047, 2085), 'tensorflow.Print', 'tf.Print', (['decay_factor', 
'[decay_factor]'], {}), '(decay_factor, [decay_factor])\n', (2055, 2085), True, 'import tensorflow as tf\n'), ((2096, 2124), 'tensorflow.pow', 'tf.pow', (['exp_fn', 'decay_factor'], {}), '(exp_fn, decay_factor)\n', (2102, 2124), True, 'import tensorflow as tf\n'), ((2135, 2159), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['A'], {'axis': '(2)'}), '(A, axis=2)\n', (2148, 2159), True, 'import tensorflow as tf\n'), ((2170, 2195), 'tensorflow.expand_dims', 'tf.expand_dims', (['A'], {'axis': '(0)'}), '(A, axis=0)\n', (2184, 2195), True, 'import tensorflow as tf\n'), ((3430, 3471), 'tensorflow.logging.info', 'tf.logging.info', (['"""Adding attention head."""'], {}), "('Adding attention head.')\n", (3445, 3471), True, 'import tensorflow as tf\n'), ((8477, 8499), 'tensorflow.multiply', 'tf.multiply', (['A', 'exp_fn'], {}), '(A, exp_fn)\n', (8488, 8499), True, 'import tensorflow as tf\n'), ((10027, 10084), 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {'scale': '(2.0)', 'mode': '"""fan_in"""'}), "(scale=2.0, mode='fan_in')\n", (10058, 10084), True, 'import tensorflow as tf\n'), ((10111, 10153), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['l2_scale'], {}), '(l2_scale)\n', (10143, 10153), True, 'import tensorflow as tf\n'), ((11315, 11368), 'tensorflow.concat', 'tf.concat', ([], {'values': '[seqs_repr, seqs_repr_next]', 'axis': '(2)'}), '(values=[seqs_repr, seqs_repr_next], axis=2)\n', (11324, 11368), True, 'import tensorflow as tf\n'), ((2216, 2256), 'tensorflow.tile', 'tf.tile', (['A'], {'multiples': '[batch_size, 1, 1]'}), '(A, multiples=[batch_size, 1, 1])\n', (2223, 2256), True, 'import tensorflow as tf\n'), ((6282, 6339), 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {'scale': '(2.0)', 'mode': '"""fan_in"""'}), "(scale=2.0, mode='fan_in')\n", (6313, 6339), True, 'import tensorflow as tf\n'), ((6383, 6405), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', 
([], {}), '()\n', (6403, 6405), True, 'import tensorflow as tf\n'), ((7577, 7612), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(0)', '(1)'], {}), '(0, 1)\n', (7606, 7612), True, 'import tensorflow as tf\n'), ((7874, 7909), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(0)', '(1)'], {}), '(0, 1)\n', (7903, 7909), True, 'import tensorflow as tf\n'), ((1897, 1932), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(0)', '(1)'], {}), '(0, 1)\n', (1926, 1932), True, 'import tensorflow as tf\n'), ((7670, 7702), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(0)', 'np.infty'], {}), '(x, 0, np.infty)\n', (7686, 7702), True, 'import tensorflow as tf\n'), ((7967, 7999), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(0)', 'np.infty'], {}), '(x, 0, np.infty)\n', (7983, 7999), True, 'import tensorflow as tf\n'), ((418, 435), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (427, 435), True, 'import numpy as np\n'), ((1992, 2024), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(0)', 'np.infty'], {}), '(x, 0, np.infty)\n', (2008, 2024), True, 'import tensorflow as tf\n'), ((683, 700), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (692, 700), True, 'import numpy as np\n'), ((772, 789), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (781, 789), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
'''
Epidemiological models used for fitting/analyzing data. Derived classes
have increasing ability to handle variable parameters which may be optimized.
At this point, most capable is class ̀SEIRYOpt3`.
'''
__author__ = '<NAME>'
__license__ = 'MIT License'
__version__ = '1.1'
import math
import numpy as NP
import numpy.random as RAND
import scipy.stats as STATS
import pandas as PAN
from scipy import sparse
from scipy import linalg
from scipy import integrate
from lib.basicUtils import toDict
class SEIRYModel(object):
def __init__(self, t0 = 0,tF = 100, tDelta = 1):
self.t0 = t0
self.tF = tF
self.tDelta = tDelta
self.tSteps = NP.arange(t0, tF, tDelta)
def setInitial(self,
i0 = 389/14.0e6, # infected Wuhan in paper
e0 = 318/14.0e6, # exposed
r0 = 0, # recovered
q0 = 0, # quarantined
d0 = 0, # dead
p0 = 0, # insusceptibles
s0 = None # susceptibles defaults to :1 -((i0 + e0 + r0)
):
if s0 is None:
s0 = 1 - (i0 + e0 + r0) # susceptibles
self.y0 = (s0 , e0, i0, q0, r0, d0, p0 )
print(f"In setInitial: initial values for ODE")
for (x,l) in ((i0,"i0: infected"), (e0,"e0: exposed"), (r0, "r0: recovered"),
(q0,"q0: quarantined"), (d0,"d0: dead"),
(p0,"p0: insusceptibles"), (s0, "s0: susceptibles")):
print (f"\t{l}\t->\t{x}")
def lambFn(t): # estimated dates for change in policy/efficiency
if t <20:
return 0.02
else:
return 0.04
def kappaFn(t): # estimated dates for change in policy/efficiency
if t<30:
return 3.0e-2
else:
return 1.0e-2
def setParms(self,
beta = 1, # rate of exposition (in prop of susceptibles
#meeting infected)
alpha = 0.085, # rate of susceptibles becoming insusceptible
delta = 1/7.4, # inverse average quarantine time
lambFn = lambFn, # recovery/cure rate
kappaFn= kappaFn, # mortality rate
gamma = 0.5 # inverse average latent time
):
""" Set internal parameters, will provide defaults for all arguments.
To selectively modify parameters, see method `adjustParms`
"""
self.beta = beta
self.alpha = alpha
self.delta = delta
self.lamb = lambFn
self.kappa = kappaFn
self.gamma = gamma
def adjustParms(self, kwdParmDict):
""" Set internal parameters, only concern parameters appearing in arguments
appearing in `kwdParmDict`
"""
for k in kwdParmDict:
if not hasattr(self,k):
raise KeyError(f"Bad parms={k}")
self.__setattr__(k,kwdParmDict[k])
def showParms(self):
print("-- "*3 + "parms")
for p in ("beta","alpha", "delta", "lamb", "kappa", "gamma"):
print(f"{p}\t->\t{self.__getattr__(p)}")
print("-- "*6)
def __getattr__(self,p):
d = self.__dict__
if p not in d:
raise KeyError(f"Bad parms={p}")
return d[p]
def setattr(self,p,val):
d = self.__dict__
if p not in d:
raise KeyError(f"Bad parms={p}")
d[p]=val
def FTY(self, y, t):
""" The RHS in the ODE, see the paper
"Epidemic analysis of COVID-19 in China by dynamical modeling"
(arxiv:2002.06563V1).
"""
s, e, i, q, r, d, p = y
dydt = (- self.beta * s * i - self.alpha * s, # s
self.beta * s * i - self.gamma * e , # e
self.gamma * e - self.delta*i, # i
self.delta * i - ( self.lamb(t) + self.kappa(t) ) * q, # q
self.lamb(t) * q, # r
self.kappa(t) * q, # d
self.alpha * s # p
)
return dydt
def solve(self):
self.solY, self.psolDict = integrate.odeint( self.FTY, self.y0, self.tSteps,
full_output = True)
self.solDF=PAN.DataFrame(self.solY)
self.solDF.columns = ("susceptible", "exposed","infected", "quarantined",
"recovered", "dead", "insusceptible")
self.solDF.index = self.tSteps
def error(self, refTSteps, refY, columns):
# for now we suppose that refSol is computed with the same time steps
# than the problem solution, and that the solution is available.
# we also assume that these are vector in the reals ($R^n$)
return ((refY - self.solDF.loc[:,columns])**2).sum()
class SEIRYOpt1(SEIRYModel):
"""
This is a first attempt at fitting, with 1 parameter (beta), based on the
data concerning recovered (only)
"""
def __init__(self, beta, refT, refY):
""" (Warning : quite specific to single parm beta)
Derived class used for parameter identification, has reference data
for fitting in parameters
- refT
- refY
For now, all time scales have better be the same, no interpolation
when computing the error.....
"""
SEIRYModel.__init__(self)
self. setInitial()
self.setParms(beta=beta) #this allows for non default beta ?
self.refT = refT
self.refY = refY
def eval(self, beta):
self.adjustParms(toDict( beta = beta )) # beta becomes an optim variable here!
self.solve()
return self.error(self.refT, self.refY, "recovered")
class SEIRYOpt2(SEIRYModel):
"""
This is a first attempt at fitting, with multiple parameters (beta, alpha, delta,
gamma) based on the data concerning recovered (only)
This has reference data for fitting in parameters
- refT
- refY
For now, all time scales have better be the same, no interpolation
when computing the error.....
"""
def __init__(self, parmDict,refT, refY):
SEIRYModel.__init__(self)
self. setInitial()
self.setParms()
self.adjustParms(parmDict)
self.refT = refT
self.refY = refY
def eval(self, pV):
if pV is not None:
self.adjustParms(toDict(beta=pV[0], alpha=pV[1], delta=pV[2], gamma=pV[3]) )
self.solve()
return self.error(self.refT, self.refY, "recovered")
class SEIRYOpt3(SEIRYModel):
"""
This fits, with multiple parameters (beta, alpha, delta, gamma) based on the
data concerning multiple elements of the result vector.
This has reference data for fitting in parameters
- refT
- refY : dataFrame where columns are the reference vectors,
column names are also used for identification in result
For now, all time scales have better be the same, no interpolation
when computing the error.....
This class makes the assumption that the column names of the solution
components are identical in refY and in the solution produced and
found in self.solDF
"""
def __init__(self, parmDict,refT, refY,initial={}, **kwdParms):
SEIRYModel.__init__(self,**kwdParms)
if not isinstance(refY, PAN.DataFrame):
sys.stderr.write(f"Got refY with type {type(refY)}\n")
raise RuntimeError(f"parameter refY is expected to be a DataFrame")
self. setInitial(**initial)
self.setParms()
self.adjustParms(parmDict)
self.refT = refT
self.refY = refY
def error(self, refTSteps, refY):
# for now we suppose that refSol is computed with the same time steps
# than the problem solution, and that the solution is available.
# we also assume that these are vector in the reals ($R^n$)
accum = 0.0
if isinstance( self.solDF, PAN.DataFrame):
for col in refY.columns:
accum += ((refY.loc[:,col] - self.solDF.loc[:,col])**2).sum()
else:
for icol in range(0, refY.shape[1]):
accum += ((refY.iloc[:,icol] - self.solDF.loc[:,icol])**2).sum()
return accum
def eval(self, pV):
if pV is not None:
self.adjustParms(toDict( beta=pV[0], alpha=pV[1], delta=pV[2], gamma=pV[3] ) )
self.solve()
return self.error(self.refT, self.refY)
| [
"scipy.integrate.odeint",
"lib.basicUtils.toDict",
"pandas.DataFrame",
"numpy.arange"
] | [((743, 768), 'numpy.arange', 'NP.arange', (['t0', 'tF', 'tDelta'], {}), '(t0, tF, tDelta)\n', (752, 768), True, 'import numpy as NP\n'), ((4626, 4692), 'scipy.integrate.odeint', 'integrate.odeint', (['self.FTY', 'self.y0', 'self.tSteps'], {'full_output': '(True)'}), '(self.FTY, self.y0, self.tSteps, full_output=True)\n', (4642, 4692), False, 'from scipy import integrate\n'), ((4768, 4792), 'pandas.DataFrame', 'PAN.DataFrame', (['self.solY'], {}), '(self.solY)\n', (4781, 4792), True, 'import pandas as PAN\n'), ((6119, 6136), 'lib.basicUtils.toDict', 'toDict', ([], {'beta': 'beta'}), '(beta=beta)\n', (6125, 6136), False, 'from lib.basicUtils import toDict\n'), ((6969, 7026), 'lib.basicUtils.toDict', 'toDict', ([], {'beta': 'pV[0]', 'alpha': 'pV[1]', 'delta': 'pV[2]', 'gamma': 'pV[3]'}), '(beta=pV[0], alpha=pV[1], delta=pV[2], gamma=pV[3])\n', (6975, 7026), False, 'from lib.basicUtils import toDict\n'), ((9002, 9059), 'lib.basicUtils.toDict', 'toDict', ([], {'beta': 'pV[0]', 'alpha': 'pV[1]', 'delta': 'pV[2]', 'gamma': 'pV[3]'}), '(beta=pV[0], alpha=pV[1], delta=pV[2], gamma=pV[3])\n', (9008, 9059), False, 'from lib.basicUtils import toDict\n')] |
import os
import tempfile
import unittest
import numpy as np
from keras_pos_embd.backend import keras
from keras_pos_embd import TrigPosEmbedding
class TestSinCosPosEmbd(unittest.TestCase):
def test_invalid_output_dim(self):
with self.assertRaises(NotImplementedError):
TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_EXPAND,
output_dim=5,
)
def test_missing_output_dim(self):
with self.assertRaises(NotImplementedError):
TrigPosEmbedding(
mode=TrigPosEmbedding.MODE_EXPAND,
)
def test_brute(self):
seq_len = np.random.randint(1, 10)
embd_dim = np.random.randint(1, 20) * 2
indices = np.expand_dims(np.arange(seq_len), 0)
model = keras.models.Sequential()
model.add(TrigPosEmbedding(
input_shape=(seq_len,),
mode=TrigPosEmbedding.MODE_EXPAND,
output_dim=embd_dim,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(), 'test_trig_pos_embd_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={'TrigPosEmbedding': TrigPosEmbedding})
model.summary()
predicts = model.predict(indices)[0].tolist()
for i in range(seq_len):
for j in range(embd_dim):
actual = predicts[i][j]
if j % 2 == 0:
expect = np.sin(i / 10000.0 ** (float(j) / embd_dim))
else:
expect = np.cos(i / 10000.0 ** ((j - 1.0) / embd_dim))
self.assertAlmostEqual(expect, actual, places=6, msg=(embd_dim, i, j, expect, actual))
def test_add(self):
seq_len = np.random.randint(1, 10)
embed_dim = np.random.randint(1, 20) * 2
inputs = np.ones((1, seq_len, embed_dim))
model = keras.models.Sequential()
model.add(TrigPosEmbedding(
input_shape=(seq_len, embed_dim),
mode=TrigPosEmbedding.MODE_ADD,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(), 'test_trig_pos_embd_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={'TrigPosEmbedding': TrigPosEmbedding})
model.summary()
predicts = model.predict(inputs)[0].tolist()
for i in range(seq_len):
for j in range(embed_dim):
actual = predicts[i][j]
if j % 2 == 0:
expect = 1.0 + np.sin(i / 10000.0 ** (float(j) / embed_dim))
else:
expect = 1.0 + np.cos(i / 10000.0 ** ((j - 1.0) / embed_dim))
self.assertAlmostEqual(expect, actual, places=6, msg=(embed_dim, i, j, expect, actual))
def test_concat(self):
seq_len = np.random.randint(1, 10)
feature_dim = np.random.randint(1, 20)
embed_dim = np.random.randint(1, 20) * 2
inputs = np.ones((1, seq_len, feature_dim))
model = keras.models.Sequential()
model.add(TrigPosEmbedding(
input_shape=(seq_len, feature_dim),
output_dim=embed_dim,
mode=TrigPosEmbedding.MODE_CONCAT,
name='Pos-Embd',
))
model.compile('adam', 'mse')
model_path = os.path.join(tempfile.gettempdir(), 'test_trig_pos_embd_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={'TrigPosEmbedding': TrigPosEmbedding})
model.summary()
predicts = model.predict(inputs)[0].tolist()
for i in range(seq_len):
for j in range(embed_dim):
actual = predicts[i][feature_dim + j]
if j % 2 == 0:
expect = np.sin(i / 10000.0 ** (float(j) / embed_dim))
else:
expect = np.cos(i / 10000.0 ** ((j - 1.0) / embed_dim))
self.assertAlmostEqual(expect, actual, places=6, msg=(embed_dim, i, j, expect, actual))
| [
"numpy.ones",
"numpy.random.random",
"keras_pos_embd.TrigPosEmbedding",
"keras_pos_embd.backend.keras.models.load_model",
"numpy.random.randint",
"tempfile.gettempdir",
"numpy.cos",
"keras_pos_embd.backend.keras.models.Sequential",
"numpy.arange"
] | [((645, 669), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (662, 669), True, 'import numpy as np\n'), ((790, 815), 'keras_pos_embd.backend.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (813, 815), False, 'from keras_pos_embd.backend import keras\n'), ((1198, 1292), 'keras_pos_embd.backend.keras.models.load_model', 'keras.models.load_model', (['model_path'], {'custom_objects': "{'TrigPosEmbedding': TrigPosEmbedding}"}), "(model_path, custom_objects={'TrigPosEmbedding':\n TrigPosEmbedding})\n", (1221, 1292), False, 'from keras_pos_embd.backend import keras\n'), ((1826, 1850), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1843, 1850), True, 'import numpy as np\n'), ((1917, 1949), 'numpy.ones', 'np.ones', (['(1, seq_len, embed_dim)'], {}), '((1, seq_len, embed_dim))\n', (1924, 1949), True, 'import numpy as np\n'), ((1966, 1991), 'keras_pos_embd.backend.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (1989, 1991), False, 'from keras_pos_embd.backend import keras\n'), ((2348, 2442), 'keras_pos_embd.backend.keras.models.load_model', 'keras.models.load_model', (['model_path'], {'custom_objects': "{'TrigPosEmbedding': TrigPosEmbedding}"}), "(model_path, custom_objects={'TrigPosEmbedding':\n TrigPosEmbedding})\n", (2371, 2442), False, 'from keras_pos_embd.backend import keras\n'), ((2994, 3018), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (3011, 3018), True, 'import numpy as np\n'), ((3041, 3065), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (3058, 3065), True, 'import numpy as np\n'), ((3132, 3166), 'numpy.ones', 'np.ones', (['(1, seq_len, feature_dim)'], {}), '((1, seq_len, feature_dim))\n', (3139, 3166), True, 'import numpy as np\n'), ((3183, 3208), 'keras_pos_embd.backend.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (3206, 3208), False, 'from 
keras_pos_embd.backend import keras\n'), ((3604, 3698), 'keras_pos_embd.backend.keras.models.load_model', 'keras.models.load_model', (['model_path'], {'custom_objects': "{'TrigPosEmbedding': TrigPosEmbedding}"}), "(model_path, custom_objects={'TrigPosEmbedding':\n TrigPosEmbedding})\n", (3627, 3698), False, 'from keras_pos_embd.backend import keras\n'), ((299, 364), 'keras_pos_embd.TrigPosEmbedding', 'TrigPosEmbedding', ([], {'mode': 'TrigPosEmbedding.MODE_EXPAND', 'output_dim': '(5)'}), '(mode=TrigPosEmbedding.MODE_EXPAND, output_dim=5)\n', (315, 364), False, 'from keras_pos_embd import TrigPosEmbedding\n'), ((517, 568), 'keras_pos_embd.TrigPosEmbedding', 'TrigPosEmbedding', ([], {'mode': 'TrigPosEmbedding.MODE_EXPAND'}), '(mode=TrigPosEmbedding.MODE_EXPAND)\n', (533, 568), False, 'from keras_pos_embd import TrigPosEmbedding\n'), ((689, 713), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (706, 713), True, 'import numpy as np\n'), ((751, 769), 'numpy.arange', 'np.arange', (['seq_len'], {}), '(seq_len)\n', (760, 769), True, 'import numpy as np\n'), ((834, 951), 'keras_pos_embd.TrigPosEmbedding', 'TrigPosEmbedding', ([], {'input_shape': '(seq_len,)', 'mode': 'TrigPosEmbedding.MODE_EXPAND', 'output_dim': 'embd_dim', 'name': '"""Pos-Embd"""'}), "(input_shape=(seq_len,), mode=TrigPosEmbedding.MODE_EXPAND,\n output_dim=embd_dim, name='Pos-Embd')\n", (850, 951), False, 'from keras_pos_embd import TrigPosEmbedding\n'), ((1079, 1100), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1098, 1100), False, 'import tempfile\n'), ((1871, 1895), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (1888, 1895), True, 'import numpy as np\n'), ((2010, 2114), 'keras_pos_embd.TrigPosEmbedding', 'TrigPosEmbedding', ([], {'input_shape': '(seq_len, embed_dim)', 'mode': 'TrigPosEmbedding.MODE_ADD', 'name': '"""Pos-Embd"""'}), "(input_shape=(seq_len, embed_dim), mode=TrigPosEmbedding.\n MODE_ADD, 
name='Pos-Embd')\n", (2026, 2114), False, 'from keras_pos_embd import TrigPosEmbedding\n'), ((2229, 2250), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2248, 2250), False, 'import tempfile\n'), ((3086, 3110), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (3103, 3110), True, 'import numpy as np\n'), ((3227, 3357), 'keras_pos_embd.TrigPosEmbedding', 'TrigPosEmbedding', ([], {'input_shape': '(seq_len, feature_dim)', 'output_dim': 'embed_dim', 'mode': 'TrigPosEmbedding.MODE_CONCAT', 'name': '"""Pos-Embd"""'}), "(input_shape=(seq_len, feature_dim), output_dim=embed_dim,\n mode=TrigPosEmbedding.MODE_CONCAT, name='Pos-Embd')\n", (3243, 3357), False, 'from keras_pos_embd import TrigPosEmbedding\n'), ((3485, 3506), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (3504, 3506), False, 'import tempfile\n'), ((1131, 1149), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1147, 1149), True, 'import numpy as np\n'), ((2281, 2299), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2297, 2299), True, 'import numpy as np\n'), ((3537, 3555), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3553, 3555), True, 'import numpy as np\n'), ((1634, 1679), 'numpy.cos', 'np.cos', (['(i / 10000.0 ** ((j - 1.0) / embd_dim))'], {}), '(i / 10000.0 ** ((j - 1.0) / embd_dim))\n', (1640, 1679), True, 'import numpy as np\n'), ((4055, 4101), 'numpy.cos', 'np.cos', (['(i / 10000.0 ** ((j - 1.0) / embed_dim))'], {}), '(i / 10000.0 ** ((j - 1.0) / embed_dim))\n', (4061, 4101), True, 'import numpy as np\n'), ((2797, 2843), 'numpy.cos', 'np.cos', (['(i / 10000.0 ** ((j - 1.0) / embed_dim))'], {}), '(i / 10000.0 ** ((j - 1.0) / embed_dim))\n', (2803, 2843), True, 'import numpy as np\n')] |
"""
MatPlotLib viewer for NumPy 1D NumPy data.
"""
import numpy as np
import matplotlib.pyplot as plt
class _MplDataViewer1D(object):
"""This class allows to display 1D NumPy array in a :class:`matplotlib.figure.Figure`
This will be a simple matplotlib plot.
:param dataset: the NumPy array to be displayed
The dataset will be squeezed from any dimensions equal to 1
:type dataset: :class:`numpy.ndarray`
:param `kwargs`: the keyword arguments
:type `kwargs`: dict
"""
def __init__(self,dataset,**kwargs):
self._figure = plt.figure()
self._initLayout()
self.dataset = dataset
plt.show(self._figure)
@property
def figure(self):
"""Getter for the figure to be displayed.
:return: returns the figure to be displayed in the jupyter output widget
:rtype: :class:`matplotlib.figure.Figure`
"""
return self._figure
@property
def dataset(self):
"""Getter/setter for the dataset to be displayed.
:getter: returns the dataset to be displayed
:setter: sets the dataset to be displayed
:type: :class:`numpy.ndarray`
"""
return self._dataset
@dataset.setter
def dataset(self,dataset):
self._dataset = dataset
self.update()
def _initLayout(self):
"""Initializes the figure layout.
"""
self._mainAxes = plt.subplot(111)
def update(self):
"""Update the figure.
"""
self._mainAxes.plot(self._dataset[:])
plt.draw()
if __name__ == "__main__":
data = np.random.uniform(0,1,(1000,))
d = DataViewer1D(data)
plt.show()
| [
"matplotlib.pyplot.figure",
"numpy.random.uniform",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((1771, 1803), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1000,)'], {}), '(0, 1, (1000,))\n', (1788, 1803), True, 'import numpy as np\n'), ((1835, 1845), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1843, 1845), True, 'import matplotlib.pyplot as plt\n'), ((618, 630), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (628, 630), True, 'import matplotlib.pyplot as plt\n'), ((700, 722), 'matplotlib.pyplot.show', 'plt.show', (['self._figure'], {}), '(self._figure)\n', (708, 722), True, 'import matplotlib.pyplot as plt\n'), ((1554, 1570), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1565, 1570), True, 'import matplotlib.pyplot as plt\n'), ((1720, 1730), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1728, 1730), True, 'import matplotlib.pyplot as plt\n')] |
import os
import random
import numpy as np
import argparse
import itertools
from models.resnet18_classifier import Resnet_classifier
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
from utils.dataLoader import dataloader_train, dataloader_val
parser = argparse.ArgumentParser(description='Trains ResNet on tiny-imagenet-200', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--train_dir', type = str, default = '/nfs/pyrex/raid6/svenkata/Datasets/tiny-imagenet-200/train',
help='path to train folder')
parser.add_argument('--val_dir', type = str, default = '/nfs/pyrex/raid6/svenkata/Datasets/tiny-imagenet-200/val',
help='path to val folder')
parser.add_argument('--save_dir', type = str, default = '/nfs/pyrex/raid6/svenkata/weights/AlignMixup_CVPR22/tiny_imagenet/',
help='folder where results are to be stored')
# Optimization options
parser.add_argument('--epochs', type=int, default=1200, help='Number of epochs to train.')
parser.add_argument('--alpha', type=float, default=2.0, help='alpha parameter for mixup')
parser.add_argument('--num_classes', type=int, default=200, help='number of classes, set 100 for CIFAR-100')
parser.add_argument('--batch_size', type=int, default=256, help='Batch size.')
parser.add_argument('--lr_', type=float, default=0.1, help='The Learning Rate.')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', type=float, default=1e-4, help='Weight decay (L2 penalty).')
parser.add_argument('--schedule', type=int, nargs='+', default=[600, 900], help='Decrease learning rate at these epochs.')
parser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1], help='LR is multiplied by gamma on schedule, number of gammas should be equal to schedule')
# Checkpoints
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--workers', type=int, default=8, help='number of data loading workers (default: 8)')
# random seed
parser.add_argument('--manualSeed', type=int, help='manual seed')
args = parser.parse_args()
out_str = str(args)
print(out_str)
device = torch.device("cuda" if args.ngpu>0 and torch.cuda.is_available() else "cpu")
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
np.random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
torch.cuda.manual_seed_all(args.manualSeed)
cudnn.benchmark = True
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
transform_train = transforms.Compose([
transforms.RandomCrop(64, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
train_data = dataloader_train(args.train_dir, transform_train)
test_data = dataloader_val(os.path.join(args.val_dir, 'images'), os.path.join(args.val_dir, 'val_annotations.txt'), transform_test)
trainloader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)
testloader = DataLoader(test_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)
model = Resnet_classifier(num_classes=args.num_classes)
model = torch.nn.DataParallel(model)
model.to(device)
print(model)
optimizer = optim.SGD(model.parameters(), lr=args.lr_, momentum=args.momentum, weight_decay=args.decay)
criterion = nn.CrossEntropyLoss()
best_acc = 0
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
best_acc = checkpoint['acc']
print("=> loaded checkpoint '{}' accuracy={} (epoch {})" .format(args.resume, best_acc, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
def train(epoch):
model.train()
total_loss = 0
correct = 0
for i, (images, targets) in enumerate(trainloader):
images = images.to(device)
targets = targets.to(device)
lam = np.random.beta(args.alpha, args.alpha)
outputs,targets_a,targets_b = model(images, targets, lam, mode='train')
loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
_,pred = torch.max(outputs, dim=1)
correct += (pred == targets).sum().item()
print('epoch: {} --> Train loss = {:.4f} Train Accuracy = {:.4f} '.format(epoch, total_loss / len(trainloader.dataset), 100.*correct / len(trainloader.dataset)))
def test(epoch):
    """Evaluate `model` on `testloader`, checkpoint on improvement.

    Updates and returns the module-level `best_acc`.
    """
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs = inputs.to(device)
            targets = targets.to(device)
            outputs = model(inputs, None, None, mode='test')
            test_loss += criterion(outputs, targets).item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
    print('------> epoch: {} --> Test loss = {:.4f}    Test Accuracy = {:.4f} '.format(epoch,test_loss / len(testloader.dataset), 100.*correct / len(testloader.dataset)))
    acc = 100.*correct/total
    # Persist a checkpoint whenever the accuracy improves.
    if acc > best_acc:
        checkpoint(acc, epoch)
        best_acc = acc
    return best_acc
def checkpoint(acc, epoch):
    """Save model/optimizer state plus accuracy, epoch and seed to
    ``args.save_dir + 'checkpoint.t7'``."""
    print('Saving..')
    torch.save(
        {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'acc': acc,
            'epoch': epoch,
            'seed': args.manualSeed,
        },
        args.save_dir + 'checkpoint.t7',
    )
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Convex combination of the criterion against both mixup targets:
    ``lam * L(pred, y_a) + (1 - lam) * L(pred, y_b)``."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
def adjust_learning_rate(optimizer, epoch, gammas, schedule):
    """Decay the base learning rate ``args.lr_`` by each gamma whose schedule
    step has been reached, write it into every param group, and return it."""
    assert len(gammas) == len(schedule), "length of gammas and schedule should be equal"
    lr = args.lr_
    for gamma, step in zip(gammas, schedule):
        if epoch < step:
            break
        lr *= gamma
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
if __name__ == '__main__':
    # Train/evaluate loop: step the LR schedule, run one training epoch,
    # then evaluate (test() also checkpoints on improvement).
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args.gammas, args.schedule)
        train(epoch)
        best_accuracy = test(epoch)
        print('Best Accuracy = ', best_accuracy)
| [
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.cuda.is_available",
"utils.dataLoader.dataloader_train",
"os.path.exists",
"argparse.ArgumentParser",
"models.resnet18_classifier.Resnet_classifier",
"numpy.random.seed",
"torchvision.transforms.ToTensor",
"random.randint",
"numpy.random.beta",
... | [((370, 503), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Trains ResNet on tiny-imagenet-200"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Trains ResNet on tiny-imagenet-200',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (393, 503), False, 'import argparse\n'), ((2726, 2754), 'random.seed', 'random.seed', (['args.manualSeed'], {}), '(args.manualSeed)\n', (2737, 2754), False, 'import random\n'), ((2755, 2786), 'numpy.random.seed', 'np.random.seed', (['args.manualSeed'], {}), '(args.manualSeed)\n', (2769, 2786), True, 'import numpy as np\n'), ((2787, 2821), 'torch.manual_seed', 'torch.manual_seed', (['args.manualSeed'], {}), '(args.manualSeed)\n', (2804, 2821), False, 'import torch\n'), ((2822, 2865), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.manualSeed'], {}), '(args.manualSeed)\n', (2848, 2865), False, 'import torch\n'), ((3378, 3427), 'utils.dataLoader.dataloader_train', 'dataloader_train', (['args.train_dir', 'transform_train'], {}), '(args.train_dir, transform_train)\n', (3394, 3427), False, 'from utils.dataLoader import dataloader_train, dataloader_val\n'), ((3575, 3669), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.workers'}), '(train_data, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers)\n', (3585, 3669), False, 'from torch.utils.data import DataLoader\n'), ((3679, 3773), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.workers'}), '(test_data, batch_size=args.batch_size, shuffle=True, num_workers\n =args.workers)\n', (3689, 3773), False, 'from torch.utils.data import DataLoader\n'), ((3779, 3826), 'models.resnet18_classifier.Resnet_classifier', 'Resnet_classifier', ([], {'num_classes': 'args.num_classes'}), 
'(num_classes=args.num_classes)\n', (3796, 3826), False, 'from models.resnet18_classifier import Resnet_classifier\n'), ((3835, 3863), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (3856, 3863), False, 'import torch\n'), ((4011, 4032), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4030, 4032), False, 'from torch import nn, optim\n'), ((2700, 2724), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2714, 2724), False, 'import random\n'), ((2898, 2927), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (2912, 2927), False, 'import os\n'), ((2930, 2956), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {}), '(args.save_dir)\n', (2941, 2956), False, 'import os\n'), ((3455, 3491), 'os.path.join', 'os.path.join', (['args.val_dir', '"""images"""'], {}), "(args.val_dir, 'images')\n", (3467, 3491), False, 'import os\n'), ((3493, 3542), 'os.path.join', 'os.path.join', (['args.val_dir', '"""val_annotations.txt"""'], {}), "(args.val_dir, 'val_annotations.txt')\n", (3505, 3542), False, 'import os\n'), ((4070, 4097), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (4084, 4097), False, 'import os\n'), ((6321, 6371), 'torch.save', 'torch.save', (['state', "(args.save_dir + 'checkpoint.t7')"], {}), "(state, args.save_dir + 'checkpoint.t7')\n", (6331, 6371), False, 'import torch\n'), ((3000, 3036), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(64)'], {'padding': '(4)'}), '(64, padding=4)\n', (3021, 3036), False, 'from torchvision import datasets, transforms\n'), ((3039, 3072), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3070, 3072), False, 'from torchvision import datasets, transforms\n'), ((3075, 3096), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3094, 3096), False, 'from torchvision import datasets, transforms\n'), ((3099, 
3170), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (3119, 3170), False, 'from torchvision import datasets, transforms\n'), ((3239, 3260), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3258, 3260), False, 'from torchvision import datasets, transforms\n'), ((3263, 3334), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (3283, 3334), False, 'from torchvision import datasets, transforms\n'), ((4184, 4207), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (4194, 4207), False, 'import torch\n'), ((4791, 4829), 'numpy.random.beta', 'np.random.beta', (['args.alpha', 'args.alpha'], {}), '(args.alpha, args.alpha)\n', (4805, 4829), True, 'import numpy as np\n'), ((5087, 5112), 'torch.max', 'torch.max', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (5096, 5112), False, 'import torch\n'), ((5435, 5450), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5448, 5450), False, 'import torch\n'), ((2611, 2636), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2634, 2636), False, 'import torch\n'), ((5721, 5747), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (5730, 5747), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2021 <NAME>
# License: MIT (see LICENSE)
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import copy
import mesh_illustris as mi
# Copy the colormap before mutating it; paint "bad" (masked/empty) bins with
# the colormap's low-end color so they blend into the background.
my_magma = copy.copy(mpl.cm.get_cmap('magma'))
my_magma.set_bad(my_magma(-1))
basePath = "TNG50-4-Subbox0/output" # may alter
# Load snapshot 2332 of the subbox, gas particles only.
d = mi.load(basePath, 2332, ["gas"])
# The center of TNG50-4-Subbox0 is (26, 10, 26.5) Mpc/h
# The side length of TNG50-4-Subbox0 is 4 Mpc/h
boundary = np.array([[24.0, 8.0, 24.5], [28.0, 12.0, 28.5]]) # in Mpc/h
# Note, the internal length unit of TNG is kpc/h
box = d.box(boundary*1000, ["gas"], ["Coordinates", "Masses"])
# Left panel (ax0): whole subbox; right panel (ax1): zoom-in of the square.
fig, [ax0, ax1] = plt.subplots(1, 2, figsize=(9,4))
fig.subplots_adjust(wspace=0.02)
x = box["gas"]["Coordinates"][:,0] / 1000 # in Mpc/h
y = box["gas"]["Coordinates"][:,1] / 1000 # in Mpc/h
# Mass-weighted 2-D histogram -> column density; (4/64)^2 is one pixel's area.
weights = box["gas"]["Masses"] / (4/64)**2 / 1e2 # in h Msun/pc^2
ax0.hist2d(x, y, norm=mpl.colors.LogNorm(), weights=weights,
    range=[[24.0, 28.0], [8.0, 12.0]], bins=64, cmap=my_magma,
    vmax=1e2, vmin=1e-1)
# White square marking the zoom region plus guide lines toward the right panel.
ax0.plot([25, 25.5, 25.5, 25,25], [9.3, 9.3, 9.8, 9.8, 9.3], c="w", lw=1)
ax0.plot([25.5, 28.0], [9.8, 12.0], c="w", lw=1)
ax0.plot([25.5, 28.0], [9.3, 8.0], c="w", lw=1)
ax0.set_xticks([])
ax0.set_yticks([])
# Reload only the zoom-in region at higher resolution (32 bins over 0.5 Mpc/h).
boundary = np.array([[25.0, 9.3, 26.25], [25.5, 9.8, 26.75]]) # in Mpc/h
box = d.box(boundary*1000, ["gas"], ["Coordinates", "Masses"])
x = box["gas"]["Coordinates"][:,0] / 1000 # in Mpc/h
y = box["gas"]["Coordinates"][:,1] / 1000 # in Mpc/h
weights = box["gas"]["Masses"] / (0.5/32)**2 / 1e2 # in h Msun/pc^2
h = ax1.hist2d(x, y, norm=mpl.colors.LogNorm(), weights=weights,
    range=[[25.0, 25.5], [9.3, 9.8]], bins=32, cmap=my_magma,
    vmax=1e2, vmin=1e-1)
ax1.set_xticks([])
ax1.set_yticks([])
# Shared colorbar placed just outside the right panel.
plt.subplots_adjust(bottom=0, right=1, top=1)
cax = plt.axes([1.01, 0, 0.02, 1])
cbar = plt.colorbar(h[3], cax=cax, extend="both")
cbar.set_label(r"Column density $({\rm h\,M_\odot/pc^2})$")
plt.savefig('figures/load_box.png', bbox_inches ='tight',
            pad_inches=0.05, dpi=200)
| [
"mesh_illustris.load",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"numpy.array",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.colors.LogNorm"
] | [((354, 386), 'mesh_illustris.load', 'mi.load', (['basePath', '(2332)', "['gas']"], {}), "(basePath, 2332, ['gas'])\n", (361, 386), True, 'import mesh_illustris as mi\n'), ((503, 552), 'numpy.array', 'np.array', (['[[24.0, 8.0, 24.5], [28.0, 12.0, 28.5]]'], {}), '([[24.0, 8.0, 24.5], [28.0, 12.0, 28.5]])\n', (511, 552), True, 'import numpy as np\n'), ((695, 729), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 4)'}), '(1, 2, figsize=(9, 4))\n', (707, 729), True, 'import matplotlib.pyplot as plt\n'), ((1301, 1351), 'numpy.array', 'np.array', (['[[25.0, 9.3, 26.25], [25.5, 9.8, 26.75]]'], {}), '([[25.0, 9.3, 26.25], [25.5, 9.8, 26.75]])\n', (1309, 1351), True, 'import numpy as np\n'), ((1787, 1832), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0)', 'right': '(1)', 'top': '(1)'}), '(bottom=0, right=1, top=1)\n', (1806, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1867), 'matplotlib.pyplot.axes', 'plt.axes', (['[1.01, 0, 0.02, 1]'], {}), '([1.01, 0, 0.02, 1])\n', (1847, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1917), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['h[3]'], {'cax': 'cax', 'extend': '"""both"""'}), "(h[3], cax=cax, extend='both')\n", (1887, 1917), True, 'import matplotlib.pyplot as plt\n'), ((1979, 2065), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/load_box.png"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.05)', 'dpi': '(200)'}), "('figures/load_box.png', bbox_inches='tight', pad_inches=0.05,\n dpi=200)\n", (1990, 2065), True, 'import matplotlib.pyplot as plt\n'), ((244, 268), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['"""magma"""'], {}), "('magma')\n", (259, 268), True, 'import matplotlib as mpl\n'), ((957, 977), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {}), '()\n', (975, 977), True, 'import matplotlib as mpl\n'), ((1627, 1647), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {}), '()\n', (1645, 1647), 
True, 'import matplotlib as mpl\n')] |
import os
import numpy as np
from addict import Dict
from PIL import Image
from .reader import Reader
from .utils import is_image_file
from .builder import READER
__all__ = ['ImageFolderReader']
@READER.register_module()
class ImageFolderReader(Reader):
    """Reader over an ImageNet-style directory tree: root/<class_name>/<images>.

    Class names are the sorted sub-directory names of ``root``; a sample's
    label is its class's index in that sorted order.
    """

    def __init__(self, root, **kwargs):
        super(ImageFolderReader, self).__init__(**kwargs)
        self.root = root
        # Sorted sub-directory names define the class set and label order.
        self.classes = sorted(d.name for d in os.scandir(self.root) if d.is_dir())
        self.class_to_idx = {name: i for i, name in enumerate(self.classes)}
        # (path, label) pairs for every image file under each class directory.
        self.samples = []
        for target in sorted(self.class_to_idx.keys()):
            d = os.path.join(self.root, target)
            if not os.path.isdir(d):
                continue
            # `dirpath` (not `root`) avoids shadowing the dataset root.
            for dirpath, _, fnames in sorted(os.walk(d, followlinks=True)):
                for fname in sorted(fnames):
                    path = os.path.join(dirpath, fname)
                    if is_image_file(path):
                        self.samples.append((path, self.class_to_idx[target]))
        assert len(self.samples) > 0

    def get_dataset_info(self):
        """Return (index range, info dict holding the class list)."""
        return range(len(self.samples)), Dict({'classes': self.classes})

    def get_data_info(self, index):
        """Return the stored image's spatial size as ``dict(h=..., w=...)``.

        Fix: use a context manager so the underlying file handle is closed;
        the original left the PIL file object open.
        """
        with Image.open(self.samples[index][0]) as img:
            w, h = img.size
        return dict(h=h, w=w)

    def __call__(self, index):
        """Load sample ``index``; returns image, original size, path and label.

        NOTE(review): assumes ``self.read_image`` (from Reader) returns a
        PIL-like object exposing ``.size`` — confirm against the base class.
        """
        img = self.read_image(self.samples[index][0])
        label = self.samples[index][1]
        w, h = img.size
        path = self.samples[index][0]
        return dict(
            image=img,
            ori_size=np.array([h, w]).astype(np.float32),
            path=path,
            label=label
        )

    def __repr__(self):
        return 'ImageFolderReader(root={}, classes={}, {})'.format(self.root, tuple(self.classes), super(ImageFolderReader, self).__repr__())
| [
"addict.Dict",
"PIL.Image.open",
"os.scandir",
"os.path.join",
"numpy.array",
"os.path.isdir",
"os.walk"
] | [((1287, 1321), 'PIL.Image.open', 'Image.open', (['self.samples[index][0]'], {}), '(self.samples[index][0])\n', (1297, 1321), False, 'from PIL import Image\n'), ((668, 699), 'os.path.join', 'os.path.join', (['self.root', 'target'], {}), '(self.root, target)\n', (680, 699), False, 'import os\n'), ((1204, 1235), 'addict.Dict', 'Dict', (["{'classes': self.classes}"], {}), "({'classes': self.classes})\n", (1208, 1235), False, 'from addict import Dict\n'), ((421, 442), 'os.scandir', 'os.scandir', (['self.root'], {}), '(self.root)\n', (431, 442), False, 'import os\n'), ((719, 735), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (732, 735), False, 'import os\n'), ((804, 832), 'os.walk', 'os.walk', (['d'], {'followlinks': '(True)'}), '(d, followlinks=True)\n', (811, 832), False, 'import os\n'), ((907, 932), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (919, 932), False, 'import os\n'), ((1833, 1849), 'numpy.array', 'np.array', (['[h, w]'], {}), '([h, w])\n', (1841, 1849), True, 'import numpy as np\n')] |
from __future__ import division
from scipy.integrate import simps
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from scipy.stats import kstest
from scipy.stats import norm
import numpy as np
import scipy.fftpack
import scipy.signal
from src.BHKickUtils import tools as gwt
import cmath
import scipy.interpolate
from matplotlib.mlab import griddata
def plot(time, ht1, title='Time domain waveform'):
    """Plot a time-domain waveform with the x-axis limited to [-0.02, 0.02].

    Parameters:
        time  -- sequence of sample times
        ht1   -- samples, same length as `time`
        title -- axes title. Fix: the parameter was previously accepted
                 but never applied to the figure.
    """
    fig = plt.figure()
    plt1 = fig.add_subplot(111)
    plt1.plot(list(time), list(ht1))
    plt1.set_title(title)  # apply the (previously ignored) title parameter
    axes = plt.gca()
    axes.set_xlim([-0.02, 0.02])
    plt.show()
class noise():
    """Loads a detector noise curve from 'ZERO_DET_high_P.txt' in the CWD."""

    def __init__(self):
        # Parsed noise data, one row per line of the input file.
        self.aligochar = self.LIGOnoisedata()

    def LIGOnoisedata(self):
        """Parse 'ZERO_DET_high_P.txt' (single-space-separated floats, optional
        leading spaces) into a 2-D float array, one row per line.

        Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
        the builtin ``float`` is the exact alias it pointed to.
        """
        with open("ZERO_DET_high_P.txt") as file:
            rows = [
                [float(tok) for tok in line.strip('\n').lstrip(' ').split(' ')]
                for line in file
            ]
        return np.array(rows)
class waveform(object):
    """Load a surrogate waveform file and expose its time- and
    frequency-domain representations.

    Attributes: t/ht (time domain), f/hf (frequency domain), fft_func
    (interpolant over the spectrum), dT (sample spacing).
    """

    def __init__(self, infile):
        self.infile = infile
        self.data = self.waveformdata(self.infile)
        fftlist, times, amp, fft_func, dT = self.data
        self.fftdata = fftlist
        self.f = fftlist[0]
        self.hf = fftlist[1]
        self.t = times
        self.ht = amp
        self.fft_func = fft_func
        self.dT = dT

    def waveformdata(self, filename):
        """Read (time, amplitude) columns from '../surrogatemodel/<filename>'
        and return (fft list, times, amplitude, spectrum interpolant, dT)."""
        indir = '../surrogatemodel/'
        raw = np.loadtxt(indir + filename, dtype=float, usecols=(0, 1, 2))
        n_samples = len(raw[:, 1])
        dT = raw[1, 0] - raw[0, 0]
        # The Tukey window is built as in the original code but deliberately
        # not applied (see the commented-out factor below).
        window = scipy.signal.tukey(n_samples, alpha=0.1)
        amp = raw[:, 1]  # *window
        fftlist = gwt.fft(amp, 1 / dT)
        fft_func = interp1d(fftlist[0], fftlist[1])
        return fftlist, raw[:, 0], amp, fft_func, dT
class velowaveform(object):
    """Waveform with a smooth time-dependent velocity shift applied.

    The time axis is re-integrated with step amplitude `velshift` (a
    Gaussian-CDF ramp of width set by `sigma`), re-centered on the sample
    closest to t=0, offset by `tshift`, resampled onto the original grid,
    and given a constant frequency-domain phase `phase`.
    """
    def __init__(self, timedata, ampdata, velshift, sigma, tshift, phase):
        self.velshift = velshift
        self.data = self.veloshiftdata(timedata, ampdata, sigma, tshift, phase)
        # Unpack the tuple returned by veloshiftdata for convenient access.
        self.fftdata = self.data[0]
        self.fft_func = self.data[1]
        self.t = self.data[3]
        self.ht = self.data[2]
        self.f = self.fftdata[0]
        self.hf = self.fftdata[1]
        self.timeshift = tshift
        self.dT = self.data[4]
        self.stime = self.data[5]
    def veloshiftdata(self, timedata, ampdata, sigma, tshift, phase):
        """Return (fftlist, fft_func, shifted amp, times, dT, shifttime)."""
        times = timedata
        amp = ampdata
        N = len(times)
        dT = times[1] - times[0]  # assumes uniform sampling -- TODO confirm
        # Integrate dt' = dt * (1 + v(t)) to build the distorted time axis,
        # remembering the index of the sample closest to t = 0.
        shifttime = [times[0]]
        mini = 1
        ind = 0
        for n in range(0, len(times) - 1):
            if abs(times[n]) < abs(mini):
                mini = times[n]
                ind = n
            shifttime.append(shifttime[-1] + (times[n + 1] - times[n]) * (self.velocityshift(times[n], sigma) + 1))
        # Re-center the distorted axis on the t=0 sample and apply tshift.
        stimes = []
        for time in shifttime:
            stimes.append(time + tshift - shifttime[ind])  # TODO REMOVE the ind additions
        # Resample the distorted waveform back onto the original grid;
        # samples outside the distorted range are filled with 0.
        shiftedwave = interp1d(stimes, amp, bounds_error=False, fill_value=0)
        window = scipy.signal.tukey(N, alpha=0.1)  # computed but not applied (see commented factor)
        amp = shiftedwave(times)  # *window
        fftlist = gwt.fft(amp, 1 / dT)
        # Constant phase rotation applied in the frequency domain.
        phaseamp = fftlist[1] * np.exp(1j * phase)
        fftlist = [fftlist[0], phaseamp]
        fft_func = interp1d(fftlist[0], fftlist[1])
        return fftlist, fft_func, amp, times, dT, shifttime
    def velocityshift(self, time, sigma):
        """Smooth velocity ramp: velshift * Gaussian CDF with std sigma*mrel.

        Uses the module-level `mass` global; 4.93e-6 is presumably the
        solar-mass time constant G*Msun/c^3 in seconds -- verify.
        """
        mrel = mass * 4.93 * 10 ** -6
        velo = self.velshift * norm.cdf(time, 0, (sigma * mrel))
        return velo
# Default run parameters: `mass` feeds velocityshift(), `sigma` sets the
# ramp width, and `velocity` is the step amplitude (0 disables the shift).
mass = 60
sigma = 100
velocity = 0.00
if __name__ == '__main__':
    # Load the detector noise curve and a surrogate waveform from disk.
    noi = noise()
    wave = waveform("cbc_q1.00_M60_d410_t140.00_p0.00.dat")
    gwt.fft_plot(wave.f, wave.hf, title='Original Waveform')
    olist = []
    tlist = []
    plist = []
    time = 0
    phase = 0
    # Build the velocity-shifted waveform (a no-op here since velocity = 0)
    # and transform its spectrum back to the time domain.
    velowave = velowaveform(wave.t, wave.ht, velocity, sigma, time, phase)
    veloht = gwt.infft(velowave.hf, 1 / velowave.dT)
    print([np.round(time, 6), np.round(phase, 6)])
    gwt.wave_plot(wave.t, wave.ht, velowave.t, veloht)
    times = np.linspace(wave.t[0], wave.t[-1])
    velofunc = interp1d(velowave.t, veloht)
    # Plot the residual between the shifted and original waveforms.
    fig = plt.figure()
    plt1 = fig.add_subplot(111)
    plt1.plot(velowave.t, veloht - wave.ht)
    axes = plt.gca()
    axes.set_xlim([-0.02, 0.02])
    plt.show()
"src.BHKickUtils.tools.wave_plot",
"numpy.float",
"src.BHKickUtils.tools.fft_plot",
"matplotlib.pyplot.gca",
"scipy.stats.norm.cdf",
"scipy.interpolate.interp1d",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"src.BHKickUtils.tools.fft",
"src.BHKickUtils.tools.infft... | [((433, 445), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (443, 445), True, 'import matplotlib.pyplot as plt\n'), ((526, 535), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (533, 535), True, 'import matplotlib.pyplot as plt\n'), ((572, 582), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (580, 582), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3962), 'src.BHKickUtils.tools.fft_plot', 'gwt.fft_plot', (['wave.f', 'wave.hf'], {'title': '"""Original Waveform"""'}), "(wave.f, wave.hf, title='Original Waveform')\n", (3918, 3962), True, 'from src.BHKickUtils import tools as gwt\n'), ((4123, 4162), 'src.BHKickUtils.tools.infft', 'gwt.infft', (['velowave.hf', '(1 / velowave.dT)'], {}), '(velowave.hf, 1 / velowave.dT)\n', (4132, 4162), True, 'from src.BHKickUtils import tools as gwt\n'), ((4218, 4268), 'src.BHKickUtils.tools.wave_plot', 'gwt.wave_plot', (['wave.t', 'wave.ht', 'velowave.t', 'veloht'], {}), '(wave.t, wave.ht, velowave.t, veloht)\n', (4231, 4268), True, 'from src.BHKickUtils import tools as gwt\n'), ((4281, 4315), 'numpy.linspace', 'np.linspace', (['wave.t[0]', 'wave.t[-1]'], {}), '(wave.t[0], wave.t[-1])\n', (4292, 4315), True, 'import numpy as np\n'), ((4331, 4359), 'scipy.interpolate.interp1d', 'interp1d', (['velowave.t', 'veloht'], {}), '(velowave.t, veloht)\n', (4339, 4359), False, 'from scipy.interpolate import interp1d\n'), ((4370, 4382), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4380, 4382), True, 'import matplotlib.pyplot as plt\n'), ((4470, 4479), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4477, 4479), True, 'import matplotlib.pyplot as plt\n'), ((4517, 4527), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4525, 4527), True, 'import matplotlib.pyplot as plt\n'), ((1604, 1664), 'numpy.loadtxt', 'np.loadtxt', (['(indir + filename)'], {'dtype': 'float', 'usecols': '(0, 1, 2)'}), '(indir + filename, dtype=float, usecols=(0, 1, 2))\n', (1614, 
1664), True, 'import numpy as np\n'), ((1876, 1896), 'src.BHKickUtils.tools.fft', 'gwt.fft', (['amp', '(1 / dT)'], {}), '(amp, 1 / dT)\n', (1883, 1896), True, 'from src.BHKickUtils import tools as gwt\n'), ((1916, 1948), 'scipy.interpolate.interp1d', 'interp1d', (['fftlist[0]', 'fftlist[1]'], {}), '(fftlist[0], fftlist[1])\n', (1924, 1948), False, 'from scipy.interpolate import interp1d\n'), ((3193, 3248), 'scipy.interpolate.interp1d', 'interp1d', (['stimes', 'amp'], {'bounds_error': '(False)', 'fill_value': '(0)'}), '(stimes, amp, bounds_error=False, fill_value=0)\n', (3201, 3248), False, 'from scipy.interpolate import interp1d\n'), ((3363, 3383), 'src.BHKickUtils.tools.fft', 'gwt.fft', (['amp', '(1 / dT)'], {}), '(amp, 1 / dT)\n', (3370, 3383), True, 'from src.BHKickUtils import tools as gwt\n'), ((3497, 3529), 'scipy.interpolate.interp1d', 'interp1d', (['fftlist[0]', 'fftlist[1]'], {}), '(fftlist[0], fftlist[1])\n', (3505, 3529), False, 'from scipy.interpolate import interp1d\n'), ((1092, 1109), 'numpy.array', 'np.array', (['dataarr'], {}), '(dataarr)\n', (1100, 1109), True, 'import numpy as np\n'), ((3417, 3437), 'numpy.exp', 'np.exp', (['(1.0j * phase)'], {}), '(1.0j * phase)\n', (3423, 3437), True, 'import numpy as np\n'), ((3703, 3734), 'scipy.stats.norm.cdf', 'norm.cdf', (['time', '(0)', '(sigma * mrel)'], {}), '(time, 0, sigma * mrel)\n', (3711, 3734), False, 'from scipy.stats import norm\n'), ((4174, 4191), 'numpy.round', 'np.round', (['time', '(6)'], {}), '(time, 6)\n', (4182, 4191), True, 'import numpy as np\n'), ((4193, 4211), 'numpy.round', 'np.round', (['phase', '(6)'], {}), '(phase, 6)\n', (4201, 4211), True, 'import numpy as np\n'), ((1018, 1029), 'numpy.float', 'np.float', (['i'], {}), '(i)\n', (1026, 1029), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import shapely.wkt
from sklearn.metrics import pairwise_distances
from sklearn.metrics import mean_squared_error
import init
import constants as cn
from coordinate import Coordinate
def proximity_ratio(df_destinations):
    """
    Compute, per origin blockgroup, the ratio of short trips (< 2 miles)
    to medium trips (2-10 miles).
    input:
        df_destinations - trip-level data frame with a distance column
    output:
        data frame with origin blockgroup and proximity ratio (cn.PROX_RATIO)
    """
    short_cut = cn.BASKET_EVAL_PROX_MIN  # 2 miles
    long_cut = cn.BASKET_EVAL_PROX_MAX  # 10 miles
    dist = df_destinations[cn.DISTANCE]
    # Indicator columns: trip shorter than 2 miles / between 2 and 10 miles.
    df_destinations['dist_under_2'] = np.where(dist < short_cut, 1, 0)
    df_destinations['dist_2_to_10'] = np.where((dist >= short_cut) & (dist < long_cut), 1, 0)
    grouped = df_destinations.groupby([cn.ORIGIN], as_index=False).agg(
        {'dist_under_2': sum, 'dist_2_to_10': sum})
    # Drop blockgroups with a zero denominator before taking the ratio.
    grouped = grouped[grouped['dist_2_to_10'] != 0]
    grouped[cn.PROX_RATIO] = grouped['dist_under_2'] / grouped['dist_2_to_10']
    return grouped[[cn.ORIGIN, cn.PROX_RATIO]]
def vert_hori_ratio(df_destinations, df_blockgroup):
    """
    Compute the per-blockgroup mean ratio of vertical (latitude) to
    horizontal (longitude) travel extent and merge it onto df_blockgroup.
    input:
        df_destinations - trip-level data frame with orig/dest lat-lon columns
        df_blockgroup - per-blockgroup frame keyed by cn.ORIGIN
    output:
        df_blockgroup with a cn.VERT_HORI_RATIO column added

    Fix: assign the ratio Series directly instead of wrapping it in a
    one-column DataFrame, which relied on fragile DataFrame-to-column
    assignment semantics.
    """
    # NOTE(review): trips with identical orig/dest longitudes divide by zero
    # and yield inf -- confirm upstream data excludes them.
    df_destinations[cn.VERT_HORI_RATIO] = np.abs(
        (df_destinations['dest_lat'] - df_destinations['orig_lat']) /
        (df_destinations['dest_lon'] - df_destinations['orig_lon']))
    df_blockgroup2 = df_destinations.groupby([cn.ORIGIN], as_index=False)[cn.VERT_HORI_RATIO].mean()
    return pd.merge(left=df_blockgroup, right=df_blockgroup2, how='inner', left_on=cn.ORIGIN, right_on=cn.ORIGIN)
def average_distance(df_destinations, df_blockgroup):
    """
    Attach each blockgroup's mean travel distance (column cn.AVG_DIST)
    to df_blockgroup.
    input:
        df_destinations - trip-level data frame with a distance column
    output:
        df_blockgroup with a cn.AVG_DIST column added
    """
    mean_dist = df_destinations.groupby([cn.ORIGIN], as_index=False)[cn.DISTANCE].mean()
    merged = pd.merge(left=df_blockgroup, right=mean_dist, how='inner', left_on=cn.ORIGIN, right_on=cn.ORIGIN)
    merged.rename(columns={cn.DISTANCE: cn.AVG_DIST}, inplace=True)
    return merged
def dist_from_cc(df):
    """
    Row-wise helper: haversine distance from a row's destination
    (dest_lat/dest_lon) to the city center (cn.CITY_CENTER).
    """
    destination = Coordinate(df['dest_lat'], df['dest_lon'])
    center = Coordinate(cn.CITY_CENTER[0], cn.CITY_CENTER[1])
    return center.haversine_distance(destination)
def distance_from_citycenter(df_destinations, df_blockgroup):
    """
    Attach each blockgroup's mean destination distance from the city center
    (column 'distance_from_citycenter_test') to df_blockgroup.
    input:
        df_destinations - trip-level data frame with dest lat/lon columns
    output:
        df_blockgroup with a 'distance_from_citycenter_test' column added
    """
    df_destinations['distance_from_citycenter_val'] = df_destinations.apply(dist_from_cc, axis=1)
    per_blockgroup = df_destinations.groupby([cn.ORIGIN], as_index=False)['distance_from_citycenter_val'].mean()
    merged = pd.merge(left=df_blockgroup, right=per_blockgroup, how='inner', left_on=cn.ORIGIN, right_on=cn.ORIGIN)
    merged.rename(columns={'distance_from_citycenter_val': 'distance_from_citycenter_test'}, inplace=True)
    return merged
def prepare_psrc(psrc_raw):
    """
    Compute all four features on the PSRC data; needs to run only once since
    PSRC is used without filtering.
    input:
        PSRC data (column names normalized, lat/lon added upstream)
    output:
        PSRC data with proximity ratio, vert_hori_ratio, average distance,
        and distance from city center, sorted by origin blockgroup.

    Fix: ``sort_values`` returns a new frame (it is not in-place); the
    original discarded its result and returned an unsorted frame.
    """
    psrc_blockgroup = proximity_ratio(psrc_raw)
    with_vert_hori_ratio = vert_hori_ratio(psrc_raw, psrc_blockgroup)
    with_average_distance = average_distance(psrc_raw, with_vert_hori_ratio)
    result_merged = distance_from_citycenter(psrc_raw, with_average_distance)
    # Keep the sorted result (previously dropped).
    return result_merged.sort_values(by=[cn.ORIGIN])
def calculate_features(google_input, basket_combination):
    """
    Compute the four features from Google API data for one basket
    combination; must be run per combination since the data is re-filtered
    each time.
    input:
        Google API data, basket combination (per-category rank caps)
    output:
        Google API data with proximity ratio, vert_hori_ratio, average
        distance, and distance from city center, sorted by origin
    """
    # Keep rows of other categories, or rows of this category whose rank
    # (distance order from the destination) is within the basket's cap.
    filtered_data = google_input
    for category, cap in zip(cn.BASKET_CATEGORIES, basket_combination):
        filtered_data = filtered_data[(filtered_data['class'] != category) | (filtered_data['rank'] <= cap)]
    # FEATURES: proximity ratio, vertical/horizontal ratio, average distance,
    # distance from city center.
    with_proximity_ratio = proximity_ratio(filtered_data.copy())
    with_vert_hori_ratio = vert_hori_ratio(filtered_data.copy(), with_proximity_ratio)
    with_average_distance = average_distance(filtered_data.copy(), with_vert_hori_ratio)
    with_distance_from_citycenter = distance_from_citycenter(filtered_data.copy(), with_average_distance)
    return with_distance_from_citycenter.sort_values(by=[cn.ORIGIN])
def calculate_mse(psrc_output, google_input):
    """
    This calculates three features for each basket combination, saves MSE to compare Google API with PSRC
    input:
        PSRC with features, Google API data without features
    output:
        Basket combinations, MSEs for each basket
    """
    score = []
    combinations = []
    # Enumerate every basket combination whose counts sum to the target size.
    for x in cn.BASKET_COMBOS:
        if sum(x) == cn.BASKET_SIZE:
        # To do a faster test run, comment out the above and use the following:
        # if sum(x) == 40:
            combinations.append(x)
            df_google = calculate_features(google_input, list(x))
            # Compare only blockgroups present in both data sets.
            googled_psrc = psrc_output.loc[psrc_output[cn.ORIGIN].isin(df_google[cn.ORIGIN])]
            proximity_ratio_mse = mean_squared_error(df_google[cn.PROX_RATIO], googled_psrc[cn.PROX_RATIO])
            vert_hori_ratio_mse = mean_squared_error(df_google[cn.VERT_HORI_RATIO], googled_psrc[cn.VERT_HORI_RATIO])
            average_distance_mse = mean_squared_error(df_google[cn.AVG_DIST], googled_psrc[cn.AVG_DIST])
            distance_from_citycenter_mse = mean_squared_error(df_google['distance_from_citycenter_test'], googled_psrc['distance_from_citycenter_test'])
            mses = (proximity_ratio_mse, vert_hori_ratio_mse, average_distance_mse, distance_from_citycenter_mse)
            score.append(mses)
            # Progress indicator for long runs.
            if (len(combinations) % 5000 == 0):
                print("Still Processing..")
                print("Idx+1 of combination is: ", len(combinations))
    print("Total number of combinations: " + str(len(combinations)))
    print()
    # One MSE column per feature, plus ascending ranks (rank 1 = best fit).
    final_mses = pd.DataFrame(score, columns = ['from_proximity_ratio', 'from_vert_hori_ratio', 'from_average_distance', 'from_distance_citycenter'])
    final_mses['rank_from_proximity_ratio'] = final_mses['from_proximity_ratio'].rank(ascending=1)
    final_mses['rank_from_vert_hori_ratio'] = final_mses['from_vert_hori_ratio'].rank(ascending=1)
    final_mses['rank_from_average_distance'] = final_mses['from_average_distance'].rank(ascending=1)
    final_mses['rank_from_distance_citycenter'] = final_mses['from_distance_citycenter'].rank(ascending=1)
    final_combinations = pd.DataFrame(combinations, columns = cn.BASKET_CATEGORIES)
    # Select the basket whose average-distance MSE ranks best.
    best_loc = final_mses['rank_from_average_distance'].idxmin()
    print("Choose the following combination: \n")
    print("The index of the best basket is: ", best_loc)
    print(final_combinations.loc[best_loc])
    return final_combinations, final_mses
# ---- Script entry: load inputs, compute features, search the basket space ----
# Load PSRC data and pre-process
psrc_rawdat = pd.read_csv(cn.PSRC_FP, dtype={cn.ORIGIN: str, cn.DESTINATION: str})
# Coerce non-numeric distances to NaN instead of failing.
psrc_rawdat[cn.DISTANCE] = pd.to_numeric(psrc_rawdat[cn.DISTANCE], errors='coerce')
# Load Google API data
input_destinations = pd.read_csv(cn.RAW_DIR + 'GoogleMatrix_Places_Dist.csv', dtype={cn.ORIGIN: str})
input_destinations.rename(columns = {'lat': 'dest_lat', 'lng': 'dest_lon', 'orig_lng': 'orig_lon'}, inplace=True)
# Load blockgroup data with latitude and longitudes; will be merged with Google API
blockgroup_mapping = pd.read_csv(cn.PROCESSED_DIR + 'SeattleCensusBlockGroups.csv', dtype={'tract_blkgrp': str})
print("blockgroup_mapping is loaded!")
# Prefix '530330' -- presumably the WA/King County FIPS code -- to build
# full blockgroup ids; verify against the census id scheme.
blockgroup_mapping['tract_blkgrp'] = '530330' + blockgroup_mapping['tract_blkgrp']
# Parse WKT centroid strings into point objects to obtain origin lat/lon.
orig_pts = blockgroup_mapping.centroid.apply(shapely.wkt.loads)
blockgroup_mapping['orig_lon'] = pd.DataFrame([kk.x for kk in orig_pts])
blockgroup_mapping['orig_lat'] = pd.DataFrame([kk.y for kk in orig_pts])
origin_blockgroups = blockgroup_mapping [['tract_blkgrp', 'orig_lat', 'orig_lon']]
# origin_merged will be an input data for 'evaluate_features' function
origin_merged = pd.merge(left=input_destinations, right=origin_blockgroups, how='left', left_on=cn.ORIGIN, right_on='tract_blkgrp')
origin_merged = origin_merged[[cn.ORIGIN, 'dest_lat', 'orig_lat','dest_lon', 'orig_lon', 'rank', cn.DISTANCE, 'class']]
print("Google data are ready!")
# One-time computation of psrc: generate three features
df_psrc = prepare_psrc(psrc_rawdat.copy())
print("PSRC data are ready!")
comb, res = calculate_mse(df_psrc, origin_merged.copy())
print("The following is the head of combinations")
print(comb.head())
print("\n\n")
print("The following is the head of mses")
print(res.head())
print("all done!")
comb.to_csv(cn.BASKET_COMBO_FP)
res.to_csv(cn.MSES_FP)
| [
"numpy.abs",
"pandas.read_csv",
"numpy.where",
"pandas.merge",
"sklearn.metrics.mean_squared_error",
"pandas.to_numeric",
"pandas.DataFrame",
"coordinate.Coordinate"
] | [((8771, 8839), 'pandas.read_csv', 'pd.read_csv', (['cn.PSRC_FP'], {'dtype': '{cn.ORIGIN: str, cn.DESTINATION: str}'}), '(cn.PSRC_FP, dtype={cn.ORIGIN: str, cn.DESTINATION: str})\n', (8782, 8839), True, 'import pandas as pd\n'), ((8868, 8924), 'pandas.to_numeric', 'pd.to_numeric', (['psrc_rawdat[cn.DISTANCE]'], {'errors': '"""coerce"""'}), "(psrc_rawdat[cn.DISTANCE], errors='coerce')\n", (8881, 8924), True, 'import pandas as pd\n'), ((8972, 9057), 'pandas.read_csv', 'pd.read_csv', (["(cn.RAW_DIR + 'GoogleMatrix_Places_Dist.csv')"], {'dtype': '{cn.ORIGIN: str}'}), "(cn.RAW_DIR + 'GoogleMatrix_Places_Dist.csv', dtype={cn.ORIGIN: str}\n )\n", (8983, 9057), True, 'import pandas as pd\n'), ((9273, 9369), 'pandas.read_csv', 'pd.read_csv', (["(cn.PROCESSED_DIR + 'SeattleCensusBlockGroups.csv')"], {'dtype': "{'tract_blkgrp': str}"}), "(cn.PROCESSED_DIR + 'SeattleCensusBlockGroups.csv', dtype={\n 'tract_blkgrp': str})\n", (9284, 9369), True, 'import pandas as pd\n'), ((9587, 9626), 'pandas.DataFrame', 'pd.DataFrame', (['[kk.x for kk in orig_pts]'], {}), '([kk.x for kk in orig_pts])\n', (9599, 9626), True, 'import pandas as pd\n'), ((9660, 9699), 'pandas.DataFrame', 'pd.DataFrame', (['[kk.y for kk in orig_pts]'], {}), '([kk.y for kk in orig_pts])\n', (9672, 9699), True, 'import pandas as pd\n'), ((9871, 9990), 'pandas.merge', 'pd.merge', ([], {'left': 'input_destinations', 'right': 'origin_blockgroups', 'how': '"""left"""', 'left_on': 'cn.ORIGIN', 'right_on': '"""tract_blkgrp"""'}), "(left=input_destinations, right=origin_blockgroups, how='left',\n left_on=cn.ORIGIN, right_on='tract_blkgrp')\n", (9879, 9990), True, 'import pandas as pd\n'), ((725, 783), 'numpy.where', 'np.where', (['(df_destinations[cn.DISTANCE] < lower_bound)', '(1)', '(0)'], {}), '(df_destinations[cn.DISTANCE] < lower_bound, 1, 0)\n', (733, 783), True, 'import numpy as np\n'), ((822, 935), 'numpy.where', 'np.where', (['((df_destinations[cn.DISTANCE] >= lower_bound) & (df_destinations[cn.\n DISTANCE] < 
upper_bound))', '(1)', '(0)'], {}), '((df_destinations[cn.DISTANCE] >= lower_bound) & (df_destinations[\n cn.DISTANCE] < upper_bound), 1, 0)\n', (830, 935), True, 'import numpy as np\n'), ((2086, 2193), 'pandas.merge', 'pd.merge', ([], {'left': 'df_blockgroup', 'right': 'df_blockgroup2', 'how': '"""inner"""', 'left_on': 'cn.ORIGIN', 'right_on': 'cn.ORIGIN'}), "(left=df_blockgroup, right=df_blockgroup2, how='inner', left_on=cn.\n ORIGIN, right_on=cn.ORIGIN)\n", (2094, 2193), True, 'import pandas as pd\n'), ((2637, 2744), 'pandas.merge', 'pd.merge', ([], {'left': 'df_blockgroup', 'right': 'df_blockgroup2', 'how': '"""inner"""', 'left_on': 'cn.ORIGIN', 'right_on': 'cn.ORIGIN'}), "(left=df_blockgroup, right=df_blockgroup2, how='inner', left_on=cn.\n ORIGIN, right_on=cn.ORIGIN)\n", (2645, 2744), True, 'import pandas as pd\n'), ((2986, 3028), 'coordinate.Coordinate', 'Coordinate', (["df['dest_lat']", "df['dest_lon']"], {}), "(df['dest_lat'], df['dest_lon'])\n", (2996, 3028), False, 'from coordinate import Coordinate\n'), ((3047, 3095), 'coordinate.Coordinate', 'Coordinate', (['cn.CITY_CENTER[0]', 'cn.CITY_CENTER[1]'], {}), '(cn.CITY_CENTER[0], cn.CITY_CENTER[1])\n', (3057, 3095), False, 'from coordinate import Coordinate\n'), ((3712, 3819), 'pandas.merge', 'pd.merge', ([], {'left': 'df_blockgroup', 'right': 'df_blockgroup2', 'how': '"""inner"""', 'left_on': 'cn.ORIGIN', 'right_on': 'cn.ORIGIN'}), "(left=df_blockgroup, right=df_blockgroup2, how='inner', left_on=cn.\n ORIGIN, right_on=cn.ORIGIN)\n", (3720, 3819), True, 'import pandas as pd\n'), ((7808, 7942), 'pandas.DataFrame', 'pd.DataFrame', (['score'], {'columns': "['from_proximity_ratio', 'from_vert_hori_ratio', 'from_average_distance',\n 'from_distance_citycenter']"}), "(score, columns=['from_proximity_ratio', 'from_vert_hori_ratio',\n 'from_average_distance', 'from_distance_citycenter'])\n", (7820, 7942), True, 'import pandas as pd\n'), ((8390, 8446), 'pandas.DataFrame', 'pd.DataFrame', (['combinations'], 
{'columns': 'cn.BASKET_CATEGORIES'}), '(combinations, columns=cn.BASKET_CATEGORIES)\n', (8402, 8446), True, 'import pandas as pd\n'), ((1755, 1889), 'numpy.abs', 'np.abs', (["((df_destinations['dest_lat'] - df_destinations['orig_lat']) / (\n df_destinations['dest_lon'] - df_destinations['orig_lon']))"], {}), "((df_destinations['dest_lat'] - df_destinations['orig_lat']) / (\n df_destinations['dest_lon'] - df_destinations['orig_lon']))\n", (1761, 1889), True, 'import numpy as np\n'), ((6946, 7019), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['df_google[cn.PROX_RATIO]', 'googled_psrc[cn.PROX_RATIO]'], {}), '(df_google[cn.PROX_RATIO], googled_psrc[cn.PROX_RATIO])\n', (6964, 7019), False, 'from sklearn.metrics import mean_squared_error\n'), ((7054, 7142), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['df_google[cn.VERT_HORI_RATIO]', 'googled_psrc[cn.VERT_HORI_RATIO]'], {}), '(df_google[cn.VERT_HORI_RATIO], googled_psrc[cn.\n VERT_HORI_RATIO])\n', (7072, 7142), False, 'from sklearn.metrics import mean_squared_error\n'), ((7173, 7242), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['df_google[cn.AVG_DIST]', 'googled_psrc[cn.AVG_DIST]'], {}), '(df_google[cn.AVG_DIST], googled_psrc[cn.AVG_DIST])\n', (7191, 7242), False, 'from sklearn.metrics import mean_squared_error\n'), ((7286, 7400), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (["df_google['distance_from_citycenter_test']", "googled_psrc['distance_from_citycenter_test']"], {}), "(df_google['distance_from_citycenter_test'], googled_psrc\n ['distance_from_citycenter_test'])\n", (7304, 7400), False, 'from sklearn.metrics import mean_squared_error\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 14:25:42 2015
@author: Hanna
"""
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import scipy.io as io
def displayData(X):
pixels = 20 # images are 20x20 pixels
#100 examples shown on a 10 by 10 square
display_rows = 10
display_cols = 10
out = np.zeros((pixels*display_rows,pixels*display_cols))
rand_indices = np.random.permutation(5000)[0:display_rows*display_cols]
for j in range(0, display_rows):
for i in range(0, display_cols):
start_i = i*pixels
start_j = j*pixels
out[start_i:start_i+pixels, start_j:start_j+pixels] = X[rand_indices[display_rows*j+i]].reshape(pixels, pixels).T
fig = plt.figure()
ax = fig.gca()
ax.imshow(out,cmap="Greys_r")
ax.set_axis_off()
plt.savefig("100dataExamples.pdf")
plt.show()
def sigmoid(z):
#works elementwise for any array z
return sp.special.expit(z) # expit(x) = 1/(1+exp(-x)) elementwise for array x
def forward(Theta1,Theta2,X): # works for any # of examples
#forward propagation:
#input activation
m = np.shape(X)[0] # # of examples
a1 = X.T
a1 = np.vstack((np.ones((1,m)),a1))
#hidden layer activation
z2 = np.dot(Theta1,a1)
a2 = sigmoid(z2)
a2 = np.vstack((np.ones((1,m)),a2))
#output layer activation
z3 = np.dot(Theta2,a2)
a3 = sigmoid(z3)
return a3
def predictOneVsAllNN(h):
all_probability = h.T
prediction = np.argmax(all_probability,axis=1) + 1 # we do not have class 0 but have class 10
return prediction.reshape(np.shape(h)[1],1)
if __name__ == '__main__':
#load data
mat = io.loadmat("ex3data1.mat")
X, y = mat['X'], mat['y']
#display 100 random examples
displayData(X)
#load already trained weights
mat = io.loadmat("ex3weights.mat")
Theta1, Theta2 = mat['Theta1'], mat['Theta2']
#Theta1 and Theta2 correspond to a network with:
#400 (+1 bias) input units (= # of feature -- 20x20 image)
#one hidden layer with 25 (+1 bias) units
#10 output units corresponding to 10 classes
print(np.shape(Theta1)) # Theta1 shape is (25,401)
print(np.shape(Theta2)) # Theta2 shape is (10,26)
#NN prediction
h = forward(Theta1,Theta2,X)
prediction = predictOneVsAllNN(h)
training_accuracy = np.mean(prediction == y) * 100.0
print("NN training set prediction accuracy = ", training_accuracy,"%") # get 97.52 %
print("supposed to be 97.5")
#show images and print corresponding predictions one by one
m = np.shape(X)[0] # # of examples
sequence = np.random.permutation(m)
print("Note that 0 is labeled by 10")
plt.ion()
for i in sequence:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(X[i,:].reshape(20, 20).T, cmap="Greys_r")
ax.set_axis_off()
print(prediction[i,0])
input("Press Enter to continue...")
plt.close(fig)
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.random.permutation",
"scipy.io.loadmat",
"numpy.argmax",
"scipy.special.expit",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.dot",
"matplotlib.pyplot.ion",
"numpy.shape",
"matplotlib.pyplot.show... | [((341, 397), 'numpy.zeros', 'np.zeros', (['(pixels * display_rows, pixels * display_cols)'], {}), '((pixels * display_rows, pixels * display_cols))\n', (349, 397), True, 'import numpy as np\n'), ((749, 761), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (759, 761), True, 'import matplotlib.pyplot as plt\n'), ((841, 875), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""100dataExamples.pdf"""'], {}), "('100dataExamples.pdf')\n", (852, 875), True, 'import matplotlib.pyplot as plt\n'), ((880, 890), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (888, 890), True, 'import matplotlib.pyplot as plt\n'), ((970, 989), 'scipy.special.expit', 'sp.special.expit', (['z'], {}), '(z)\n', (986, 989), True, 'import scipy as sp\n'), ((1302, 1320), 'numpy.dot', 'np.dot', (['Theta1', 'a1'], {}), '(Theta1, a1)\n', (1308, 1320), True, 'import numpy as np\n'), ((1424, 1442), 'numpy.dot', 'np.dot', (['Theta2', 'a2'], {}), '(Theta2, a2)\n', (1430, 1442), True, 'import numpy as np\n'), ((1769, 1795), 'scipy.io.loadmat', 'io.loadmat', (['"""ex3data1.mat"""'], {}), "('ex3data1.mat')\n", (1779, 1795), True, 'import scipy.io as io\n'), ((1932, 1960), 'scipy.io.loadmat', 'io.loadmat', (['"""ex3weights.mat"""'], {}), "('ex3weights.mat')\n", (1942, 1960), True, 'import scipy.io as io\n'), ((2732, 2756), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (2753, 2756), True, 'import numpy as np\n'), ((2803, 2812), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2810, 2812), True, 'import matplotlib.pyplot as plt\n'), ((412, 439), 'numpy.random.permutation', 'np.random.permutation', (['(5000)'], {}), '(5000)\n', (433, 439), True, 'import numpy as np\n'), ((1174, 1185), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1182, 1185), True, 'import numpy as np\n'), ((1557, 1591), 'numpy.argmax', 'np.argmax', (['all_probability'], {'axis': '(1)'}), '(all_probability, axis=1)\n', (1566, 1591), True, 'import numpy as 
np\n'), ((2232, 2248), 'numpy.shape', 'np.shape', (['Theta1'], {}), '(Theta1)\n', (2240, 2248), True, 'import numpy as np\n'), ((2288, 2304), 'numpy.shape', 'np.shape', (['Theta2'], {}), '(Theta2)\n', (2296, 2304), True, 'import numpy as np\n'), ((2452, 2476), 'numpy.mean', 'np.mean', (['(prediction == y)'], {}), '(prediction == y)\n', (2459, 2476), True, 'import numpy as np\n'), ((2685, 2696), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (2693, 2696), True, 'import numpy as np\n'), ((2850, 2862), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2860, 2862), True, 'import matplotlib.pyplot as plt\n'), ((3066, 3080), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3075, 3080), True, 'import matplotlib.pyplot as plt\n'), ((1239, 1254), 'numpy.ones', 'np.ones', (['(1, m)'], {}), '((1, m))\n', (1246, 1254), True, 'import numpy as np\n'), ((1361, 1376), 'numpy.ones', 'np.ones', (['(1, m)'], {}), '((1, m))\n', (1368, 1376), True, 'import numpy as np\n'), ((1678, 1689), 'numpy.shape', 'np.shape', (['h'], {}), '(h)\n', (1686, 1689), True, 'import numpy as np\n')] |
import crypten.mpc as mpc
from crypten.mpc import MPCTensor
import torch
import crypten
import numpy as np
UNCLASSIFIED = False
NOISE = -1
ws =2
def _dist(p, q):
#return (p - q).square().sum().sqrt()
return (p-q).norm(p=2, dim=None, keepdim=False)
def _eps_neighborhood(p,q,eps):
return _dist(p,q) < eps
def _region_query(m, point_id, eps, n_points, distance_enc):
seeds = [point_id]
for i in range(n_points):
if i == point_id:
continue
if (distance_enc[point_id, i] < eps).get_plain_text() == 1:
seeds.append(i)
return seeds
def _expand_cluster(m, classifications, point_id, cluster_id, eps, min_points, n_points, distance_enc):
seeds = _region_query(m, point_id, eps, n_points, distance_enc)
if len(seeds) < min_points:
classifications[point_id] = NOISE
return False
else:
classifications[point_id] = cluster_id
for seed_id in seeds:
classifications[seed_id] = cluster_id
while len(seeds) > 0:
current_point = seeds[0]
results = _region_query(m, current_point, eps, n_points, distance_enc)
if len(results) >= min_points:
for i in range(0, len(results)):
result_point = results[i]
if classifications[result_point] == UNCLASSIFIED or \
classifications[result_point] == NOISE:
if classifications[result_point] == UNCLASSIFIED:
seeds.append(result_point)
classifications[result_point] = cluster_id
seeds = seeds[1:]
return True
def dbscan(m, eps, min_points):
cluster_id = 1
n_points = m.shape[1]
classifications = [UNCLASSIFIED] * n_points
distance = torch.zeros((n_points,n_points))
distance_enc = MPCTensor(distance, ptype=crypten.mpc.arithmetic)
for i in range(n_points-1):
for j in range(i + 1, n_points):
distance_enc[i,j] = _dist(m[:, i], m[:, j])
distance_enc[j,i] = distance_enc[i,j]
for point_id in range(0, n_points):
#point = m[:, point_id]
if classifications[point_id] == UNCLASSIFIED:
if _expand_cluster(m, classifications, point_id, cluster_id, eps, min_points, n_points, distance_enc):
cluster_id = cluster_id + 1
return classifications
@mpc.run_multiprocess(world_size=ws)
def mpc_dbscan(data):
nc = data.shape[0]
data_enc = MPCTensor(data, ptype=crypten.mpc.arithmetic)
distance_matrix = torch.ones((nc, nc))
distance_enc = MPCTensor(distance_matrix, ptype=crypten.mpc.arithmetic)
cos = crypten.nn.CosineSimilarity(dim=0, eps=1e-6)
for i in range(nc - 1):
for j in range(i + 1, nc):
# numerator_enc = (data_enc[i]*data_enc[j]).sum()
# # numerator = numerator_enc.get_plain_text()
# # print(numerator)
# denominator_enc = data_enc[i].norm(p=2, dim=None, keepdim=False)*data_enc[j].norm(p=2, dim=None, keepdim=False)
# # denominator = denominator_enc.get_plain_text()
# # print(denominator)
# distance_enc[i,j] = numerator_enc/denominator_enc
distance_enc[i, j] = cos(data_enc[i],data_enc[j])
distance_enc[j, i] = distance_enc[i, j]
label = dbscan(distance_enc, eps=0.2, min_points=2)
label = np.array(label).squeeze()
split_label = []
if (label == -1).all():
split_label = [[i for i in range(nc)]]
else:
label_elements = np.unique(label)
for i in label_elements.tolist():
split_label.append(np.where(label == i)[0].tolist())
# b = np.where(label == 0)[0].tolist()
# euclidean distance between self.weights and clients' weights
weight_agg = []
weights_in_mpc = data
for b in split_label:
weights_enc = MPCTensor(weights_in_mpc[b], ptype=crypten.mpc.arithmetic)
weight_agg.append(weights_enc.mean(dim=0).get_plain_text().numpy())
# self.weights = weight_agg
weight_aggg = np.array(weight_agg).flatten()
print(weight_aggg)
print(split_label)
return weight_aggg
# @mpc.run_multiprocess(world_size=ws)
# def mpc_mean(data):
# data_enc = MPCTensor(data, ptype=crypten.mpc.arithmetic)
# data_avg = data_enc.mean(dim=0)
# return data_avg.get_plain_text() | [
"crypten.mpc.run_multiprocess",
"crypten.mpc.MPCTensor",
"numpy.unique",
"numpy.where",
"crypten.nn.CosineSimilarity",
"numpy.array",
"torch.zeros",
"torch.ones"
] | [((2421, 2456), 'crypten.mpc.run_multiprocess', 'mpc.run_multiprocess', ([], {'world_size': 'ws'}), '(world_size=ws)\n', (2441, 2456), True, 'import crypten.mpc as mpc\n'), ((1817, 1850), 'torch.zeros', 'torch.zeros', (['(n_points, n_points)'], {}), '((n_points, n_points))\n', (1828, 1850), False, 'import torch\n'), ((1869, 1918), 'crypten.mpc.MPCTensor', 'MPCTensor', (['distance'], {'ptype': 'crypten.mpc.arithmetic'}), '(distance, ptype=crypten.mpc.arithmetic)\n', (1878, 1918), False, 'from crypten.mpc import MPCTensor\n'), ((2517, 2562), 'crypten.mpc.MPCTensor', 'MPCTensor', (['data'], {'ptype': 'crypten.mpc.arithmetic'}), '(data, ptype=crypten.mpc.arithmetic)\n', (2526, 2562), False, 'from crypten.mpc import MPCTensor\n'), ((2585, 2605), 'torch.ones', 'torch.ones', (['(nc, nc)'], {}), '((nc, nc))\n', (2595, 2605), False, 'import torch\n'), ((2625, 2681), 'crypten.mpc.MPCTensor', 'MPCTensor', (['distance_matrix'], {'ptype': 'crypten.mpc.arithmetic'}), '(distance_matrix, ptype=crypten.mpc.arithmetic)\n', (2634, 2681), False, 'from crypten.mpc import MPCTensor\n'), ((2692, 2737), 'crypten.nn.CosineSimilarity', 'crypten.nn.CosineSimilarity', ([], {'dim': '(0)', 'eps': '(1e-06)'}), '(dim=0, eps=1e-06)\n', (2719, 2737), False, 'import crypten\n'), ((3585, 3601), 'numpy.unique', 'np.unique', (['label'], {}), '(label)\n', (3594, 3601), True, 'import numpy as np\n'), ((3917, 3975), 'crypten.mpc.MPCTensor', 'MPCTensor', (['weights_in_mpc[b]'], {'ptype': 'crypten.mpc.arithmetic'}), '(weights_in_mpc[b], ptype=crypten.mpc.arithmetic)\n', (3926, 3975), False, 'from crypten.mpc import MPCTensor\n'), ((3427, 3442), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (3435, 3442), True, 'import numpy as np\n'), ((4103, 4123), 'numpy.array', 'np.array', (['weight_agg'], {}), '(weight_agg)\n', (4111, 4123), True, 'import numpy as np\n'), ((3675, 3695), 'numpy.where', 'np.where', (['(label == i)'], {}), '(label == i)\n', (3683, 3695), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from visual_kinematics import Robot, Frame
import numpy as np
from math import pi, sqrt, sin, cos, atan2
def main():
np.set_printoptions(precision=3, suppress=True)
dh_params = np.array([[0.163, 0., 0., 0.5 * pi],
[0., 0.5 * pi, 0.632, pi],
[0., 0., 0.6005, pi],
[0.2013, -0.5 * pi, 0., -0.5 * pi],
[0.1025, 0., 0., 0.5 * pi],
[0.094, 0., 0., 0.]])
def aubo10_inv(dh_params, f):
# save all 8 sets of solutions
theta_all = np.zeros([8, 6])
for i in range(8):
A1 = dh_params[5, 0] * f[1, 2] - f[1, 3]
B1 = f[0, 3] - dh_params[5, 0] * f[0, 2]
C1 = dh_params[3, 0]
# theta_0 : 2 sets of solutions
if i < 4:
theta_all[i, 0] = atan2(C1, sqrt(A1 * A1 + B1 * B1 - C1 * C1)) - atan2(A1, B1)
else:
theta_all[i, 0] = atan2(C1, -sqrt(A1 * A1 + B1 * B1 - C1 * C1)) - atan2(A1, B1)
# theta_4 : 2 sets of solutions
b = f[0, 2] * sin(theta_all[i, 0]) - f[1, 2] * cos(theta_all[i, 0])
if i % 4 == 0 or i % 4 == 1:
theta_all[i, 4] = atan2(sqrt(1 - b * b), b)
else:
theta_all[i, 4] = atan2(-sqrt(1 - b * b), b)
# check singularity
if abs(sin(theta_all[i, 4])) < 1e-6:
print("singularity!!")
return np.zeros([8, 6])
# theta_5
A6 = (f[0, 1] * sin(theta_all[i, 0]) - f[1, 1] * cos(theta_all[i, 0])) / sin(theta_all[i, 4])
B6 = (f[1, 0] * cos(theta_all[i, 0]) - f[0, 0] * sin(theta_all[i, 0])) / sin(theta_all[i, 4])
theta_all[i, 5] = atan2(A6, B6)
# theta_1 : 2 sets of solutions
A234 = f[2, 2] / sin(theta_all[i, 4])
B234 = (f[0, 2] * cos(theta_all[i, 0]) + f[1, 2] * sin(theta_all[i, 0])) / sin(theta_all[i, 4])
M = dh_params[4, 0] * A234 - dh_params[5, 0] * sin(theta_all[i, 4]) * B234 + f[0, 3] * cos(
theta_all[i, 0]) + f[1, 3] * sin(theta_all[i, 0])
N = -dh_params[4, 0] * B234 - dh_params[5, 0] * sin(theta_all[i, 4]) * A234 - dh_params[0, 0] + f[2, 3]
L = (M * M + N * N + dh_params[1, 2] * dh_params[1, 2] - dh_params[2, 2] * dh_params[2, 2]) / (
2 * dh_params[1, 2])
if i % 2 == 0:
theta_all[i, 1] = atan2(N, M) - atan2(L, sqrt(M * M + N * N - L * L))
else:
theta_all[i, 1] = atan2(N, M) - atan2(L, -sqrt(M * M + N * N - L * L))
# theta_2 and theta_3
A23 = (-M - dh_params[1, 2] * sin(theta_all[i, 1])) / dh_params[2, 2]
B23 = (N - dh_params[1, 2] * cos(theta_all[i, 1])) / dh_params[2, 2]
theta_all[i, 2] = theta_all[i, 1] - atan2(A23, B23)
theta_all[i, 3] = atan2(A234, B234) - atan2(A23, B23)
# select the best solution
diff_sum_min = 1e+5
index = 0
for i in range(8):
diff_sum = 0
for j in range(6):
diff = theta_all[i, j] - dh_params[j, 1]
while diff < -pi:
diff += 2. * pi
while diff > pi:
diff -= 2. * pi
diff_sum += abs(diff)
if diff_sum < diff_sum_min:
diff_sum_min = diff_sum
index = i
return theta_all[i]
robot = Robot(dh_params, analytical_inv=aubo10_inv)
# =====================================
# trajectory
# =====================================
trajectory = []
trajectory.append(Frame.from_euler_3(np.array([0.5*pi, 0., pi]), np.array([[0.28127], [0.], [1.13182]])))
trajectory.append(Frame.from_euler_3(np.array([0.25*pi, 0., 0.75*pi]), np.array([[0.48127], [0.], [1.13182]])))
trajectory.append(Frame.from_euler_3(np.array([0.5 * pi, 0., pi]), np.array([[0.48127], [0.], [0.63182]])))
robot.show_trajectory(trajectory, motion="lin")
if __name__ == "__main__":
main()
| [
"math.sqrt",
"math.cos",
"numpy.array",
"numpy.zeros",
"math.atan2",
"visual_kinematics.Robot",
"math.sin",
"numpy.set_printoptions"
] | [((147, 194), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (166, 194), True, 'import numpy as np\n'), ((212, 404), 'numpy.array', 'np.array', (['[[0.163, 0.0, 0.0, 0.5 * pi], [0.0, 0.5 * pi, 0.632, pi], [0.0, 0.0, 0.6005,\n pi], [0.2013, -0.5 * pi, 0.0, -0.5 * pi], [0.1025, 0.0, 0.0, 0.5 * pi],\n [0.094, 0.0, 0.0, 0.0]]'], {}), '([[0.163, 0.0, 0.0, 0.5 * pi], [0.0, 0.5 * pi, 0.632, pi], [0.0, \n 0.0, 0.6005, pi], [0.2013, -0.5 * pi, 0.0, -0.5 * pi], [0.1025, 0.0, \n 0.0, 0.5 * pi], [0.094, 0.0, 0.0, 0.0]])\n', (220, 404), True, 'import numpy as np\n'), ((3544, 3587), 'visual_kinematics.Robot', 'Robot', (['dh_params'], {'analytical_inv': 'aubo10_inv'}), '(dh_params, analytical_inv=aubo10_inv)\n', (3549, 3587), False, 'from visual_kinematics import Robot, Frame\n'), ((608, 624), 'numpy.zeros', 'np.zeros', (['[8, 6]'], {}), '([8, 6])\n', (616, 624), True, 'import numpy as np\n'), ((1796, 1809), 'math.atan2', 'atan2', (['A6', 'B6'], {}), '(A6, B6)\n', (1801, 1809), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((3756, 3785), 'numpy.array', 'np.array', (['[0.5 * pi, 0.0, pi]'], {}), '([0.5 * pi, 0.0, pi])\n', (3764, 3785), True, 'import numpy as np\n'), ((3784, 3823), 'numpy.array', 'np.array', (['[[0.28127], [0.0], [1.13182]]'], {}), '([[0.28127], [0.0], [1.13182]])\n', (3792, 3823), True, 'import numpy as np\n'), ((3866, 3903), 'numpy.array', 'np.array', (['[0.25 * pi, 0.0, 0.75 * pi]'], {}), '([0.25 * pi, 0.0, 0.75 * pi])\n', (3874, 3903), True, 'import numpy as np\n'), ((3900, 3939), 'numpy.array', 'np.array', (['[[0.48127], [0.0], [1.13182]]'], {}), '([[0.48127], [0.0], [1.13182]])\n', (3908, 3939), True, 'import numpy as np\n'), ((3982, 4011), 'numpy.array', 'np.array', (['[0.5 * pi, 0.0, pi]'], {}), '([0.5 * pi, 0.0, pi])\n', (3990, 4011), True, 'import numpy as np\n'), ((4012, 4051), 'numpy.array', 'np.array', (['[[0.48127], [0.0], [0.63182]]'], {}), 
'([[0.48127], [0.0], [0.63182]])\n', (4020, 4051), True, 'import numpy as np\n'), ((1514, 1530), 'numpy.zeros', 'np.zeros', (['[8, 6]'], {}), '([8, 6])\n', (1522, 1530), True, 'import numpy as np\n'), ((1639, 1659), 'math.sin', 'sin', (['theta_all[i, 4]'], {}), '(theta_all[i, 4])\n', (1642, 1659), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1745, 1765), 'math.sin', 'sin', (['theta_all[i, 4]'], {}), '(theta_all[i, 4])\n', (1748, 1765), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1884, 1904), 'math.sin', 'sin', (['theta_all[i, 4]'], {}), '(theta_all[i, 4])\n', (1887, 1904), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1992, 2012), 'math.sin', 'sin', (['theta_all[i, 4]'], {}), '(theta_all[i, 4])\n', (1995, 2012), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2916, 2931), 'math.atan2', 'atan2', (['A23', 'B23'], {}), '(A23, B23)\n', (2921, 2931), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2962, 2979), 'math.atan2', 'atan2', (['A234', 'B234'], {}), '(A234, B234)\n', (2967, 2979), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2982, 2997), 'math.atan2', 'atan2', (['A23', 'B23'], {}), '(A23, B23)\n', (2987, 2997), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((938, 951), 'math.atan2', 'atan2', (['A1', 'B1'], {}), '(A1, B1)\n', (943, 951), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1052, 1065), 'math.atan2', 'atan2', (['A1', 'B1'], {}), '(A1, B1)\n', (1057, 1065), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1137, 1157), 'math.sin', 'sin', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (1140, 1157), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1170, 1190), 'math.cos', 'cos', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (1173, 1190), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1272, 1287), 'math.sqrt', 'sqrt', (['(1 - b * b)'], {}), '(1 - b * b)\n', (1276, 1287), False, 'from math import pi, sqrt, sin, cos, atan2\n'), 
((1422, 1442), 'math.sin', 'sin', (['theta_all[i, 4]'], {}), '(theta_all[i, 4])\n', (1425, 1442), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2162, 2182), 'math.sin', 'sin', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (2165, 2182), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2513, 2524), 'math.atan2', 'atan2', (['N', 'M'], {}), '(N, M)\n', (2518, 2524), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2617, 2628), 'math.atan2', 'atan2', (['N', 'M'], {}), '(N, M)\n', (2622, 2628), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((901, 934), 'math.sqrt', 'sqrt', (['(A1 * A1 + B1 * B1 - C1 * C1)'], {}), '(A1 * A1 + B1 * B1 - C1 * C1)\n', (905, 934), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1351, 1366), 'math.sqrt', 'sqrt', (['(1 - b * b)'], {}), '(1 - b * b)\n', (1355, 1366), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1582, 1602), 'math.sin', 'sin', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (1585, 1602), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1615, 1635), 'math.cos', 'cos', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (1618, 1635), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1688, 1708), 'math.cos', 'cos', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (1691, 1708), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1721, 1741), 'math.sin', 'sin', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (1724, 1741), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1935, 1955), 'math.cos', 'cos', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (1938, 1955), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1968, 1988), 'math.sin', 'sin', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (1971, 1988), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2112, 2132), 'math.cos', 'cos', (['theta_all[i, 0]'], {}), '(theta_all[i, 0])\n', (2115, 2132), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2536, 
2563), 'math.sqrt', 'sqrt', (['(M * M + N * N - L * L)'], {}), '(M * M + N * N - L * L)\n', (2540, 2563), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2747, 2767), 'math.sin', 'sin', (['theta_all[i, 1]'], {}), '(theta_all[i, 1])\n', (2750, 2767), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2828, 2848), 'math.cos', 'cos', (['theta_all[i, 1]'], {}), '(theta_all[i, 1])\n', (2831, 2848), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((1015, 1048), 'math.sqrt', 'sqrt', (['(A1 * A1 + B1 * B1 - C1 * C1)'], {}), '(A1 * A1 + B1 * B1 - C1 * C1)\n', (1019, 1048), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2641, 2668), 'math.sqrt', 'sqrt', (['(M * M + N * N - L * L)'], {}), '(M * M + N * N - L * L)\n', (2645, 2668), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2072, 2092), 'math.sin', 'sin', (['theta_all[i, 4]'], {}), '(theta_all[i, 4])\n', (2075, 2092), False, 'from math import pi, sqrt, sin, cos, atan2\n'), ((2243, 2263), 'math.sin', 'sin', (['theta_all[i, 4]'], {}), '(theta_all[i, 4])\n', (2246, 2263), False, 'from math import pi, sqrt, sin, cos, atan2\n')] |
import os
import torch
import torchvision.transforms as tfs
import numpy as np
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from PIL import Image
from .utils import extract_episode
_SHAPENET_ID2NAME = {
'02691156': 'airplane',
'02880940': 'bowl',
'02942699': 'camera',
'02958343': 'car',
'02992529': 'cellphone',
'03001627': 'chair',
'03046257': 'clock',
'03211117': 'monitor',
'03325088': 'faucet',
'03593526': 'jar',
'03797390': 'mug',
'04004475': 'printer',
'04099429': 'rocket',
}
_SHAPENET_NAME2ID = {_SHAPENET_ID2NAME[eachKey]: eachKey for eachKey in _SHAPENET_ID2NAME.keys()}
class FewShotSubShapeNet(Dataset):
def __init__(self, config_path, transform=None, tgt_transform=None, data_argument=False, n_pts=2048):
super(FewShotSubShapeNet, self).__init__()
self.imgs = list()
self.pcs = list()
with open(config_path, 'r') as f:
for eachLine in f.readlines():
item_path = filename = eachLine.rstrip('\n')
npy_file = os.path.join(item_path, 'npy_file.npy')
view_root = os.path.join(item_path, 'images')
if not os.path.exists(npy_file):
continue
views = list()
for view in os.listdir(view_root):
views.append(os.path.join(view_root, view))
self.pcs.append(npy_file)
self.imgs.append(views)
self.pc_data = list()
for idx in range(len(self.pcs)):
try:
pc = np.load(self.pcs[idx])
except:
raise Exception('Unexpected Error!')
choice = np.random.choice(15000, n_pts)
pc = pc[choice, :]
self.pc_data.append(pc)
self.tfs = transform
self.tgt_tfs = tgt_transform
self.data_argument = data_argument
self.n_pts = n_pts
def __getitem__(self, index):
img_path = self.imgs[index][0]
pc = self.pc_data[index]
img = Image.open(img_path).convert('RGB')
if self.tfs is not None:
img = self.tfs(img)
point_set = np.asarray(pc, dtype=np.float32)
if point_set.shape[0] < self.n_pts:
choice = np.random.choice(len(point_set), self.n_pts - point_set.shape[0], replace=True)
aux_pc = point_set[choice, :]
point_set = np.concatenate((point_set, aux_pc))
center_point = np.expand_dims(np.mean(point_set, axis=0), 0)
point_set = point_set - center_point
dist = np.max(np.sqrt(np.sum(point_set ** 2, axis=1)), 0)
point_set = point_set / dist
if self.data_argument:
theta = np.random.uniform(0, np.pi * 2)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
point_set[:, [0, 2]] = point_set[:, [0, 2]].dot(rotation_matrix) # random rotation
point_set += np.random.normal(0, 0.02, size=point_set.shape) # random jitter
point_set = torch.from_numpy(point_set).contiguous()
return img, point_set
def __len__(self, ):
return len(self.imgs)
class FewShotShapeNet(Dataset):
    """Episodic (n_way, n_support/n_query) dataset over ShapeNet image/point-cloud pairs.

    Each __getitem__ builds a full episode for the class of the indexed item,
    using per-class tensors prepared once in _build_reference().
    """
    def __init__(self, config_path, auxiliary_dir, n_classes, n_support, n_query, transform=None, tgt_transform=None):
        super(FewShotShapeNet, self).__init__()
        # Store the img_path & ply path for every data point
        self.data_corpus = list()
        with open(config_path, 'r') as f:
            for eachItem in f.readlines():
                self.data_corpus.append(eachItem.rstrip('\n'))
        self.item_len = len(self.data_corpus)
        self.tfs, self.tgt_tfs = transform, tgt_transform
        # Store the img_path & ply path for a specific class -- faster access
        self.reference = dict()
        self.auxiliary_dir = auxiliary_dir
        self._build_reference()
        self.n_way = n_classes
        self.n_support = n_support
        self.n_query = n_query
        self.seen_index = [False] * len(self.data_corpus)
        self.cache = dict()
    def __getitem__(self, index):
        item_path = self.data_corpus[index]
        # NOTE(review): hard-coded path depth — assumes the class id is the 6th
        # path component; verify against the corpus layout.
        data_instance_class = item_path.split('/')[5] # Hard code ?
        query_matrix = {
            'class': _SHAPENET_ID2NAME[data_instance_class],
            'img_data': self.reference[data_instance_class]['imgs'],
            'pc_data': self.reference[data_instance_class]['pcs'],
        }
        ans = extract_episode(self.n_support, self.n_query, query_matrix)
        example_idx = torch.randperm(self.item_len)[:self.n_support] # Adding additional img-pc pairs to avoid model collapse
        ans['xad'] = self.img_corpus[example_idx]
        ans['pcad'] = self.pc_corpus[example_idx]
        return ans
    def _build_reference(self):
        # Load every per-class .txt config into one stacked (imgs, pcs) pair.
        assert self.auxiliary_dir is not None, 'Auxiliary folder is not available!!!'
        tmp_img_list, tmp_pc_list = list(), list()
        for eachFile in os.listdir(self.auxiliary_dir):
            if not eachFile.endswith('.txt'):
                continue
            # File names look like "<prefix>+<class_name>.txt".
            class_name = eachFile.split('.')[0].split('+')[1]
            self.reference[class_name] = dict()
            class_ds = FewShotSubShapeNet(os.path.join(self.auxiliary_dir, eachFile), transform=self.tfs, tgt_transform=self.tgt_tfs)
            print(f'{eachFile}: {len(class_ds)}')
            # batch_size == len(dataset): the first (only) batch holds the whole class.
            loader = DataLoader(class_ds, batch_size=len(class_ds), shuffle=False)
            # loader = DataLoader(class_ds, batch_size=min(200, len(class_ds)))
            for stacked_img, stacked_pc in loader:
                self.reference[class_name]['imgs'] = stacked_img
                self.reference[class_name]['pcs'] = stacked_pc
                tmp_img_list.append(stacked_img)
                tmp_pc_list.append(stacked_pc)
                break # Follow the protonet, only need one sample because batch_size equal to the dataset length
        self.img_corpus = torch.cat(tmp_img_list, dim=0)
        # print(self.img_corpus.shape)
        self.pc_corpus = torch.cat(tmp_pc_list, dim=0)
        # print(self.pc_corpus.shape)
    def __len__(self, ):
        return len(self.data_corpus) | [
"numpy.random.normal",
"numpy.mean",
"os.path.exists",
"os.listdir",
"PIL.Image.open",
"torch.randperm",
"numpy.random.choice",
"numpy.asarray",
"os.path.join",
"torch.from_numpy",
"numpy.sum",
"numpy.cos",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.sin",
"numpy.load",
"torc... | [((2218, 2250), 'numpy.asarray', 'np.asarray', (['pc'], {'dtype': 'np.float32'}), '(pc, dtype=np.float32)\n', (2228, 2250), True, 'import numpy as np\n'), ((5077, 5107), 'os.listdir', 'os.listdir', (['self.auxiliary_dir'], {}), '(self.auxiliary_dir)\n', (5087, 5107), False, 'import os\n'), ((6054, 6084), 'torch.cat', 'torch.cat', (['tmp_img_list'], {'dim': '(0)'}), '(tmp_img_list, dim=0)\n', (6063, 6084), False, 'import torch\n'), ((6149, 6178), 'torch.cat', 'torch.cat', (['tmp_pc_list'], {'dim': '(0)'}), '(tmp_pc_list, dim=0)\n', (6158, 6178), False, 'import torch\n'), ((1739, 1769), 'numpy.random.choice', 'np.random.choice', (['(15000)', 'n_pts'], {}), '(15000, n_pts)\n', (1755, 1769), True, 'import numpy as np\n'), ((2463, 2498), 'numpy.concatenate', 'np.concatenate', (['(point_set, aux_pc)'], {}), '((point_set, aux_pc))\n', (2477, 2498), True, 'import numpy as np\n'), ((2538, 2564), 'numpy.mean', 'np.mean', (['point_set'], {'axis': '(0)'}), '(point_set, axis=0)\n', (2545, 2564), True, 'import numpy as np\n'), ((2769, 2800), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(np.pi * 2)'], {}), '(0, np.pi * 2)\n', (2786, 2800), True, 'import numpy as np\n'), ((3028, 3075), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.02)'], {'size': 'point_set.shape'}), '(0, 0.02, size=point_set.shape)\n', (3044, 3075), True, 'import numpy as np\n'), ((4658, 4687), 'torch.randperm', 'torch.randperm', (['self.item_len'], {}), '(self.item_len)\n', (4672, 4687), False, 'import torch\n'), ((1100, 1139), 'os.path.join', 'os.path.join', (['item_path', '"""npy_file.npy"""'], {}), "(item_path, 'npy_file.npy')\n", (1112, 1139), False, 'import os\n'), ((1168, 1201), 'os.path.join', 'os.path.join', (['item_path', '"""images"""'], {}), "(item_path, 'images')\n", (1180, 1201), False, 'import os\n'), ((1341, 1362), 'os.listdir', 'os.listdir', (['view_root'], {}), '(view_root)\n', (1351, 1362), False, 'import os\n'), ((1621, 1643), 'numpy.load', 'np.load', 
(['self.pcs[idx]'], {}), '(self.pcs[idx])\n', (1628, 1643), True, 'import numpy as np\n'), ((2096, 2116), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2106, 2116), False, 'from PIL import Image\n'), ((2644, 2674), 'numpy.sum', 'np.sum', (['(point_set ** 2)'], {'axis': '(1)'}), '(point_set ** 2, axis=1)\n', (2650, 2674), True, 'import numpy as np\n'), ((3114, 3141), 'torch.from_numpy', 'torch.from_numpy', (['point_set'], {}), '(point_set)\n', (3130, 3141), False, 'import torch\n'), ((5334, 5376), 'os.path.join', 'os.path.join', (['self.auxiliary_dir', 'eachFile'], {}), '(self.auxiliary_dir, eachFile)\n', (5346, 5376), False, 'import os\n'), ((1226, 1250), 'os.path.exists', 'os.path.exists', (['npy_file'], {}), '(npy_file)\n', (1240, 1250), False, 'import os\n'), ((1397, 1426), 'os.path.join', 'os.path.join', (['view_root', 'view'], {}), '(view_root, view)\n', (1409, 1426), False, 'import os\n'), ((2842, 2855), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2848, 2855), True, 'import numpy as np\n'), ((2875, 2888), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2881, 2888), True, 'import numpy as np\n'), ((2890, 2903), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2896, 2903), True, 'import numpy as np\n'), ((2858, 2871), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2864, 2871), True, 'import numpy as np\n')] |
import numpy as np
class Network:
    """Vectorized network of Izhikevich-style spiking neurons.

    Every parameter is a per-neuron numpy array of equal length. One call to
    step() advances all neurons by one Euler half-step of ``dt``:
    spiking neurons (v >= threshold) are reset (v <- c, u <- u + d), the
    remaining neurons integrate the quadratic membrane equation, optionally
    clamped between resting potential and spiking threshold.
    """
    def __init__(self, a, b, c, d, resting_potentials, spiking_thresholds, dt, clamp_voltages=True):
        # Number of neurons is inferred from the parameter vectors.
        self.neurons = a.shape[0]
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.resting_potentials = resting_potentials
        self.spiking_thresholds = spiking_thresholds
        self.dt = dt
        self.clamp_voltages = clamp_voltages
        # Start at rest; the recovery variable follows u = b * v.
        self.v = self.resting_potentials
        self.u = self.b * self.v

    def reset_network(self):
        """Put membrane and recovery variables back to their resting state."""
        self.v = self.resting_potentials
        self.u = self.b * self.v

    def step(self, input_currents):
        """Advance the whole network by one integration step.

        input_currents : per-neuron external current (numpy array).
        """
        # Neurons that crossed their threshold are reset first.
        spiked = np.where(self.v >= self.spiking_thresholds)
        self.v[spiked] = self.c[spiked]
        self.u[spiked] += self.d[spiked]
        # All sub-threshold neurons (including the ones just reset)
        # integrate the Izhikevich membrane equation with a 0.5 factor.
        quiet = np.where(self.v < self.spiking_thresholds)
        dv = (0.04 * self.v[quiet] ** 2 + 5 * self.v[quiet] + 140
              - self.u[quiet] + input_currents[quiet])
        self.v[quiet] += 0.5 * dv * self.dt
        if self.clamp_voltages:
            # Keep voltages inside [resting potential, spiking threshold].
            too_high = np.where(self.v > self.spiking_thresholds)
            self.v[too_high] = self.spiking_thresholds[too_high]
            too_low = np.where(self.v < self.resting_potentials)
            self.v[too_low] = self.resting_potentials[too_low]
        # Recovery update uses the (possibly clamped) new voltage.
        du = self.a[quiet] * (self.b[quiet] * self.v[quiet] - self.u[quiet])
        self.u[quiet] += du * self.dt
| [
"numpy.where"
] | [((651, 694), 'numpy.where', 'np.where', (['(self.v >= self.spiking_thresholds)'], {}), '(self.v >= self.spiking_thresholds)\n', (659, 694), True, 'import numpy as np\n'), ((814, 856), 'numpy.where', 'np.where', (['(self.v < self.spiking_thresholds)'], {}), '(self.v < self.spiking_thresholds)\n', (822, 856), True, 'import numpy as np\n'), ((1121, 1163), 'numpy.where', 'np.where', (['(self.v > self.spiking_thresholds)'], {}), '(self.v > self.spiking_thresholds)\n', (1129, 1163), True, 'import numpy as np\n'), ((1246, 1288), 'numpy.where', 'np.where', (['(self.v < self.resting_potentials)'], {}), '(self.v < self.resting_potentials)\n', (1254, 1288), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Demo: wrap a SciPy CSR matrix into Frovedis feature data, optionally
# densifying it on the Frovedis server.
import sys
import numpy as np
np.set_printoptions(threshold=5)
from scipy.sparse import csr_matrix
from frovedis.matrix.ml_data import FrovedisFeatureData
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if (argc < 2):
    print ('Please give frovedis_server calling command as the first argument \n(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")')
    quit()
from frovedis.exrpc.server import FrovedisServer
FrovedisServer.initialize(argvs[1])
# --- sparse data ---
# CSR triple (data, column indices, row pointers) for a 3x3 matrix.
data = np.array([1, 2, 3, 4, 5, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
indptr = np.array([0, 2, 3, 6])
# NOTE(review): X is built but never used below — presumably kept for demonstration.
X = csr_matrix((data, indices, indptr),
               dtype=np.float64,
               shape=(3, 3))
mat = np.matrix([[0, 0, 0, 0],
                [0, 1, 1, 1],
                [1, 0, 1, 0],
                [1, 1, 1, 0],
                [1, 1, 1, 1]])
mat = csr_matrix(mat)
# Sparse path: the data stay in CRS form on the server.
data = FrovedisFeatureData(mat)
data.debug_print()
data.get().debug_print()
print(data.is_dense())
data = FrovedisFeatureData(mat, dense_kind='rowmajor', densify=True) # idensify sparse data
data.debug_print()
data.get().debug_print()
print(data.is_dense())
data.release()
data.debug_print() # no display, data has been released
FrovedisServer.shut_down()
| [
"frovedis.exrpc.server.FrovedisServer.shut_down",
"frovedis.matrix.ml_data.FrovedisFeatureData",
"numpy.array",
"frovedis.exrpc.server.FrovedisServer.initialize",
"scipy.sparse.csr_matrix",
"numpy.matrix",
"numpy.set_printoptions"
] | [((53, 85), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '(5)'}), '(threshold=5)\n', (72, 85), True, 'import numpy as np\n'), ((471, 506), 'frovedis.exrpc.server.FrovedisServer.initialize', 'FrovedisServer.initialize', (['argvs[1]'], {}), '(argvs[1])\n', (496, 506), False, 'from frovedis.exrpc.server import FrovedisServer\n'), ((538, 566), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (546, 566), True, 'import numpy as np\n'), ((577, 605), 'numpy.array', 'np.array', (['[0, 2, 2, 0, 1, 2]'], {}), '([0, 2, 2, 0, 1, 2])\n', (585, 605), True, 'import numpy as np\n'), ((615, 637), 'numpy.array', 'np.array', (['[0, 2, 3, 6]'], {}), '([0, 2, 3, 6])\n', (623, 637), True, 'import numpy as np\n'), ((642, 709), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, indices, indptr)'], {'dtype': 'np.float64', 'shape': '(3, 3)'}), '((data, indices, indptr), dtype=np.float64, shape=(3, 3))\n', (652, 709), False, 'from scipy.sparse import csr_matrix\n'), ((747, 832), 'numpy.matrix', 'np.matrix', (['[[0, 0, 0, 0], [0, 1, 1, 1], [1, 0, 1, 0], [1, 1, 1, 0], [1, 1, 1, 1]]'], {}), '([[0, 0, 0, 0], [0, 1, 1, 1], [1, 0, 1, 0], [1, 1, 1, 0], [1, 1, 1,\n 1]])\n', (756, 832), True, 'import numpy as np\n'), ((903, 918), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat'], {}), '(mat)\n', (913, 918), False, 'from scipy.sparse import csr_matrix\n'), ((927, 951), 'frovedis.matrix.ml_data.FrovedisFeatureData', 'FrovedisFeatureData', (['mat'], {}), '(mat)\n', (946, 951), False, 'from frovedis.matrix.ml_data import FrovedisFeatureData\n'), ((1027, 1088), 'frovedis.matrix.ml_data.FrovedisFeatureData', 'FrovedisFeatureData', (['mat'], {'dense_kind': '"""rowmajor"""', 'densify': '(True)'}), "(mat, dense_kind='rowmajor', densify=True)\n", (1046, 1088), False, 'from frovedis.matrix.ml_data import FrovedisFeatureData\n'), ((1251, 1277), 'frovedis.exrpc.server.FrovedisServer.shut_down', 'FrovedisServer.shut_down', ([], {}), '()\n', (1275, 1277), 
False, 'from frovedis.exrpc.server import FrovedisServer\n')] |
# -- coding: utf-8 --
# Copyright 2018 <NAME> <<EMAIL>>
"""
Library to handle SPM data.
This is the core module of all images retrieved by SPM and ToF-SIMS.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import scipy.optimize
import skimage
import skimage.exposure
import skimage.filters
import scipy.interpolate
from skimage import transform as tf
import copy
from .utils import CDF, funit
import sys
import matplotlib as mpl
import warnings
from .utils.misc import PB
try:
from skimage.filters import threshold_local
except:
# For compatibility with old versions of skimage
from skimage.filters import threshold_adaptive as threshold_local
class SPM_image:
"""
Main class to handle SPM images.
This class contains the pixels data of the images as well as it's real size.
It also provides a lot of tools to correct and perform various analysis and tasks on the image.
"""
def __init__(self, BIN, channel='Topography',
corr=None, real=None, zscale='?', _type='Unknown'):
"""
Create a new SPM_image
Parameters
----------
BIN : 2D numpy array
The pixel values of the image as a 2D numpy array
channel : string
The name of the channel. What does the image represents?
corr : string or None
'slope' : correct the SPM image for its slope (see pySPM.SPM.SPM_image.correct_slope)
'lines' : correct the SPM image for its lines (see pySPM.SPM.SPM_image.correct_lines)
'plane' : correct the SPM image by plane fitting (see pySPM.SPM.SPM_image.correct_plane)
real : None or dictionary
Information about the real size of the image {'x':width,'y':height,'unit':unit_name}
zscale : string
Unit used to describe the z-scale. (units of the data of BIN)
_type : string
represent the type of measurement
"""
self.channel = channel
self.direction = 'Unknown'
self.size = {'pixels': {'x': BIN.shape[1], 'y': BIN.shape[0]}}
if not real is None:
self.size['real'] = real
else:
self.size['real'] = {'unit': 'pixels',
'x': BIN.shape[1], 'y': BIN.shape[0]}
if not 'unit' in self.size['real']:
self.size['real']['unit'] = 'px'
self.pixels = BIN
self.type = _type
self.zscale = zscale
if corr is not None:
if corr.lower() == 'slope':
self.correct_slope()
elif corr.lower() == 'lines':
self.correct_lines()
elif corr.lower() == 'plane':
self.correct_plane()
def __add__(self, b):
"""
Add up two images. This is a low level function and no check is performed to proof that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels += b.pixels
New.channel += " + "+b.channel
elif type(b) in [int, float]:
New.pixels += b
New.channels += " + {:.2f}".format(b)
return New
def __sub__(self, b):
"""
Subtract two images. This is a low level function and no check is performed to proof that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels -= b.pixels
New.channel += " - "+b.channel
elif type(b) in [int, float]:
New.pixels -= b
New.channels += " - {:.2f}".format(b)
return New
def __mul__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels *= b.pixels
New.channel = "({})*{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels *= b
New.channels = "({})*{:.2f}".format(New.channel,b)
return New
def __div__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels /= b.pixels
New.channel = "({})/{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels /= b
New.channels = "({})/{:.2f}".format(New.channel,b)
return New
def pxs(self):
"""
Return the pixel size
"""
fxy = {xy: funit(self.size['real'][xy], self.size['real']['unit']) for xy in 'xy'}
return [(fxy[xy]['value']/self.size['pixels'][xy], fxy[xy]['unit']) for xy in 'xy']
    def add_scale(self, length, ax=None, height=20, margin=5, color='w', loc=4, text=True, pixels=None, fontsize=20, edge_color='k', edge_width=3):
        """
        Display a scale marker on an existing image
        Parameters
        ----------
        length : float
            The length of the scale in real units
        ax : matplotlib axis
            if None the current axis will be taken (plt.gca())
        height : int
            The height of the scale bar in pixels
        margin : int
            margin (in display points) between the bar and the plot border
        color : string
            The color used to display the scale bar
        loc : int or (x, y) tuple/list
            The location of the scale bar.
            1 : top right
            2 : top left
            3 : bottom left
            4 : bottom right
            A tuple/list is interpreted as data coordinates of the bar center.
        text : bool
            display the size of the scale on top of it?
        pixels : bool
            Is the image plotted in ax with a x/y scale in pixels?
        fontsize : float
            The fontsize used to display the text
        edge_color, edge_width :
            outline stroke applied around the label text
        Example
        -------
        >>> img = pySPM.SPM_image()
        >>> img.show()
        >>> img.add_scale(50e-6, pixels=False);
        Add a scale of 50 μm on an image displayed with real units
        >>> img = pySPM.SPM_image()
        >>> img.show(pixels=True)
        >>> img.add_scale(50e-6);
        Add a scale of 50 μm on an image displayed in pixels
        """
        import matplotlib.patches
        import matplotlib.patheffects as PathEffects
        # Fraction of the image width covered by the bar, and its axes-fraction height.
        fL = length/self.size['real']['x']
        L = self.size['pixels']['x']*fL
        fH = height/self.size['pixels']['y']
        if ax is None:
            ax = plt.gca()
        # show() tags the axis with isPixel/flipped; reuse them when present.
        if pixels is None:
            if hasattr(ax, 'isPixel'):
                pixels = ax.isPixel
            else:
                pixels = False
        flipped = False
        if hasattr(ax, 'flipped'):
            flipped = ax.flipped
        if type(loc) is int:
            # Corner placement: anchor in axes coordinates, then nudge by the margin.
            assert loc in [1, 2, 3, 4]
            ref = ax.transAxes.transform({1:(1-fL,0),2:(0,0),3:(0,1-fH),4:(1-fL,1-fH)}[loc])
            if loc in [2,3]:
                ref[0] += margin
            else:
                ref[0] -= margin
            if loc in [1,2]:
                ref[1] += margin
            else:
                ref[1] -= margin
        else:
            # Explicit placement: center the bar on the given data coordinates.
            assert type(loc) in [tuple, list]
            assert len(loc)==2
            ref = ax.transData.transform(loc) + ax.transAxes.transform((-fL/2,-fH/2)) - ax.transAxes.transform((0,0))
        inv = ax.transData.inverted()
        ref = inv.transform(ref)
        # Bar size converted from axes fraction to data units.
        WH = inv.transform(ax.transAxes.transform((fL,fH)))-inv.transform(ax.transAxes.transform((0,0)))
        rect = ax.add_patch(matplotlib.patches.Rectangle(ref, width=WH[0], height=WH[1], color=color))
        if text:
            r = funit(length, self.size['real']['unit'])
            if r['unit'][0] == 'u':
                r['unit'] = '$\\mu$' + r['unit'][1:]
            if loc in [3,4]:
                label_ref = [ref[0]+WH[0]/2, ref[1]]
                ann = ax.annotate("{value:.01f} {unit}".format(**r),
                                  label_ref, color=color,
                                  fontsize=fontsize, va="top", ha="center")
            else:
                label_ref = [ref[0]+WH[0]/2, ref[1]+WH[1]]
                ann = ax.annotate("{value:.01f} {unit}".format(**r),
                                  label_ref, color=color,
                                  fontsize=fontsize, va="bottom", ha="center")
            ann.set_path_effects([PathEffects.withStroke(linewidth=edge_width, foreground=edge_color)])
    def offset(self, profiles, width=1, ax=None, col='w', inline=True, **kargs):
        """
        Correct an image by offsetting each row individually in order that the lines passed as argument in "profiles" becomes flat.
        Parameters
        ----------
        profiles: list of list
            each sublist represent a line as [x1, y1, x2, y2] in pixels known to be flat
        width : int, float
            the line width in pixels used for better statistics
        ax : matplotlib axis or None
            If not None, axis in which the profiles will be plotted in
        inline : bool
            If True perform the correction on the current object, otherwise return a new image
        col : string
            matrplotlib color used to plot the profiles (if ax is not None)
        labels : bool
            display a label number with each profile
        **kargs: arguments passed further to get_row_profile.
            axPixels: set to True if you axis "ax" have the data plotted in pixel instead of real distance
        Example
        -------
        Exampel if the data are plotted in pixels:
        >>> topo = pySPM.SPM_image(...)
        >>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        >>> topoC = topo.offset([[150, 0, 220, 255]], inline=False,axPixels=True)
        >>> topo.show(pixels=True, ax=ax[0])
        >>> topoC.show(ax=ax[1]);
        Example if the data are plotted with real units
        >>> topo = pySPM.SPM_image(...)
        >>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        >>> topoC = topo.offset([[150, 0, 220, 255]], inline=False)
        >>> topo.show(ax=ax[0])
        >>> topoC.show(ax=ax[1]);
        """
        # Per-row height step accumulated over all profiles, with a counter
        # so rows crossed by several profiles are averaged.
        offset = np.zeros(self.pixels.shape[0])
        counts = np.zeros(self.pixels.shape[0])
        for i, p in enumerate(profiles):
            if kargs.get('labels', False):
                y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, label=str(i), **kargs)
            else:
                y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, **kargs)
            counts[y] += 1
            # Row-to-row differences of the profile heights.
            offset[y[1:]] += np.diff(D)
        counts[counts == 0] = 1
        offset = offset/counts
        # Integrate the per-row steps into absolute per-row offsets.
        offset = np.cumsum(offset)
        offset = offset.reshape((self.pixels.shape[0], 1))
        if inline:
            # flipud: profiles use image (top-left origin) row indices.
            self.pixels = self.pixels - \
                np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
            return self
        else:
            C = copy.deepcopy(self)
            C.pixels = self.pixels - \
                np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
            return C
def pxRect2Real(self, xy, width, height):
"""
Transform a xy, width, height data in pixels to an equivalentz one with real units
"""
ll = self.px2real(xy[0],xy[1])
ur = self.px2real(xy[0]+width,xy[1]+height)
return ll,ur[0]-ll[0],ur[1]-ll[1]
    def get_row_profile(self, x1, y1, x2, y2, width=1, col='C1', ax=None, alpha=0, **kargs):
        """
        Get a profile per row along a given line. This function is mainly useful for the function offset.
        x1, y1, x2, y2: int
            coordinates of the line.
        width : int
            the width of the line used for statistics (in pixels)
        col: string
            color used to plot the line position
        ax : matplotlib axis
            axis in which the lines position will plotted
        alpha : float
            The alpha channel of the line color (≥0 and ≤1)
        **kargs:
            line style arguments: linewidth, color and linestyle
            axis units: axPixels set to True if ax has the image plotted in pixels.
        Returns
        -------
        Y coordinates : 1D numpy array
            distance along the profile starting at 0
        Z coordinates : 1D numpy array
            profile
        """
        # Forward only the recognized line-style options to ax.plot.
        plotargs = { key: kargs[key] for key in ['linewidth', 'color', 'linestyle'] if key in kargs }
        # Ensure the line is traversed with increasing y.
        if y2 < y1:
            x1, y1, x2, y2 = x2, y2, x1, y1
        if ax is not None:
            # (dx, dy) is the half-width vector perpendicular to the line,
            # used to draw the end caps of the sampled band.
            d = np.sqrt((x2-x1)**2+(y2-y1)**2)
            dx = -width/2*(y2-y1)/d
            dy = width/2*(x2-x1)/d
            if kargs.get('axPixels', False):
                ax.plot([x1-dx, x1+dx], [y1-dy, y1+dy], col)
                ax.plot([x2-dx, x2+dx], [y2-dy, y2+dy], col)
                ax.plot((x1, x2), (y1, y2), col, **plotargs)
                if kargs.get('label', False):
                    ax.annotate(kargs.get('label'), (.5*(x1+x2),.5*(y1+y2)), color=col)
                if alpha>0:
                    import matplotlib.patches
                    ax.add_patch(matplotlib.patches.Rectangle((x1+dx,y1+dy),width, d, -np.degrees(np.arctan2(x2-x1,y2-y1)), color=col, alpha=alpha))
            else:
                # Same drawing, converted from pixels to real units
                # (y is flipped: image origin is top-left).
                h = self.pixels.shape[0]
                pxs = self.size['real']['x'] / self.pixels.shape[1]
                pys = self.size['real']['y'] / h
                ax.plot([(x1-dx)*pxs, (x1+dx)*pxs], [(h-(y1-dy))*pys, (h-(y1+dy))*pys], col)
                ax.plot([(x2-dx)*pxs, (x2+dx)*pxs], [(h-(y2-dy))*pys, (h-(y2+dy))*pys], col)
                ax.plot((x1*pxs, x2*pxs), ((h-y1)*pys, (h-y2)*pys), col, **plotargs)
                if kargs.get('label', False):
                    ax.annotate(kargs.get('label'), (.5*(x1+x2)*pxs,.5*(2*h-y1-y2)*pys), color=col)
                if alpha>0:
                    import matplotlib.patches
                    W = np.sqrt((2*dx*pxs)**2+(2*dy*pys)**2)
                    L = np.sqrt(((x2-x1)*pxs)**2+((y2-y1)*pys)**2)
                    ax.add_patch(matplotlib.patches.Rectangle(((x1+dx)*pxs,(y1+dy)*pys), W, L, -np.degrees(np.arctan2((x2-x1)*pxs,(y2-y1)*pys)), color=col, alpha=alpha))
        # Interpolator over the (vertically flipped) image.
        x = np.arange(self.pixels.shape[1])
        y = np.arange(self.pixels.shape[0])
        I = scipy.interpolate.interp2d(x, y, np.flipud(self.pixels))
        # One sample per row between y1 and y2, averaged over "width"
        # parallel lines spaced one pixel apart.
        Y = np.arange(y1, y2+1)
        V = np.zeros(len(Y))
        for w in np.arange(width):
            xl = np.linspace(x1-(width-1)/2.+w, x2-(width-1)/2.+w, len(Y))
            for i in range(len(Y)):
                Z = I(xl[i], Y[i])
                V[i] += Z
        return Y, V/width
def correct_median_diff(self, inline=True):
"""
Correct the image with the median difference
"""
N = self.pixels
# Difference of the pixel between two consecutive row
N2 = np.vstack([N[1:, :], N[-1:, :]])-N
# Take the median of the difference and cumsum them
C = np.cumsum(np.median(N2, axis=1))
# Extend the vector to a matrix (row copy)
D = np.tile(C, (N.shape[0], 1)).T
if inline:
self.pixels = N-D
else:
New = copy.deepcopy(self)
New.pixels = N-D
return New
def correct_slope(self, inline=True):
"""
Correct the image by subtracting a fitted slope along the y-axis
"""
s = np.mean(self.pixels, axis=1)
i = np.arange(len(s))
fit = np.polyfit(i, s, 1)
if inline:
self.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return New
def correct_plane(self, inline=True, mask=None):
"""
Correct the image by subtracting a fitted 2D-plane on the data
Parameters
----------
inline : bool
If True the data of the current image will be updated otherwise a new image is created
mask : None or 2D numpy array
If not None define on which pixels the data should be taken.
"""
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
X0, Y0 = np.meshgrid(x, y)
Z0 = self.pixels
if mask is not None:
X = X0[mask]
Y = Y0[mask]
Z = Z0[mask]
else:
X = X0
Y = Y0
Z = Z0
A = np.column_stack((np.ones(Z.ravel().size), X.ravel(), Y.ravel()))
c, resid, rank, sigma = np.linalg.lstsq(A, Z.ravel(), rcond=-1)
if inline:
self.pixels -= c[0] * \
np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return self
else:
New = copy.deepcopy(self)
New.pixels -= c[0]*np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return New
def correct_lines(self, inline=True):
"""
Subtract the average of each line for the image.
if inline is True the current data are updated otherwise a new image with the corrected data is returned
"""
if inline:
self.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return New
def dist_v2(self, pixel=False):
"""
Return a 2D array with the distance between each pixel and the closest border.
Might be usefull for FFT filtering
"""
if pixel:
dx = 1
dy = 1
else:
dx = self.size['real']['x']/self.size['pixels']['x']
dy = self.size['real']['y']/self.size['pixels']['y']
x2 = np.arange(self.size['pixels']['x'])
x2 = (np.minimum(x2, self.size['pixels']['x']-x2) * dx)**2
y2 = np.arange(self.size['pixels']['y'])
y2 = (np.minimum(y2, self.size['pixels']['y'] - y2) * dy)**2
X, Y = np.meshgrid(x2, y2)
return np.sqrt(X+Y)
    def inv_calc_flat(self, d, l=0.1):
        """
        Function used for inverse MFM calculation (inspired from http://qmfm.empa.ch/qmfm/)
        The function is in its early devlopment stage as not used by the developed.
        Parameters
        ----------
        d : float
            Height distance in the input data
        l : float
            Tikhonov parameter for the deconvolution
        """
        work_image = self.pixels
        # NOTE(review): ny, nx, dx and dy are computed but never used below.
        ny, nx = self.pixels.shape
        dx = self.size['real']['x']/self.size['pixels']['x']
        dy = self.size['real']['y']/self.size['pixels']['y']
        # Distance map used as frequency-like coordinate; avoid a zero at the origin.
        k = self.dist_v2()
        k[0, 0] = 1e-10
        # NOTE: the local name "tf" (transfer function) shadows the
        # module-level "skimage.transform as tf" alias inside this method.
        tf = np.exp(-d*k)
        tf[0, 0] = np.mean(tf)
        tf /= 2
        tf *= 1-np.exp(-d * k)
        # Tikhonov-regularized inverse of the transfer function.
        recon_tf = np.ones(tf.shape) / (tf+l*np.ones(tf.shape) / np.conj(tf))
        tf *= recon_tf
        # Deconvolve in Fourier space and return the real part.
        return np.real(np.fft.ifft2(np.fft.fft2(work_image)*recon_tf))
def get_extent(self):
"""
Get the image extent in real data
"""
if 'recorded' in self.size:
W = self.size['recorded']['real']['x']
H = self.size['recorded']['real']['y']
else:
W = self.size['real']['x']
H = self.size['real']['y']
return (0, W, 0, H)
    def show(self, ax=None, sig=None, cmap=None, title=None,
             adaptive=False, dmin=0, dmax=0, pixels=False, flip=False, wrap=None, mul=1, symmetric=False, **kargs):
        """
        Function to display the image with a lot of parametrization
        Parameters
        ----------
        ax : matplotlib axis or None
            matplotlib axis if given otherwise current axis will be used (plt.gca())
        sig : float
            sigma values to adjust the contrast range around the mean ±sig times the standard-deviation
        cmap : string
            colormap name used. By default a gray map is used. If the zscale of the data are in 'meter' (i.e. topography data) the 'hot' colormap is used
        title : string
            The title of the plot. By default is the channel name
        adaptive : bool
            The color scale used is linear. If adaptive is True a non linear color scale is used in order that each color is used with the same amount.
        dmin : float
            minimum value adjustment used for the colorscale
        dmax: float
            maximum value adjustment used for the colorscale
        pixels : bool
            Display the image with x/y-labels with real unit. If pixels is True, the axes are in pixels
        flip : bool
            Flip the image upside-down
        wrap : Nont or int
            wrap the title to a width of wrap chars
        symmetric : bool
            If True will place the middle of the colorscale to the value 0.
            This is specially usefull for diverging colormaps such as : BrBG, bwr, coolwarm, seismiv, spectral, etc.
        level : float
            level should be ≥0 and <50. Adjust the lower and upper colorscale to level% and (100-level)% of the data range.
            e.g. if level=1, the colorscale will display 1-99% of the data range
        vmin : float
            Minimum value used for the colorscale
        vmax : flaot
            Maximum value used for the colorscale
        Returns
        -------
        matplotlib.image.AxesImage
            matplolib axis instance returned by imshow
        Examples
        --------
        >>> topo = pySPM.SPM_image(...)
        >>> fig, (ax, ax2) = plt.subplots(2, 3, figsize=(15, 10))
        >>> topo.show(ax=ax[0], cmap='gray', title="color map=\"gray\"")
        >>> topo.show(ax=ax[1], sig=2, title="standard deviation=2")
        >>> topo.show(ax=ax[2], adaptive=True, title="Adaptive colormap")
        >>> topo.show(ax=ax2[0], dmin=4e-8, cmap='gray', title="raise the lowest value for the colormap of +40nm")
        >>> topo.show(ax=ax2[1], dmin=3e-8, dmax=-3e-8, cmap='gray',title="raise lower of +30nm and highest of -30nm")
        >>> topo.show(ax=ax2[2], pixels=True, title="Set axis value in pixels");
        """
        mpl.rc('axes', grid=False)
        if ax is None:
            ax = plt.gca()
        # Keep a back-reference to the image on the axis (used by add_scale & co).
        ax.src = self
        if title == None:
            title = u"{0} - {1}".format(self.type, self.channel)
        if wrap is not None:
            title = "\n".join([title[i*wrap:(i+1)*wrap]
                               for i in range(int(len(title)/wrap)+1)])
        # SI prefixes; index 6 (the space) means "no prefix".
        unit = self.size['real']['unit']
        sunit = 'afpnum kMGTPE'
        if len(unit) == 1 or unit in ['pixels']:
            isunit = 6
        elif unit[0] in sunit:
            isunit = sunit.find(unit[0])
            unit = unit[1:]
        else:
            isunit = 6
        W = self.size['real']['x']
        H = self.size['real']['y']
        # Rescale the axis values by a power of 1000 to keep readable numbers.
        fact = int(np.floor(np.log(W)/np.log(10)/3))
        isunit += fact
        W, H = W/10**(fact*3), H/10**(fact*3)
        if cmap == None:
            cmap = 'gray'
            if unit == 'm' and self.channel == "Topography":
                cmap = 'hot'
        mi, ma = np.nanmin(self.pixels), np.nanmax(self.pixels)
        if adaptive:
            # Histogram equalization: every color covers the same pixel count.
            img = np.asarray(256**2*(self.pixels-mi)/(ma-mi), dtype=np.uint16)
            mi, ma = 0, 1
            img = skimage.exposure.equalize_adapthist(img, clip_limit=0.03)
        else:
            img = mul*self.pixels
            mi *= mul
            ma *= mul
        # Color range: full data range (± dmin/dmax) or mean ± sig*std.
        if sig == None:
            vmin = mi+dmin
            vmax = ma+dmax
        else:
            std = np.nanstd(img)
            avg = np.nanmean(img)
            vmin = avg - sig * std
            vmax = avg + sig * std
        if 'level' in kargs:
            if kargs['level'] < 0 or kargs['level']>=50:
                raise ValueError("The level shoud have a value in [0,50)")
            vmax = np.percentile(img, 100-kargs['level'])
            vmin = np.percentile(img, kargs['level'])
            del kargs['level']
        if 'vmin' in kargs:
            vmin = kargs['vmin']
            del kargs['vmin']
        if 'vmax' in kargs:
            vmax = kargs['vmax']
            del kargs['vmax']
        if symmetric:
            vmax = abs(max(vmin,vmax))
            vmin = -vmax
        # Tag the axis with the display mode so later overlays (add_scale,
        # get_row_profile) know how the data were plotted.
        if not flip:
            ax.flipped = False
            if pixels:
                ax.isPixel = True
                r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
            else:
                ax.isPixel = False
                r = ax.imshow(np.flipud(img), extent=[0, W, 0, H], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
        else:
            ax.flipped = True
            if pixels:
                ax.isPixel = True
                r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
            else:
                ax.isPixel = False
                r = ax.imshow(np.flipud(img), cmap=cmap, extent=[0, W, 0, H], vmin=vmin, vmax=vmax, **kargs)
        if pixels:
            ax.set_xlim((0, self.pixels.shape[1]))
            if flip:
                ax.set_ylim((0, self.pixels.shape[0]))
            else:
                ax.set_ylim((self.pixels.shape[0], 0))
        else:
            ax.set_xlim((0,W))
            if flip:
                ax.set_ylim((H,0))
            else:
                ax.set_ylim((0,H))
        if not pixels:
            if isunit != 6:
                u = sunit[isunit]
                if u == 'u':
                    u = '$\\mu$'
                ax.set_xlabel(u'x [{0}{1}]'.format(u, unit))
                ax.set_ylabel(u'y [{0}{1}]'.format(u, unit))
            else:
                ax.set_xlabel(u'x [{0}]'.format(unit))
                ax.set_ylabel(u'y [{0}]'.format(unit))
        if title != None:
            ax.set_title(title)
        return r
def real2px(self, x, y):
"""
Transform a real (x,y) value in pixels
Units should be the same as the one plotted by pySPM.SPM_image.show
"""
return self.real2pixels(x,y)
def real2pixels(self, x, y, float=False):
"""
Transform a real (x,y) value in pixels
Units should be the same as the one plotted by pySPM.SPM_image.show
"""
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
if not float:
px = np.digitize(x, np.linspace(0,self.size['real']['x']/(10**fact),self.pixels.shape[1]), right=True)
py = np.digitize(y, np.linspace(0,self.size['real']['y']/(10**fact),self.pixels.shape[0]), right=False)
else:
px = x*(self.pixels.shape[1]-1)/(self.size['real']['x']/(10**fact))
py = y*(self.pixels.shape[0]-1)/(self.size['real']['y']/(10**fact))
return px, py
def px2real(self, x, y):
"""
Transform a (x,y) value from pixels to real
Units are the same as the one plotted by pySPM.SPM_image.show
"""
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
rx = x*self.size['real']['x']/(10**fact)/self.pixels.shape[1]
ry = (self.pixels.shape[0]-y)*self.size['real']['y']/(10**fact)/self.pixels.shape[0]
return rx, ry
    def circular_profile(self, x0, y0, Ra=1, Rn=0, width=1, N=20, A=0, B=360,\
        cmap='jet', axImg=None, axPolar=None, axProfile=None, plotProfileEvery=1,\
        xtransf=lambda x: x*1e9, ytransf=lambda x:x*1e9,\
        ToFcorr=False, fit=lambda x, *p: p[3]+p[2]*CDF(x, *p[:2]), p0=None, errors=False, bounds=(-np.inf, np.inf), fakefit=False, **kargs):
        """
        Create radial profiles from point x0,y0 with length Ra (outer radius) and Rn (negative Radius).
        Start from angle A° to angle B° with N profiles.
        If you want to apply the ToF-correction, please set ToFcorr to the number of scans used to record the ToF-SIMS image.
        Return the fitting uncertainty on sigma if errors is set to True
        The fitting function can be adjusted by fit and the default parameters by p0 which is an array of function where the first parameter passed will be the x-values and the second the y-values.

        NOTE(review): the `bounds` parameter is accepted but never forwarded
        to scipy.optimize.curve_fit — confirm whether it should be.
        """
        from matplotlib import colors, cm
        # Create a colormap for each profile
        CM = plt.get_cmap(cmap)
        cNorm = colors.Normalize(vmin=0, vmax=N)
        scalarMap = cm.ScalarMappable(norm=cNorm, cmap=CM)
        res = []
        cov = []
        angles = []
        assert A<B
        for i, angle in enumerate(np.linspace(A, B, N)):
            a = np.radians(angle)
            angles.append(a)
            # Extract a profile from (x0,y0)-Rn*(cos a, -sin a) to +Ra along angle a
            # (y is inverted because pixel rows grow downward).
            l, p = self.get_profile(
                x0-Rn*np.cos(a),
                y0+Rn*np.sin(a),
                x0+Ra*np.cos(a),
                y0-Ra*np.sin(a),
                ax=axImg, width=width, color=scalarMap.to_rgba(i), **kargs)
            if width==0:
                profile = p
            else:
                profile = np.mean(p, axis=1)
            if ToFcorr:
                # Poisson dead-time correction for ToF-SIMS counts
                profile = -np.log(1.001-profile/ToFcorr)
            if p0 is None:
                # Guess initial fit parameters from the two halves of the profile
                AC = np.mean(profile[:len(l)//2])
                AE = np.mean(profile[len(l)//2:])
                if AC<AE:
                    p0 = [l[len(l)//2], 5*(l[1]-l[0]), np.max(profile)-np.min(profile), np.min(profile) ]
                else:
                    p0 = [l[len(l)//2], 5*(l[1]-l[0]), -np.max(profile)+np.min(profile), np.max(profile) ]
            else:
                # Entries of p0 may be callables computing the guess from (l, profile)
                for j,p in enumerate(p0):
                    if callable(p):
                        p0[j] = p(l,profile)
            if kargs.get('debug',False):
                print("calculate fit parameters are", p0)
            if not fakefit:
                # p0 is overwritten with the fitted parameters and serves as
                # the warm start for the next angle.
                p0, pcov = scipy.optimize.curve_fit(fit, l , profile, p0)
            else:
                pcov = np.zeros((len(p0),len(p0)))
            res.append(p0)
            cov.append([np.sqrt(abs(pcov[i,i])) for i in range(len(p0))])
            if axProfile and i%plotProfileEvery == 0:
                axProfile.plot(xtransf(l-p0[0]), profile, color=scalarMap.to_rgba(i), linestyle=':')
                axProfile.plot(xtransf(l-p0[0]), fit(l,*p0), color=scalarMap.to_rgba(i))
        # close loop
        if A%360 == B%360:
            angles.append(angles[0])
            res.append(res[0])
            cov.append(cov[0])
        # Plot polar
        angles = np.array(angles)
        res = np.array(res)
        cov = np.array(cov)
        # FWHM = 2*sqrt(2*ln 2) * sigma for a Gaussian
        fact = 2*np.sqrt(2*np.log(2))
        if axPolar:
            axPolar.plot(angles, ytransf(res[:,1]), color=kargs.get('sig_color','C0'), label="$\\sigma$")
            axPolar.plot(angles, ytransf(fact*res[:,1]), color=kargs.get('fwhm_color','C1'), label="FWHM")
            if errors:
                axPolar.fill_between(angles, ytransf(res[:,1]-cov[:,1]),ytransf(res[:,1]+cov[:,1]), color=kargs.get('sig_color','C0'), alpha=kargs.get('fillalpha',.5))
                axPolar.fill_between(angles, fact*ytransf(res[:, 1]-cov[:, 1]), ytransf(res[:, 1]+cov[:, 1]), color=kargs.get('fwhm_color', 'C1'), alpha=kargs.get('fillalpha',.5))
        return angles, res, cov
    def get_profile(self, x1, y1, x2, y2, width=0, ax=None, pixels=True, color='w', axPixels=None, **kargs):
        """
        retrieve the profile of the image between pixel x1,y1 and x2,y2
        Parameters
        ----------
        x1, y1, x2, y2 : ints
            coordinates for the profile
        ax : matplotlib axis
            defines the matplotlib axis on which the position of the profile should be drawn (if not None)
        width : int
            the width of the profile (for averaging/statistics) in pixels
        color : string
            color used to plot the profiles lines
        axPixels : bool
            If True the image plotted in the ax axis is displayed in pixels
        Returns
        -------
        x data : 1D numpy array
        profile : 1D numpy array
        """
        if kargs.get('debug',False):
            print("get_profile input coordinates:", x1, y1, x2, y2)
        # If the display axis carries an isPixel flag, use it as default
        if ax is not None and axPixels is None:
            if hasattr(ax, 'isPixel'):
                axPixels = ax.isPixel
        if axPixels is None:
            axPixels = pixels
        # SI-prefix exponent of the image width (multiple of 3)
        W = self.size['real']['x']
        fact = int(np.floor(np.log(W)/np.log(10)/3))*3
        if not pixels:
            if kargs.get('debug', False):
                print("Image range (real scale):", self.size['real']['x']/(10**fact), self.size['real']['y']/(10**fact))
            # Convert real coordinates to (fractional) pixels and flip y
            x1, y1 = self.real2pixels(x1, y1, float=True)
            x2, y2 = self.real2pixels(x2, y2, float=True)
            y1 = self.pixels.shape[0]-y1
            y2 = self.pixels.shape[0]-y2
            if kargs.get('debug', False):
                print("Pixel coordinates:", x1, y1, x2, y2)
            if not axPixels:
                # transx/transy map pixel coordinates back to real units for drawing
                xvalues, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color,\
                    transx = lambda x: x*(self.size['real']['x']/(10**fact))/self.pixels.shape[1],\
                    transy = lambda x: (self.pixels.shape[0]-x)*(self.size['real']['y']/(10**fact))/self.pixels.shape[0],\
                    **kargs)
            else:
                values, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color, **kargs)
        else:
            if axPixels:
                values, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color, **kargs)
            else:
                values, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color,\
                    transx = lambda x: x*(self.size['real']['x']/(10**fact))/self.pixels.shape[1],\
                    transy = lambda x: (self.pixels.shape[0]-x)*(self.size['real']['y']/(10**fact))/self.pixels.shape[0],\
                    **kargs)
        # NOTE(review): the x-values returned by the module-level get_profile
        # above are discarded; they are recomputed here in real units.
        dx = (x2-x1)*self.size['real']['x']/self.size['pixels']['x']
        dy = (y2-y1)*self.size['real']['y']/self.size['pixels']['y']
        rd = np.sqrt(dx**2+dy**2)
        xvalues = np.linspace(0, rd, len(p))
        return xvalues, p
    def plot_profile(self, x1, y1, x2, y2, width=0, ax=None, pixels=True, img=None, imgColor='w', ztransf=lambda x: x, zunit=None, **kargs):
        """
        Retrieve and plot a profile from an image
        Parameters
        ----------
        x1, y1, x2, y2 : int
            coordinate of the profile in real size or in pixels (if pixels is True)
        width : float
            the width of the profiles in pixels for better statistics
        ax : matplotlib axis
            The axis in which the profile will be plotted
        pixels : bool
            If True the coordinates are given in pixels and not in real units
        img : matplotlib axis
            The axis in which the profile position will be drawn
        imgColor : string
            The color used to display the profile positions
        ztransf : function
            function to transform the profile data. This can be used to scale the data.
            Most profiles are retrieved in 'm' and a 'nm' value can be used by using ztransf=lambda x: x*1e9
        zunit : string
            the zunit name used if ztransf is used
        color : string
            The color of the profile
        col : string
            can be used instead of color
        stdplot : bool
            If True display the ±nσ plots where n is given by the sig parameter
        sig : int
            The number of sigmas used in stdplot
        label : string
            The label used for plotting the profile (useful if you perform a ax.legend() afterwards)
        Returns
        -------
        dictionary : {'plot': matplotlib_plot_instance, 'l': profile_xaxis, 'z': profile_yaxis}
        Examples
        --------
        >>> topo = pySPM.SPM_image(...)
        >>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        >>> topo.plot_profile(70, 100, 170, 200, ax=ax[1], img=ax[0], ztransf=lambda x:x*1e9, zunit='nm');
        >>> topo.show(ax=ax[0], pixels=True);
        """
        col = kargs.get('color',kargs.get('col','C0'))
        W = self.size['real']['x']
        # SI-prefix exponent of the image width (multiple of 3)
        fact = int(np.floor(np.log(W)/np.log(10)/3))*3
        if ax == None:
            ax = plt.gca()
        xvalues, p = self.get_profile(x1, y1, x2, y2, width=width, color=imgColor, ax=img, pixels=pixels, **kargs)
        d = np.sqrt((x2-x1)**2+(y2-y1)**2)
        dx = (x2-x1)
        dy = (y2-y1)
        if pixels:
            rd = d
            u = ''
            unit = 'px'
        else:
            # Derive the displayed unit prefix from the stored unit and fact
            unit = self.size['real']['unit']
            # SI prefixes from atto (a) to exa (E); index 6 (the space) = no prefix
            sunit = 'afpnum kMGTPE'
            if len(unit) == 1:
                isunit = 6
            elif unit[0] in sunit:
                isunit = sunit.find(unit[0])
                unit = unit[1:]
            else:
                isunit = 6
            isunit += fact//3
            if isunit != 6:
                u = sunit[isunit]
            else:
                u=''
            if u == 'u':
                u = '$\\mu$'
            rd = np.sqrt(dx**2+dy**2)
        xvalues = np.linspace(0, rd, len(p))
        lab = kargs.get("label", "")
        if width < 2:
            profile = ztransf(p)
        else:
            # Wide profile: plot the mean, optionally with ±nσ bands
            profile = ztransf(np.mean(p, axis=1))
            s = np.std(p)
            if kargs.get('stdplot', False):
                for ns in range(1, kargs.get('sig', 2)+1):
                    ax.fill_between(xvalues, profile-ns*s, profile+ns*s, color=col, alpha=.2, label=[lab+' ($\\sigma,\ldots {}\\sigma$)'.format(kargs.get('sig',2)),None][ns>1])
        Plot = ax.plot(xvalues, profile, color=col, linewidth=kargs.get('linewidth',1),linestyle=kargs.get('linestyle','-'), label=lab+[' (mean)',''][width<2])
        if kargs.get('min',False):
            minStyle = kargs.get('minStyle', kargs.get('minmaxStyle', '--'))
            minColor = kargs.get('minColor', kargs.get('minmaxColor', col))
            minMarker = kargs.get('minMarker', kargs.get('minmaxMarker', ''))
            ax.plot(xvalues, np.min(p, axis=1), color=minColor, linewidth=kargs.get('linewidth',1),linestyle=minStyle, marker=minMarker, label=lab+' (min)')
        if kargs.get('max', False):
            maxStyle = kargs.get('maxStyle',kargs.get('minmaxStyle','--'))
            maxColor = kargs.get('maxColor',kargs.get('minmaxColor',col))
            maxMarker = kargs.get('maxMarker',kargs.get('minmaxMarker',''))
            ax.plot(xvalues, np.max(p, axis=1), color=maxColor, linestyle=maxStyle, linewidth=kargs.get('linewidth',1), marker=maxMarker, label=lab+' (max)')
        ax.set_xlabel("Distance [{1}{0}]".format(unit, u))
        if zunit is not None:
            ax.set_ylabel("{1} [{0}]".format(zunit, self.channel))
        else:
            ax.set_ylabel("{1} [{0}]".format(self.zscale, self.channel))
        return {'plot': Plot, 'l': xvalues, 'z': profile}
def get_bin_threshold(self, percent, high=True, adaptive=False, binary=True, img=False):
"""
Threshold the image into binary values
Parameters
----------
percent : float
The percentage where the thresholding is made
high : bool
If high a value of 1 is returned for values > percent
adaptive : bool
If True, performs an adaptive thresholding (see skimage.filters.threshold_adaptive)
binary : bool
If True return bool data (True/False) otherwise numeric (0/1)
img : bool
If True return a SPM_image otherwise a numpy array
"""
if adaptive:
if binary:
return self.pixels > threshold_local(self.pixels, percent)
return threshold_local(self.pixels, percent)
mi = np.min(self.pixels)
norm = (self.pixels-mi)/(np.max(self.pixels)-mi)
if high:
r = norm > percent
else:
r = norm < percent
if not img:
if binary:
return r
return np.ones(self.pixels.shape)*r
else:
I = copy.deepcopy(self)
I.channel = "Threshold from "+I.channel
if binary:
I.pixels = r
else:
I.pixels = np.ones(self.pixels.shape)*r
return I
    def spline_offset(self, X, Y, Z=None, inline=True, ax=None, output='img', **kargs):
        """
        subtract a spline interpolated by points coordinates.
        if Z is None, the image values will be used (default)

        Parameters
        ----------
        X, Y : sequences of int
            pixel coordinates of the anchor points
        Z : sequence of float or None
            values at the anchor points; taken from the image when None
        inline : bool
            if True subtract the spline in place and return it
        ax : matplotlib axis or None
            if given, draw the anchor points (and their index when num=True)
        output : 'img' or 'spline'
            for inline=False, return a corrected copy or the spline itself
        """
        if ax is not None:
            if 'num' in kargs and kargs['num']:
                text_color = 'k'
                if 'text_color' in kargs:
                    text_color = kargs['text_color']
                    del kargs['text_color']
                for i in range(len(X)):
                    # Put the label on the left when the point is close to the right edge
                    l = self.pixels.shape[1]-X[i] < 20
                    ax.annotate(str(i), (X[i], Y[i]), ([
                        5, -5][l], 0), textcoords='offset pixels', va="center", ha=["left", "right"][l], color=text_color)
                del kargs['num']
            ax.plot(X, Y, 'o', **kargs)
        import scipy.interpolate
        # Work on the flipped, min-shifted data so the row order matches the display
        T = np.flipud(self.pixels) - np.min(self.pixels)
        if Z is None:
            Z = [T[Y[i], X[i]] for i in range(len(X))]
        x = np.arange(self.pixels.shape[1])
        y = np.arange(self.pixels.shape[0])
        xx, yy = np.meshgrid(x, y)
        I = scipy.interpolate.SmoothBivariateSpline(X, Y, Z)
        z = I.ev(xx, yy)
        if inline:
            self.pixels -= z
            return z
        else:
            if output == 'img':
                New = copy.deepcopy(self)
                New.pixels -= z
                return New
            elif output == 'spline':
                return z
            else:
                raise ValueError(
                    "The output parameter should be either 'img' or 'spline'")
    def get_shadow_mask(self, angle, BIN=None, prog=False):
        """
        If an image is recorded with a beam incident with a certain angle, the topography will shadow the data.
        This function generates the shadow mask for a given topography and a given incident angle.
        Parameters
        ----------
        angle : float
            The incidence angle in degrees
        BIN : numpy array
            Data. If given will move the recorded pixels at the correct x,y positions
        prog : bool
            display a progressbar ?
        Note
        ----
        This function is old, might not be optimized or working properly
        """
        if BIN is not None:
            # Force a float copy so the input array is not modified
            BIN = BIN*1.0
        slope = np.tan(np.radians(angle))
        neg = False
        if slope < 0:
            # Negative slope: mirror the data, process, and mirror back at the end
            neg = True
            slope = -slope
            topo = np.fliplr(self.pixels)
            if BIN is not None:
                BIN = np.fliplr(BIN)
        else:
            topo = self.pixels
        # Real-space x axis in meters
        x = np.linspace(0, self.size['real']['x'], self.pixels.shape[1])
        if self.size['real']['unit'] == 'um':
            x *= 1e-6
        elif self.size['real']['unit'] == 'nm':
            x *= 1e-9
        mask = np.zeros(self.pixels.shape)
        AFM_bin_shadow = np.zeros(self.pixels.shape)
        Y = range(self.pixels.shape[0])
        if prog:
            Y = PB(Y)
        for yi in Y:
            for xi in range(self.pixels.shape[1]):
                cut = self.pixels.shape[1]-2
                # Ray leaving (x[xi], topo[yi, xi]) with the beam slope;
                # walk back until the ray dives below the topography.
                y_ray = slope*(x-x[xi]) + topo[yi, xi]
                while cut > xi and y_ray[cut] > topo[yi, cut]:
                    cut -= 1
                if xi == cut:
                    # Pixel not shadowed: keep the recorded value
                    if BIN is not None:
                        AFM_bin_shadow[yi, xi] = BIN[yi, xi]
                    continue
                # Cut has been found
                if BIN is not None:
                    # Intersect the ray with the topography segment [cut, cut+1]
                    x1 = x[cut]
                    x2 = x[cut+1]
                    y1 = topo[yi, cut]
                    y2 = topo[yi, cut+1]
                    x0 = x[xi]
                    y0 = topo[yi, xi]
                    if y2 == y1:
                        x_cut = (y1+slope*x0-y0)/slope
                        y_cut = y1
                    else:
                        numerator = x1/(x2-x1)+(y0-slope*x0-y1)/(y2-y1)
                        denominator = 1/(x2-x1)-slope/(y2-y1)
                        x_cut = numerator / denominator
                        y_cut = slope*(x_cut-x0)+y0
                    if x_cut >= x1 and x_cut <= x2:
                        # Linear interpolation of the recorded data at the intersection
                        y1 = BIN[yi, cut]
                        y2 = BIN[yi, cut+1]
                        yint = (((y2-y1)/(x2-x1))*(x_cut-x1))+y1
                    else:
                        yint = BIN[yi, xi]
                    AFM_bin_shadow[yi, xi] = yint
                mask[yi, xi] = 1
        if neg:
            mask = np.fliplr(mask)
            AFM_bin_shadow = np.fliplr(AFM_bin_shadow)
        if BIN is not None:
            return (mask, AFM_bin_shadow)
        return mask
def adjust_position(self, fixed):
"""
Shift the current pixels to match a fixed image.
The shift is determined by position where the cross-correlation is maximized.
"""
adj = copy.deepcopy(self)
cor = np.fft.fft2(fixed.pixels)
cor = np.abs(np.fft.ifft2(np.conj(cor) * np.fft.fft2(self.pixels)))
cor = cor / fixed.pixels.size
ypeak, xpeak = np.unravel_index(cor.argmax(), cor.shape)
shift = [-(ypeak-1), -(xpeak-1)]
adj.pixels = np.roll(self.pixels, shift[0], axis=0)
adj.pixels = np.roll(adj.pixels, shift[1], axis=1)
return adj
    def align(self, tform, cut=True):
        """
        Apply an Affine transform on the data
        Parameters
        ----------
        tform : skimage.transform
            the affine transform to perform
        cut : bool
            If True cut the data
        Returns
        -------
        the warped image when cut=False, otherwise (warped_image, crop_box)
        """
        New = copy.deepcopy(self)
        New.pixels = tf.warp(self.pixels, tform, preserve_range=True)
        if not cut:
            return New
        # Reuse `cut` as the crop box [llx, lly, urx, ury], shrunk by the
        # translation so only the valid (non-padded) region is kept.
        cut = [0, 0] + list(self.pixels.shape)
        if tform.translation[0] >= 0:
            cut[2] -= tform.translation[0]
        elif tform.translation[0] < 0:
            cut[0] -= tform.translation[0]
        if tform.translation[1] >= 0:
            cut[1] += tform.translation[1]
        elif tform.translation[1] < 0:
            cut[3] += tform.translation[1]
        cut = [int(x) for x in cut]
        New.cut(cut, inplace=True)
        return New, cut
def get_fft(self):
"""
return the FFT2 transform opf the image
"""
return np.fft.fftshift(np.fft.fft2(self.pixels))
def corr_fit2d(self, nx=2, ny=1, poly=False, inline=True, mask=None):
"""
Subtract a fitted 2D-polynom of nx and ny order from the data
Parameters
----------
nx : int
the polynom order for the x-axis
ny : int
the polynom order for the y-axis
poly : bool
if True the polynom is returned as output
inline : bool
create a new object?
mask : 2D numpy array
mask where the fitting should be performed
"""
r, z = fit2d(self.pixels, nx, ny, mask=mask)
if inline:
self.pixels -= z
else:
N = copy.deepcopy(self)
N.pixels -= z
if poly:
return N, z
return N
if poly:
return z
return self
def zero_min(self, inline=True):
"""
Shift the values so that the minimum becomes zero.
"""
if inline:
self.pixels -= np.min(self.pixels)
return self
else:
N = copy.deepcopy(self)
N.pixels -= np.min(N.pixels)
return N
def filter_lowpass(self, p, inline=True):
"""
Execute a lowpass filter on the data
"""
F = self.get_fft()
mask = self.getRmask() < p
if inline:
self.pixels = np.real(np.fft.ifft2(np.fft.fftshift(F*mask)))
else:
C = copy.deepcopy(self)
C.pixels = np.real(np.fft.ifft2(np.fft.fftshift(F*mask)))
return C
def _resize_infos(self):
"""
Internal to recalculate the real size when the image is cropped or cut
"""
self.size['real']['x'] *= self.pixels.shape[1]/self.size['pixels']['x']
self.size['real']['y'] *= self.pixels.shape[0]/self.size['pixels']['y']
self.size['pixels']['x'] = int(self.pixels.shape[1])
self.size['pixels']['y'] = int(self.pixels.shape[0])
if 'recorded' in self.size:
self.size['recorded']['real']['x'] \
*= (self.pixels.shape[1]/self.size['pixels']['x'])
self.size['recorded']['real']['y'] \
*= (self.pixels.shape[0]/self.size['pixels']['y'])
self.size['recorded']['pixels']['x'] = int(self.pixels.shape[1])
self.size['recorded']['pixels']['y'] = int(self.pixels.shape[0])
def filter_scars_removal(self, thresh=.5, inline=True):
"""
Filter function to remove scars from images.
"""
if not inline:
C = copy.deepcopy(self)
else:
C = self
for y in range(1, self.pixels.shape[0]-1):
b = self.pixels[y-1, :]
c = self.pixels[y, :]
a = self.pixels[y+1, :]
mask = np.abs(b-a) < thresh*(np.abs(c-a))
C.pixels[y, mask] = b[mask]
if not inline:
return C
return self
def cut(self, c, inline=False, pixels=True, **kargs):
"""
Clip/Crop the image
Parameters
----------
c : list [llx,lly,urx,ury]
list of the lowe-left (ll) and upper-right (ur) coordinates
inline: bool
perform the transformation inline or produce a new SPM_image?
pixels : bool
Are the coordinates given in pixels?
Returns
-------
self if inplace, clipped SPM_image otherwises2 = pySPM.Nanoscan("%s/CyI5b_PCB_ns.xml"%(Path))
"""
if 'inplace' in kargs:
inline=kargs['inplace']
if kargs.get('debug',False):
print("cut) Input coordinates:", c)
if not pixels:
c = [z for s in zip(*self.real2pixels(c[0::2], c[1::2])) for z in s]
if kargs.get('debug',False):
print("cut) pixel coordinates:", c)
if not inline:
new = copy.deepcopy(self)
new.pixels = cut(self.pixels, c, **kargs)
new._resize_infos()
return new
else:
self.pixels = cut(self.pixels, c, **kargs)
self._resize_infos()
return self
def zoom(self, zoom_factor, inplace=False, order=3):
"""
Resize the image to a new pixel size (but keep the real size) by pixel interpolation.
Parameters
----------
zoom_factor : float
> 1: up sampling
< 1: down sampling
order : int
The spline interpolation order to use. (default: 3). Use 0 for binary or very sharp images.
inplace : bool
create a new image?
"""
from scipy.ndimage.interpolation import zoom
if not inplace:
new = copy.deepcopy(self)
new.pixels = zoom(new.pixels, zoom_factor, order=order)
new.size['pixels']['x'] = new.pixels.shape[1]
new.size['pixels']['y'] = new.pixels.shape[0]
return new
else:
self.pixels = zoom(self.pixels, zoom_factor, order=order)
self.size['pixels']['x'] = self.pixels.shape[1]
self.size['pixels']['y'] = self.pixels.shape[0]
return self
# Note: The following functions are not part of the SPM_image class.
# All following functions are performed on numpy arrays
def cut(img, c, **kargs):
    """
    Clip / Crop a numpy array.

    Parameters
    ----------
    img : 2D numpy array
        The input image array
    c : list [llx, lly, urx, ury] or Bbox
        the lower-left (ll) and upper-right (ur) coordinates used for
        the cropping
    """
    from .utils.geometry import Bbox
    if kargs.get('debug',False):
        print("cut in x", c[0], "->", c[2], " - in y", c[1], "->", c[3])
    if isinstance(c, Bbox):
        c = [c.left, c.bottom, c.right, c.top]
    left, bottom, right, top = c[0], c[1], c[2], c[3]
    # Normalize the rectangle so bottom <= top and left <= right
    if top < bottom:
        bottom, top = top, bottom
    if right < left:
        left, right = right, left
    if right-left == img.shape[1] and top-bottom == img.shape[0]:
        raise Exception("Reshaping the same array again?")
    return img[bottom:top, left:right]
def normalize(data, sig=None, vmin=None, vmax=None):
    """
    Normalize the input data to the [0, 1] range.

    Parameters
    ----------
    data : numpy array
        input data
    sig : float or None
        if not None the bounds are mean(data) ± sig*std(data)
        instead of the data min/max
    vmin : float or None
        explicit lower bound, mapped to 0 (overrides the above)
    vmax : float or None
        explicit upper bound, mapped to 1 (overrides the above)
    Note
    ----
    All values below the lower bound are clipped to 0
    and all values above the upper bound are clipped to 1.
    """
    if sig is None:
        lo = np.min(data)
        hi = np.max(data)
    else:
        spread = sig * np.std(data)
        lo = np.mean(data) - spread
        hi = np.mean(data) + spread
    if vmin is not None:
        lo = vmin
    if vmax is not None:
        hi = vmax
    out = (data - lo) / (hi - lo)
    out[out < 0] = 0
    out[out > 1] = 1
    return out
def imshow_sig(img, sig=1, ax=None, **kargs):
    """
    Display a numpy array with color limits mean(img) ± sig*std(img).

    Parameters
    ----------
    img : 2D numpy array
        input image to display
    sig : float
        number of standard deviations around the mean used as color range
    ax : matplotlib axis or None
        axis to draw into; a new figure/axis is created when None
    **kargs : additional parameters
        forwarded to matplotlib's imshow
    """
    if ax == None:
        fig, ax = plt.subplots(1, 1)
    spread = sig * np.std(img)
    center = np.mean(img)
    ax.imshow(img, vmin=center - spread, vmax=center + spread, **kargs)
def adjust_position(fixed, to_adjust, shift=False):
    """
    Shift `to_adjust` (by rolling its rows/columns) so that it best matches
    `fixed`, the shift being taken at the FFT cross-correlation maximum.

    Parameters
    ----------
    fixed : 2D numpy array
        reference image
    to_adjust : 2D numpy array
        image to be shifted
    shift : bool
        if True also return the applied [dy, dx] shift

    Returns
    -------
    2D numpy array, or (array, [dy, dx]) when shift=True
    """
    cor = np.fft.fft2(fixed)
    cor = np.abs(np.fft.ifft2(np.conj(cor) * np.fft.fft2(to_adjust)))
    cor = cor / to_adjust.size
    ypeak, xpeak = np.unravel_index(cor.argmax(), cor.shape)
    # Bug fix: the computed shift used to be stored in the `shift` parameter
    # itself, clobbering the flag so a tuple was always returned.
    offset = [-(ypeak - 1), -(xpeak - 1)]
    adj = np.roll(to_adjust, offset[0], axis=0)
    adj = np.roll(adj, offset[1], axis=1)
    if shift:
        return adj, offset
    return adj
def tukeyfy(A, alpha, type='default'):
    """
    Apply a 2D Tukey window to an image.

    Parameters
    ----------
    A : 2D numpy array
        input array
    alpha : float
        Size of the Tukey windows in percent of the image (≥0 and ≤1)
    type : string
        'default': the data blend down to 0 at the borders;
        any other value: blend down to the mean of A instead
    """
    tuky = tukeywin(A.shape[0], alpha)
    tukx = tukeywin(A.shape[1], alpha)
    tuk = np.multiply(tukx[:, None].T, tuky[:, None])
    # Bug fix: `type is 'default'` compared string *identity*, which is
    # implementation-dependent (and a SyntaxWarning); use equality.
    if type == 'default':
        return A * tuk
    avg = np.mean(A)
    return avg + (A - avg) * tuk
def tukeywin(window_length, alpha=0.5):
    '''Return a Tukey (tapered-cosine) window of the given length.

    The window is flat in the middle with cosine-tapered edges covering a
    total fraction `alpha` of the window: at alpha = 0 it becomes a
    rectangular window and at alpha = 1 a Hann window. The same convention
    as MATLAB's tukeywin is used so results can be compared between the
    two environments.
    Reference
    ---------
    http://www.mathworks.com/access/helpdesk/help/toolbox/signal/tukeywin.html
    '''
    # Degenerate cases first
    if alpha <= 0:
        return np.ones(window_length)  # rectangular window
    if alpha >= 1:
        return np.hanning(window_length)
    pos = np.linspace(0, 1, window_length)
    win = np.ones(pos.shape)
    # Rising taper: 0 <= x < alpha/2
    rising = pos < alpha / 2
    win[rising] = 0.5 * \
        (1 + np.cos(2 * np.pi / alpha * (pos[rising] - alpha / 2)))
    # The flat middle needs no change; falling taper: x >= 1 - alpha/2
    falling = pos >= (1 - alpha / 2)
    win[falling] = 0.5 * \
        (1 + np.cos(2 * np.pi / alpha * (pos[falling] - 1 + alpha / 2)))
    return win
def overlay(ax, mask, color, **kargs):
    """
    Plot a colored overlay of a binary mask on an existing axis.

    Parameters
    ----------
    ax : matplotlib axis
        input axis
    mask : 2D numpy array
        Binary array where a mask should be plotted
    color : string
        The color of the mask to plot
    **kargs : additional parameters
        passed to the imshow function of matplotlib
    """
    masked = ma.masked_array(mask, ~mask)
    rgba = np.array(colors.colorConverter.to_rgba(color))
    # Broadcast the RGBA color over the masked area
    image = rgba[:, None, None].T * masked[:, :, None]
    ax.imshow(image, **kargs)
def normP(x, p, trunk=True):
    """
    Normalize data using percentile bounds.

    Parameters
    ----------
    x : 2D numpy array
        input data
    p : float
        percentile used for the bounds:
        lower bound = p percentile, upper bound = (100-p) percentile
    trunk : bool
        if True clip the result to [0, 1]
    """
    hi = np.percentile(x, 100 - p)
    lo = np.percentile(x, p)
    if lo == hi:
        # Degenerate percentiles: fall back to the full data range
        hi = np.max(x)
        lo = np.min(x)
    if lo == hi:
        # Constant data: avoid a division by zero
        hi = lo + 1
    out = (x - lo) / (hi - lo)
    if trunk:
        out[out < 0] = 0
        out[out > 1] = 1
    return out
def beam_profile(target, source, mu=1e-6, tukey=0, meanCorr=False, source_tukey=None, real=np.abs, **kargs):
    """
    Estimate the beam PSF by deconvolution of `target` by `source`
    using a Tikhonov regularization of factor mu.

    Parameters
    ----------
    target, source : 2D numpy arrays
    mu : float
        Tikhonov regularization factor
    tukey : float
        Tukey window fraction applied to the target (0 disables)
    meanCorr : bool
        subtract the mean of the target first
    source_tukey : float or None
        Tukey window fraction applied to the source (defaults to `tukey`)
    real : callable
        projection applied to the complex result (default: np.abs)
    """
    if source_tukey is None:
        source_tukey = tukey
    if kargs.get('source_centering', False):
        source = 2*source-1
    if meanCorr:
        target = target-np.mean(target)
    if tukey > 0:
        target = tukeyfy(target, tukey)
    if source_tukey > 0:
        # Bug fix: the window fraction used to be `tukey` here even though
        # the dedicated `source_tukey` parameter was being checked.
        source = tukeyfy(source, source_tukey)
    tf = np.fft.fft2(source)
    tf /= np.size(tf)
    recon_tf = np.conj(tf) / (np.abs(tf)**2 + mu)
    return np.fft.fftshift(real(np.fft.ifft2(np.fft.fft2(target) * recon_tf)))/np.size(target)
def beam_profile1d(target, source, mu=1e-6, real=np.abs):
    """
    1D Tikhonov-regularized deconvolution of `target` by `source`.

    Returns the (fft-shifted) deconvolved profile and its Fourier transform.
    """
    tf = np.fft.fft(source)
    tf = tf / np.size(tf)
    recon_tf = np.conj(tf) / (np.abs(tf)**2 + mu)
    F = np.fft.fft(target) * recon_tf
    profile = np.fft.fftshift(real(np.fft.ifft(F)))
    return profile, F
def zoom_center(img, sx, sy=None):
    """
    Extract the central sx × sy pixels of an image.

    Parameters
    ----------
    img : 2D numpy array
        The input data
    sx : int
        The number of pixels along the x-axis to take
    sy : int or None
        The number of pixels along the y-axis to take.
        If None take the same value as for sx
    """
    if sy is None:
        sy = sx
    assert type(sx) is int
    assert type(sy) is int
    cy = img.shape[0] // 2
    cx = img.shape[1] // 2
    return img[cy - sy//2: cy + sy//2, cx - sx//2: cx + sx//2]
def px2real(x, y, size, ext):
    """
    Convert pixel coordinates (x, y) to real coordinates, given the image
    shape `size` (rows, cols) and `ext` — presumably a matplotlib-style
    extent (left, right, bottom, top); verify against callers.
    """
    rx = ext[0] + (x / size[1]) * (ext[1] - ext[0])
    ry = ext[2] + (y / size[0]) * (ext[3] - ext[2])
    return rx, ry
def real2px(x, y, size, ext):
    """
    Convert real coordinates (x, y) to pixel coordinates, given the image
    shape `size` (rows, cols) and `ext` — presumably a matplotlib-style
    extent (left, right, bottom, top); verify against callers.
    """
    px = size[1] * (x - ext[0]) / (ext[1] - ext[0])
    py = size[0] * (y - ext[2]) / (ext[3] - ext[2])
    return px, py
def fit2d(Z0, dx=2, dy=1, mask=None):
    """
    Fit the input data with a separable 2D polynomial of order dx × dy:
        z = sum_i a_i*x**i + sum_j b_j*y**j  (shared constant term)

    Parameters
    ----------
    Z0 : 2D numpy array
        input data
    dx : int
        order of the polynomial for the x-axis
    dy : int
        order of the polynomial for the y-axis
    mask : 2D numpy array
        Give a mask where True values only will be used to perform the fitting

    Returns
    -------
    numpy array
        fitted coefficients [a_0..a_dx, b_1..b_dy]
    2D numpy array
        the fitted polynomial evaluated on the full grid
    """
    # Bug fix: np.float was removed in NumPy 1.24; use the builtin float.
    x = np.arange(Z0.shape[1], dtype=float)
    y = np.arange(Z0.shape[0], dtype=float)
    X0, Y0 = np.meshgrid(x, y)
    if mask is not None:
        X = X0[mask]
        Y = Y0[mask]
        Z = Z0[mask]
    else:
        X = X0
        Y = Y0
        Z = Z0
    x2 = X.ravel()
    y2 = Y.ravel()
    # Design matrix: powers of x (0..dx) followed by powers of y (1..dy)
    A = np.vstack([x2**i for i in range(dx+1)] + [y2**i for i in range(1, dy+1)])
    res = scipy.optimize.lsq_linear(A.T, Z.ravel())
    r = res['x']
    Z2 = r[0]*np.ones(Z0.shape)
    for i in range(1, dx+1):
        Z2 += r[i]*(X0**i)
    for i in range(1, dy+1):
        Z2 += r[dx+i]*(Y0**i)
    return r, Z2
def warp_and_cut(img, tform, cut=True):
    """
    Perform an Affine transform on the input data and crop the result
    if cut=True.

    Parameters
    ----------
    img : 2D numpy array
        input data
    tform : skimage.transform
        An Affine transform to perform on the data
    cut : bool
        Should the data be cropped to the valid region?

    Returns
    -------
    (warped_image, crop_box)
    """
    New = tf.warp(img, tform, preserve_range=True)
    Cut = [0, 0] + list(img.shape)
    if tform.translation[0] >= 0:
        Cut[2] -= tform.translation[0]
    elif tform.translation[0] < 0:
        Cut[0] -= tform.translation[0]
    if tform.translation[1] >= 0:
        Cut[1] += tform.translation[1]
    elif tform.translation[1] < 0:
        Cut[3] += tform.translation[1]
    Cut = [int(x) for x in Cut]
    if cut:
        # Bug fix: the `cut` parameter shadowed the module-level cut()
        # function, so `cut(New, Cut)` called the boolean flag and raised
        # a TypeError. Look up the module-level function explicitly.
        New = globals()['cut'](New, Cut)
    return New, Cut
def get_profile(I, x1, y1, x2, y2, width=0, ax=None, color='w', alpha=0, N=None,\
        transx=lambda x: x, transy=lambda x: x, interp_order=1, **kargs):
    """
    Low-level profile extraction from a matrix.

    Sample the values of I along the segment (x1,y1)-(x2,y2) (pixel
    coordinates), optionally over several parallel lines for statistics,
    and optionally draw the profile position on a matplotlib axis.

    Parameters
    ----------
    I : 2D numpy array
        input data
    x1, y1, x2, y2 : float
        end points of the profile, in pixels
    width : int
        number of parallel lines to sample (0 = a single line)
    ax : matplotlib axis or None
        if given, draw the profile position on it
    color : string or tuple
        color of the drawn lines
    alpha : float
        if > 0, also fill the profile rectangle with that transparency
    N : int or None
        number of sample points (default: one per pixel of length)
    transx, transy : callables
        pixel -> display coordinate transforms, used only for drawing
    interp_order : int
        spline order used by the interpolation

    Returns
    -------
    (1D numpy array, numpy array)
        the sampled x-axis (0..length) and the profile values
        (one column per parallel line when width > 1)
    """
    d = np.sqrt((x2-x1)**2+(y2-y1)**2)
    if N is None:
        N = int(d)+1
    P = []
    dx = -width/2*(y2-y1)/d
    dy = width/2*(x2-x1)/d
    for w in np.linspace(-width/2, width/2, max(1,width)):
        # Offset the sampling line perpendicular to the profile direction
        dx = -w*(y2-y1)/d
        dy = w*(x2-x1)/d
        x = np.linspace(x1+dx, x2+dx, N)
        y = np.linspace(y1+dy, y2+dy, N)
        # Bug fix: scipy.ndimage.interpolation was deprecated and removed
        # (SciPy 1.10); map_coordinates now lives directly in scipy.ndimage.
        M = scipy.ndimage.map_coordinates(I, np.vstack((y, x)), order=interp_order)
        P.append(M)
    if kargs.get('debug',False):
        print("get_profile input coordinates:",x1,y1,x2,y2)
    if not ax is None:
        x1 = transx(x1)
        x2 = transx(x2)
        y1 = transy(y1)
        y2 = transy(y2)
        if kargs.get('debug',False):
            print("Drawing coordinates:",x1,y1,x2,y2)
        dx = -width/2*(y2-y1)/d
        dy = width/2*(x2-x1)/d
        if type(color) in [tuple, list]:
            ax.plot([x1, x2], [y1, y2], color=color, alpha=kargs.get('linealpha',1))
            ax.plot([x1-dx, x1+dx], [y1-dy, y1+dy], color=color, alpha=kargs.get('linealpha',1))
            ax.plot([x2-dx, x2+dx], [y2-dy, y2+dy], color=color, alpha=kargs.get('linealpha',1))
        else:
            ax.plot([x1, x2], [y1, y2], color, alpha=kargs.get('linealpha',1), lw=kargs.get('lw',1))
            ax.plot([x1-dx, x1+dx], [y1-dy, y1+dy], color, alpha=kargs.get('linealpha',1))
            ax.plot([x2-dx, x2+dx], [y2-dy, y2+dy], color, alpha=kargs.get('linealpha',1))
        if alpha>0:
            import matplotlib.patches
            ax.add_patch(matplotlib.patches.Rectangle(
                (x1+dx,y1+dy),
                2*np.sqrt(dx**2+dy**2),
                np.sqrt((x2-x1)**2+(y2-y1)**2),
                -np.degrees(np.arctan2(x2-x1,y2-y1)), color=color, alpha=alpha))
    if len(P)==1:
        return np.linspace(0, d, N), P[0]
    return np.linspace(0, d, N), np.vstack(P).T
def dist_v2(img, dx=1, dy=1):
    """
    Return a 2D array holding, for every pixel, the (scaled) distance to
    the nearest corner of the array, with wrap-around symmetry along
    both axes.
    """
    col = np.arange(img.shape[1])
    col2 = (np.minimum(col, img.shape[1] - col) * dx)**2
    row = np.arange(img.shape[0])
    row2 = (np.minimum(row, img.shape[0] - row) * dy)**2
    C2, R2 = np.meshgrid(col2, row2)
    return np.sqrt(C2 + R2)
def generate_k_matrices(A, dx, dy):
    """
    k-Matrix generation (helper function) for a 2D channel.

    Returns
    -------
    k : 2D numpy array
        real-life radial frequency distance of each pixel to the nearest
        corner of the array
    kx, ky : 2D numpy arrays
        real-life frequency offsets along x and y, in fft ordering
        (positive frequencies first, then negative)
    """
    ny, nx = A.shape
    dkx = 2 * np.pi / (nx * dx)
    dky = 2 * np.pi / (ny * dy)
    # fft-style frequency ordering: 0..n/2-1 then the negative frequencies
    ky = (np.mod(np.arange(0, ny) + ny/2, ny) - ny/2) * dky
    kx = (np.mod(np.arange(0, nx) + nx/2, nx) - nx/2) * dkx
    kx, ky = np.meshgrid(kx, ky)
    k = dist_v2(A, dkx, dky)
    k[0, 0] = 1.0  # Prevent division by zero error and illegal operand errors. This may be improved...
    return k, kx, ky
def mfm_tf(nx, dx, ny, dy, tf_in, derivative=0, transform=0, z=0, A=0, theta=None, phi=None, d=None, delta_w=None):
    """
    Draft for the MFM tf function

    NOTE(review): nx, ny and delta_w are currently unused (the k matrices
    are built from tf_in's shape), and the A != 0 branch is an unfinished
    TODO — confirm before relying on those parameters.
    """
    k, kx, ky = generate_k_matrices(tf_in, dx, dy)
    # Distance loss
    tf_out = np.exp(-z*k)
    if d is not None:
        # presumably a finite-thickness correction (d=0: k-weighted limit,
        # d=inf: no extra factor) — TODO confirm
        tf_out = tf_out / 2.0
        if not np.isinf(d):
            if d == 0:
                tf_out *= k
            else:
                tf_out *= 1 - np.exp(-d*k)
    if A == 0:
        if transform != 0:
            # Tilt projection: requires the theta/phi angles
            assert theta is not None
            assert phi is not None
            tf_out *= ((np.cos(theta)+1j*(np.cos(phi)*np.sin(-theta)*kx+np.sin(phi)*np.sin(-theta)*ky)) / k)**transform
        if derivative == 1:
            tf_out *= k
    else:
        pass # TODO
    return tf_out * tf_in
def mfm_inv_calc_flat(img, z, tf_in, thickness=None, delta_w=None, amplitude=0, derivative=0, transform=0, mu=1e-8):
    """
    Inverse MFM calculation: deconvolve `img` with the MFM transfer
    function at lift height z, using Tikhonov regularization factor mu.
    """
    # Fixed angles passed to the transfer function (theta=12°, phi=-90°)
    theta = np.radians(12)
    phi = np.radians(-90)
    ny, nx = img.shape
    tf = mfm_tf(nx, 1, ny, 1, tf_in, derivative, transform, z, amplitude, theta, phi, thickness, delta_w)
    # Replace the DC term by the mean of the transfer function
    tf[0, 0] = np.real(np.mean(tf))
    recon_tf = np.conj(tf) / (mu + np.abs(tf)**2)
    return np.abs(np.fft.ifft2(np.fft.fft2(img) * recon_tf))
def get_tik_tf(Img, mu, tukey=0, source_tukey=0, debug=False, d=200, real=np.real):
    """
    Build a radially symmetric transfer-function model from an image.

    The image is deconvolved by itself (beam_profile with Tikhonov factor
    mu), the central d × d region is extracted, and an exponential peak is
    fitted to its central row; the fitted decay is then expanded into a
    radial model bg + exp(-R/a).
    """
    import scipy
    # Exponential peak model: bg + (A-bg)*exp(-|x-x0|/a)
    def fit(x, a ,A, bg, x0):
        return bg+(A-bg)*np.exp(-abs(x-x0)/a)
    x = np.arange(Img.shape[1])
    y = np.arange(Img.shape[0])
    X, Y = np.meshgrid(x, y)
    x0 = Img.shape[1]/2
    y0 = Img.shape[0]/2
    # Radial distance of every pixel to the image center
    R = np.sqrt((X-x0)**2+(Y-y0)**2)
    Z = beam_profile(Img, Img, mu=mu, tukey=tukey, source_tukey=source_tukey, real=real)
    zoom = zoom_center(Z, d)
    # Fit the central row of the zoomed region
    P = zoom[zoom.shape[0]//2, :]
    p0 = (1,np.max(zoom), 0, len(P)/2)
    popt, pcov = scipy.optimize.curve_fit(fit, np.arange(len(P)), P, p0, bounds=((0,0,-np.inf,0),np.inf))
    bg = popt[2]
    a = popt[0]
    if debug:
        return bg+np.exp(-np.abs(R)/a), Z, p0, popt
    return bg+np.exp(-np.abs(R)/a)
| [
"numpy.radians",
"numpy.hanning",
"numpy.sqrt",
"numpy.polyfit",
"numpy.log",
"scipy.interpolate.SmoothBivariateSpline",
"matplotlib.colors.colorConverter.to_rgba",
"numpy.array",
"skimage.exposure.equalize_adapthist",
"numpy.nanmean",
"numpy.arctan2",
"numpy.percentile",
"matplotlib.rc",
... | [((54674, 54685), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (54680, 54685), True, 'import numpy as np\n'), ((54696, 54708), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (54703, 54708), True, 'import numpy as np\n'), ((54955, 54979), 'copy.deepcopy', 'copy.deepcopy', (['to_adjust'], {}), '(to_adjust)\n', (54968, 54979), False, 'import copy\n'), ((54990, 55008), 'numpy.fft.fft2', 'np.fft.fft2', (['fixed'], {}), '(fixed)\n', (55001, 55008), True, 'import numpy as np\n'), ((55218, 55254), 'numpy.roll', 'np.roll', (['to_adjust', 'shift[0]'], {'axis': '(0)'}), '(to_adjust, shift[0], axis=0)\n', (55225, 55254), True, 'import numpy as np\n'), ((55265, 55295), 'numpy.roll', 'np.roll', (['adj', 'shift[1]'], {'axis': '(1)'}), '(adj, shift[1], axis=1)\n', (55272, 55295), True, 'import numpy as np\n'), ((55820, 55863), 'numpy.multiply', 'np.multiply', (['tukx[:, None].T', 'tuky[:, None]'], {}), '(tukx[:, None].T, tuky[:, None])\n', (55831, 55863), True, 'import numpy as np\n'), ((55923, 55933), 'numpy.mean', 'np.mean', (['A'], {}), '(A)\n', (55930, 55933), True, 'import numpy as np\n'), ((56726, 56758), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'window_length'], {}), '(0, 1, window_length)\n', (56737, 56758), True, 'import numpy as np\n'), ((56767, 56783), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (56774, 56783), True, 'import numpy as np\n'), ((58175, 58200), 'numpy.percentile', 'np.percentile', (['x', '(100 - p)'], {}), '(x, 100 - p)\n', (58188, 58200), True, 'import numpy as np\n'), ((58216, 58235), 'numpy.percentile', 'np.percentile', (['x', 'p'], {}), '(x, p)\n', (58229, 58235), True, 'import numpy as np\n'), ((59084, 59103), 'numpy.fft.fft2', 'np.fft.fft2', (['source'], {}), '(source)\n', (59095, 59103), True, 'import numpy as np\n'), ((59114, 59125), 'numpy.size', 'np.size', (['tf'], {}), '(tf)\n', (59121, 59125), True, 'import numpy as np\n'), ((59359, 59377), 'numpy.fft.fft', 'np.fft.fft', (['source'], {}), '(source)\n', 
(59369, 59377), True, 'import numpy as np\n'), ((59388, 59399), 'numpy.size', 'np.size', (['tf'], {}), '(tf)\n', (59395, 59399), True, 'import numpy as np\n'), ((60930, 60968), 'numpy.arange', 'np.arange', (['Z0.shape[1]'], {'dtype': 'np.float'}), '(Z0.shape[1], dtype=np.float)\n', (60939, 60968), True, 'import numpy as np\n'), ((60977, 61015), 'numpy.arange', 'np.arange', (['Z0.shape[0]'], {'dtype': 'np.float'}), '(Z0.shape[0], dtype=np.float)\n', (60986, 61015), True, 'import numpy as np\n'), ((61029, 61046), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (61040, 61046), True, 'import numpy as np\n'), ((61913, 61953), 'skimage.transform.warp', 'tf.warp', (['img', 'tform'], {'preserve_range': '(True)'}), '(img, tform, preserve_range=True)\n', (61920, 61953), True, 'from skimage import transform as tf\n'), ((62689, 62729), 'numpy.sqrt', 'np.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (62696, 62729), True, 'import numpy as np\n'), ((64685, 64708), 'numpy.arange', 'np.arange', (['img.shape[1]'], {}), '(img.shape[1])\n', (64694, 64708), True, 'import numpy as np\n'), ((64769, 64792), 'numpy.arange', 'np.arange', (['img.shape[0]'], {}), '(img.shape[0])\n', (64778, 64792), True, 'import numpy as np\n'), ((64857, 64876), 'numpy.meshgrid', 'np.meshgrid', (['x2', 'y2'], {}), '(x2, y2)\n', (64868, 64876), True, 'import numpy as np\n'), ((64888, 64902), 'numpy.sqrt', 'np.sqrt', (['(X + Y)'], {}), '(X + Y)\n', (64895, 64902), True, 'import numpy as np\n'), ((65734, 65750), 'numpy.arange', 'np.arange', (['(0)', 'ny'], {}), '(0, ny)\n', (65743, 65750), True, 'import numpy as np\n'), ((65810, 65826), 'numpy.arange', 'np.arange', (['(0)', 'nx'], {}), '(0, nx)\n', (65819, 65826), True, 'import numpy as np\n'), ((65890, 65909), 'numpy.meshgrid', 'np.meshgrid', (['kx', 'ky'], {}), '(kx, ky)\n', (65901, 65909), True, 'import numpy as np\n'), ((66322, 66336), 'numpy.exp', 'np.exp', (['(-z * k)'], {}), '(-z * k)\n', (66328, 
66336), True, 'import numpy as np\n'), ((67041, 67055), 'numpy.radians', 'np.radians', (['(12)'], {}), '(12)\n', (67051, 67055), True, 'import numpy as np\n'), ((67066, 67081), 'numpy.radians', 'np.radians', (['(-90)'], {}), '(-90)\n', (67076, 67081), True, 'import numpy as np\n'), ((67571, 67594), 'numpy.arange', 'np.arange', (['Img.shape[1]'], {}), '(Img.shape[1])\n', (67580, 67594), True, 'import numpy as np\n'), ((67603, 67626), 'numpy.arange', 'np.arange', (['Img.shape[0]'], {}), '(Img.shape[0])\n', (67612, 67626), True, 'import numpy as np\n'), ((67638, 67655), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (67649, 67655), True, 'import numpy as np\n'), ((67712, 67750), 'numpy.sqrt', 'np.sqrt', (['((X - x0) ** 2 + (Y - y0) ** 2)'], {}), '((X - x0) ** 2 + (Y - y0) ** 2)\n', (67719, 67750), True, 'import numpy as np\n'), ((2968, 2987), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (2981, 2987), False, 'import copy\n'), ((3441, 3460), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (3454, 3460), False, 'import copy\n'), ((3760, 3779), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (3773, 3779), False, 'import copy\n'), ((4115, 4134), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (4128, 4134), False, 'import copy\n'), ((9956, 9986), 'numpy.zeros', 'np.zeros', (['self.pixels.shape[0]'], {}), '(self.pixels.shape[0])\n', (9964, 9986), True, 'import numpy as np\n'), ((10004, 10034), 'numpy.zeros', 'np.zeros', (['self.pixels.shape[0]'], {}), '(self.pixels.shape[0])\n', (10012, 10034), True, 'import numpy as np\n'), ((10484, 10501), 'numpy.cumsum', 'np.cumsum', (['offset'], {}), '(offset)\n', (10493, 10501), True, 'import numpy as np\n'), ((14091, 14122), 'numpy.arange', 'np.arange', (['self.pixels.shape[1]'], {}), '(self.pixels.shape[1])\n', (14100, 14122), True, 'import numpy as np\n'), ((14135, 14166), 'numpy.arange', 'np.arange', (['self.pixels.shape[0]'], {}), '(self.pixels.shape[0])\n', 
(14144, 14166), True, 'import numpy as np\n'), ((14249, 14270), 'numpy.arange', 'np.arange', (['y1', '(y2 + 1)'], {}), '(y1, y2 + 1)\n', (14258, 14270), True, 'import numpy as np\n'), ((14315, 14331), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (14324, 14331), True, 'import numpy as np\n'), ((15294, 15322), 'numpy.mean', 'np.mean', (['self.pixels'], {'axis': '(1)'}), '(self.pixels, axis=1)\n', (15301, 15322), True, 'import numpy as np\n'), ((15367, 15386), 'numpy.polyfit', 'np.polyfit', (['i', 's', '(1)'], {}), '(i, s, 1)\n', (15377, 15386), True, 'import numpy as np\n'), ((16100, 16131), 'numpy.arange', 'np.arange', (['self.pixels.shape[1]'], {}), '(self.pixels.shape[1])\n', (16109, 16131), True, 'import numpy as np\n'), ((16144, 16175), 'numpy.arange', 'np.arange', (['self.pixels.shape[0]'], {}), '(self.pixels.shape[0])\n', (16153, 16175), True, 'import numpy as np\n'), ((16193, 16210), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (16204, 16210), True, 'import numpy as np\n'), ((17814, 17849), 'numpy.arange', 'np.arange', (["self.size['pixels']['x']"], {}), "(self.size['pixels']['x'])\n", (17823, 17849), True, 'import numpy as np\n'), ((17930, 17965), 'numpy.arange', 'np.arange', (["self.size['pixels']['y']"], {}), "(self.size['pixels']['y'])\n", (17939, 17965), True, 'import numpy as np\n'), ((18050, 18069), 'numpy.meshgrid', 'np.meshgrid', (['x2', 'y2'], {}), '(x2, y2)\n', (18061, 18069), True, 'import numpy as np\n'), ((18085, 18099), 'numpy.sqrt', 'np.sqrt', (['(X + Y)'], {}), '(X + Y)\n', (18092, 18099), True, 'import numpy as np\n'), ((18772, 18786), 'numpy.exp', 'np.exp', (['(-d * k)'], {}), '(-d * k)\n', (18778, 18786), True, 'import numpy as np\n'), ((18804, 18815), 'numpy.mean', 'np.mean', (['tf'], {}), '(tf)\n', (18811, 18815), True, 'import numpy as np\n'), ((22201, 22227), 'matplotlib.rc', 'mpl.rc', (['"""axes"""'], {'grid': '(False)'}), "('axes', grid=False)\n", (22207, 22227), True, 'import matplotlib as mpl\n'), 
((28469, 28487), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (28481, 28487), True, 'import matplotlib.pyplot as plt\n'), ((28506, 28538), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(0)', 'vmax': 'N'}), '(vmin=0, vmax=N)\n', (28522, 28538), False, 'from matplotlib import colors, cm\n'), ((28559, 28597), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'CM'}), '(norm=cNorm, cmap=CM)\n', (28576, 28597), False, 'from matplotlib import colors, cm\n'), ((30579, 30595), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (30587, 30595), True, 'import numpy as np\n'), ((30610, 30623), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (30618, 30623), True, 'import numpy as np\n'), ((30638, 30651), 'numpy.array', 'np.array', (['cov'], {}), '(cov)\n', (30646, 30651), True, 'import numpy as np\n'), ((34269, 34295), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (34276, 34295), True, 'import numpy as np\n'), ((36641, 36681), 'numpy.sqrt', 'np.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (36648, 36681), True, 'import numpy as np\n'), ((40036, 40055), 'numpy.min', 'np.min', (['self.pixels'], {}), '(self.pixels)\n', (40042, 40055), True, 'import numpy as np\n'), ((41590, 41621), 'numpy.arange', 'np.arange', (['self.pixels.shape[1]'], {}), '(self.pixels.shape[1])\n', (41599, 41621), True, 'import numpy as np\n'), ((41634, 41665), 'numpy.arange', 'np.arange', (['self.pixels.shape[0]'], {}), '(self.pixels.shape[0])\n', (41643, 41665), True, 'import numpy as np\n'), ((41683, 41700), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (41694, 41700), True, 'import numpy as np\n'), ((41713, 41761), 'scipy.interpolate.SmoothBivariateSpline', 'scipy.interpolate.SmoothBivariateSpline', (['X', 'Y', 'Z'], {}), '(X, Y, Z)\n', (41752, 41761), False, 'import scipy\n'), ((43220, 43280), 'numpy.linspace', 
'np.linspace', (['(0)', "self.size['real']['x']", 'self.pixels.shape[1]'], {}), "(0, self.size['real']['x'], self.pixels.shape[1])\n", (43231, 43280), True, 'import numpy as np\n'), ((43434, 43461), 'numpy.zeros', 'np.zeros', (['self.pixels.shape'], {}), '(self.pixels.shape)\n', (43442, 43461), True, 'import numpy as np\n'), ((43487, 43514), 'numpy.zeros', 'np.zeros', (['self.pixels.shape'], {}), '(self.pixels.shape)\n', (43495, 43514), True, 'import numpy as np\n'), ((45468, 45487), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (45481, 45487), False, 'import copy\n'), ((45502, 45527), 'numpy.fft.fft2', 'np.fft.fft2', (['fixed.pixels'], {}), '(fixed.pixels)\n', (45513, 45527), True, 'import numpy as np\n'), ((45769, 45807), 'numpy.roll', 'np.roll', (['self.pixels', 'shift[0]'], {'axis': '(0)'}), '(self.pixels, shift[0], axis=0)\n', (45776, 45807), True, 'import numpy as np\n'), ((45829, 45866), 'numpy.roll', 'np.roll', (['adj.pixels', 'shift[1]'], {'axis': '(1)'}), '(adj.pixels, shift[1], axis=1)\n', (45836, 45866), True, 'import numpy as np\n'), ((46178, 46197), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (46191, 46197), False, 'import copy\n'), ((46219, 46267), 'skimage.transform.warp', 'tf.warp', (['self.pixels', 'tform'], {'preserve_range': '(True)'}), '(self.pixels, tform, preserve_range=True)\n', (46226, 46267), True, 'from skimage import transform as tf\n'), ((53768, 53780), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (53774, 53780), True, 'import numpy as np\n'), ((53794, 53806), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (53800, 53806), True, 'import numpy as np\n'), ((54645, 54663), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (54657, 54663), True, 'import matplotlib.pyplot as plt\n'), ((56592, 56614), 'numpy.ones', 'np.ones', (['window_length'], {}), '(window_length)\n', (56599, 56614), True, 'import numpy as np\n'), ((57663, 57699), 
'matplotlib.colors.colorConverter.to_rgba', 'colors.colorConverter.to_rgba', (['color'], {}), '(color)\n', (57692, 57699), False, 'from matplotlib import colors, cm\n'), ((58292, 58301), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (58298, 58301), True, 'import numpy as np\n'), ((58323, 58332), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (58329, 58332), True, 'import numpy as np\n'), ((59141, 59152), 'numpy.conj', 'np.conj', (['tf'], {}), '(tf)\n', (59148, 59152), True, 'import numpy as np\n'), ((59255, 59270), 'numpy.size', 'np.size', (['target'], {}), '(target)\n', (59262, 59270), True, 'import numpy as np\n'), ((59415, 59426), 'numpy.conj', 'np.conj', (['tf'], {}), '(tf)\n', (59422, 59426), True, 'import numpy as np\n'), ((59458, 59476), 'numpy.fft.fft', 'np.fft.fft', (['target'], {}), '(target)\n', (59468, 59476), True, 'import numpy as np\n'), ((61414, 61431), 'numpy.ones', 'np.ones', (['Z0.shape'], {}), '(Z0.shape)\n', (61421, 61431), True, 'import numpy as np\n'), ((62947, 62979), 'numpy.linspace', 'np.linspace', (['(x1 + dx)', '(x2 + dx)', 'N'], {}), '(x1 + dx, x2 + dx, N)\n', (62958, 62979), True, 'import numpy as np\n'), ((62988, 63020), 'numpy.linspace', 'np.linspace', (['(y1 + dy)', '(y2 + dy)', 'N'], {}), '(y1 + dy, y2 + dy, N)\n', (62999, 63020), True, 'import numpy as np\n'), ((64503, 64523), 'numpy.linspace', 'np.linspace', (['(0)', 'd', 'N'], {}), '(0, d, N)\n', (64514, 64523), True, 'import numpy as np\n'), ((67233, 67244), 'numpy.mean', 'np.mean', (['tf'], {}), '(tf)\n', (67240, 67244), True, 'import numpy as np\n'), ((67261, 67272), 'numpy.conj', 'np.conj', (['tf'], {}), '(tf)\n', (67268, 67272), True, 'import numpy as np\n'), ((67910, 67922), 'numpy.max', 'np.max', (['zoom'], {}), '(zoom)\n', (67916, 67922), True, 'import numpy as np\n'), ((6321, 6330), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6328, 6330), True, 'import matplotlib.pyplot as plt\n'), ((10393, 10403), 'numpy.diff', 'np.diff', (['D'], {}), '(D)\n', (10400, 10403), 
True, 'import numpy as np\n'), ((10751, 10770), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (10764, 10770), False, 'import copy\n'), ((12410, 12450), 'numpy.sqrt', 'np.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (12417, 12450), True, 'import numpy as np\n'), ((14212, 14234), 'numpy.flipud', 'np.flipud', (['self.pixels'], {}), '(self.pixels)\n', (14221, 14234), True, 'import numpy as np\n'), ((14756, 14788), 'numpy.vstack', 'np.vstack', (['[N[1:, :], N[-1:, :]]'], {}), '([N[1:, :], N[-1:, :]])\n', (14765, 14788), True, 'import numpy as np\n'), ((14873, 14894), 'numpy.median', 'np.median', (['N2'], {'axis': '(1)'}), '(N2, axis=1)\n', (14882, 14894), True, 'import numpy as np\n'), ((14959, 14986), 'numpy.tile', 'np.tile', (['C', '(N.shape[0], 1)'], {}), '(C, (N.shape[0], 1))\n', (14966, 14986), True, 'import numpy as np\n'), ((15070, 15089), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (15083, 15089), False, 'import copy\n'), ((15544, 15563), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (15557, 15563), False, 'import copy\n'), ((16738, 16757), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (16751, 16757), False, 'import copy\n'), ((17272, 17291), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (17285, 17291), False, 'import copy\n'), ((18848, 18862), 'numpy.exp', 'np.exp', (['(-d * k)'], {}), '(-d * k)\n', (18854, 18862), True, 'import numpy as np\n'), ((18883, 18900), 'numpy.ones', 'np.ones', (['tf.shape'], {}), '(tf.shape)\n', (18890, 18900), True, 'import numpy as np\n'), ((22277, 22286), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22284, 22286), True, 'import matplotlib.pyplot as plt\n'), ((23189, 23211), 'numpy.nanmin', 'np.nanmin', (['self.pixels'], {}), '(self.pixels)\n', (23198, 23211), True, 'import numpy as np\n'), ((23213, 23235), 'numpy.nanmax', 'np.nanmax', (['self.pixels'], {}), '(self.pixels)\n', (23222, 23235), 
True, 'import numpy as np\n'), ((23287, 23357), 'numpy.asarray', 'np.asarray', (['(256 ** 2 * (self.pixels - mi) / (ma - mi))'], {'dtype': 'np.uint16'}), '(256 ** 2 * (self.pixels - mi) / (ma - mi), dtype=np.uint16)\n', (23297, 23357), True, 'import numpy as np\n'), ((23392, 23449), 'skimage.exposure.equalize_adapthist', 'skimage.exposure.equalize_adapthist', (['img'], {'clip_limit': '(0.03)'}), '(img, clip_limit=0.03)\n', (23427, 23449), False, 'import skimage\n'), ((23661, 23675), 'numpy.nanstd', 'np.nanstd', (['img'], {}), '(img)\n', (23670, 23675), True, 'import numpy as np\n'), ((23694, 23709), 'numpy.nanmean', 'np.nanmean', (['img'], {}), '(img)\n', (23704, 23709), True, 'import numpy as np\n'), ((23960, 24000), 'numpy.percentile', 'np.percentile', (['img', "(100 - kargs['level'])"], {}), "(img, 100 - kargs['level'])\n", (23973, 24000), True, 'import numpy as np\n'), ((24018, 24052), 'numpy.percentile', 'np.percentile', (['img', "kargs['level']"], {}), "(img, kargs['level'])\n", (24031, 24052), True, 'import numpy as np\n'), ((28713, 28733), 'numpy.linspace', 'np.linspace', (['A', 'B', 'N'], {}), '(A, B, N)\n', (28724, 28733), True, 'import numpy as np\n'), ((28752, 28769), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (28762, 28769), True, 'import numpy as np\n'), ((36504, 36513), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (36511, 36513), True, 'import matplotlib.pyplot as plt\n'), ((37307, 37333), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (37314, 37333), True, 'import numpy as np\n'), ((37545, 37554), 'numpy.std', 'np.std', (['p'], {}), '(p)\n', (37551, 37554), True, 'import numpy as np\n'), ((39985, 40022), 'skimage.filters.threshold_adaptive', 'threshold_local', (['self.pixels', 'percent'], {}), '(self.pixels, percent)\n', (40000, 40022), True, 'from skimage.filters import threshold_adaptive as threshold_local\n'), ((40352, 40371), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), 
'(self)\n', (40365, 40371), False, 'import copy\n'), ((41456, 41478), 'numpy.flipud', 'np.flipud', (['self.pixels'], {}), '(self.pixels)\n', (41465, 41478), True, 'import numpy as np\n'), ((41481, 41500), 'numpy.min', 'np.min', (['self.pixels'], {}), '(self.pixels)\n', (41487, 41500), True, 'import numpy as np\n'), ((42941, 42958), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (42951, 42958), True, 'import numpy as np\n'), ((43071, 43093), 'numpy.fliplr', 'np.fliplr', (['self.pixels'], {}), '(self.pixels)\n', (43080, 43093), True, 'import numpy as np\n'), ((45087, 45102), 'numpy.fliplr', 'np.fliplr', (['mask'], {}), '(mask)\n', (45096, 45102), True, 'import numpy as np\n'), ((45132, 45157), 'numpy.fliplr', 'np.fliplr', (['AFM_bin_shadow'], {}), '(AFM_bin_shadow)\n', (45141, 45157), True, 'import numpy as np\n'), ((46906, 46930), 'numpy.fft.fft2', 'np.fft.fft2', (['self.pixels'], {}), '(self.pixels)\n', (46917, 46930), True, 'import numpy as np\n'), ((47617, 47636), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (47630, 47636), False, 'import copy\n'), ((47958, 47977), 'numpy.min', 'np.min', (['self.pixels'], {}), '(self.pixels)\n', (47964, 47977), True, 'import numpy as np\n'), ((48032, 48051), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (48045, 48051), False, 'import copy\n'), ((48076, 48092), 'numpy.min', 'np.min', (['N.pixels'], {}), '(N.pixels)\n', (48082, 48092), True, 'import numpy as np\n'), ((48414, 48433), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (48427, 48433), False, 'import copy\n'), ((49547, 49566), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (49560, 49566), False, 'import copy\n'), ((50865, 50884), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (50878, 50884), False, 'import copy\n'), ((51717, 51736), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (51730, 51736), False, 'import copy\n'), ((51762, 51804), 
'scipy.ndimage.interpolation.zoom', 'zoom', (['new.pixels', 'zoom_factor'], {'order': 'order'}), '(new.pixels, zoom_factor, order=order)\n', (51766, 51804), False, 'from scipy.ndimage.interpolation import zoom\n'), ((51984, 52027), 'scipy.ndimage.interpolation.zoom', 'zoom', (['self.pixels', 'zoom_factor'], {'order': 'order'}), '(self.pixels, zoom_factor, order=order)\n', (51988, 52027), False, 'from scipy.ndimage.interpolation import zoom\n'), ((53833, 53845), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (53839, 53845), True, 'import numpy as np\n'), ((53859, 53872), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (53866, 53872), True, 'import numpy as np\n'), ((53888, 53901), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (53895, 53901), True, 'import numpy as np\n'), ((56673, 56698), 'numpy.hanning', 'np.hanning', (['window_length'], {}), '(window_length)\n', (56683, 56698), True, 'import numpy as np\n'), ((56904, 56964), 'numpy.cos', 'np.cos', (['(2 * np.pi / alpha * (x[first_condition] - alpha / 2))'], {}), '(2 * np.pi / alpha * (x[first_condition] - alpha / 2))\n', (56910, 56964), True, 'import numpy as np\n'), ((57140, 57204), 'numpy.cos', 'np.cos', (['(2 * np.pi / alpha * (x[third_condition] - 1 + alpha / 2))'], {}), '(2 * np.pi / alpha * (x[third_condition] - 1 + alpha / 2))\n', (57146, 57204), True, 'import numpy as np\n'), ((58940, 58955), 'numpy.mean', 'np.mean', (['target'], {}), '(target)\n', (58947, 58955), True, 'import numpy as np\n'), ((63076, 63093), 'numpy.vstack', 'np.vstack', (['(y, x)'], {}), '((y, x))\n', (63085, 63093), True, 'import numpy as np\n'), ((64465, 64485), 'numpy.linspace', 'np.linspace', (['(0)', 'd', 'N'], {}), '(0, d, N)\n', (64476, 64485), True, 'import numpy as np\n'), ((64525, 64537), 'numpy.vstack', 'np.vstack', (['P'], {}), '(P)\n', (64534, 64537), True, 'import numpy as np\n'), ((64719, 64752), 'numpy.minimum', 'np.minimum', (['x2', '(img.shape[1] - x2)'], {}), '(x2, img.shape[1] - x2)\n', (64729, 
64752), True, 'import numpy as np\n'), ((64803, 64836), 'numpy.minimum', 'np.minimum', (['y2', '(img.shape[0] - y2)'], {}), '(y2, img.shape[0] - y2)\n', (64813, 64836), True, 'import numpy as np\n'), ((65762, 65785), 'numpy.mod', 'np.mod', (['(ky + ny / 2)', 'ny'], {}), '(ky + ny / 2, ny)\n', (65768, 65785), True, 'import numpy as np\n'), ((65838, 65861), 'numpy.mod', 'np.mod', (['(kx + nx / 2)', 'nx'], {}), '(kx + nx / 2, nx)\n', (65844, 65861), True, 'import numpy as np\n'), ((66402, 66413), 'numpy.isinf', 'np.isinf', (['d'], {}), '(d)\n', (66410, 66413), True, 'import numpy as np\n'), ((17864, 17909), 'numpy.minimum', 'np.minimum', (['x2', "(self.size['pixels']['x'] - x2)"], {}), "(x2, self.size['pixels']['x'] - x2)\n", (17874, 17909), True, 'import numpy as np\n'), ((17980, 18025), 'numpy.minimum', 'np.minimum', (['y2', "(self.size['pixels']['y'] - y2)"], {}), "(y2, self.size['pixels']['y'] - y2)\n", (17990, 18025), True, 'import numpy as np\n'), ((26556, 26629), 'numpy.linspace', 'np.linspace', (['(0)', "(self.size['real']['x'] / 10 ** fact)", 'self.pixels.shape[1]'], {}), "(0, self.size['real']['x'] / 10 ** fact, self.pixels.shape[1])\n", (26567, 26629), True, 'import numpy as np\n'), ((26671, 26744), 'numpy.linspace', 'np.linspace', (['(0)', "(self.size['real']['y'] / 10 ** fact)", 'self.pixels.shape[0]'], {}), "(0, self.size['real']['y'] / 10 ** fact, self.pixels.shape[0])\n", (26682, 26744), True, 'import numpy as np\n'), ((29141, 29159), 'numpy.mean', 'np.mean', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (29148, 29159), True, 'import numpy as np\n'), ((29924, 29969), 'scipy.optimize.curve_fit', 'scipy.optimize.curve_fit', (['fit', 'l', 'profile', 'p0'], {}), '(fit, l, profile, p0)\n', (29948, 29969), False, 'import scipy\n'), ((37509, 37527), 'numpy.mean', 'np.mean', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (37516, 37527), True, 'import numpy as np\n'), ((38299, 38316), 'numpy.min', 'np.min', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (38305, 38316), 
True, 'import numpy as np\n'), ((38717, 38734), 'numpy.max', 'np.max', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (38723, 38734), True, 'import numpy as np\n'), ((40089, 40108), 'numpy.max', 'np.max', (['self.pixels'], {}), '(self.pixels)\n', (40095, 40108), True, 'import numpy as np\n'), ((40293, 40319), 'numpy.ones', 'np.ones', (['self.pixels.shape'], {}), '(self.pixels.shape)\n', (40300, 40319), True, 'import numpy as np\n'), ((41924, 41943), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (41937, 41943), False, 'import copy\n'), ((43148, 43162), 'numpy.fliplr', 'np.fliplr', (['BIN'], {}), '(BIN)\n', (43157, 43162), True, 'import numpy as np\n'), ((49778, 49791), 'numpy.abs', 'np.abs', (['(b - a)'], {}), '(b - a)\n', (49784, 49791), True, 'import numpy as np\n'), ((55039, 55051), 'numpy.conj', 'np.conj', (['cor'], {}), '(cor)\n', (55046, 55051), True, 'import numpy as np\n'), ((55054, 55076), 'numpy.fft.fft2', 'np.fft.fft2', (['to_adjust'], {}), '(to_adjust)\n', (55065, 55076), True, 'import numpy as np\n'), ((59156, 59166), 'numpy.abs', 'np.abs', (['tf'], {}), '(tf)\n', (59162, 59166), True, 'import numpy as np\n'), ((59430, 59440), 'numpy.abs', 'np.abs', (['tf'], {}), '(tf)\n', (59436, 59440), True, 'import numpy as np\n'), ((59520, 59534), 'numpy.fft.ifft', 'np.fft.ifft', (['F'], {}), '(F)\n', (59531, 59534), True, 'import numpy as np\n'), ((67279, 67289), 'numpy.abs', 'np.abs', (['tf'], {}), '(tf)\n', (67285, 67289), True, 'import numpy as np\n'), ((67329, 67345), 'numpy.fft.fft2', 'np.fft.fft2', (['img'], {}), '(img)\n', (67340, 67345), True, 'import numpy as np\n'), ((8171, 8238), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': 'edge_width', 'foreground': 'edge_color'}), '(linewidth=edge_width, foreground=edge_color)\n', (8193, 8238), True, 'import matplotlib.patheffects as PathEffects\n'), ((10648, 10695), 'numpy.repeat', 'np.repeat', (['offset', 'self.pixels.shape[1]'], {'axis': '(1)'}), '(offset, 
self.pixels.shape[1], axis=1)\n', (10657, 10695), True, 'import numpy as np\n'), ((10836, 10883), 'numpy.repeat', 'np.repeat', (['offset', 'self.pixels.shape[1]'], {'axis': '(1)'}), '(offset, self.pixels.shape[1], axis=1)\n', (10845, 10883), True, 'import numpy as np\n'), ((13804, 13854), 'numpy.sqrt', 'np.sqrt', (['((2 * dx * pxs) ** 2 + (2 * dy * pys) ** 2)'], {}), '((2 * dx * pxs) ** 2 + (2 * dy * pys) ** 2)\n', (13811, 13854), True, 'import numpy as np\n'), ((13865, 13921), 'numpy.sqrt', 'np.sqrt', (['(((x2 - x1) * pxs) ** 2 + ((y2 - y1) * pys) ** 2)'], {}), '(((x2 - x1) * pxs) ** 2 + ((y2 - y1) * pys) ** 2)\n', (13872, 13921), True, 'import numpy as np\n'), ((18929, 18940), 'numpy.conj', 'np.conj', (['tf'], {}), '(tf)\n', (18936, 18940), True, 'import numpy as np\n'), ((19001, 19024), 'numpy.fft.fft2', 'np.fft.fft2', (['work_image'], {}), '(work_image)\n', (19012, 19024), True, 'import numpy as np\n'), ((24491, 24505), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (24500, 24505), True, 'import numpy as np\n'), ((24672, 24686), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (24681, 24686), True, 'import numpy as np\n'), ((24882, 24896), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (24891, 24896), True, 'import numpy as np\n'), ((25063, 25077), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (25072, 25077), True, 'import numpy as np\n'), ((29211, 29244), 'numpy.log', 'np.log', (['(1.001 - profile / ToFcorr)'], {}), '(1.001 - profile / ToFcorr)\n', (29217, 29244), True, 'import numpy as np\n'), ((30679, 30688), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (30685, 30688), True, 'import numpy as np\n'), ((33106, 33128), 'numpy.flipud', 'np.flipud', (['self.pixels'], {}), '(self.pixels)\n', (33115, 33128), True, 'import numpy as np\n'), ((33490, 33512), 'numpy.flipud', 'np.flipud', (['self.pixels'], {}), '(self.pixels)\n', (33499, 33512), True, 'import numpy as np\n'), ((33651, 33673), 'numpy.flipud', 'np.flipud', 
(['self.pixels'], {}), '(self.pixels)\n', (33660, 33673), True, 'import numpy as np\n'), ((33791, 33813), 'numpy.flipud', 'np.flipud', (['self.pixels'], {}), '(self.pixels)\n', (33800, 33813), True, 'import numpy as np\n'), ((39928, 39965), 'skimage.filters.threshold_adaptive', 'threshold_local', (['self.pixels', 'percent'], {}), '(self.pixels, percent)\n', (39943, 39965), True, 'from skimage.filters import threshold_adaptive as threshold_local\n'), ((40521, 40547), 'numpy.ones', 'np.ones', (['self.pixels.shape'], {}), '(self.pixels.shape)\n', (40528, 40547), True, 'import numpy as np\n'), ((45562, 45574), 'numpy.conj', 'np.conj', (['cor'], {}), '(cor)\n', (45569, 45574), True, 'import numpy as np\n'), ((45577, 45601), 'numpy.fft.fft2', 'np.fft.fft2', (['self.pixels'], {}), '(self.pixels)\n', (45588, 45601), True, 'import numpy as np\n'), ((48358, 48383), 'numpy.fft.fftshift', 'np.fft.fftshift', (['(F * mask)'], {}), '(F * mask)\n', (48373, 48383), True, 'import numpy as np\n'), ((48478, 48503), 'numpy.fft.fftshift', 'np.fft.fftshift', (['(F * mask)'], {}), '(F * mask)\n', (48493, 48503), True, 'import numpy as np\n'), ((49800, 49813), 'numpy.abs', 'np.abs', (['(c - a)'], {}), '(c - a)\n', (49806, 49813), True, 'import numpy as np\n'), ((64319, 64359), 'numpy.sqrt', 'np.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (64326, 64359), True, 'import numpy as np\n'), ((66514, 66528), 'numpy.exp', 'np.exp', (['(-d * k)'], {}), '(-d * k)\n', (66520, 66528), True, 'import numpy as np\n'), ((68164, 68173), 'numpy.abs', 'np.abs', (['R'], {}), '(R)\n', (68170, 68173), True, 'import numpy as np\n'), ((15441, 15459), 'numpy.polyval', 'np.polyval', (['fit', 'i'], {}), '(fit, i)\n', (15451, 15459), True, 'import numpy as np\n'), ((15598, 15616), 'numpy.polyval', 'np.polyval', (['fit', 'i'], {}), '(fit, i)\n', (15608, 15616), True, 'import numpy as np\n'), ((16631, 16657), 'numpy.ones', 'np.ones', (['self.pixels.shape'], {}), 
'(self.pixels.shape)\n', (16638, 16657), True, 'import numpy as np\n'), ((16789, 16815), 'numpy.ones', 'np.ones', (['self.pixels.shape'], {}), '(self.pixels.shape)\n', (16796, 16815), True, 'import numpy as np\n'), ((17155, 17183), 'numpy.mean', 'np.mean', (['self.pixels'], {'axis': '(1)'}), '(self.pixels, axis=1)\n', (17162, 17183), True, 'import numpy as np\n'), ((17326, 17354), 'numpy.mean', 'np.mean', (['self.pixels'], {'axis': '(1)'}), '(self.pixels, axis=1)\n', (17333, 17354), True, 'import numpy as np\n'), ((18909, 18926), 'numpy.ones', 'np.ones', (['tf.shape'], {}), '(tf.shape)\n', (18916, 18926), True, 'import numpy as np\n'), ((22937, 22946), 'numpy.log', 'np.log', (['W'], {}), '(W)\n', (22943, 22946), True, 'import numpy as np\n'), ((22947, 22957), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (22953, 22957), True, 'import numpy as np\n'), ((28858, 28867), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (28864, 28867), True, 'import numpy as np\n'), ((28891, 28900), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (28897, 28900), True, 'import numpy as np\n'), ((28924, 28933), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (28930, 28933), True, 'import numpy as np\n'), ((28957, 28966), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (28963, 28966), True, 'import numpy as np\n'), ((29482, 29497), 'numpy.min', 'np.min', (['profile'], {}), '(profile)\n', (29488, 29497), True, 'import numpy as np\n'), ((29611, 29626), 'numpy.max', 'np.max', (['profile'], {}), '(profile)\n', (29617, 29626), True, 'import numpy as np\n'), ((59221, 59240), 'numpy.fft.fft2', 'np.fft.fft2', (['target'], {}), '(target)\n', (59232, 59240), True, 'import numpy as np\n'), ((64281, 64307), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (64288, 64307), True, 'import numpy as np\n'), ((66665, 66678), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (66671, 66678), True, 'import numpy as np\n'), ((26475, 26484), 'numpy.log', 'np.log', (['W'], {}), '(W)\n', 
(26481, 26484), True, 'import numpy as np\n'), ((26485, 26495), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (26491, 26495), True, 'import numpy as np\n'), ((27199, 27208), 'numpy.log', 'np.log', (['W'], {}), '(W)\n', (27205, 27208), True, 'import numpy as np\n'), ((27209, 27219), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (27215, 27219), True, 'import numpy as np\n'), ((29449, 29464), 'numpy.max', 'np.max', (['profile'], {}), '(profile)\n', (29455, 29464), True, 'import numpy as np\n'), ((29465, 29480), 'numpy.min', 'np.min', (['profile'], {}), '(profile)\n', (29471, 29480), True, 'import numpy as np\n'), ((29594, 29609), 'numpy.min', 'np.min', (['profile'], {}), '(profile)\n', (29600, 29609), True, 'import numpy as np\n'), ((32523, 32532), 'numpy.log', 'np.log', (['W'], {}), '(W)\n', (32529, 32532), True, 'import numpy as np\n'), ((32533, 32543), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (32539, 32543), True, 'import numpy as np\n'), ((36437, 36446), 'numpy.log', 'np.log', (['W'], {}), '(W)\n', (36443, 36446), True, 'import numpy as np\n'), ((36447, 36457), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (36453, 36457), True, 'import numpy as np\n'), ((64379, 64407), 'numpy.arctan2', 'np.arctan2', (['(x2 - x1)', '(y2 - y1)'], {}), '(x2 - x1, y2 - y1)\n', (64389, 64407), True, 'import numpy as np\n'), ((68116, 68125), 'numpy.abs', 'np.abs', (['R'], {}), '(R)\n', (68122, 68125), True, 'import numpy as np\n'), ((29578, 29593), 'numpy.max', 'np.max', (['profile'], {}), '(profile)\n', (29584, 29593), True, 'import numpy as np\n'), ((13046, 13074), 'numpy.arctan2', 'np.arctan2', (['(x2 - x1)', '(y2 - y1)'], {}), '(x2 - x1, y2 - y1)\n', (13056, 13074), True, 'import numpy as np\n'), ((14015, 14059), 'numpy.arctan2', 'np.arctan2', (['((x2 - x1) * pxs)', '((y2 - y1) * pys)'], {}), '((x2 - x1) * pxs, (y2 - y1) * pys)\n', (14025, 14059), True, 'import numpy as np\n'), ((66683, 66694), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (66689, 66694), True, 
'import numpy as np\n'), ((66695, 66709), 'numpy.sin', 'np.sin', (['(-theta)'], {}), '(-theta)\n', (66701, 66709), True, 'import numpy as np\n'), ((66713, 66724), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (66719, 66724), True, 'import numpy as np\n'), ((66725, 66739), 'numpy.sin', 'np.sin', (['(-theta)'], {}), '(-theta)\n', (66731, 66739), True, 'import numpy as np\n')] |
# Example of empirical correlation matrix computation using the cor function of eeyore

# %% Load packages

import numpy as np
import torch

from eeyore.stats import cor

# %% Read chains: each CSV row is one sampled iteration, each column one parameter

chains = torch.as_tensor(np.genfromtxt('chain01.csv', delimiter=','))

# %% Compute the empirical correlation matrix across the chain samples

cor_matrix = cor(chains)

print('Correlation matrix based on eeyore cor function:\n{}'.format(cor_matrix))
"eeyore.stats.cor",
"numpy.genfromtxt"
] | [((342, 353), 'eeyore.stats.cor', 'cor', (['chains'], {}), '(chains)\n', (345, 353), False, 'from eeyore.stats import cor\n'), ((211, 254), 'numpy.genfromtxt', 'np.genfromtxt', (['"""chain01.csv"""'], {'delimiter': '""","""'}), "('chain01.csv', delimiter=',')\n", (224, 254), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from collections import Counter, defaultdict, OrderedDict, namedtuple
from typing import Dict, List, DefaultDict, Optional, Tuple
from adaptiveleak.analysis.plot_utils import COLORS, PLOT_STYLE, LINE_WIDTH, MARKER, MARKER_SIZE, to_label
from adaptiveleak.analysis.plot_utils import LEGEND_FONT, AXIS_FONT, PLOT_SIZE, TITLE_FONT
from adaptiveleak.analysis.plot_utils import iterate_policy_folders, dataset_label
from adaptiveleak.utils.constants import POLICIES, SMALL_NUMBER
from adaptiveleak.utils.file_utils import read_json_gz, iterate_dir
THRESHOLD = 0.01
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--folder', type=str, required=True, help='The name of the experiment log directory.')
parser.add_argument('--datasets', type=str, required=True, nargs='+', help='The names of all datasets to analyze.')
args = parser.parse_args()
print('Num Datasets: {0}'.format(len(args.datasets)))
print('==========')
test_results: DefaultDict[str, int] = defaultdict(int)
budget_counts: DefaultDict[str, int] = defaultdict(int)
for dataset in args.datasets:
for folder in iterate_policy_folders([args.folder], dataset=dataset):
for sim_file in iterate_dir(folder, pattern='.*json.gz'):
model = read_json_gz(sim_file)
if model['policy']['encoding_mode'].lower() in ('single_group', 'group_unshifted', 'padded', 'pruned'):
continue
name = '{0}_{1}'.format(model['policy']['policy_name'].lower(), model['policy']['encoding_mode'].lower())
energy_per_seq = model['policy']['energy_per_seq']
p_value = model['mutual_information']['p_value']
num_trials = model['mutual_information']['num_trials']
upper_bound = p_value + 1.96 * (1.0 / (2 * np.sqrt(num_trials)))
test_results[name] += int(upper_bound < THRESHOLD)
budget_counts[name] += 1
policy_names: List[str] = []
policy_values: List[Tuple[int, int]] = []
for name in POLICIES:
encodings = ['standard', 'group'] if name not in ('uniform', 'random') else ['standard']
for encoding in encodings:
policy_name = '{0}_{1}'.format(name, encoding)
if (policy_name not in test_results):
continue
num_nontrivial = test_results[policy_name]
count = budget_counts[policy_name]
policy_names.append(policy_name)
policy_values.append((num_nontrivial, count))
print(' & '.join(policy_names))
print(' & '.join(map(lambda t: '{0} / {1}'.format(t[0], t[1]), policy_values)))
| [
"numpy.sqrt",
"argparse.ArgumentParser",
"adaptiveleak.utils.file_utils.iterate_dir",
"collections.defaultdict",
"adaptiveleak.analysis.plot_utils.iterate_policy_folders",
"adaptiveleak.utils.file_utils.read_json_gz"
] | [((692, 708), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (706, 708), False, 'from argparse import ArgumentParser\n'), ((1097, 1113), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1108, 1113), False, 'from collections import Counter, defaultdict, OrderedDict, namedtuple\n'), ((1157, 1173), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1168, 1173), False, 'from collections import Counter, defaultdict, OrderedDict, namedtuple\n'), ((1231, 1285), 'adaptiveleak.analysis.plot_utils.iterate_policy_folders', 'iterate_policy_folders', (['[args.folder]'], {'dataset': 'dataset'}), '([args.folder], dataset=dataset)\n', (1253, 1285), False, 'from adaptiveleak.analysis.plot_utils import iterate_policy_folders, dataset_label\n'), ((1315, 1355), 'adaptiveleak.utils.file_utils.iterate_dir', 'iterate_dir', (['folder'], {'pattern': '""".*json.gz"""'}), "(folder, pattern='.*json.gz')\n", (1326, 1355), False, 'from adaptiveleak.utils.file_utils import read_json_gz, iterate_dir\n'), ((1381, 1403), 'adaptiveleak.utils.file_utils.read_json_gz', 'read_json_gz', (['sim_file'], {}), '(sim_file)\n', (1393, 1403), False, 'from adaptiveleak.utils.file_utils import read_json_gz, iterate_dir\n'), ((1941, 1960), 'numpy.sqrt', 'np.sqrt', (['num_trials'], {}), '(num_trials)\n', (1948, 1960), True, 'import numpy as np\n')] |
'''_____Standard imports_____'''
import numpy as np
import json
def load_data(dir):
data = []
file = open(dir,'r')
for line in file:
data.append(float(line))
return data
def load_Bscan_spectra(file_dir, block_start=276, block_end = 632084, shape1=617, shape2=1024):
data = np.fromfile(file_dir, dtype = np.uint16)
block_end = block_start + 1024*1024
block_data = data[block_start: block_end]
Bscan_spectra = block_data.reshape([1024,1024])
return Bscan_spectra
def load_calibration(dir=None):
if dir is None:
dir = "calibration/calibration_parameters.json"
with open(dir) as json_file:
calibration = json.load(json_file)
return calibration
| [
"json.load",
"numpy.fromfile"
] | [((307, 345), 'numpy.fromfile', 'np.fromfile', (['file_dir'], {'dtype': 'np.uint16'}), '(file_dir, dtype=np.uint16)\n', (318, 345), True, 'import numpy as np\n'), ((680, 700), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (689, 700), False, 'import json\n')] |
import numpy as np
import sys
import pandas as pd
from pathlib import Path
import matplotlib as mpl
from matplotlib import pyplot as plt
import stat_tools as st
from datetime import datetime
from scipy import ndimage
from scipy.optimize import minimize
from scipy.optimize import least_squares
import ephem
import configparser as cfg
import yaml
import camcoord
# globals
pi2=np.pi/2.
# Read observed Moon position output from find_moon.py and optimize
# camera parameters by minimizing the mean square angular distance to
# the predicted location from ephem.Moon
# Initial parameters are read from camera_cal_bnl.yaml
#####params: nx0,cy,cx,rotation,beta,azm,c1,c2,c3
# ny0=nx0 are the y and x size of the region of interest 'roi', assumed square
# cy,cx is the central pixel of 'roi'
# rotation is the deviation from North in radian, positive to East.
def meansqdist(x):
# function to be minimized mean square of angular distance between measured
# and predicted moon positions
# vars = string array of parameters to vary in x
# start with just varying cam.rot
cam.pr0=x[0]
cam.cy=x[1]
cam.cx=x[2]
cam.rot=x[3]
cam.beta=x[4]
cam.azm=x[5]
cam.c1=x[6]
cam.c2=x[7]
cam.c3=(0.5-(cam.c1*pi2+cam.c2*pi2**3))/pi2**5
cam.azizen()
ix=(dfref.xmoon+0.5).astype(int)
iy=(dfref.ymoon+0.5).astype(int)
otheta=cam.theta0[iy,ix]
ophi=cam.phi0[iy,ix]
ptheta=[]
pphi=[]
for edate in dfref.ephemDate:
obs.date=edate
moon.compute(obs)
ptheta.append(np.pi/2.-moon.alt)
pphi.append((moon.az-np.pi)%(2*np.pi))
ptheta=np.array(ptheta)
pphi=np.array(pphi)
return np.mean(camcoord.great_circle_distance(otheta,ophi,ptheta,pphi)**2)
def dist(x):
# function returns residuals to be
# minimized by least-squares angular distance between measured
# and predicted moon positions
# vars = string array of parameters to vary in x
# start with just varying cam.rot
cam.pr0=x[0]
cam.cy=x[1]
cam.cx=x[2]
cam.rot=x[3]
cam.beta=x[4]
cam.azm=x[5]
cam.c1=x[6]
cam.c2=x[7]
if constrained_c3:
cam.c3=(0.5-(cam.c1*pi2+cam.c2*pi2**3))/pi2**5
else:
cam.c3=x[8]
cam.azizen()
ix=(dfref.xmoon+0.5).astype(int)
iy=(dfref.ymoon+0.5).astype(int)
otheta=cam.theta0[iy,ix]
ophi=cam.phi0[iy,ix]
try:
exist=(len(ptheta) == len(otheta))
except:
exist=False
if not exist:
ptheta=[]
pphi=[]
for edate in dfref.ephemDate:
obs.date=edate
moon.compute(obs)
ptheta.append(np.pi/2.-moon.alt)
pphi.append((moon.az-np.pi)%(2*np.pi))
ptheta=np.array(ptheta)
pphi=np.array(pphi)
return camcoord.great_circle_distance(otheta,ophi,ptheta,pphi)
if __name__ == "__main__":
######load the configuration file
config_file = sys.argv[1] if len(sys.argv) >= 2 else 'camera_calibration.conf'
config = cfg.ConfigParser()
config.read(config_file)
if len(sys.argv) >= 3:
cameraIDs = sys.argv[2:]
else:
cameraIDs = eval(config['camera']['cameraIDs'])
imagepath = config['path']['imagepath']
outpath = config['path']['outpath']
moon_obs_ext = config['path']['moon_obs_ext']
camera_cal_file_list = config['path']['camera_cal_file_list']
camera_cal_file_optimized = config['path']['camera_cal_file_optimized']
begin_date=ephem.date(config['optimization']['begin_date'])
end_date=ephem.date(config['optimization']['end_date'])
constrained_c3=eval(config['optimization']['constrained_c3'])
sitelat = float(config['geolocation']['lat'])
sitelon = float(config['geolocation']['lon'])
# big outer loop
for cameraID in cameraIDs:
# initial camera parameters from camera_cal_file_optimized
# which should be a copy of camera_cal_file
cam=camcoord.camera(cameraID,camera_cal_file=camera_cal_file_optimized)
# calculate azi and zen for each pixel, to optimize vary camera parameters
# rot,cx,cy,nr0,beta,azm,c1,c2, and optionally c3
# (if constrained_c3 is False) and recalculate.
cam.azizen()
nx0=ny0=cam.nx0
# pr0 is nx0/2, i.e. initial radius estimate.
pr0=cam.pr0
# obs is an ephem observer object
obs = ephem.Observer();
# lat, lon are the only parameters in config file specified in deg.
# ephem and numpy use radian
obs.lat = np.deg2rad(cam.lat)
obs.lon = np.deg2rad(cam.lon)
# moon is an ephem moon object
moon=ephem.Moon()
#
# read moon position data from find_moon_all output .csv files between
# begin_date and end_date
moonobsdir=Path(outpath,cam.camID)
moonobsfiles=sorted(moonobsdir.glob("*"+moon_obs_ext))
if len(moonobsfiles) < 1:
print("no moon obs found in {}, run find_moon_all.py first".format(moonobsdir))
exit(1)
for moonobsfile in moonobsfiles:
datestr=moonobsfile.name.split('_')
d1,d2=datestr[1].split('-')
d1=ephem.date(datetime.strptime(d1,'%Y%m%d').strftime('%Y-%m-%d'))
d2=ephem.date(datetime.strptime(d2,'%Y%m%d').strftime('%Y-%m-%d'))
if d1>end_date or d2<begin_date:
continue
# read into pandas DataFrame
dfrefi=pd.read_csv(moonobsfile,index_col=0)
# only use good points
# i.e. roundness check and minimum radius 3 pixels.
good=((dfrefi.flag & 512) == 0) & (dfrefi.rmoon > 3.)
dfrefi=dfrefi[good]
if len(dfrefi) < 1:
print("no good data points, skipping "+moonobsfile.name)
continue
# now concatenate
try:
print('{}: Adding {} records to {}'.format(moonobsfile.name,len(dfrefi),len(dfref)))
dfref=dfref.append(dfrefi)
except:
dfref=dfrefi
# now minimize mean square distance, second by varying the 3 camera orientation
# angles
# constraint optimization pr0,cy,cx should not vary by more than 1%
ptol=0.99
# rot should be small -0.5,0.5 (about +-30 deg)
# beta is the tilt and should be small positive 0,0.2 (11 deg)
# azm is -pi,pi
# c1, c2, c3 I am not sure off, but they cannot be 0.
if constrained_c3:
x0=np.array([cam.pr0,cam.cy,cam.cx,cam.rot,cam.beta,cam.azm,cam.c1,cam.c2])
bounds=([cam.pr0*ptol,cam.cy*ptol,cam.cx*ptol,-np.pi,0.,-np.pi,-1.,-1.],
[cam.pr0/ptol,cam.cy/ptol,cam.cx/ptol,np.pi,0.2,np.pi,1.,1.])
else:
x0=np.array([cam.pr0,cam.cy,cam.cx,cam.rot,cam.beta,cam.azm,cam.c1,cam.c2,cam.c3])
bounds=([cam.pr0*ptol,cam.cy*ptol,cam.cx*ptol,-np.pi,0.,-np.pi,-1.,-1.,-1.],
[cam.pr0/ptol,cam.cy/ptol,cam.cx/ptol,np.pi,0.2,np.pi,1.,1.,1.])
#
print('before',x0)
print('with bounds',bounds)
# res=minimize(meansqdist,x0,method='BFGS',options={'disp': True})
res=least_squares(dist,x0,verbose=2,bounds=bounds)
print('after',res.x)
if constrained_c3:
# append the constrained c3
c1=res.x[6]
c2=res.x[7]
res.x=np.append(res.x,(0.5-(c1*pi2+c2*pi2**3))/pi2**5)
# Note: res.x is type float64, which is not one of the canonical
# guessable data-types and hence yaml.dump adds additional "!!" tags
# to fully describe the dumped object, which is ugly.
# converting to float appears to resolve this make sure that's done in
# these "*_to_yaml" methods.
cam.save_dict_to_yaml(res.x[:],camera_cal_file_optimized)
cam.save_list_to_yaml(res.x[:],camera_cal_file_list)
| [
"camcoord.camera",
"ephem.Observer",
"ephem.date",
"configparser.ConfigParser",
"pathlib.Path",
"scipy.optimize.least_squares",
"pandas.read_csv",
"datetime.datetime.strptime",
"ephem.Moon",
"numpy.append",
"numpy.array",
"numpy.deg2rad",
"camcoord.great_circle_distance"
] | [((1619, 1635), 'numpy.array', 'np.array', (['ptheta'], {}), '(ptheta)\n', (1627, 1635), True, 'import numpy as np\n'), ((1645, 1659), 'numpy.array', 'np.array', (['pphi'], {}), '(pphi)\n', (1653, 1659), True, 'import numpy as np\n'), ((2769, 2827), 'camcoord.great_circle_distance', 'camcoord.great_circle_distance', (['otheta', 'ophi', 'ptheta', 'pphi'], {}), '(otheta, ophi, ptheta, pphi)\n', (2799, 2827), False, 'import camcoord\n'), ((2990, 3008), 'configparser.ConfigParser', 'cfg.ConfigParser', ([], {}), '()\n', (3006, 3008), True, 'import configparser as cfg\n'), ((3457, 3505), 'ephem.date', 'ephem.date', (["config['optimization']['begin_date']"], {}), "(config['optimization']['begin_date'])\n", (3467, 3505), False, 'import ephem\n'), ((3519, 3565), 'ephem.date', 'ephem.date', (["config['optimization']['end_date']"], {}), "(config['optimization']['end_date'])\n", (3529, 3565), False, 'import ephem\n'), ((2713, 2729), 'numpy.array', 'np.array', (['ptheta'], {}), '(ptheta)\n', (2721, 2729), True, 'import numpy as np\n'), ((2743, 2757), 'numpy.array', 'np.array', (['pphi'], {}), '(pphi)\n', (2751, 2757), True, 'import numpy as np\n'), ((3915, 3983), 'camcoord.camera', 'camcoord.camera', (['cameraID'], {'camera_cal_file': 'camera_cal_file_optimized'}), '(cameraID, camera_cal_file=camera_cal_file_optimized)\n', (3930, 3983), False, 'import camcoord\n'), ((4347, 4363), 'ephem.Observer', 'ephem.Observer', ([], {}), '()\n', (4361, 4363), False, 'import ephem\n'), ((4496, 4515), 'numpy.deg2rad', 'np.deg2rad', (['cam.lat'], {}), '(cam.lat)\n', (4506, 4515), True, 'import numpy as np\n'), ((4534, 4553), 'numpy.deg2rad', 'np.deg2rad', (['cam.lon'], {}), '(cam.lon)\n', (4544, 4553), True, 'import numpy as np\n'), ((4606, 4618), 'ephem.Moon', 'ephem.Moon', ([], {}), '()\n', (4616, 4618), False, 'import ephem\n'), ((4749, 4773), 'pathlib.Path', 'Path', (['outpath', 'cam.camID'], {}), '(outpath, cam.camID)\n', (4753, 4773), False, 'from pathlib import Path\n'), ((7136, 
7185), 'scipy.optimize.least_squares', 'least_squares', (['dist', 'x0'], {'verbose': '(2)', 'bounds': 'bounds'}), '(dist, x0, verbose=2, bounds=bounds)\n', (7149, 7185), False, 'from scipy.optimize import least_squares\n'), ((1679, 1737), 'camcoord.great_circle_distance', 'camcoord.great_circle_distance', (['otheta', 'ophi', 'ptheta', 'pphi'], {}), '(otheta, ophi, ptheta, pphi)\n', (1709, 1737), False, 'import camcoord\n'), ((5399, 5436), 'pandas.read_csv', 'pd.read_csv', (['moonobsfile'], {'index_col': '(0)'}), '(moonobsfile, index_col=0)\n', (5410, 5436), True, 'import pandas as pd\n'), ((6450, 6529), 'numpy.array', 'np.array', (['[cam.pr0, cam.cy, cam.cx, cam.rot, cam.beta, cam.azm, cam.c1, cam.c2]'], {}), '([cam.pr0, cam.cy, cam.cx, cam.rot, cam.beta, cam.azm, cam.c1, cam.c2])\n', (6458, 6529), True, 'import numpy as np\n'), ((6719, 6811), 'numpy.array', 'np.array', (['[cam.pr0, cam.cy, cam.cx, cam.rot, cam.beta, cam.azm, cam.c1, cam.c2, cam.c3]'], {}), '([cam.pr0, cam.cy, cam.cx, cam.rot, cam.beta, cam.azm, cam.c1, cam.\n c2, cam.c3])\n', (6727, 6811), True, 'import numpy as np\n'), ((7341, 7404), 'numpy.append', 'np.append', (['res.x', '((0.5 - (c1 * pi2 + c2 * pi2 ** 3)) / pi2 ** 5)'], {}), '(res.x, (0.5 - (c1 * pi2 + c2 * pi2 ** 3)) / pi2 ** 5)\n', (7350, 7404), True, 'import numpy as np\n'), ((5137, 5168), 'datetime.datetime.strptime', 'datetime.strptime', (['d1', '"""%Y%m%d"""'], {}), "(d1, '%Y%m%d')\n", (5154, 5168), False, 'from datetime import datetime\n'), ((5216, 5247), 'datetime.datetime.strptime', 'datetime.strptime', (['d2', '"""%Y%m%d"""'], {}), "(d2, '%Y%m%d')\n", (5233, 5247), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 11 16:51:10 2016
@author: Derek
"""
'''This is the final that will perform analyisis on the models
Specifically, it will be performing "black box" testing.
The main components of the file:
Loading the models
In the case of the BayesNet, we defer to analysis.jl
For the DNN, we can load the model
For the LSTMs, it would be preferred to just load the model, but I have
been having major issues with TensorFlow, because it like cannot save the
variables correctly for some reason / it doesnt load them correctly.
I tried simply renaming the variables, and resetting their values,
but apparently there is more to it than that
Thus, we have to retrain the LSTMs for this purpose.
This is exactly the same as the real training, and the only downside
is (a pretty major one) that this analysis takes much longer.
Regardless, the same outcome should be present, just I must select
only a few testing situations to analyze.
Selecting the testing situations
This is done by hand, based on a visual inspection of the results, seeing which
make the least sense or otherwise are interesting.
At the moment, I am leaning towards analyzing only a few intersections but all the feature sets
Doing the stuff and the things
Basically, because all of the non-BN models normalized the inputs,
its very easy to compare the effectuve weights.
All that is done, is from a baseline input [0]* num_inputs
I iterate over each feature and vary it in range(-1,1,0.05)
Recording the probability distribution that is output.
From the outputs of the above, I will plot the sensitivity of each of the inputs,
and I hypothesize velocity will be the most sensitive, with headway mostly ignored.
'''
import os
import sys
sys.path.append(os.environ["INTENTPRED_PATH"])
from utils import LSTM
from sklearn.externals import joblib
from sklearn import svm
from sklearn import linear_model
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import tensorflow as tf
import tensorflow.contrib.learn as skflow
from utils import constants as c
from utils import data_util as du
import time
import numpy as np
#creates the set of data to test sensitivity of inputs
def createAnalysisTestData(numFeatures, traj_len=1):
base = [0.0]*numFeatures
Xtest = np.array([base]*traj_len)
Xtest = Xtest.reshape(1,traj_len,numFeatures)
Y = 0
Ytest = np.array([Y]*traj_len)
Ytest = Ytest.reshape(1,traj_len,1)
for i in range(numFeatures):
for val in [round(-1.0 + 0.05*x,2) for x in range(int(205/5))]:
this_features = [0.0]*numFeatures
this_features[i] = val
this_entry = np.array([this_features]*traj_len)
this_entry = this_entry.reshape(1,traj_len,numFeatures)
Xtest = np.vstack((Xtest, this_entry))
this_y = np.array([0]*traj_len)
this_y = this_y.reshape(1,traj_len,1)
Ytest = np.vstack((Ytest, this_y))
print(Xtest.shape)
print(Ytest.shape)
return Xtest, Ytest
#this function is given the model, well due to the load issues, just the intersection and feature sets
#test_inters is a list, like [1] or [1,2]
#testtype is a string like "001"
def analyze_model(test_inters, testtype, model):
path_to_load = c.PATH_TO_RESULTS + "ByIntersection" + os.sep
load_folder = path_to_load + testtype + os.sep
save_folder = load_folder + "TestOn" + ",".join([str(i) for i in test_inters]) + os.sep
Ypred = None
if "LSTM" in model:
Xtrain, Ytrain = du.getFeaturesLSTM(load_folder, testtype, list({1,2,3,4,5,6,7,8,9}-set(test_inters)))
#Xtest, Ytest = du.getFeaturesLSTM(load_folder, testtype, test_inters)
means, stddevs = du.normalize_get_params(Xtrain)
Xtrain = du.normalize(Xtrain, means, stddevs)
numFeatures = Xtrain.shape[2]
Xtest, Ytest = createAnalysisTestData(numFeatures, traj_len=Xtrain.shape[1])
#train the LSTM again
Ypred, timeFit, timePred, all_tests_x, all_tests_y = LSTM.run_LSTM((Xtrain,Ytrain), (Xtest, Ytest), model=model, save_path="ignore.out")
else:
Xtrain, Ytrain = du.getFeaturesnonLSTM(load_folder, testtype, list({1,2,3,4,5,6,7,8,9}-set(test_inters)))
#Xtest, Ytest = du.getFeaturesnonLSTM(load_folder, testtype, test_inters)
means, stddevs = du.normalize_get_params(Xtrain)
Xtrain = du.normalize(Xtrain, means, stddevs)
numFeatures = Xtrain.shape[1]
Xtest, _ = createAnalysisTestData(numFeatures, traj_len=1)
classifier = skflow.DNNClassifier(
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(Xtrain),
hidden_units = [128,128], n_classes=3)#, model_dir=save_folder)
#try:
# Ypred = classifier.predict_proba(Xtest)
#except:
print("Could not load saved model, re-training :(.")
Ytrain = [int(i-1) for i in Ytrain]
start = time.clock()
max_epochs = 10
if max_epochs:
start2 = time.clock()
for epoch in range(max_epochs):
classifier.fit(Xtrain, Ytrain, steps=1000)
end2 = time.clock()
print("Epoch",epoch,"Done. Took:", end2-start2)
start2 = end2
else:
classifier.fit(Xtrain, Ytrain)#, logdir=log_path)
Ypred = classifier.predict_proba(Xtest)
end = time.clock()
timeFit = end - start
print("Done fitting, time spent:", timeFit)
np.savetxt(save_folder + "analysis_Ypred_" + model, np.array(Ypred))
print(model, "analysis predictions saved, test", testtype, save_folder,"analysis_Ypred_", model)
return Ypred
def doTheThings(models=["LSTM_128x2","LSTM_128x3","LSTM_256x2"]):
for intersection in [3,7]:
for testtype in ["000","001","010","011","100"]:
for model in models:
analyze_model([intersection],testtype,model)
features_test = {
"000":9,"001":65,"010":45,"011":101,"100":13,"111":103
}
def doAnalysisThings(models, testtypes, testinters, opts):
score_folder = os.getcwd()+os.sep+"results"+os.sep+"ByIntersection"+os.sep
for intersect in testinters:
print("=".join(["="]*40))
print("Intersection",intersect)
for testnum in testtypes:
print("-".join(["-"]*40))
print("Testnum ", testnum)
numfeatures = features_test[testnum]
for model in models:
filepath = score_folder + str(testnum) + os.sep + "TestOn" + str(intersect) + os.sep + "analysis_Ypred_" + model
analysis_stuff = np.loadtxt(filepath)
#X, Y = createAnalysisTestData(numfeatures)
impact_per_feature = [0] * numfeatures
this_feature = 0
if model != "BN":
if "LSTM" in model:
traj_len = 20
analysis_stuff = analysis_stuff[:,1:]
analysis_stuff = analysis_stuff[0::(traj_len-1),:]
else:
analysis_stuff = analysis_stuff[0::numfeatures,:]
for row in range(1,len(analysis_stuff-1)):
if row % 41 == 0: #len(list(range(int(205/5))))
impact_per_feature[this_feature] /= 41
impact_per_feature[this_feature] *=numfeatures
this_feature += 1
continue
impact = abs(analysis_stuff[row,1] - analysis_stuff[row+1,1]) + abs(analysis_stuff[row,2] - analysis_stuff[row+1,2])
impact_per_feature[this_feature] += impact
print(model, " & ", " & ".join([str(i)[:6] for i in impact_per_feature]))
models = ["DNN","LSTM_128x2","LSTM_128x3","LSTM_256x2"]
testtypes = ["000","100"]
testintersections = [3,7]
options = None
models = ["LSTM_128x2"]
testtypes = ["111"]
testintersections = [1]
doAnalysisThings(models, testtypes, testintersections, options)
| [
"time.clock",
"utils.data_util.normalize_get_params",
"tensorflow.contrib.learn.infer_real_valued_columns_from_input",
"utils.LSTM.run_LSTM",
"os.getcwd",
"numpy.array",
"numpy.vstack",
"numpy.loadtxt",
"sys.path.append",
"utils.data_util.normalize"
] | [((2076, 2122), 'sys.path.append', 'sys.path.append', (["os.environ['INTENTPRED_PATH']"], {}), "(os.environ['INTENTPRED_PATH'])\n", (2091, 2122), False, 'import sys\n'), ((2685, 2712), 'numpy.array', 'np.array', (['([base] * traj_len)'], {}), '([base] * traj_len)\n', (2693, 2712), True, 'import numpy as np\n'), ((2783, 2807), 'numpy.array', 'np.array', (['([Y] * traj_len)'], {}), '([Y] * traj_len)\n', (2791, 2807), True, 'import numpy as np\n'), ((4127, 4158), 'utils.data_util.normalize_get_params', 'du.normalize_get_params', (['Xtrain'], {}), '(Xtrain)\n', (4150, 4158), True, 'from utils import data_util as du\n'), ((4176, 4212), 'utils.data_util.normalize', 'du.normalize', (['Xtrain', 'means', 'stddevs'], {}), '(Xtrain, means, stddevs)\n', (4188, 4212), True, 'from utils import data_util as du\n'), ((4427, 4516), 'utils.LSTM.run_LSTM', 'LSTM.run_LSTM', (['(Xtrain, Ytrain)', '(Xtest, Ytest)'], {'model': 'model', 'save_path': '"""ignore.out"""'}), "((Xtrain, Ytrain), (Xtest, Ytest), model=model, save_path=\n 'ignore.out')\n", (4440, 4516), False, 'from utils import LSTM\n'), ((4742, 4773), 'utils.data_util.normalize_get_params', 'du.normalize_get_params', (['Xtrain'], {}), '(Xtrain)\n', (4765, 4773), True, 'from utils import data_util as du\n'), ((4791, 4827), 'utils.data_util.normalize', 'du.normalize', (['Xtrain', 'means', 'stddevs'], {}), '(Xtrain, means, stddevs)\n', (4803, 4827), True, 'from utils import data_util as du\n'), ((5350, 5362), 'time.clock', 'time.clock', ([], {}), '()\n', (5360, 5362), False, 'import time\n'), ((5815, 5827), 'time.clock', 'time.clock', ([], {}), '()\n', (5825, 5827), False, 'import time\n'), ((5963, 5978), 'numpy.array', 'np.array', (['Ypred'], {}), '(Ypred)\n', (5971, 5978), True, 'import numpy as np\n'), ((3057, 3093), 'numpy.array', 'np.array', (['([this_features] * traj_len)'], {}), '([this_features] * traj_len)\n', (3065, 3093), True, 'import numpy as np\n'), ((3180, 3210), 'numpy.vstack', 'np.vstack', (['(Xtest, 
this_entry)'], {}), '((Xtest, this_entry))\n', (3189, 3210), True, 'import numpy as np\n'), ((3232, 3256), 'numpy.array', 'np.array', (['([0] * traj_len)'], {}), '([0] * traj_len)\n', (3240, 3256), True, 'import numpy as np\n'), ((3325, 3351), 'numpy.vstack', 'np.vstack', (['(Ytest, this_y)'], {}), '((Ytest, this_y))\n', (3334, 3351), True, 'import numpy as np\n'), ((5431, 5443), 'time.clock', 'time.clock', ([], {}), '()\n', (5441, 5443), False, 'import time\n'), ((5006, 5067), 'tensorflow.contrib.learn.infer_real_valued_columns_from_input', 'tf.contrib.learn.infer_real_valued_columns_from_input', (['Xtrain'], {}), '(Xtrain)\n', (5059, 5067), True, 'import tensorflow as tf\n'), ((5570, 5582), 'time.clock', 'time.clock', ([], {}), '()\n', (5580, 5582), False, 'import time\n'), ((7050, 7070), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {}), '(filepath)\n', (7060, 7070), True, 'import numpy as np\n'), ((6527, 6538), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6536, 6538), False, 'import os\n')] |
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import numpy as np
import pandas as pd
from d3m import container, utils, exceptions
from d3m.base import utils as d3m_base_utils
from d3m.metadata import base as metadata_base, hyperparams
from d3m.primitive_interfaces import base, transformer
import version
__all__ = ("VerticalConcatenationPrimitive",)
logger = logging.getLogger(__name__)
class Hyperparams(hyperparams.Hyperparams):
remove_duplicate_rows = hyperparams.Hyperparameter[bool](
default=True,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If there are two rows with the same d3mIndex, one is retained",
)
column_overlap = hyperparams.Enumeration[str](
default="union",
values=("union", "exact", "intersection"),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The logic to concat two dataframes.",
)
class VerticalConcatenationPrimitive(
transformer.TransformerPrimitiveBase[container.List, container.Dataset, Hyperparams]
):
"""
A primitive to encapsulate the functionality of pandas.concat.
"""
metadata = metadata_base.PrimitiveMetadata(
{
"id": "b93e3e85-c462-4290-8131-abc51d76a6dd",
"version": version.__version__,
"name": "DistilVerticalConcat",
"python_path": "d3m.primitives.data_transformation.concat.DistilVerticalConcat",
"source": {
"name": "Distil",
"contact": "mailto:<EMAIL>",
"uris": [
"https://github.com/uncharted-distil/distil-primitives-contrib/blob/main/main/distil_primitives_contrib/concat.py",
"https://github.com/uncharted-distil/distil-primitives-contrib",
],
},
"installation": [
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/uncharted-distil/distil-primitives-contrib.git@{git_commit}#egg=distil-primitives-contrib".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
],
"algorithm_types": [
metadata_base.PrimitiveAlgorithmType.ARRAY_CONCATENATION,
],
"primitive_family": metadata_base.PrimitiveFamily.DATA_TRANSFORMATION,
},
)
def produce(
self, *, inputs: container.List, timeout: float = None, iterations: int = None
) -> base.CallResult[container.Dataset]:
# build the list of dataframes from the list of inputs
dataframes = []
metadata = None
for input in inputs:
if isinstance(input, container.DataFrame):
dataframes.append(input)
try:
_, main_dr = d3m_base_utils.get_tabular_resource(input, None)
dataframes.append(main_dr)
metadata = input.metadata
except ValueError as error:
raise exceptions.InvalidArgumentValueError(
"Failure to find tabular resource in dataset"
) from error
if self.hyperparams["column_overlap"] == "exact":
columns_to_handle = dataframes[0].columns
if np.sum(
np.array([np.all(df.columns == columns_to_handle) for df in dataframes])
) != len(dataframes):
raise exceptions.InvalidArgumentValueError(
"Dataframes don't have same columns, cannot exact concat"
)
concated = pd.concat(dataframes, ignore_index=True)
elif self.hyperparams["column_overlap"] == "union":
concated = pd.concat(dataframes, ignore_index=True)
elif self.hyperparams["column_overlap"] == "intersection":
concated = pd.concat(dataframes, join="inner", ignore_index=True)
if self.hyperparams["remove_duplicate_rows"]:
concated.drop_duplicates(
subset="d3mIndex", keep="first", inplace=True, ignore_index=True
)
if metadata is None:
metadata = container.Dataset(
{"learningData": concated.head(1)}, generate_metadata=True
).metadata
outputs = container.Dataset({"learningData": concated}, metadata)
outputs.metadata = outputs.metadata.update(
(metadata_base.ALL_ELEMENTS,), {"dimension": {"length": concated.shape[0]}}
)
return base.CallResult(outputs)
| [
"logging.getLogger",
"d3m.primitive_interfaces.base.CallResult",
"d3m.exceptions.InvalidArgumentValueError",
"d3m.container.Dataset",
"d3m.base.utils.get_tabular_resource",
"os.path.dirname",
"numpy.all",
"pandas.concat"
] | [((957, 984), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (974, 984), False, 'import logging\n'), ((5054, 5109), 'd3m.container.Dataset', 'container.Dataset', (["{'learningData': concated}", 'metadata'], {}), "({'learningData': concated}, metadata)\n", (5071, 5109), False, 'from d3m import container, utils, exceptions\n'), ((5276, 5300), 'd3m.primitive_interfaces.base.CallResult', 'base.CallResult', (['outputs'], {}), '(outputs)\n', (5291, 5300), False, 'from d3m.primitive_interfaces import base, transformer\n'), ((4368, 4408), 'pandas.concat', 'pd.concat', (['dataframes'], {'ignore_index': '(True)'}), '(dataframes, ignore_index=True)\n', (4377, 4408), True, 'import pandas as pd\n'), ((3601, 3649), 'd3m.base.utils.get_tabular_resource', 'd3m_base_utils.get_tabular_resource', (['input', 'None'], {}), '(input, None)\n', (3636, 3649), True, 'from d3m.base import utils as d3m_base_utils\n'), ((4211, 4311), 'd3m.exceptions.InvalidArgumentValueError', 'exceptions.InvalidArgumentValueError', (['"""Dataframes don\'t have same columns, cannot exact concat"""'], {}), '(\n "Dataframes don\'t have same columns, cannot exact concat")\n', (4247, 4311), False, 'from d3m import container, utils, exceptions\n'), ((4492, 4532), 'pandas.concat', 'pd.concat', (['dataframes'], {'ignore_index': '(True)'}), '(dataframes, ignore_index=True)\n', (4501, 4532), True, 'import pandas as pd\n'), ((3797, 3885), 'd3m.exceptions.InvalidArgumentValueError', 'exceptions.InvalidArgumentValueError', (['"""Failure to find tabular resource in dataset"""'], {}), "(\n 'Failure to find tabular resource in dataset')\n", (3833, 3885), False, 'from d3m import container, utils, exceptions\n'), ((4623, 4677), 'pandas.concat', 'pd.concat', (['dataframes'], {'join': '"""inner"""', 'ignore_index': '(True)'}), "(dataframes, join='inner', ignore_index=True)\n", (4632, 4677), True, 'import pandas as pd\n'), ((4092, 4131), 'numpy.all', 'np.all', (['(df.columns == 
columns_to_handle)'], {}), '(df.columns == columns_to_handle)\n', (4098, 4131), True, 'import numpy as np\n'), ((2862, 2887), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2877, 2887), False, 'import os\n')] |
#
# Conversion utilities for sound files for use with mimi speach to text
#
# This is the robots ears and voice
#
# we go from 44KHZ stereo to 16KHZ mono
#
#
import sys
import os
import wave
from sound import *
import numpy as np
from pylab import *
import struct
import scipy.signal
import numpy as np
from pydub import AudioSegment as am
# make mono for left channel
def stereo2monoral(sig):
monoral = []
for i in range(0,len(sig), 2):
monoral.append(sig[i])
return np.array(monoral)
# make mono from average l and r
def stereo2monoave(sig):
monorave = []
for i in range(0,len(sig), 2):
try:
monorave.append((sig[i]+sig[i+1])/2)
except:
monorave.append(sig[i])
return np.array(monorave)
# make mono from average l and r and deepen
def stereo2monorave2(sig):
monorave = []
for i in range(0,len(sig), 2):
try:
monorave.append(np.sqrt(((sig[i]*sig[i])+(sig[i+1]*sig[i+1])))/2)
except:
monorave.append(sig[i])
return np.array(monorave)
# make mono from average l and r and clean it up
def stereo2monorave3(sig):
monorave = []
for i in range(0,len(sig), 2):
try:
monorave.append((1/(1+np.exp((sig[i+1])*(sig[i])/2))*255)&255)
except:
monorave.append(sig[i])
return np.array(monorave)
# make mono from average l and r and clean it up
def stereo2monorave4(sig):
monorave = []
for i in range(0,len(sig), 2):
try:
monorave.append((1/(1+~np.exp((sig[i+1])*(sig[i])/2))*127)&255)
except:
monorave.append(sig[i])
return np.array(monorave)
# make mono from right channel only
def stereo2monorar(sig):
monorar = []
for i in range(0,len(sig), 2):
monorar.append(sig[i+1])
return np.array(monorar)
# open sound file
def openFile(filename, printInfo=1):
wf = wave.open(filename , "r" )
fs = wf.getframerate() # Sampling frequency
x = wf.readframes(wf.getnframes())
x = frombuffer(x, dtype= "int16") / 32768.0 # -1 - +1Normalized to
if printInfo == 1:
printWaveInfo(wf)
wf.close()
return x, fs, wf.getnchannels(), wf.getnframes()
# close sound file
def saveFile(data, fs, bit, filename, channel=1):
print("channel", channel)
data = [int(v * 32767.0) for v in data]
data = struct.pack("h" * len(data), *data)
w = wave.Wave_write(filename + ".wav")
w.setnchannels(channel)
w.setsampwidth(int(bit/8))
w.setframerate(fs)
w.writeframes(data)
w.close()
# print wave file information
def printWaveInfo(wf):
"""WAVE Get file information"""
print ("Number of channels:", wf.getnchannels())
print ("Sample width:", wf.getsampwidth())
print ("Sampling frequency:", wf.getframerate())
print ("Number of frames:", wf.getnframes())
print ("Parameters:", wf.getparams())
print ("Length (seconds):", float(wf.getnframes()) / wf.getframerate())
# normqalise sound file
def nomalize(x, xmax, xmin, a):
min = 1 / 32768.0
try:
z = a * (x - xmin) / (xmax - xmin)
except ZeroDivisionError:
z = a * (x - xmin) / min
return z
# pre-emphasis FIR filter
def preEmphasis(signal, p):
"""Emphasis filter"""
# Create FIR filter with coefficients (1.0, -p)
return scipy.signal.lfilter([1.0, -p], 1, signal)
# parent path file utility
def parentpath(path=__file__, f=0):
return str('/'.join(os.path.abspath(path).split('/')[0:-1-f]))
if __name__ == "__main__" :
if (len(sys.argv) - 1) <= 0: # throw exception if no file to process
print("<program> filename")
sys.exit()
mySoundFiles = "/mnt/c/linuxmirror/"
fileNam = mySoundFiles + sys.argv[1] # ------- read in the specified sound file ---------
fileNameOnly = sys.argv[1]
if os.path.isfile(fileNam) == False:
fileNam = fileNam + ".wav"
fileNameOnly = sys.argv[1] + ".wav"
if os.path.isfile(fileNam) == False:
print("invalid file name or path %s" % fileNam)
sys.exit(1)
fileNameSplit = fileNameOnly.split(".") # split the file and extension
filePydubDown = fileNameSplit[0] + "_down." + fileNameSplit[1] # define the extensions for each of the output files for each transformation
outPydubDown = mySoundFiles + filePydubDown
fileMonoLeft = fileNameSplit[0] + "_mono_l"
outMonoLeft = mySoundFiles + fileMonoLeft
fileMonoRight = fileNameSplit[0] + "_mono_r"
outMonoRight = mySoundFiles + fileMonoRight
fileMonoAve = fileNameSplit[0] + "_mono_av"
outMonoAve = mySoundFiles + fileMonoAve
fileMonoEffect = fileNameSplit[0] + "_mono_eff"
outMonoEffect = mySoundFiles + fileMonoEffect
fileMonoDeep = fileNameSplit[0] + "_mono_deep"
outMonoDeep = mySoundFiles + fileMonoDeep
fileMonoClean = fileNameSplit[0] + "_mono_clean"
outMonoClean = mySoundFiles + fileMonoClean
fileMonoInvert = fileNameSplit[0] + "_mono_invert"
outMonoInvert = mySoundFiles + fileMonoInvert
# downsampling to 16000 using pydub library function
sound = am.from_file(fileNam, format='wav', frame_rate=44000)
sound = sound.set_frame_rate(16000)
sound.export(outPydubDown, format='wav')
# opening the sound file specified with python wav library
filedata = openFile(outPydubDown)
sig = filedata[0]
fs = filedata[1]
L = filedata[2]
# left channel to mono
sig1 = stereo2monoral(sig);
saveFile(sig1, fs, 16, outMonoLeft, 1)
# effect the sound (sounds deeper)
sig1 = stereo2monorave2(sig);
saveFile(sig1, fs, 16, outMonoDeep, 1)
# clean up the sound (top and bottom) with a sigmoid function
sig1 = stereo2monorave3(sig);
saveFile(sig1, fs, 16, outMonoClean, 1)
# clean up the sound (top and bottom) with a sigmoid function (changed the bias)
sig1 = stereo2monorave4(sig);
saveFile(sig1, fs, 16, outMonoInvert, 1)
# average left and right channel to mono
sig1 = stereo2monoave(sig);
saveFile(sig1, fs, 16, outMonoAve, 1)
# create an effect (shown for purpose) by taking average of averaged mono signal
sig1 = stereo2monoave(sig1);
saveFile(sig1, fs, 16, outMonoEffect, 1)
# right channel to mono
sig1 = stereo2monorar(sig);
saveFile(sig1, fs, 16, outMonoRight, 1)
| [
"wave.open",
"numpy.sqrt",
"wave.Wave_write",
"os.path.isfile",
"numpy.array",
"pydub.AudioSegment.from_file",
"numpy.exp",
"sys.exit",
"os.path.abspath"
] | [((490, 507), 'numpy.array', 'np.array', (['monoral'], {}), '(monoral)\n', (498, 507), True, 'import numpy as np\n'), ((748, 766), 'numpy.array', 'np.array', (['monorave'], {}), '(monorave)\n', (756, 766), True, 'import numpy as np\n'), ((1050, 1068), 'numpy.array', 'np.array', (['monorave'], {}), '(monorave)\n', (1058, 1068), True, 'import numpy as np\n'), ((1354, 1372), 'numpy.array', 'np.array', (['monorave'], {}), '(monorave)\n', (1362, 1372), True, 'import numpy as np\n'), ((1659, 1677), 'numpy.array', 'np.array', (['monorave'], {}), '(monorave)\n', (1667, 1677), True, 'import numpy as np\n'), ((1848, 1865), 'numpy.array', 'np.array', (['monorar'], {}), '(monorar)\n', (1856, 1865), True, 'import numpy as np\n'), ((1944, 1968), 'wave.open', 'wave.open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1953, 1968), False, 'import wave\n'), ((2514, 2548), 'wave.Wave_write', 'wave.Wave_write', (["(filename + '.wav')"], {}), "(filename + '.wav')\n", (2529, 2548), False, 'import wave\n'), ((5414, 5467), 'pydub.AudioSegment.from_file', 'am.from_file', (['fileNam'], {'format': '"""wav"""', 'frame_rate': '(44000)'}), "(fileNam, format='wav', frame_rate=44000)\n", (5426, 5467), True, 'from pydub import AudioSegment as am\n'), ((3811, 3821), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3819, 3821), False, 'import sys\n'), ((4027, 4050), 'os.path.isfile', 'os.path.isfile', (['fileNam'], {}), '(fileNam)\n', (4041, 4050), False, 'import os\n'), ((4147, 4170), 'os.path.isfile', 'os.path.isfile', (['fileNam'], {}), '(fileNam)\n', (4161, 4170), False, 'import os\n'), ((4246, 4257), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4254, 4257), False, 'import sys\n'), ((933, 983), 'numpy.sqrt', 'np.sqrt', (['(sig[i] * sig[i] + sig[i + 1] * sig[i + 1])'], {}), '(sig[i] * sig[i] + sig[i + 1] * sig[i + 1])\n', (940, 983), True, 'import numpy as np\n'), ((3565, 3586), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (3580, 3586), False, 'import os\n'), ((1246, 
1277), 'numpy.exp', 'np.exp', (['(sig[i + 1] * sig[i] / 2)'], {}), '(sig[i + 1] * sig[i] / 2)\n', (1252, 1277), True, 'import numpy as np\n'), ((1551, 1582), 'numpy.exp', 'np.exp', (['(sig[i + 1] * sig[i] / 2)'], {}), '(sig[i + 1] * sig[i] / 2)\n', (1557, 1582), True, 'import numpy as np\n')] |
#!/usr/bin/python
import numpy as np
import wxmplot.interactive as wi
x = np.arange(0.0,10.0,0.1)
y = np.sin(2*x)/(x+2)
win1 = wi.plot(x, y, title='Window 1', xlabel='X (mm)', win=1)
win2 = wi.plot(x, np.cos(x-4), title='Window 2', xlabel='X (mm)', win=2)
pos = win2.GetPosition()
siz = win1.GetSize()
win2.SetPosition((pos[0]+int(siz[0]*0.8), pos[1]+10))
| [
"wxmplot.interactive.plot",
"numpy.sin",
"numpy.cos",
"numpy.arange"
] | [((75, 100), 'numpy.arange', 'np.arange', (['(0.0)', '(10.0)', '(0.1)'], {}), '(0.0, 10.0, 0.1)\n', (84, 100), True, 'import numpy as np\n'), ((129, 184), 'wxmplot.interactive.plot', 'wi.plot', (['x', 'y'], {'title': '"""Window 1"""', 'xlabel': '"""X (mm)"""', 'win': '(1)'}), "(x, y, title='Window 1', xlabel='X (mm)', win=1)\n", (136, 184), True, 'import wxmplot.interactive as wi\n'), ((103, 116), 'numpy.sin', 'np.sin', (['(2 * x)'], {}), '(2 * x)\n', (109, 116), True, 'import numpy as np\n'), ((204, 217), 'numpy.cos', 'np.cos', (['(x - 4)'], {}), '(x - 4)\n', (210, 217), True, 'import numpy as np\n')] |
# Create your first MLP in Keras
from keras.models import Sequential
from keras.layers import Dense
import numpy, json
# fix random seed for reproducibility
numpy.random.seed(7)
f = open("matlav", "r")
inp = json.loads(f.read())
f = open("matlov", "r")
outp = json.loads(f.read())
# load pima indians dataset
#dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = numpy.array(inp)#dataset[:,0:8]
Y = numpy.array(outp)#dataset[:,8]
# create model
model = Sequential()
model.add(Dense(128, input_dim=254, activation='relu')) #TODO
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='linear'))
# Compile model
model.compile(loss='mean_squared_error', optimizer='adam')
# Fit the model
model.fit(X, Y, epochs=1500, batch_size=10)
predictions = model.predict(X)
# evaluate the model
#scores = model.evaluate(X, Y)
#print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
print(model.predict(X))
model.save_weights("weights.hdf5")
open("model.json", "w+").write(model.to_json())
# Recover model:
# from keras.models import model_from_json
# model = model_from_json(open("model.json", "r").read())
# model.load_weights("weights.hdf5", by_name=False)
| [
"keras.layers.Dense",
"numpy.array",
"numpy.random.seed",
"keras.models.Sequential"
] | [((162, 182), 'numpy.random.seed', 'numpy.random.seed', (['(7)'], {}), '(7)\n', (179, 182), False, 'import numpy, json\n'), ((453, 469), 'numpy.array', 'numpy.array', (['inp'], {}), '(inp)\n', (464, 469), False, 'import numpy, json\n'), ((490, 507), 'numpy.array', 'numpy.array', (['outp'], {}), '(outp)\n', (501, 507), False, 'import numpy, json\n'), ((554, 566), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (564, 566), False, 'from keras.models import Sequential\n'), ((578, 622), 'keras.layers.Dense', 'Dense', (['(128)'], {'input_dim': '(254)', 'activation': '"""relu"""'}), "(128, input_dim=254, activation='relu')\n", (583, 622), False, 'from keras.layers import Dense\n'), ((641, 670), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (646, 670), False, 'from keras.layers import Dense\n'), ((683, 712), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (688, 712), False, 'from keras.layers import Dense\n')] |
import numpy as np
class _BaseMetric:
"""
Abstract metric class
"""
def __init__(self, **kwargs):
allowed_kwargs = {
'factor',
'levelset',
'epsilon',
}
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError('Keyword argument not understood: ', kwarg)
self.factor = kwargs.get('factor', 2)
self.epsilon = kwargs.get('epsilon', 1.e-4)
self.levelset = kwargs.get('levelset')
if self.levelset is None:
raise ValueError('Argument levelset must be given')
self.object_mask = self.levelset < 0
def _evaluate(self, pred, obs):
raise NotImplementedError()
def evaluate(self, pred, obs):
return self._evaluate(pred, obs)
class Fraction(_BaseMetric):
"""
Fraction metric: FAC2, FAC5 etc
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _evaluate(self, pred, obs):
target_area = np.logical_and(obs > 0., self.object_mask )
fraction = np.where(target_area, pred/obs, 0)
count = np.logical_and( fraction >= 1/self.factor, fraction <= self.factor )
factor = np.sum(count) / np.sum(target_area)
return factor
class FB(_BaseMetric):
"""
Best: FB == 0
Bad: FB > 0 under estimateion, FB < 0 over estimation
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _evaluate(self, pred, obs):
pred = np.where(self.object_mask, pred, 0)
obs = np.where(self.object_mask, obs, 0)
return 2 * (np.mean(obs) - np.mean(pred)) / (np.mean(obs) + np.mean(pred))
class NAD(_BaseMetric):
"""
threshold-based normalized absolute difference (NAD)
Best: NAD == 0
Bad: NAD >> 0
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _evaluate(self, pred, obs):
# Firstly, exclude inside objects
pred = pred[self.object_mask]
obs = obs[self.object_mask]
# negative values are considered as zeros
self.valid = np.logical_and(pred > 0, obs > 0)
self.false_positive = np.logical_and(pred > 0, obs <= 0)
self.false_negative = np.logical_and(pred <= 0, obs > 0)
self.zero_zero = np.logical_and(pred <= 0, obs <= 0)
A_F = np.sum(self.false_positive) + np.sum(self.false_negative)
A_OV = np.sum(self.valid) + np.sum(self.zero_zero)
return A_F / (A_F + A_OV)
class MG(_BaseMetric):
"""
Geometric Mean
Best: MG == 1
Bad: MG << 1
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _evaluate(self, pred, obs):
# Firstly, exclude inside objects
pred = np.where(self.object_mask, pred, 0)
obs = np.where(self.object_mask, obs, 0)
pred = np.log(pred, where=pred>0, out=np.zeros_like(pred))
obs = np.log(obs, where=obs>0, out=np.zeros_like(obs))
return np.exp(np.mean(pred) - np.mean(obs))
class VG(_BaseMetric):
"""
Geometric Variance
Best: VG == 1
Bad: VG >> 1
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _evaluate(self, pred, obs):
# Firstly, exclude inside objects
pred = np.where(self.object_mask, pred, 0)
obs = np.where(self.object_mask, obs, 0)
pred = np.log(pred, where=pred>0, out=np.zeros_like(pred))
obs = np.log(obs, where=obs>0, out=np.zeros_like(obs))
return np.exp(np.mean(pred - obs)**2)
def get_metric(name):
METRICS = {
'FAC2': Fraction,
'FAC5': Fraction,
'MG': MG,
'VG': VG,
'NAD': NAD,
'FB': FB,
}
for n, m in METRICS.items():
if n.lower() == name.lower():
return m
raise ValueError(f'metric {name} is not defined')
| [
"numpy.mean",
"numpy.logical_and",
"numpy.where",
"numpy.sum",
"numpy.zeros_like"
] | [((1091, 1134), 'numpy.logical_and', 'np.logical_and', (['(obs > 0.0)', 'self.object_mask'], {}), '(obs > 0.0, self.object_mask)\n', (1105, 1134), True, 'import numpy as np\n'), ((1155, 1191), 'numpy.where', 'np.where', (['target_area', '(pred / obs)', '(0)'], {}), '(target_area, pred / obs, 0)\n', (1163, 1191), True, 'import numpy as np\n'), ((1207, 1275), 'numpy.logical_and', 'np.logical_and', (['(fraction >= 1 / self.factor)', '(fraction <= self.factor)'], {}), '(fraction >= 1 / self.factor, fraction <= self.factor)\n', (1221, 1275), True, 'import numpy as np\n'), ((1602, 1637), 'numpy.where', 'np.where', (['self.object_mask', 'pred', '(0)'], {}), '(self.object_mask, pred, 0)\n', (1610, 1637), True, 'import numpy as np\n'), ((1653, 1687), 'numpy.where', 'np.where', (['self.object_mask', 'obs', '(0)'], {}), '(self.object_mask, obs, 0)\n', (1661, 1687), True, 'import numpy as np\n'), ((2217, 2250), 'numpy.logical_and', 'np.logical_and', (['(pred > 0)', '(obs > 0)'], {}), '(pred > 0, obs > 0)\n', (2231, 2250), True, 'import numpy as np\n'), ((2281, 2315), 'numpy.logical_and', 'np.logical_and', (['(pred > 0)', '(obs <= 0)'], {}), '(pred > 0, obs <= 0)\n', (2295, 2315), True, 'import numpy as np\n'), ((2347, 2381), 'numpy.logical_and', 'np.logical_and', (['(pred <= 0)', '(obs > 0)'], {}), '(pred <= 0, obs > 0)\n', (2361, 2381), True, 'import numpy as np\n'), ((2407, 2442), 'numpy.logical_and', 'np.logical_and', (['(pred <= 0)', '(obs <= 0)'], {}), '(pred <= 0, obs <= 0)\n', (2421, 2442), True, 'import numpy as np\n'), ((2883, 2918), 'numpy.where', 'np.where', (['self.object_mask', 'pred', '(0)'], {}), '(self.object_mask, pred, 0)\n', (2891, 2918), True, 'import numpy as np\n'), ((2934, 2968), 'numpy.where', 'np.where', (['self.object_mask', 'obs', '(0)'], {}), '(self.object_mask, obs, 0)\n', (2942, 2968), True, 'import numpy as np\n'), ((3417, 3452), 'numpy.where', 'np.where', (['self.object_mask', 'pred', '(0)'], {}), '(self.object_mask, pred, 0)\n', (3425, 
3452), True, 'import numpy as np\n'), ((3468, 3502), 'numpy.where', 'np.where', (['self.object_mask', 'obs', '(0)'], {}), '(self.object_mask, obs, 0)\n', (3476, 3502), True, 'import numpy as np\n'), ((1294, 1307), 'numpy.sum', 'np.sum', (['count'], {}), '(count)\n', (1300, 1307), True, 'import numpy as np\n'), ((1310, 1329), 'numpy.sum', 'np.sum', (['target_area'], {}), '(target_area)\n', (1316, 1329), True, 'import numpy as np\n'), ((2466, 2493), 'numpy.sum', 'np.sum', (['self.false_positive'], {}), '(self.false_positive)\n', (2472, 2493), True, 'import numpy as np\n'), ((2496, 2523), 'numpy.sum', 'np.sum', (['self.false_negative'], {}), '(self.false_negative)\n', (2502, 2523), True, 'import numpy as np\n'), ((2539, 2557), 'numpy.sum', 'np.sum', (['self.valid'], {}), '(self.valid)\n', (2545, 2557), True, 'import numpy as np\n'), ((2560, 2582), 'numpy.sum', 'np.sum', (['self.zero_zero'], {}), '(self.zero_zero)\n', (2566, 2582), True, 'import numpy as np\n'), ((1742, 1754), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (1749, 1754), True, 'import numpy as np\n'), ((1757, 1770), 'numpy.mean', 'np.mean', (['pred'], {}), '(pred)\n', (1764, 1770), True, 'import numpy as np\n'), ((3016, 3035), 'numpy.zeros_like', 'np.zeros_like', (['pred'], {}), '(pred)\n', (3029, 3035), True, 'import numpy as np\n'), ((3083, 3101), 'numpy.zeros_like', 'np.zeros_like', (['obs'], {}), '(obs)\n', (3096, 3101), True, 'import numpy as np\n'), ((3126, 3139), 'numpy.mean', 'np.mean', (['pred'], {}), '(pred)\n', (3133, 3139), True, 'import numpy as np\n'), ((3142, 3154), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (3149, 3154), True, 'import numpy as np\n'), ((3550, 3569), 'numpy.zeros_like', 'np.zeros_like', (['pred'], {}), '(pred)\n', (3563, 3569), True, 'import numpy as np\n'), ((3617, 3635), 'numpy.zeros_like', 'np.zeros_like', (['obs'], {}), '(obs)\n', (3630, 3635), True, 'import numpy as np\n'), ((3660, 3679), 'numpy.mean', 'np.mean', (['(pred - obs)'], {}), '(pred - obs)\n', 
(3667, 3679), True, 'import numpy as np\n'), ((1709, 1721), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (1716, 1721), True, 'import numpy as np\n'), ((1724, 1737), 'numpy.mean', 'np.mean', (['pred'], {}), '(pred)\n', (1731, 1737), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 13:05:37 2019
@author: josel
"""
from __future__ import division, print_function
"""Lee archivos de datos exportados del Vicon Nexus"""
import numpy as np
import pandas as pd
import xarray as xr
#import scipy.signal
__author__ = '<NAME>'
__version__ = 'v.2.2.0'
__date__ = '29/03/2021'
"""
Modificaciones:
29/03/2021, v2.1.1
- Incluido parámetro 'header_format' para que devuelva el encabezado como 'flat' en una sola línea (variable_x, variable_y, ...) o en dos líneas ((variable,x), (variable,y), ...).
28/03/2021, v2.1.1
- Mejorada lectura con Pandas. Ahora puede cargar archivos que empiezan sin datos en las primeras líneas.
21/03/2021, v2.1.0
- Cambiado lector del bloque de archivos por pd.read_csv con número de columnas delimitado a los que carga en las variables (quitando los de velocidad y aceleración)
- Solucionado fallo al leer frecuencia cuando terminaba la línea rellenando con separadores (como al exportar en Excel)
10/01/2021, v2.0.1
- Ajustado para que pueda devolver xArray con Model Outputs
13/12/2020, v2.0.0
- Con el argumento formatoxArray se puede pedir que devuelva los datos en formato xArray
"""
def read_vicon_csv(nombreArchivo, nomBloque='Model Outputs', separador=',', returnFrec=False, formatoxArray=False, header_format='flat'):
"""
Parameters
----------
versión : v2.2.0
nombreArchivo : string
ruta del archivo a abrir
nomBloque : string
tipo de datos a leer en el archivo original.
'Model Outputs', 'Trajectories' o 'Devices'
separador : string
caracter separador de los datos
returnFrec : bool
si es True devuelve un int con la frecuencia de muestreo
formatoxArray : bool
si es true devuelve los datos en formato xArray
header_format : str
'flat': devuelve el encabezado en una línea (por defecto)
otra cosa: devuelve el encabezaco en dos líneas (var y coord)
Returns
-------
data : datos leidos en formato DataFrame de Pandas o DataArray de xArray.
frec: frecuencia de registro de los datos.
Examples
--------
>>> dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
>>> dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
>>> #Con formato dataarray de xArray
>>> daDatos = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', formatoxArray=True)
"""
with open(nombreArchivo, mode='rt') as f:
numLinea=0
#busca etiqueta del inicio del bloque
linea = f.readline()
while nomBloque not in linea:
if linea == '':
raise Exception('No se ha encontrado el encabezado')
numLinea+=1
linea = f.readline()
inicioBloque = numLinea
#Lo que viene detrás de la etiqueta es la frecuencia
linea = f.readline()
frecuencia = int(linea.replace(separador,'')) #quita el separador para los casos en los que el archivo ha sido guardado con Excel (completa línea con separador)
#Carga el nombre de las columnas
#linea = f.readline()
nomColsVar = str(f.readline()[:-1]).split(separador) #nombreVariables
nomCols = str(f.readline()[:-1]).split(separador) #nombre coordenadas X,Y,Z.
#nomCols = [s.lower() for s in nomCols] # Lo fuerza a minúsculas
#busca etiqueta del final del bloque
while linea!='\n':
if linea == '':
raise Exception('No se ha encontrado el final del bloque')
numLinea+=1
#print('Linea '+ str(numLinea))
linea = f.readline()
finBloque = numLinea-1 #quita 1 para descontar la línea vacía
#Cuenta el nº de líneas totales
finArchivo=0
with open(nombreArchivo, mode='rt') as f:
for i in f:
finArchivo+=1
#primero asigna los nombres según el propio archivo
nomVars=['Frame', 'Sub Frame']
for i in range(2,len(nomCols),3):
if "'" not in nomCols[i] and "''" not in nomCols[i]: #elimina las posibles columnas de velocidad y aceleración
nomVars.append(nomColsVar[i].split(':')[1]+'_' + nomCols[i])#X
nomVars.append(nomColsVar[i].split(':')[1]+'_' + nomCols[i+1])#Y
nomVars.append(nomColsVar[i].split(':')[1]+'_' + nomCols[i+2])#Z
# [i for i in nomColsVar if "'" in i]
# nomColsVar = [i for i in nomColsVar if "'" not in i]
#carga todos los datos
#CON GENFROMTXT FALLA SI NO EMPIEZA LA PRIMERA LÍNEA CON DATOS
#provisional= np.genfromtxt(nombreArchivo, skip_header= inicioBloque+5, max_rows=finBloque-inicioBloque-1, delimiter=separador, missing_values='', filling_values=np.nan, invalid_raise=True)
#provisional=provisional[:, :len(nomVars)] #recorta solo hasta las variables
#Convierte los datos en pandas dataframe. Pasa solo los que no son de velocidad o aceleración
#dfReturn = pd.DataFrame(provisional[:, :len(nomVars)], columns=nomVars)
#dfReturn = dfReturn.iloc[:, :len(nomVars)] #se queda solo con las columnas de las variables, quita las de velocidad si las hay
#Con pandas directamente funciona (para evitar error si primera línea no son datos, lee la fina de las unidades y luego la quita)
dfReturn = pd.read_csv(nombreArchivo, delimiter=separador, header=None, skiprows=inicioBloque+4, skipfooter=finArchivo-finBloque-5, usecols=range(len(nomVars)), engine='python')
dfReturn = dfReturn.drop(index=0).reset_index(drop=True).astype(float) #borra la primera fila, que contiene las unidades
#Nombra encabezado
var=['_'.join(s.split('_')[:-1]) for s in nomVars[:len(nomVars)]] #gestiona si la variable tiene separador '_', lo mantiene
coord=[s.split(':')[-1] for s in nomCols[:len(nomVars)]]
dfReturn.columns=pd.MultiIndex.from_tuples(list(zip(*[var,coord])), names=['Variable', 'Coord'])
#dfReturn.columns=[var, coord]
#dfReturn.columns.set_names(names=['Variable', 'Coord'], level=[0,1], inplace=True)
if header_format=='flat':
dfReturn.columns = dfReturn.columns.map('_'.join).str.strip()
# #Elimina las columnas de velocidad y aceleración, si las hay
# borrarColsVA = dfReturn.filter(regex='|'.join(["'", "''"])).columns
# dfReturn = dfReturn.drop(columns=borrarColsVA)
#Si hace falta lo pasa a xArray
if formatoxArray:
daReturn=xr.DataArray()
#transforma los datos en xarray
x=dfReturn.filter(regex='|'.join(['_x','_X'])).to_numpy().T
y=dfReturn.filter(regex='|'.join(['_y','_Y'])).to_numpy().T
z=dfReturn.filter(regex='|'.join(['_z','_Z'])).to_numpy().T
data=np.stack([x,y,z])
#Quita el identificador de la coordenada del final
canales = dfReturn.filter(regex='|'.join(['_x','_X'])).columns.str.rstrip('|'.join(['_x','_X']))
n_frames = x.shape[1]
channels = canales
time = np.arange(start=0, stop=n_frames / frecuencia, step=1 / frecuencia)
coords = {}
coords['axis'] = ['x', 'y', 'z']
coords['channel'] = channels
coords['time'] = time
daReturn=xr.DataArray(
data=data,
dims=('axis', 'channel', 'time'),
coords=coords,
name=nomBloque,
attrs={'Frec':frecuencia}
#**kwargs,
)
if formatoxArray and returnFrec:
return dfReturn, daReturn, frecuencia
elif formatoxArray:
return dfReturn, daReturn
elif returnFrec:
return dfReturn, frecuencia
else:
return dfReturn
# =============================================================================
# %%
# =============================================================================
if __name__ == '__main__':
from pathlib import Path
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
#Con Models al final
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN_ModeloAlFinal.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
#Sin fila inicial en blanco
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN_SinFilaBlancoInicial.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
#Solo bloque modelos
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN_2.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
#Con hueco muy grande al inicio
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconConHuecoInicio_S27_WHT_T2_L01.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
dfDatos['R5Meta_Z'].plot()
#Con formato dataarray de xArray
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos, daDatos = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', formatoxArray=True)
dfDatos['Right_Toe_Z'].plot()
daDatos.sel(channel='Right_Toe', axis='z').plot.line()
dfDatos, daDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs', formatoxArray=True)
dfDatos['AngArtLKnee_x'].plot()
daDatos.sel(channel='AngArtLKnee', axis='x').plot.line()
#Archivo con huecos
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconConHuecos_S01_WHF_T1_L04.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
dfDatos.plot()
#prueba con encabezado multiindex
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatosFlat = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatosMulti = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs', header_format='multi')
dfDatosFlat[['AngArtLKnee_x','AngArtLKnee_y','AngArtLKnee_z']].plot()
dfDatosMulti['AngArtLKnee'].plot()
dfDatosMulti.loc[:, (slice(None), 'x')].plot() #todas las variables de una misma coordenada
dfDatosFlat = read_vicon_csv(nombreArchivo, nomBloque='Trajectories')
dfDatosMulti = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', header_format='multi')
dfDatosFlat[['Right_Toe_X','Right_Toe_Y','Right_Toe_Z']].plot()
dfDatosMulti['Right_Toe'].plot()
dfDatosMulti.loc[:, (slice(None), 'Z')].plot() #todas las variables de una misma coordenada
| [
"numpy.stack",
"numpy.arange",
"xarray.DataArray",
"pathlib.Path"
] | [((8619, 8637), 'pathlib.Path', 'Path', (['ruta_Archivo'], {}), '(ruta_Archivo)\n', (8623, 8637), False, 'from pathlib import Path\n'), ((9001, 9019), 'pathlib.Path', 'Path', (['ruta_Archivo'], {}), '(ruta_Archivo)\n', (9005, 9019), False, 'from pathlib import Path\n'), ((9398, 9416), 'pathlib.Path', 'Path', (['ruta_Archivo'], {}), '(ruta_Archivo)\n', (9402, 9416), False, 'from pathlib import Path\n'), ((9770, 9788), 'pathlib.Path', 'Path', (['ruta_Archivo'], {}), '(ruta_Archivo)\n', (9774, 9788), False, 'from pathlib import Path\n'), ((10145, 10163), 'pathlib.Path', 'Path', (['ruta_Archivo'], {}), '(ruta_Archivo)\n', (10149, 10163), False, 'from pathlib import Path\n'), ((10471, 10489), 'pathlib.Path', 'Path', (['ruta_Archivo'], {}), '(ruta_Archivo)\n', (10475, 10489), False, 'from pathlib import Path\n'), ((11068, 11086), 'pathlib.Path', 'Path', (['ruta_Archivo'], {}), '(ruta_Archivo)\n', (11072, 11086), False, 'from pathlib import Path\n'), ((11383, 11401), 'pathlib.Path', 'Path', (['ruta_Archivo'], {}), '(ruta_Archivo)\n', (11387, 11401), False, 'from pathlib import Path\n'), ((6910, 6924), 'xarray.DataArray', 'xr.DataArray', ([], {}), '()\n', (6922, 6924), True, 'import xarray as xr\n'), ((7193, 7212), 'numpy.stack', 'np.stack', (['[x, y, z]'], {}), '([x, y, z])\n', (7201, 7212), True, 'import numpy as np\n'), ((7479, 7546), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(n_frames / frecuencia)', 'step': '(1 / frecuencia)'}), '(start=0, stop=n_frames / frecuencia, step=1 / frecuencia)\n', (7488, 7546), True, 'import numpy as np\n'), ((7707, 7827), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'data', 'dims': "('axis', 'channel', 'time')", 'coords': 'coords', 'name': 'nomBloque', 'attrs': "{'Frec': frecuencia}"}), "(data=data, dims=('axis', 'channel', 'time'), coords=coords,\n name=nomBloque, attrs={'Frec': frecuencia})\n", (7719, 7827), True, 'import xarray as xr\n')] |
import numpy as np
import matplotlib.pyplot as plt
x=np.arange(1,10,2)
y=x
plt.plot(x,y,linewidth=2.0,linestyle=':',color='red',alpha=0.5,marker='o')
plt.plot(x,y+3,linewidth=2.0,linestyle='-',color='blue',alpha=0.5,marker='x')
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.legend(['line1','line2'],loc='best')
plt.grid(True)
plt.savefig('output/linegraph.jpeg')
plt.show() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((57, 76), 'numpy.arange', 'np.arange', (['(1)', '(10)', '(2)'], {}), '(1, 10, 2)\n', (66, 76), True, 'import numpy as np\n'), ((81, 166), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'linewidth': '(2.0)', 'linestyle': '""":"""', 'color': '"""red"""', 'alpha': '(0.5)', 'marker': '"""o"""'}), "(x, y, linewidth=2.0, linestyle=':', color='red', alpha=0.5, marker='o'\n )\n", (89, 166), True, 'import matplotlib.pyplot as plt\n'), ((157, 246), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(y + 3)'], {'linewidth': '(2.0)', 'linestyle': '"""-"""', 'color': '"""blue"""', 'alpha': '(0.5)', 'marker': '"""x"""'}), "(x, y + 3, linewidth=2.0, linestyle='-', color='blue', alpha=0.5,\n marker='x')\n", (165, 246), True, 'import matplotlib.pyplot as plt\n'), ((236, 256), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x axis"""'], {}), "('x axis')\n", (246, 256), True, 'import matplotlib.pyplot as plt\n'), ((258, 278), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y axis"""'], {}), "('y axis')\n", (268, 278), True, 'import matplotlib.pyplot as plt\n'), ((280, 322), 'matplotlib.pyplot.legend', 'plt.legend', (["['line1', 'line2']"], {'loc': '"""best"""'}), "(['line1', 'line2'], loc='best')\n", (290, 322), True, 'import matplotlib.pyplot as plt\n'), ((322, 336), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (330, 336), True, 'import matplotlib.pyplot as plt\n'), ((338, 374), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output/linegraph.jpeg"""'], {}), "('output/linegraph.jpeg')\n", (349, 374), True, 'import matplotlib.pyplot as plt\n'), ((376, 386), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (384, 386), True, 'import matplotlib.pyplot as plt\n')] |
from config import *
import pickle
import numpy as np
def prepare_mano_model():
    """
    Convert the official MANO model into a format compatible with this
    project.

    Reads the latin1-encoded pickle at OFFICIAL_MANO_PATH and writes the
    converted parameter dictionary to MANO_MODEL_PATH.
    """
    with open(OFFICIAL_MANO_PATH, 'rb') as src:
        raw = pickle.load(src, encoding='latin1')
    # The kinematic tree root (wrist) has no parent.
    parents = raw['kintree_table'][0].tolist()
    parents[0] = None
    converted = {
        'pose_pca_basis': np.array(raw['hands_components']),
        'pose_pca_mean': np.array(raw['hands_mean']),
        'J_regressor': raw['J_regressor'].toarray(),
        'skinning_weights': np.array(raw['weights']),
        # Pose blend shapes.
        'mesh_pose_basis': np.array(raw['posedirs']),
        'mesh_shape_basis': np.array(raw['shapedirs']),
        'mesh_template': np.array(raw['v_template']),
        'faces': np.array(raw['f']),
        'parents': parents,
    }
    with open(MANO_MODEL_PATH, 'wb') as dst:
        pickle.dump(converted, dst)
def prepare_smpl_model():
    """
    Convert the official SMPL model into a format compatible with this
    project.

    Reads the latin1-encoded pickle at OFFICIAL_SMPL_PATH and writes the
    converted parameter dictionary to SMPL_MODEL_PATH.
    """
    with open(OFFICIAL_SMPL_PATH, 'rb') as src:
        raw = pickle.load(src, encoding='latin1')
    # The kinematic tree root has no parent.
    parents = raw['kintree_table'][0].tolist()
    parents[0] = None
    converted = {
        # SMPL ships no pose PCA, so use an identity basis and zero mean.
        'pose_pca_basis': np.eye(23 * 3),
        'pose_pca_mean': np.zeros(23 * 3),
        'J_regressor': raw['J_regressor'].toarray(),
        'skinning_weights': np.array(raw['weights']),
        # Pose blend shapes.
        'mesh_pose_basis': np.array(raw['posedirs']),
        'mesh_shape_basis': np.array(raw['shapedirs']),
        'mesh_template': np.array(raw['v_template']),
        'faces': np.array(raw['f']),
        'parents': parents,
    }
    with open(SMPL_MODEL_PATH, 'wb') as dst:
        pickle.dump(converted, dst)
def prepare_smplh_model():
    """
    Convert the official SMPL+H model into a format compatible with this
    project.

    Reads the npz archive at OFFICIAL_SMPLH_PATH and writes the converted
    parameter dictionary to SMPLH_MODEL_PATH.
    """
    raw = np.load(OFFICIAL_SMPLH_PATH)
    # The kinematic tree root has no parent.
    parents = raw['kintree_table'][0].tolist()
    parents[0] = None
    converted = {
        # SMPL+H ships no pose PCA, so use an identity basis and zero mean.
        'pose_pca_basis': np.eye(51 * 3),
        'pose_pca_mean': np.zeros(51 * 3),
        'J_regressor': raw['J_regressor'],
        'skinning_weights': np.array(raw['weights']),
        # Pose blend shapes.
        'mesh_pose_basis': np.array(raw['posedirs']),
        'mesh_shape_basis': np.array(raw['shapedirs']),
        'mesh_template': np.array(raw['v_template']),
        'faces': np.array(raw['f']),
        'parents': parents,
    }
    with open(SMPLH_MODEL_PATH, 'wb') as dst:
        pickle.dump(converted, dst)
if __name__ == '__main__':
    # Entry point: only the SMPL+H conversion runs by default; call the
    # other prepare_* helpers manually when the MANO/SMPL models are needed.
    prepare_smplh_model()
| [
"numpy.eye",
"pickle.dump",
"pickle.load",
"numpy.array",
"numpy.zeros",
"numpy.load"
] | [((1770, 1798), 'numpy.load', 'np.load', (['OFFICIAL_SMPLH_PATH'], {}), '(OFFICIAL_SMPLH_PATH)\n', (1777, 1798), True, 'import numpy as np\n'), ((225, 258), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (236, 258), False, 'import pickle\n'), ((294, 328), 'numpy.array', 'np.array', (["data['hands_components']"], {}), "(data['hands_components'])\n", (302, 328), True, 'import numpy as np\n'), ((351, 379), 'numpy.array', 'np.array', (["data['hands_mean']"], {}), "(data['hands_mean'])\n", (359, 379), True, 'import numpy as np\n'), ((455, 480), 'numpy.array', 'np.array', (["data['weights']"], {}), "(data['weights'])\n", (463, 480), True, 'import numpy as np\n'), ((528, 554), 'numpy.array', 'np.array', (["data['posedirs']"], {}), "(data['posedirs'])\n", (536, 554), True, 'import numpy as np\n'), ((580, 607), 'numpy.array', 'np.array', (["data['shapedirs']"], {}), "(data['shapedirs'])\n", (588, 607), True, 'import numpy as np\n'), ((630, 658), 'numpy.array', 'np.array', (["data['v_template']"], {}), "(data['v_template'])\n", (638, 658), True, 'import numpy as np\n'), ((673, 692), 'numpy.array', 'np.array', (["data['f']"], {}), "(data['f'])\n", (681, 692), True, 'import numpy as np\n'), ((823, 845), 'pickle.dump', 'pickle.dump', (['params', 'f'], {}), '(params, f)\n', (834, 845), False, 'import pickle\n'), ((1017, 1050), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (1028, 1050), False, 'import pickle\n'), ((1123, 1137), 'numpy.eye', 'np.eye', (['(23 * 3)'], {}), '(23 * 3)\n', (1129, 1137), True, 'import numpy as np\n'), ((1160, 1176), 'numpy.zeros', 'np.zeros', (['(23 * 3)'], {}), '(23 * 3)\n', (1168, 1176), True, 'import numpy as np\n'), ((1252, 1277), 'numpy.array', 'np.array', (["data['weights']"], {}), "(data['weights'])\n", (1260, 1277), True, 'import numpy as np\n'), ((1325, 1351), 'numpy.array', 'np.array', (["data['posedirs']"], {}), "(data['posedirs'])\n", 
(1333, 1351), True, 'import numpy as np\n'), ((1377, 1404), 'numpy.array', 'np.array', (["data['shapedirs']"], {}), "(data['shapedirs'])\n", (1385, 1404), True, 'import numpy as np\n'), ((1427, 1455), 'numpy.array', 'np.array', (["data['v_template']"], {}), "(data['v_template'])\n", (1435, 1455), True, 'import numpy as np\n'), ((1470, 1489), 'numpy.array', 'np.array', (["data['f']"], {}), "(data['f'])\n", (1478, 1489), True, 'import numpy as np\n'), ((1620, 1642), 'pickle.dump', 'pickle.dump', (['params', 'f'], {}), '(params, f)\n', (1631, 1642), False, 'import pickle\n'), ((1871, 1885), 'numpy.eye', 'np.eye', (['(51 * 3)'], {}), '(51 * 3)\n', (1877, 1885), True, 'import numpy as np\n'), ((1908, 1924), 'numpy.zeros', 'np.zeros', (['(51 * 3)'], {}), '(51 * 3)\n', (1916, 1924), True, 'import numpy as np\n'), ((1990, 2015), 'numpy.array', 'np.array', (["data['weights']"], {}), "(data['weights'])\n", (1998, 2015), True, 'import numpy as np\n'), ((2063, 2089), 'numpy.array', 'np.array', (["data['posedirs']"], {}), "(data['posedirs'])\n", (2071, 2089), True, 'import numpy as np\n'), ((2115, 2142), 'numpy.array', 'np.array', (["data['shapedirs']"], {}), "(data['shapedirs'])\n", (2123, 2142), True, 'import numpy as np\n'), ((2165, 2193), 'numpy.array', 'np.array', (["data['v_template']"], {}), "(data['v_template'])\n", (2173, 2193), True, 'import numpy as np\n'), ((2208, 2227), 'numpy.array', 'np.array', (["data['f']"], {}), "(data['f'])\n", (2216, 2227), True, 'import numpy as np\n'), ((2359, 2381), 'pickle.dump', 'pickle.dump', (['params', 'f'], {}), '(params, f)\n', (2370, 2381), False, 'import pickle\n')] |
import torch
from torch.distributions import Categorical
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from rlcard.utils.utils import remove_illegal_torch
class PolicyNetwork(torch.nn.Module):
    """Two-layer MLP mapping a flattened observation to action probabilities."""

    def __init__(self, input_size, output_size):
        super().__init__()
        self.input_size = input_size
        self.linear1 = nn.Linear(input_size, 32)
        self.linear2 = nn.Linear(32, output_size)

    def forward(self, x):
        """Returns a (batch, output_size) tensor of action probabilities."""
        flat = x.view((-1, self.input_size))
        hidden = F.relu(self.linear1(flat))
        # Softmax over dim=1 (the action dimension) yields one probability
        # vector per batch row.
        return F.softmax(self.linear2(hidden), dim=1)
class Policy:
    """REINFORCE policy: network prediction, reward buffering, updates."""

    # Smallest representable float32 increment; guards the std normalization.
    eps = np.finfo(np.float32).eps.item()

    def __init__(self,
                 action_num,
                 state_shape,
                 learning_rate,
                 discount_factor,
                 device):
        self.discount_factor = discount_factor
        self.device = device
        self._init_policy_network(action_num, state_shape)
        self.optimizer = torch.optim.Adam(self.policy_network.parameters(), lr=learning_rate)
        self.log_probs = []
        self.rewards = []

    def _init_policy_network(self, action_num, state_shape):
        """Builds the policy network and moves it to the configured device."""
        network = PolicyNetwork(np.prod(state_shape), action_num)
        self.policy_network = network.to(self.device)

    def predict(self, observed_state):
        """Returns action probabilities for a numpy observation."""
        state_tensor = torch.from_numpy(observed_state).float().to(self.device)
        return self.policy_network(state_tensor)

    def terminate_episode(self):
        """Runs one REINFORCE update from the buffered episode and clears it."""
        # Discounted returns, accumulated backwards through the episode.
        returns = []
        running = 0
        for reward in reversed(self.rewards):
            running = reward + self.discount_factor * running
            returns.insert(0, running)
        returns = torch.tensor(returns)
        # Normalize only when the std is meaningfully non-zero.
        if returns.std() > self.eps:
            returns = (returns - returns.mean()) / returns.std()
        loss = -(torch.cat(self.log_probs) * returns).sum()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.rewards = []
        self.log_probs = []
        return loss
class ReinforceAgent:
    """RLCard-compatible agent trained with the REINFORCE policy gradient."""

    def __init__(self,
                 scope,
                 action_num,
                 state_shape,
                 discount_factor=0.99,
                 learning_rate=1e-5,
                 device=None):
        """
        Args:
            scope: name prefix used for the saved state dict key.
            action_num: number of discrete actions.
            state_shape: shape of the flattened observation.
            discount_factor: gamma for discounted returns.
            learning_rate: Adam learning rate.
            device: torch device; when None, CUDA is used if available.
        """
        # TODO: check that it is correct to have use_raw == False
        self.use_raw = False
        self.scope = scope
        self._init_device(device)
        # Fix: forward the resolved device (self.device), not the raw
        # argument — previously a None device bypassed the auto-selection
        # done in _init_device.
        self.policy = Policy(action_num=action_num, state_shape=state_shape,
                             learning_rate=learning_rate,
                             discount_factor=discount_factor,
                             device=self.device)

    def _init_device(self, device):
        """Selects CUDA when available unless a device is given explicitly."""
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device

    def feed(self, ts):
        """Buffers the reward from a (state, action, reward, next, done) tuple."""
        (_, _, reward, _, _) = tuple(ts)
        self.policy.rewards.append(reward)

    def step(self, state):
        """Samples an action from the policy and records its log-probability."""
        probs = self.policy.predict(state["obs"])
        # TODO: check removing the actions like this is fine for the computation of the gradient
        probs = remove_illegal_torch(probs, state["legal_actions"])
        m = Categorical(probs)
        action = m.sample()
        self.policy.log_probs.append(m.log_prob(action))
        return action.item()

    def eval_step(self, state):
        """Returns the greedy action and the probability tensor (no grad)."""
        with torch.no_grad():
            probs = self.policy.predict(state["obs"])
            probs = remove_illegal_torch(probs, state["legal_actions"])
            # Move to CPU before handing the tensor to numpy: np.argmax
            # cannot read CUDA tensors.
            # TODO: could be also good to keep the policy stochastic also at evaluation
            best_action = np.argmax(probs.cpu().numpy())
            return best_action, probs

    def train(self):
        """Runs one REINFORCE update from the buffered episode."""
        return self.policy.terminate_episode()

    def get_state_dict(self):
        ''' Get the state dict to save models

        Returns:
            (dict): A dict of model states
        '''
        policy_key = self.scope + 'policy_network'
        policy = self.policy.policy_network.state_dict()
        return {policy_key: policy}

    def load(self):
        # Loading saved models is not implemented yet.
        raise NotImplementedError()
| [
"numpy.prod",
"torch.distributions.Categorical",
"numpy.argmax",
"rlcard.utils.utils.remove_illegal_torch",
"torch.from_numpy",
"torch.tensor",
"torch.cuda.is_available",
"torch.nn.Linear",
"numpy.finfo",
"torch.no_grad",
"torch.cat"
] | [((393, 418), 'torch.nn.Linear', 'nn.Linear', (['input_size', '(32)'], {}), '(input_size, 32)\n', (402, 418), True, 'import torch.nn as nn\n'), ((442, 468), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'output_size'], {}), '(32, output_size)\n', (451, 468), True, 'import torch.nn as nn\n'), ((1863, 1884), 'torch.tensor', 'torch.tensor', (['returns'], {}), '(returns)\n', (1875, 1884), False, 'import torch\n'), ((3535, 3586), 'rlcard.utils.utils.remove_illegal_torch', 'remove_illegal_torch', (['probs', "state['legal_actions']"], {}), "(probs, state['legal_actions'])\n", (3555, 3586), False, 'from rlcard.utils.utils import remove_illegal_torch\n'), ((3599, 3617), 'torch.distributions.Categorical', 'Categorical', (['probs'], {}), '(probs)\n', (3610, 3617), False, 'from torch.distributions import Categorical\n'), ((1351, 1371), 'numpy.prod', 'np.prod', (['state_shape'], {}), '(state_shape)\n', (1358, 1371), True, 'import numpy as np\n'), ((3778, 3793), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3791, 3793), False, 'import torch\n'), ((3869, 3920), 'rlcard.utils.utils.remove_illegal_torch', 'remove_illegal_torch', (['probs', "state['legal_actions']"], {}), "(probs, state['legal_actions'])\n", (3889, 3920), False, 'from rlcard.utils.utils import remove_illegal_torch\n'), ((4035, 4051), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (4044, 4051), True, 'import numpy as np\n'), ((760, 780), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (768, 780), True, 'import numpy as np\n'), ((3149, 3174), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3172, 3174), False, 'import torch\n'), ((1502, 1534), 'torch.from_numpy', 'torch.from_numpy', (['observed_state'], {}), '(observed_state)\n', (1518, 1534), False, 'import torch\n'), ((2176, 2201), 'torch.cat', 'torch.cat', (['self.log_probs'], {}), '(self.log_probs)\n', (2185, 2201), False, 'import torch\n')] |
import numpy as np
from pylot.utils import Location, Rotation, Transform
def create_rgb_camera_setup(camera_name,
                            camera_location,
                            width,
                            height,
                            fov=90):
    """Builds an RGBCameraSetup at the given location, identity rotation."""
    pose = Transform(camera_location, Rotation())
    return RGBCameraSetup(camera_name, width, height, pose, fov)
def create_depth_camera_setup(camera_name_prefix,
                              camera_location,
                              width,
                              height,
                              fov=90):
    """Builds a DepthCameraSetup named '<prefix>_depth', identity rotation."""
    pose = Transform(camera_location, Rotation())
    return DepthCameraSetup(camera_name_prefix + '_depth', width, height,
                            pose, fov=fov)
def create_segmented_camera_setup(camera_name_prefix,
                                  camera_location,
                                  width,
                                  height,
                                  fov=90):
    """Builds a SegmentedCameraSetup named '<prefix>_segmented'."""
    pose = Transform(camera_location, Rotation())
    return SegmentedCameraSetup(camera_name_prefix + '_segmented', width,
                                height, pose, fov=fov)
def create_left_right_camera_setups(camera_name_prefix,
                                    location,
                                    width,
                                    height,
                                    camera_offset,
                                    fov=90):
    """Builds a stereo pair of RGB camera setups offset along the y axis.

    Returns:
        A (left_setup, right_setup) tuple named '<prefix>_left' and
        '<prefix>_right'.
    """
    rotation = Rotation()
    left_pose = Transform(location + Location(0, -camera_offset, 0), rotation)
    right_pose = Transform(location + Location(0, camera_offset, 0), rotation)
    left_setup = RGBCameraSetup(camera_name_prefix + '_left', width, height,
                                left_pose, fov=fov)
    right_setup = RGBCameraSetup(camera_name_prefix + '_right', width, height,
                                 right_pose, fov=fov)
    return (left_setup, right_setup)
def create_center_lidar_setup(location):
    """Builds a LidarSetup placed at `location` (same pose as the camera)."""
    pose = Transform(location, Rotation())
    return LidarSetup(
        name='front_center_lidar',
        lidar_type='sensor.lidar.ray_cast',
        transform=pose,
        range=5000,  # in centimeters
        rotation_frequency=20,
        channels=32,
        upper_fov=15,
        lower_fov=-30,
        points_per_second=500000)
def create_imu_setup(location):
    """Builds an IMUSetup at `location` with identity rotation."""
    pose = Transform(location, Rotation())
    return IMUSetup(name='imu', transform=pose)
class CameraSetup(object):
    """A helper class storing information about the setup of a camera.

    Args:
        name: Identifier of the camera.
        camera_type: Simulator sensor type (e.g. 'sensor.camera.rgb').
        width: Image width in pixels; must be > 1.
        height: Image height in pixels.
        transform: Pose of the camera in the world (pylot Transform).
        fov: Horizontal field of view in degrees.
    """
    def __init__(self, name, camera_type, width, height, transform, fov=90):
        self.name = name
        self.camera_type = camera_type
        assert width > 1, "Valid camera setup should have width > 1"
        self.width = width
        self.height = height
        self._transform = transform
        self.fov = fov
        # Derived matrices are cached; the unreal transform is refreshed
        # whenever the transform property is reassigned.
        self._intrinsic_mat = CameraSetup.__create_intrinsic_matrix(
            self.width, self.height, self.fov)
        self._unreal_transform = CameraSetup.__create_unreal_transform(
            self._transform)

    def get_fov(self):
        """Returns the horizontal field of view in degrees."""
        return self.fov

    @staticmethod
    def __create_intrinsic_matrix(width, height, fov):
        """Returns the 3x3 pinhole intrinsic matrix for the resolution/FOV."""
        # Uses the module-level numpy import; the former function-local
        # `import numpy as np` was redundant.
        k = np.identity(3)
        # Principal point at the image center.
        k[0, 2] = (width - 1) / 2.0
        k[1, 2] = (height - 1) / 2.0
        # Focal length derived from the horizontal field of view.
        k[0, 0] = k[1, 1] = (width - 1) / (2.0 * np.tan(fov * np.pi / 360.0))
        return k

    @staticmethod
    def __create_unreal_transform(transform):
        """
        Takes in a Transform that occurs in unreal coordinates,
        and converts it into a Transform that goes from camera
        coordinates to unreal coordinates.

        With no additional rotations:
        Unreal coordinates: +x is into the screen, +y is to the right, +z is up
        Camera coordinates: +x is to the right, +y is down, +z is into the screen
        Lidar coordinates: +x is to the right, +y is out of the screen, +z is down
        """
        to_unreal_transform = Transform(matrix=np.array(
            [[0, 0, 1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]]))
        return transform * to_unreal_transform

    def get_intrinsic_matrix(self):
        """Returns the cached 3x3 intrinsic matrix."""
        return self._intrinsic_mat

    def get_extrinsic_matrix(self):
        """Returns the 4x4 camera-to-unreal matrix."""
        return self._unreal_transform.matrix

    def get_name(self):
        return self.name

    def get_unreal_transform(self):
        return self._unreal_transform

    def get_transform(self):
        return self._transform

    def set_transform(self, transform):
        """Replaces the camera pose and refreshes the cached unreal transform."""
        self._transform = transform
        self._unreal_transform = CameraSetup.__create_unreal_transform(
            self._transform)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # Fixed: the format string previously lacked the closing parenthesis.
        return 'CameraSetup(name: {}, type: {}, width: {}, height: {}, '\
            'transform: {}, fov: {})'.format(
                self.name, self.camera_type, self.width, self.height,
                self._transform, self.fov)

    transform = property(get_transform, set_transform)
class RGBCameraSetup(CameraSetup):
    """CameraSetup specialized for the RGB camera sensor."""

    def __init__(self, name, width, height, transform, fov=90):
        camera_type = 'sensor.camera.rgb'
        super().__init__(name, camera_type, width, height, transform, fov)
class DepthCameraSetup(CameraSetup):
    """CameraSetup specialized for the depth camera sensor."""

    def __init__(self, name, width, height, transform, fov=90):
        camera_type = 'sensor.camera.depth'
        super().__init__(name, camera_type, width, height, transform, fov)
class SegmentedCameraSetup(CameraSetup):
    """CameraSetup specialized for the semantic segmentation camera."""

    def __init__(self, name, width, height, transform, fov=90):
        camera_type = 'sensor.camera.semantic_segmentation'
        super().__init__(name, camera_type, width, height, transform, fov)
class LidarSetup(object):
    """A helper class storing information about the setup of a LiDAR sensor."""
    def __init__(self, name, lidar_type, transform, range, rotation_frequency,
                 channels, upper_fov, lower_fov, points_per_second):
        """
        Args:
            name: Identifier of the sensor.
            lidar_type: Simulator sensor type (e.g. 'sensor.lidar.ray_cast').
            transform: Pose of the sensor.
            range: Maximum range; in centimeters according to
                create_center_lidar_setup's comment — TODO confirm the unit.
            rotation_frequency: Rotations per second.
            channels: Number of laser channels.
            upper_fov: Upper vertical field-of-view bound in degrees.
            lower_fov: Lower vertical field-of-view bound in degrees.
            points_per_second: Point emission rate.
        """
        self.name = name
        self.lidar_type = lidar_type
        self._transform = transform
        self.range = range
        self.rotation_frequency = rotation_frequency
        self.channels = channels
        self.upper_fov = upper_fov
        self.lower_fov = lower_fov
        self.points_per_second = points_per_second
        # Cached lidar-to-camera transform, refreshed by set_transform.
        self._unreal_transform = LidarSetup.__create_unreal_transform(
            self._transform)

    @staticmethod
    def __create_unreal_transform(transform):
        """
        Takes in a Transform that occurs in camera coordinates,
        and converts it into a Transform that goes from lidar
        coordinates to camera coordinates.
        """
        to_camera_transform = Transform(matrix=np.array(
            [[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]))
        return transform * to_camera_transform

    def get_name(self):
        """Returns the sensor identifier."""
        return self.name

    def get_transform(self):
        """Returns the pose of the sensor."""
        return self._transform

    def set_transform(self, transform):
        """Replaces the pose and refreshes the cached unreal transform."""
        self._transform = transform
        self._unreal_transform = LidarSetup.__create_unreal_transform(
            self._transform)

    def get_unreal_transform(self):
        """Returns the cached lidar-to-camera transform."""
        return self._unreal_transform

    def get_range_in_meters(self):
        # NOTE(review): with `range` in centimeters (per
        # create_center_lidar_setup) meters would be range / 100; dividing
        # by 1000 looks suspicious — confirm the intended unit before use.
        return self.range / 1000

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # NOTE(review): the formatted string is missing its closing ')'.
        return 'LidarSetup(name: {}, type: {}, transform: {}, range: {}, '\
            'rotation freq: {}, channels: {}, upper_fov: {}, lower_fov: {}, '\
            'points_per_second: {}'.format(
                self.name, self.lidar_type, self._transform, self.range,
                self.rotation_frequency, self.channels, self.upper_fov,
                self.lower_fov, self.points_per_second)

    # Expose _transform as a property so assignment refreshes the cache.
    transform = property(get_transform, set_transform)
class IMUSetup(object):
    """Stores the name and pose of an IMU sensor."""

    def __init__(self, name, transform):
        self.name = name
        self.transform = transform

    def get_name(self):
        """Returns the sensor identifier."""
        return self.name

    def get_transform(self):
        """Returns the pose of the sensor."""
        return self.transform

    def __repr__(self):
        return str(self)

    def __str__(self):
        return f"IMUSetup(name: {self.name}, transform: {self.transform})"
| [
"numpy.identity",
"pylot.utils.Location",
"numpy.tan",
"pylot.utils.Rotation",
"numpy.array",
"pylot.utils.Transform"
] | [((1689, 1699), 'pylot.utils.Rotation', 'Rotation', ([], {}), '()\n', (1697, 1699), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((1835, 1864), 'pylot.utils.Transform', 'Transform', (['left_loc', 'rotation'], {}), '(left_loc, rotation)\n', (1844, 1864), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((1887, 1917), 'pylot.utils.Transform', 'Transform', (['right_loc', 'rotation'], {}), '(right_loc, rotation)\n', (1896, 1917), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((2564, 2574), 'pylot.utils.Rotation', 'Rotation', ([], {}), '()\n', (2572, 2574), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((2655, 2684), 'pylot.utils.Transform', 'Transform', (['location', 'rotation'], {}), '(location, rotation)\n', (2664, 2684), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((313, 323), 'pylot.utils.Rotation', 'Rotation', ([], {}), '()\n', (321, 323), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((651, 661), 'pylot.utils.Rotation', 'Rotation', ([], {}), '()\n', (659, 661), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((1145, 1155), 'pylot.utils.Rotation', 'Rotation', ([], {}), '()\n', (1153, 1155), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((1726, 1756), 'pylot.utils.Location', 'Location', (['(0)', '(-camera_offset)', '(0)'], {}), '(0, -camera_offset, 0)\n', (1734, 1756), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((1784, 1813), 'pylot.utils.Location', 'Location', (['(0)', 'camera_offset', '(0)'], {}), '(0, camera_offset, 0)\n', (1792, 1813), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((3905, 3919), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (3916, 3919), True, 'import numpy as np\n'), ((3085, 3095), 'pylot.utils.Rotation', 'Rotation', ([], {}), '()\n', (3093, 3095), False, 'from pylot.utils import Location, Rotation, Transform\n'), ((4139, 
4166), 'numpy.tan', 'np.tan', (['(fov * np.pi / 360.0)'], {}), '(fov * np.pi / 360.0)\n', (4145, 4166), True, 'import numpy as np\n'), ((4802, 4869), 'numpy.array', 'np.array', (['[[0, 0, 1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]]'], {}), '([[0, 0, 1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])\n', (4810, 4869), True, 'import numpy as np\n'), ((7586, 7653), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]])\n', (7594, 7653), True, 'import numpy as np\n')] |
from genericpath import isfile
import os, os.path
import cv2
import numpy as np
import json
from typing import Dict, List, Tuple
from ..flowstorageconfig import FlowStorageConfig
class FlowIOUtilsFs():
    """Filesystem-backed reader/writer helpers for flow storage artifacts.

    Every artifact path is composed as '<storage_location>/<fn>.<ext>',
    where storage_location comes from the FlowStorageConfig.
    """

    def __init__(self, config: FlowStorageConfig) -> None:
        # Configuration object providing `storage_location` (base directory).
        self._config = config

    def close(self) -> None:
        """No-op: the filesystem backend keeps no open resources."""
        return

    def clean_ext_storage(self) -> None:
        """Deletes every file (recursively) under the storage location."""
        if self._config.storage_location == '.':
            # Refuse to wipe the current working directory.
            print('storage location is not defined!!!')
            return
        for root, dirs, files in os.walk(self._config.storage_location):
            for file in files:
                os.remove(os.path.join(root, file))
        return

    # Readers
    def np_array_reader(self, fn: str) -> np.ndarray:
        """Loads '<fn>.npy' as a numpy array; returns None when missing."""
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.npy'
        if isfile(ffn):
            return np.load(ffn)
        return None

    def json_reader(self, fn: str) -> Dict:
        """Loads '<fn>.json' as a dict; returns None when missing."""
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        if not isfile(ffn):
            return None
        with open(ffn, 'rt') as f:
            data = json.load(f)
        return data

    def list_np_arrays_reader(self, fn: str) -> List[np.ndarray]:
        """Loads '<fn>.json' as a list of numpy arrays; None when missing."""
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        if not isfile(ffn):
            return None
        with open(ffn, 'rt') as f:
            ld = json.load(f)
        data = [np.array(d) for d in ld]
        return data

    def list_of_lists_np_arrays_reader(self, fn: str) -> List[List[np.ndarray]]:
        """Loads '<fn>.json' as numpy arrays.

        NOTE(review): the conversion is identical to list_np_arrays_reader,
        so each inner list comes back as one array rather than a list of
        arrays — confirm this matches the writer's expectations.
        """
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        if not isfile(ffn):
            return None
        with open(ffn, 'rt') as f:
            ld = json.load(f)
        data = [np.array(d) for d in ld]
        return data

    def list_tuples_reader(self, fn: str) -> List[Tuple]:
        """Loads '<fn>.json'.

        NOTE(review): unlike the other readers there is no isfile() guard,
        so a missing file raises FileNotFoundError; the elements are also
        converted to numpy arrays, not tuples — confirm intended.
        """
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        with open(ffn, 'rt') as f:
            ld = json.load(f)
        data = [np.array(d) for d in ld]
        return data

    def list_keypoints_reader(self, fn: str) -> List[cv2.KeyPoint]:
        """Loads '<fn>.json' (written by list_keypoints_writer) as KeyPoints."""
        def _list_dict_to_list_key_points(data: List[Dict]) -> List[cv2.KeyPoint]:
            # Rebuilds cv2.KeyPoint objects from attribute dicts.
            list_kps = []
            for kp_dict in data:
                # NOTE(review): the trailing commas below make 1-tuples;
                # only 'pt' (unpacked via ptl[0]) and 'size' are actually
                # used to build the KeyPoint.
                angle = kp_dict.get('angle'),
                class_id = kp_dict.get('class_id'),
                ptl = kp_dict.get('pt'),
                x = ptl[0][0]
                y = ptl[0][1]
                octave = kp_dict.get('octave'),
                response = kp_dict.get('response'),
                size = kp_dict.get('size')
                # kp = cv2.KeyPoint(x, y, size, angle, response, octave, class_id)
                kp = cv2.KeyPoint(x, y, size)
                list_kps.append(kp)
            return list_kps

        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        with open(ffn, 'rt') as f:
            data = json.load(f)
        list_kps = _list_dict_to_list_key_points(data)
        return list_kps

    # Writers
    def np_array_writer(self, fn: str, arr: np.ndarray) -> None:
        """Saves `arr` to '<fn>.npy'."""
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.npy'
        np.save(ffn, arr)
        return

    def list_np_arrays_writer(self, fn: str, data: List[np.ndarray]) -> None:
        """Saves a list of numpy arrays to '<fn>.json' as nested lists."""
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        sd = [d.tolist() for d in data]
        with open(ffn, 'w') as fp:
            json.dump(sd, fp, indent=2)
        return

    def list_of_lists_np_arrays_writer(self, fn: str, data: List[List[np.ndarray]]) -> None:
        """Saves nested numpy arrays to '<fn>.json' as nested lists."""
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        sd = [d.tolist() for d in data]
        with open(ffn, 'w') as fp:
            json.dump(sd, fp, indent=2)
        return

    def json_writer(self, fn: str, data: Dict) -> None:
        """Saves a JSON-serializable dict to '<fn>.json'."""
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        with open(ffn, 'w') as fp:
            json.dump(data, fp, indent=2)
        return

    def list_tuples_writer(self, fn: str, data: List[Tuple]) -> None:
        """Saves a list of tuples to '<fn>.json'."""
        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        with open(ffn, 'w') as fp:
            json.dump(data, fp, indent=2)
        return

    def list_keypoints_writer(self, fn: str, data: List[cv2.KeyPoint]) -> None:
        """Saves cv2.KeyPoints to '<fn>.json' as attribute dicts."""
        def _list_key_points_to_List_dict(data: List[cv2.KeyPoint]) -> List[Dict]:
            # Serializes each KeyPoint as a plain attribute dict.
            list_dict = []
            for kp in data:
                kp_dict = {
                    'angle': kp.angle,
                    'class_id': kp.class_id,
                    'pt': kp.pt,
                    'octave': kp.octave,
                    'response': kp.response,
                    'size': kp.size
                }
                list_dict.append(kp_dict)
            return list_dict

        ffn = f'{self._config.storage_location}/{fn}'
        ffn = f'{ffn}.json'
        kps = _list_key_points_to_List_dict(data)
        with open(ffn, 'w') as fp:
            json.dump(kps, fp, indent=2)
        return

    # Cleaner
    def data_cleaner(self, ext: str) -> None:
        """Returns a deleter for files with extension `ext`.

        NOTE(review): despite the `-> None` annotation this returns a
        closure taking a file name; the annotation should probably be
        Callable[[str], None] (typing.Callable is not imported here).
        """
        extension = ext

        def _cleaner( fn: str):
            # Deletes '<fn>.<extension>' if present; logs otherwise.
            ffn = f'{self._config.storage_location}/{fn}'
            ffn = f'{ffn}.{extension}'
            if os.path.exists (ffn) :
                os.remove (ffn)
            else :
                print(f'The {ffn} does not exist')
            return

        return _cleaner
| [
"os.path.exists",
"json.dump",
"os.walk",
"os.path.join",
"genericpath.isfile",
"numpy.array",
"json.load",
"numpy.load",
"cv2.KeyPoint",
"numpy.save",
"os.remove"
] | [((514, 552), 'os.walk', 'os.walk', (['self._config.storage_location'], {}), '(self._config.storage_location)\n', (521, 552), False, 'import os, os.path\n'), ((777, 788), 'genericpath.isfile', 'isfile', (['ffn'], {}), '(ffn)\n', (783, 788), False, 'from genericpath import isfile\n'), ((2903, 2920), 'numpy.save', 'np.save', (['ffn', 'arr'], {}), '(ffn, arr)\n', (2910, 2920), True, 'import numpy as np\n'), ((803, 815), 'numpy.load', 'np.load', (['ffn'], {}), '(ffn)\n', (810, 815), True, 'import numpy as np\n'), ((960, 971), 'genericpath.isfile', 'isfile', (['ffn'], {}), '(ffn)\n', (966, 971), False, 'from genericpath import isfile\n'), ((1035, 1047), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1044, 1047), False, 'import json\n'), ((1216, 1227), 'genericpath.isfile', 'isfile', (['ffn'], {}), '(ffn)\n', (1222, 1227), False, 'from genericpath import isfile\n'), ((1289, 1301), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1298, 1301), False, 'import json\n'), ((1524, 1535), 'genericpath.isfile', 'isfile', (['ffn'], {}), '(ffn)\n', (1530, 1535), False, 'from genericpath import isfile\n'), ((1597, 1609), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1606, 1609), False, 'import json\n'), ((1840, 1852), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1849, 1852), False, 'import json\n'), ((2664, 2676), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2673, 2676), False, 'import json\n'), ((3156, 3183), 'json.dump', 'json.dump', (['sd', 'fp'], {'indent': '(2)'}), '(sd, fp, indent=2)\n', (3165, 3183), False, 'import json\n'), ((3434, 3461), 'json.dump', 'json.dump', (['sd', 'fp'], {'indent': '(2)'}), '(sd, fp, indent=2)\n', (3443, 3461), False, 'import json\n'), ((3640, 3669), 'json.dump', 'json.dump', (['data', 'fp'], {'indent': '(2)'}), '(data, fp, indent=2)\n', (3649, 3669), False, 'import json\n'), ((3861, 3890), 'json.dump', 'json.dump', (['data', 'fp'], {'indent': '(2)'}), '(data, fp, indent=2)\n', (3870, 3890), False, 'import json\n'), ((4530, 4558), 
'json.dump', 'json.dump', (['kps', 'fp'], {'indent': '(2)'}), '(kps, fp, indent=2)\n', (4539, 4558), False, 'import json\n'), ((4772, 4791), 'os.path.exists', 'os.path.exists', (['ffn'], {}), '(ffn)\n', (4786, 4791), False, 'import os, os.path\n'), ((1316, 1327), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (1324, 1327), True, 'import numpy as np\n'), ((1624, 1635), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (1632, 1635), True, 'import numpy as np\n'), ((1867, 1878), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (1875, 1878), True, 'import numpy as np\n'), ((2470, 2494), 'cv2.KeyPoint', 'cv2.KeyPoint', (['x', 'y', 'size'], {}), '(x, y, size)\n', (2482, 2494), False, 'import cv2\n'), ((4803, 4817), 'os.remove', 'os.remove', (['ffn'], {}), '(ffn)\n', (4812, 4817), False, 'import os, os.path\n'), ((597, 621), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (609, 621), False, 'import os, os.path\n')] |
import base
# 验证加载器
import matplotlib.pyplot as plt
import numpy as np
# 定义一个pyplot图片查看器
def imshow(img):
    """Displays a normalized (C, H, W) image tensor with matplotlib."""
    # print(img)
    # The tensor holds pixel values normalized to roughly [-1, 1]; map them
    # back into [0, 1] and convert the tensor to a numpy array.
    img = (img / 2 + 0.5).numpy()  # unnormalize
    # print(img)
    # Rearrange the axes from (C, H, W) to (H, W, C), the layout pyplot's
    # imshow expects.
    # See: https://numpy.org/doc/stable/reference/generated/numpy.transpose.html
    img = np.transpose(img, (1, 2, 0))
    # print(img)
    plt.imshow(img)
    plt.show()
# Show one training-set image to verify that the loader works correctly.
train_features, train_labels = next(iter(base.trainloader))
img = train_features[0].squeeze()
label = base.classes[train_labels[0]]
print(f"Feature batch shape: {train_features.size()}")
print(f"Labels batch shape: {train_labels.size()}")
print(f"Label: {label}")
imshow(img)
| [
"matplotlib.pyplot.imshow",
"numpy.transpose",
"matplotlib.pyplot.show"
] | [((428, 456), 'numpy.transpose', 'np.transpose', (['img', '(1, 2, 0)'], {}), '(img, (1, 2, 0))\n', (440, 456), True, 'import numpy as np\n'), ((478, 493), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (488, 493), True, 'import matplotlib.pyplot as plt\n'), ((498, 508), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (506, 508), True, 'import matplotlib.pyplot as plt\n')] |
# --------------------------------------------------------
# CRPN
# Written by <NAME>
# --------------------------------------------------------
import numpy as np
from quad_convert import quad_2_aabb
from sort_points import sort_points
def quad_transform(ex_rois, gt_rois):
    """Computes regression targets mapping ex_rois onto gt_rois.

    Quadrilaterals are (x1, y1, ..., x4, y4); each offset is normalized by
    the width (x) or height (y) of the axis-aligned box enclosing the
    sorted proposal.
    """
    ex_rois = sort_points(ex_rois)
    enclosing = quad_2_aabb(ex_rois)
    norm_w = enclosing[:, 2] - enclosing[:, 0] + 1.0
    norm_h = enclosing[:, 3] - enclosing[:, 1] + 1.0

    columns = []
    for corner in range(4):
        xi = 2 * corner
        yi = xi + 1
        columns.append((gt_rois[:, xi] - ex_rois[:, xi]) / norm_w)
        columns.append((gt_rois[:, yi] - ex_rois[:, yi]) / norm_h)

    return np.vstack(columns).transpose()
def quad_transform_inv(quads, deltas):
    """Applies predicted deltas to quads (inverse of quad_transform)."""
    if quads.shape[0] == 0:
        return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)

    quads = sort_points(quads)
    enclosing = quad_2_aabb(quads)
    norm_w = enclosing[:, 2] - enclosing[:, 0] + 1.0
    norm_h = enclosing[:, 3] - enclosing[:, 1] + 1.0

    pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
    for coord in range(8):
        # Even columns are x coordinates (scaled by width), odd are y.
        scale = norm_w if coord % 2 == 0 else norm_h
        pred_boxes[:, coord::8] = (deltas[:, coord::8] * scale[:, np.newaxis]
                                   + quads[:, coord][:, np.newaxis])
    return pred_boxes
def clip_quads(quads, im_shape):
    """Clip quad corner coordinates to image boundaries, in place.

    Parameters
    ----------
    quads : (N, 8*K) array; slots alternate x (even) and y (odd) coords.
    im_shape : image shape as (height, width, ...).

    Returns
    -------
    The same `quads` array, with every x clamped to [0, width-1] and
    every y clamped to [0, height-1].
    """
    for slot in range(8):
        # Even slots are x coordinates (bounded by width), odd are y
        # coordinates (bounded by height).
        upper = im_shape[1] - 1 if slot % 2 == 0 else im_shape[0] - 1
        quads[:, slot::8] = np.clip(quads[:, slot::8], 0, upper)
    return quads
| [
"numpy.minimum",
"numpy.zeros",
"numpy.vstack",
"quad_convert.quad_2_aabb",
"sort_points.sort_points"
] | [((292, 312), 'sort_points.sort_points', 'sort_points', (['ex_rois'], {}), '(ex_rois)\n', (303, 312), False, 'from sort_points import sort_points\n'), ((329, 349), 'quad_convert.quad_2_aabb', 'quad_2_aabb', (['ex_rois'], {}), '(ex_rois)\n', (340, 349), False, 'from quad_convert import quad_2_aabb\n'), ((1564, 1582), 'sort_points.sort_points', 'sort_points', (['quads'], {}), '(quads)\n', (1575, 1582), False, 'from sort_points import sort_points\n'), ((1596, 1614), 'quad_convert.quad_2_aabb', 'quad_2_aabb', (['quads'], {}), '(quads)\n', (1607, 1614), False, 'from quad_convert import quad_2_aabb\n'), ((2602, 2644), 'numpy.zeros', 'np.zeros', (['deltas.shape'], {'dtype': 'deltas.dtype'}), '(deltas.shape, dtype=deltas.dtype)\n', (2610, 2644), True, 'import numpy as np\n'), ((1500, 1550), 'numpy.zeros', 'np.zeros', (['(0, deltas.shape[1])'], {'dtype': 'deltas.dtype'}), '((0, deltas.shape[1]), dtype=deltas.dtype)\n', (1508, 1550), True, 'import numpy as np\n'), ((3059, 3102), 'numpy.minimum', 'np.minimum', (['quads[:, 0::8]', '(im_shape[1] - 1)'], {}), '(quads[:, 0::8], im_shape[1] - 1)\n', (3069, 3102), True, 'import numpy as np\n'), ((3139, 3182), 'numpy.minimum', 'np.minimum', (['quads[:, 1::8]', '(im_shape[0] - 1)'], {}), '(quads[:, 1::8], im_shape[0] - 1)\n', (3149, 3182), True, 'import numpy as np\n'), ((3219, 3262), 'numpy.minimum', 'np.minimum', (['quads[:, 2::8]', '(im_shape[1] - 1)'], {}), '(quads[:, 2::8], im_shape[1] - 1)\n', (3229, 3262), True, 'import numpy as np\n'), ((3299, 3342), 'numpy.minimum', 'np.minimum', (['quads[:, 3::8]', '(im_shape[0] - 1)'], {}), '(quads[:, 3::8], im_shape[0] - 1)\n', (3309, 3342), True, 'import numpy as np\n'), ((3379, 3422), 'numpy.minimum', 'np.minimum', (['quads[:, 4::8]', '(im_shape[1] - 1)'], {}), '(quads[:, 4::8], im_shape[1] - 1)\n', (3389, 3422), True, 'import numpy as np\n'), ((3459, 3502), 'numpy.minimum', 'np.minimum', (['quads[:, 5::8]', '(im_shape[0] - 1)'], {}), '(quads[:, 5::8], im_shape[0] - 1)\n', (3469, 
3502), True, 'import numpy as np\n'), ((3539, 3582), 'numpy.minimum', 'np.minimum', (['quads[:, 6::8]', '(im_shape[1] - 1)'], {}), '(quads[:, 6::8], im_shape[1] - 1)\n', (3549, 3582), True, 'import numpy as np\n'), ((3619, 3662), 'numpy.minimum', 'np.minimum', (['quads[:, 7::8]', '(im_shape[0] - 1)'], {}), '(quads[:, 7::8], im_shape[0] - 1)\n', (3629, 3662), True, 'import numpy as np\n'), ((1257, 1368), 'numpy.vstack', 'np.vstack', (['(target_dx1, target_dy1, target_dx2, target_dy2, target_dx3, target_dy3,\n target_dx4, target_dy4)'], {}), '((target_dx1, target_dy1, target_dx2, target_dy2, target_dx3,\n target_dy3, target_dx4, target_dy4))\n', (1266, 1368), True, 'import numpy as np\n')] |
"""
Module providing the survey scheduler.
"""
import os,sys
import copy
import numpy as np
import time
import ephem
import matplotlib.pyplot as plt
import logging
from collections import OrderedDict as odict
import obztak.utils.projector
import obztak.utils.constants
import obztak.utils.ortho
from obztak.utils import constants
from obztak.utils import ortho
from obztak.utils import fileio
from obztak.ctio import CTIO
from obztak.field import FieldArray
from obztak.tactician import CoverageTactician
from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite
from obztak.factory import tactician_factory
# For debugging (use the verbose command line argument)
#logging.basicConfig(level=20) # KCB
############################################################
class Scheduler(object):
    """
    Deal with survey scheduling.

    Holds the target fields, the observing windows, and the set of
    already-completed fields, and drives a "tactician" object to pick
    the next field(s) to observe at a given time.
    """
    # Default data files used when no explicit targets/windows are given.
    _defaults = odict([
        #('tactician','coverage'),
        ('windows',os.path.join(fileio.get_datadir(),"maglites-windows.csv")),
        ('targets',os.path.join(fileio.get_datadir(),"maglites-target-fields.csv")),
    ])
    # Subclasses may override with a survey-specific FieldArray subclass.
    FieldType = FieldArray

    def __init__(self,target_fields=None,windows=None,completed_fields=None):
        """Load targets, windows, and completed fields, and set up CTIO."""
        self.load_target_fields(target_fields)
        self.load_windows(windows)
        self.load_observed_fields()
        self.load_completed_fields(completed_fields)
        # Fields scheduled during this session (reset by `run`).
        self.scheduled_fields = self.FieldType()
        self.observatory = CTIO()

    def load_target_fields(self, target_fields=None):
        """Load the target fields from a filename or array-like object.

        Falls back to the survey default target file when None.
        """
        if target_fields is None:
            target_fields = self._defaults['targets']

        if isinstance(target_fields,basestring):
            self.target_fields = self.FieldType.read(target_fields)
        else:
            self.target_fields = self.FieldType(target_fields)
        return self.target_fields

    def load_windows(self, windows=None):
        """
        Load the set of start and stop times for the observation windows.
        """
        if windows is None:
            windows = self._defaults['windows']
            logging.info("Setting default observing windows:\n %s"%windows)

        if isinstance(windows,basestring):
            windows = fileio.csv2rec(windows)

        self.windows = []
        for start,end in windows:
            # Store each window as a pair of ephem.Date objects.
            self.windows.append([ephem.Date(start), ephem.Date(end)])

        # Sanity check that observation windows are properly sorted
        for ii,(start,end) in enumerate(self.windows):
            msg = 'Observation windows are not properly sorted\n'
            msg+= '%s: %s -- %s'%(get_nite(start),datestr(start),datestr(end))
            if (end < start):
                logging.warn(msg)
            if ii > 0 and (start < self.windows[ii-1][1]):
                # Window starts before the previous window ends.
                logging.warn(msg)

        logging.info('Observation Windows:')
        for start,end in self.windows:
            logging.info(' %s: %s UTC -- %s UTC'%(get_nite(start),datestr(start),datestr(end)))
        logging.info(30*'-')

    def load_observed_fields(self):
        """
        Load fields from the telemetry database that were already observed.

        Falls back to an empty FieldArray if the database is unreachable.
        """
        try:
            fields = self.FieldType.load_database()
        except Exception as e:
            logging.warn("Failed to load completed exposures from database")
            logging.info(e)
            fields = self.FieldType()
        self.observed_fields = fields
        return self.observed_fields

    def load_completed_fields(self, completed_fields=None):
        """Load completed fields. The default behavior is to load the
        observed_fields as completed_fields. However, if the string
        'None' is passed then return an empty FieldArray.

        Parameters:
        -----------
        completed_fields : Filename, list of filenames, or FieldArray-type object.

        Returns:
        --------
        fields : FieldArray of the completed fields
        """
        # Deal with 'None' string
        if isinstance(completed_fields,list):
            if completed_fields[0].lower()=='none':
                self.completed_fields = self.FieldType()
                return self.completed_fields
        elif isinstance(completed_fields,basestring):
            if completed_fields.lower()=='none':
                self.completed_fields = self.FieldType()
                return self.completed_fields

        # Start from the database-observed fields.
        self.completed_fields = copy.deepcopy(self.observed_fields)

        if not completed_fields:
            return self.completed_fields

        if isinstance(completed_fields,basestring):
            completed_fields = [completed_fields]

        if isinstance(completed_fields,list):
            # Concatenate all files into a single FieldArray.
            fields = self.FieldType()
            for filename in completed_fields:
                fields = fields + self.FieldType.read(filename)
            completed_fields = fields

        # Only append fields that are not already marked as completed.
        new=~np.in1d(completed_fields.unique_id,self.completed_fields.unique_id)
        new_fields = completed_fields[new]
        self.completed_fields = self.completed_fields + new_fields
        return self.completed_fields

    def create_tactician(self, mode=None):
        """Instantiate the field-selection tactician for the given mode."""
        return tactician_factory(cls=mode,mode=mode)

    def select_field(self, date, mode=None):
        """
        Select field(s) using the survey tactician.

        Parameters:
        -----------
        date : ephem.Date object
        mode : Type of tactician to use for selecting field

        Returns:
        --------
        field : selected field(s) from tactician
        """
        # Only offer fields that have not yet been completed.
        sel = ~np.in1d(self.target_fields['ID'],self.completed_fields['ID'])

        # NOTE(review): the tactician is re-created on every call; see
        # the comment in `run` about this not being safe.
        self.tactician = self.create_tactician(mode)
        self.tactician.set_date(date)
        self.tactician.set_target_fields(self.target_fields[sel])
        self.tactician.set_completed_fields(self.completed_fields)
        field_select = self.tactician.select_fields()

        logging.debug(str(field_select))

        # For diagnostic purposes
        if False and len(self.scheduled_fields) % 10 == 0:
            weight = self.tactician.weight
            ortho.plotWeight(field_select[-1], self.target_fields, self.tactician.weight)
            raw_input('WAIT')

        if len(field_select) == 0:
            logging.error("No field selected... we've got problems.")
            msg = "date=%s\n"%(datestr(date))
            # NOTE(review): `index_select`, `index`, `cut`, and `weight`
            # are never defined in this scope (weight only exists inside
            # the disabled `if False` block above), so this error path
            # raises NameError before reaching the intended Exception.
            msg += "index_select=%s, index=%s\n"%(index_select,index)
            msg += "nselected=%s, selection=%s\n"%(cut.sum(),cut[index_select])
            msg += "weights=%s"%weight
            logging.info(msg)
            #ortho.plotWeight(self.scheduled_fields[-1], self.target_fields, self.tactician.weight)
            ortho.plotField(self.scheduled_fields[-1],self.scheduled_fields,options_basemap=dict(date='2017/02/20 05:00:00'))
            raw_input('WAIT')
            import pdb; pdb.set_trace()
            raise Exception()

        return field_select

    def run(self, tstart=None, tstop=None, clip=False, plot=False, mode=None):
        """
        Schedule a chunk of exposures. This is the loop where date is incremented

        Parameters:
        -----------
        tstart : Chunk start time
        tstop  : Chunk end time (may be replace with chunk length)
        plot   : Plot the chunk (may be removed)

        Returns:
        --------
        fields : Scheduled fields
        """
        # Reset the scheduled fields
        self.scheduled_fields = self.FieldType()

        # If no tstop, run for 90 minutes
        timedelta = 90*ephem.minute
        if tstart is None: tstart = ephem.now()
        if tstop is None: tstop = tstart + timedelta
        # Convert strings into dates
        if isinstance(tstart,basestring):
            tstart = ephem.Date(tstart)
        if isinstance(tstop,basestring):
            tstop = ephem.Date(tstop)

        msg  = "Run start: %s\n"%datestr(tstart,4)
        msg += "Run end: %s\n"%datestr(tstop,4)
        msg += "Run time: %s minutes"%(timedelta/ephem.minute)
        logging.debug(msg)

        msg = "Previously completed fields: %i"%len(self.completed_fields)
        logging.info(msg)

        # This is not safe since tactician is re-created in select_field
        self.tactician = self.create_tactician(mode)
        msg = "Scheduling with '%s' in mode '%s'"%(self.tactician.__class__.__name__,self.tactician.mode)
        logging.info(msg)

        date = tstart
        latch = True
        while latch:
            logging.debug(' '+datestr(date,4))

            # Check to see if in valid observation window
            # NOTE(review): if self.windows is None, `inside` is never
            # assigned and the `except` branch below would raise NameError.
            if self.windows is not None:
                inside = False
                for window in self.windows:
                    if date >= window[0] and date < window[-1]:
                        inside = True
                        break

                if not inside:
                    if clip:
                        break
                    else:
                        msg = 'Date outside of nominal observing windows'
                        logging.warning(msg)

            # Select one (or more) fields from the tactician
            try:
                field_select = self.select_field(date, mode)
            except Exception as e:
                # Only write if error occurred outside observing window
                if not inside:
                    logging.warning(str(e))
                    break
                else:
                    raise(e)

            # Now update the time from the selected field
            date = ephem.Date(field_select[-1]['DATE']) + constants.FIELDTIME

            self.completed_fields = self.completed_fields + field_select
            self.scheduled_fields = self.scheduled_fields + field_select

            # Log one line per selected field with airmass/slew/moon info.
            msg=" %(DATE).19s: id=%(ID)10s, secz=%(AIRMASS).2f, slew=%(SLEW).2f"
            msg+=", moon=%(PHASE).0f%%,%(ALT).0fdeg"
            for i,f in zip(field_select.unique_id,field_select):
                params = dict([('ID',i)]+[(k,f[k]) for k in f.dtype.names])
                params.update({'PHASE':self.tactician.moon.phase,"ALT":np.degrees(self.tactician.moon.alt)})
                logging.info(msg%params)

            #if plot: self.plotField(date, field_select)
            if plot:
                ortho.plotField(field_select[:-1],self.target_fields,self.completed_fields)

            if date >= tstop: break

        msg = "Newly scheduled fields: %i"%len(self.scheduled_fields)
        logging.info(msg)

        return self.scheduled_fields

    def schedule_field(self, hex, tiling, band=None, date=None, plot=False, mode=None):
        """
        Schedule a single filed at a given time.

        Parameters:
        -----------
        hexid  : the hex ID of the field
        tiling : the tiling number of the field
        band   : The band of the field
        date   : The date/time for observation
        plot   : Plot the output
        mode   : Mode for scheduler tactician

        Returns:
        --------
        field : The scheduled field
        """
        # Probably cleaner to make this it's own tactician
        date = ephem.Date(date) if date else ephem.now()

        select  = (self.target_fields['HEX']==hex)
        select &= (self.target_fields['TILING']==tiling)
        if band is not None:
            select &= (self.target_fields['FILTER']==band)
        index = np.nonzero(select)[0]

        field = self.target_fields[select]
        nfields = select.sum()
        # Stamp every matching field with the requested observation time.
        field['DATE'] = map(datestring,nfields*[date])
        return field

    def schedule_chunk(self,tstart=None,chunk=60,clip=False,plot=False,mode=None):
        """
        Schedule a chunk of exposures.

        Parameters:
        -----------
        tstart : Start time (UTC); in `None` use `ephem.now()`
        chunk  : Chunk of time to schedule.
        plot   : Dynamically plot each scheduled exposure
        mode   : Mode for scheduler tactician

        Returns:
        --------
        fields : Scheduled fields
        """
        # If no tstop, run for 90 minutes
        if tstart is None: tstart = ephem.now()
        tstop = tstart + chunk*ephem.minute

        return self.run(tstart,tstop,clip,plot,mode)

    def schedule_nite(self,date=None,chunk=60,clip=False,plot=False,mode=None):
        """
        Schedule a night of observing.

        A `nite` is defined by the day (UTC) at noon local time before
        observing started.

        Parameters:
        -----------
        date  : The date of the nite to schedule
        chunk : The duration of a chunk of exposures (minutes)
        plot  : Dynamically plot the progress after each chunk
        mode  : Mode for scheduler tactician

        Returns:
        --------
        chunks : A list of the chunks generated for the scheduled nite.
        """
        # Create the nite
        nite = get_nite(date)

        # Convert chunk to MJD
        if chunk > 1: chunk = chunk*ephem.minute

        try:
            nites = [get_nite(w[0]) for w in self.windows]
            idx = nites.index(nite)
            start,finish = self.windows[idx]
        except (TypeError, ValueError):
            # NOTE(review): if the TypeError came from `self.windows`
            # being None, `nites` is undefined here and the join below
            # raises NameError.
            msg = "Requested nite (%s) not found in windows:\n"%nite
            msg += '['+', '.join([n for n in nites])+']'
            logging.warning(msg)

            # Fall back to twilight-to-sunrise for the requested date.
            start = date
            self.observatory.date = date
            self.observatory.horizon = self.observatory.twilight
            finish = self.observatory.next_rising(ephem.Sun(), use_center=True)
            self.observatory.horizon = '0'

        logging.info("Night start (UTC): %s"%datestr(start))
        logging.info("Night finish (UTC): %s"%datestr(finish))

        chunks = []
        i = 0
        while start < finish:
            i+=1
            msg = "Scheduling %s -- Chunk %i"%(start,i)
            logging.debug(msg)
            end = start+chunk
            scheduled_fields = self.run(start,end,clip=clip,plot=False,mode=mode)

            if plot:
                field_select = scheduled_fields[-1:]
                bmap = ortho.plotField(field_select,self.target_fields,self.completed_fields)
                if (raw_input(' ...continue ([y]/n)').lower()=='n'):
                    import pdb; pdb.set_trace()

            chunks.append(scheduled_fields)
            # Advance to the time of the last scheduled exposure.
            start = ephem.Date(chunks[-1]['DATE'][-1]) + constants.FIELDTIME
            #start = end

        if plot: raw_input(' ...finish... ')

        return chunks

    def schedule_survey(self,start=None,end=None,chunk=60,plot=False,mode=None):
        """
        Schedule the entire survey.

        Parameters:
        -----------
        start : Start of survey (int or str)
        end   : End of survey (int or str)
        chunk : The duration of a chunk of exposures (minutes)
        plot  : Dynamically plot the progress after each night
        mode  : Mode of scheduler tactician

        Returns:
        --------
        scheduled_nites : An ordered dictionary of scheduled nites
        """
        self.scheduled_nites = odict()

        for tstart,tend in self.windows:
            # Skip windows outside the requested survey range.
            if start is not None and ephem.Date(tstart) < ephem.Date(start):
                continue
            if end is not None and ephem.Date(tend) > ephem.Date(end):
                continue

            #nite = nitestring(tstart)
            nite = get_nite(tstart)

            try:
                chunks = self.schedule_nite(tstart,chunk,clip=True,plot=False,mode=mode)
            except ValueError as error:
                # Plot the state at the failure before re-raising.
                ortho.plotField(self.completed_fields[-1:],self.target_fields,
                                self.completed_fields)
                raise(error)

            self.scheduled_nites[nite] = chunks

            if plot:
                ortho.plotField(self.completed_fields[-1:],self.target_fields,self.completed_fields)#,options_basemap=dict(date='2017/02/21 05:00:00'))

                if (raw_input(' ...continue ([y]/n)').lower()=='n'):
                    import pdb; pdb.set_trace()

        if plot: raw_input(' ...finish... ')
        return self.scheduled_nites

    def write(self,filename):
        """Write the fields scheduled in this session to `filename`."""
        self.scheduled_fields.write(filename)

    @classmethod
    def common_parser(cls):
        """
        Common argument parser for scheduler tools.
        """
        from obztak.utils.parser import Parser, DatetimeAction

        description = __doc__
        parser = Parser(description=description)
        #parser.add_argument('--survey',choices=['obztak','maglites','bliss'],
        #                    default = None, help='choose survey to schedule.')
        parser.add_argument('-p','--plot',action='store_true',
                            help='create visual output.')
        parser.add_argument('--utc','--utc-start',dest='utc_start',action=DatetimeAction,
                            help="start time for observation.")
        parser.add_argument('--utc-end',action=DatetimeAction,
                            help="end time for observation.")
        parser.add_argument('-k','--chunk', default=60., type=float,
                            help = 'time chunk')
        parser.add_argument('-f','--fields',default=None,
                            help='all target fields.')
        #parser.add_argument('-m','--mode',default='coverage',
        #                    help='Mode for scheduler tactician.')
        parser.add_argument('-m','--mode',default=None,
                            help='Mode for scheduler tactician.')
        parser.add_argument('-w','--windows',default=None,
                            help='observation windows.')
        parser.add_argument('-c','--complete',nargs='?',action='append',
                            help="fields that have been completed.")
        parser.add_argument('-o','--outfile',default=None,
                            help='save output file of scheduled fields.')
        parser.add_argument('--write-protect',action='store_true',
                            help='write-protect output files')
        return parser

    @classmethod
    def parser(cls):
        """Argument parser for this scheduler (delegates to common_parser)."""
        return cls.common_parser()

    @classmethod
    def main(cls):
        """Command-line entry point: parse args, run, optionally write output."""
        args = cls.parser().parse_args()
        scheduler = cls(args.fields,args.windows,args.complete)
        scheduler.run(tstart=args.utc_start,tstop=args.utc_end,plot=args.plot)
        if args.outfile:
            scheduler.scheduled_fields.write(args.outfile)
        return scheduler
############################################################
if __name__ == '__main__':
    # Run the scheduler from the command line.
    scheduler = Scheduler.main()
############################################################
| [
"logging.debug",
"ephem.Sun",
"obztak.ctio.CTIO",
"obztak.factory.tactician_factory",
"obztak.utils.ortho.plotWeight",
"copy.deepcopy",
"ephem.Date",
"logging.info",
"logging.error",
"obztak.utils.parser.Parser",
"logging.warn",
"obztak.utils.fileio.get_datadir",
"obztak.utils.fileio.csv2rec... | [((1451, 1457), 'obztak.ctio.CTIO', 'CTIO', ([], {}), '()\n', (1455, 1457), False, 'from obztak.ctio import CTIO\n'), ((2779, 2815), 'logging.info', 'logging.info', (['"""Observation Windows:"""'], {}), "('Observation Windows:')\n", (2791, 2815), False, 'import logging\n'), ((2960, 2982), 'logging.info', 'logging.info', (["(30 * '-')"], {}), "(30 * '-')\n", (2972, 2982), False, 'import logging\n'), ((4393, 4428), 'copy.deepcopy', 'copy.deepcopy', (['self.observed_fields'], {}), '(self.observed_fields)\n', (4406, 4428), False, 'import copy\n'), ((5129, 5167), 'obztak.factory.tactician_factory', 'tactician_factory', ([], {'cls': 'mode', 'mode': 'mode'}), '(cls=mode, mode=mode)\n', (5146, 5167), False, 'from obztak.factory import tactician_factory\n'), ((7990, 8008), 'logging.debug', 'logging.debug', (['msg'], {}), '(msg)\n', (8003, 8008), False, 'import logging\n'), ((8093, 8110), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (8105, 8110), False, 'import logging\n'), ((8352, 8369), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (8364, 8369), False, 'import logging\n'), ((10420, 10437), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (10432, 10437), False, 'import logging\n'), ((12815, 12829), 'obztak.utils.date.get_nite', 'get_nite', (['date'], {}), '(date)\n', (12823, 12829), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((15011, 15018), 'collections.OrderedDict', 'odict', ([], {}), '()\n', (15016, 15018), True, 'from collections import OrderedDict as odict\n'), ((16378, 16409), 'obztak.utils.parser.Parser', 'Parser', ([], {'description': 'description'}), '(description=description)\n', (16384, 16409), False, 'from obztak.utils.parser import Parser, DatetimeAction\n'), ((2059, 2127), 'logging.info', 'logging.info', (['("""Setting default observing windows:\n %s""" % windows)'], {}), '("""Setting default observing windows:\n %s""" % windows)\n', (2071, 
2127), False, 'import logging\n'), ((2189, 2212), 'obztak.utils.fileio.csv2rec', 'fileio.csv2rec', (['windows'], {}), '(windows)\n', (2203, 2212), False, 'from obztak.utils import fileio\n'), ((4855, 4923), 'numpy.in1d', 'np.in1d', (['completed_fields.unique_id', 'self.completed_fields.unique_id'], {}), '(completed_fields.unique_id, self.completed_fields.unique_id)\n', (4862, 4923), True, 'import numpy as np\n'), ((5539, 5601), 'numpy.in1d', 'np.in1d', (["self.target_fields['ID']", "self.completed_fields['ID']"], {}), "(self.target_fields['ID'], self.completed_fields['ID'])\n", (5546, 5601), True, 'import numpy as np\n'), ((6072, 6149), 'obztak.utils.ortho.plotWeight', 'ortho.plotWeight', (['field_select[-1]', 'self.target_fields', 'self.tactician.weight'], {}), '(field_select[-1], self.target_fields, self.tactician.weight)\n', (6088, 6149), False, 'from obztak.utils import ortho\n'), ((6228, 6285), 'logging.error', 'logging.error', (['"""No field selected... we\'ve got problems."""'], {}), '("No field selected... 
we\'ve got problems.")\n', (6241, 6285), False, 'import logging\n'), ((6534, 6551), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (6546, 6551), False, 'import logging\n'), ((6832, 6847), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (6845, 6847), False, 'import pdb\n'), ((7555, 7566), 'ephem.now', 'ephem.now', ([], {}), '()\n', (7564, 7566), False, 'import ephem\n'), ((7721, 7739), 'ephem.Date', 'ephem.Date', (['tstart'], {}), '(tstart)\n', (7731, 7739), False, 'import ephem\n'), ((7801, 7818), 'ephem.Date', 'ephem.Date', (['tstop'], {}), '(tstop)\n', (7811, 7818), False, 'import ephem\n'), ((7853, 7871), 'obztak.utils.date.datestr', 'datestr', (['tstart', '(4)'], {}), '(tstart, 4)\n', (7860, 7871), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((7902, 7919), 'obztak.utils.date.datestr', 'datestr', (['tstop', '(4)'], {}), '(tstop, 4)\n', (7909, 7919), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((11078, 11094), 'ephem.Date', 'ephem.Date', (['date'], {}), '(date)\n', (11088, 11094), False, 'import ephem\n'), ((11108, 11119), 'ephem.now', 'ephem.now', ([], {}), '()\n', (11117, 11119), False, 'import ephem\n'), ((11333, 11351), 'numpy.nonzero', 'np.nonzero', (['select'], {}), '(select)\n', (11343, 11351), True, 'import numpy as np\n'), ((12052, 12063), 'ephem.now', 'ephem.now', ([], {}), '()\n', (12061, 12063), False, 'import ephem\n'), ((13803, 13821), 'logging.debug', 'logging.debug', (['msg'], {}), '(msg)\n', (13816, 13821), False, 'import logging\n'), ((15318, 15334), 'obztak.utils.date.get_nite', 'get_nite', (['tstart'], {}), '(tstart)\n', (15326, 15334), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((2659, 2676), 'logging.warn', 'logging.warn', (['msg'], {}), '(msg)\n', (2671, 2676), False, 'import logging\n'), ((2752, 2769), 'logging.warn', 'logging.warn', (['msg'], {}), '(msg)\n', (2764, 2769), 
False, 'import logging\n'), ((3226, 3290), 'logging.warn', 'logging.warn', (['"""Failed to load completed exposures from database"""'], {}), "('Failed to load completed exposures from database')\n", (3238, 3290), False, 'import logging\n'), ((3303, 3318), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (3315, 3318), False, 'import logging\n'), ((6318, 6331), 'obztak.utils.date.datestr', 'datestr', (['date'], {}), '(date)\n', (6325, 6331), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((9502, 9538), 'ephem.Date', 'ephem.Date', (["field_select[-1]['DATE']"], {}), "(field_select[-1]['DATE'])\n", (9512, 9538), False, 'import ephem\n'), ((10109, 10135), 'logging.info', 'logging.info', (['(msg % params)'], {}), '(msg % params)\n', (10121, 10135), False, 'import logging\n'), ((10229, 10306), 'obztak.utils.ortho.plotField', 'ortho.plotField', (['field_select[:-1]', 'self.target_fields', 'self.completed_fields'], {}), '(field_select[:-1], self.target_fields, self.completed_fields)\n', (10244, 10306), False, 'from obztak.utils import ortho\n'), ((12946, 12960), 'obztak.utils.date.get_nite', 'get_nite', (['w[0]'], {}), '(w[0])\n', (12954, 12960), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((13243, 13263), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (13258, 13263), False, 'import logging\n'), ((14032, 14104), 'obztak.utils.ortho.plotField', 'ortho.plotField', (['field_select', 'self.target_fields', 'self.completed_fields'], {}), '(field_select, self.target_fields, self.completed_fields)\n', (14047, 14104), False, 'from obztak.utils import ortho\n'), ((14285, 14319), 'ephem.Date', 'ephem.Date', (["chunks[-1]['DATE'][-1]"], {}), "(chunks[-1]['DATE'][-1])\n", (14295, 14319), False, 'import ephem\n'), ((15732, 15823), 'obztak.utils.ortho.plotField', 'ortho.plotField', (['self.completed_fields[-1:]', 'self.target_fields', 'self.completed_fields'], {}), 
'(self.completed_fields[-1:], self.target_fields, self.\n completed_fields)\n', (15747, 15823), False, 'from obztak.utils import ortho\n'), ((958, 978), 'obztak.utils.fileio.get_datadir', 'fileio.get_datadir', ([], {}), '()\n', (976, 978), False, 'from obztak.utils import fileio\n'), ((1037, 1057), 'obztak.utils.fileio.get_datadir', 'fileio.get_datadir', ([], {}), '()\n', (1055, 1057), False, 'from obztak.utils import fileio\n'), ((2307, 2324), 'ephem.Date', 'ephem.Date', (['start'], {}), '(start)\n', (2317, 2324), False, 'import ephem\n'), ((2326, 2341), 'ephem.Date', 'ephem.Date', (['end'], {}), '(end)\n', (2336, 2341), False, 'import ephem\n'), ((2568, 2583), 'obztak.utils.date.get_nite', 'get_nite', (['start'], {}), '(start)\n', (2576, 2583), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((2584, 2598), 'obztak.utils.date.datestr', 'datestr', (['start'], {}), '(start)\n', (2591, 2598), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((2599, 2611), 'obztak.utils.date.datestr', 'datestr', (['end'], {}), '(end)\n', (2606, 2611), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((8465, 8481), 'obztak.utils.date.datestr', 'datestr', (['date', '(4)'], {}), '(date, 4)\n', (8472, 8481), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((13446, 13457), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (13455, 13457), False, 'import ephem\n'), ((14204, 14219), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (14217, 14219), False, 'import pdb\n'), ((15098, 15116), 'ephem.Date', 'ephem.Date', (['tstart'], {}), '(tstart)\n', (15108, 15116), False, 'import ephem\n'), ((15119, 15136), 'ephem.Date', 'ephem.Date', (['start'], {}), '(start)\n', (15129, 15136), False, 'import ephem\n'), ((15198, 15214), 'ephem.Date', 'ephem.Date', (['tend'], {}), '(tend)\n', (15208, 15214), False, 'import 
ephem\n'), ((15217, 15232), 'ephem.Date', 'ephem.Date', (['end'], {}), '(end)\n', (15227, 15232), False, 'import ephem\n'), ((15498, 15589), 'obztak.utils.ortho.plotField', 'ortho.plotField', (['self.completed_fields[-1:]', 'self.target_fields', 'self.completed_fields'], {}), '(self.completed_fields[-1:], self.target_fields, self.\n completed_fields)\n', (15513, 15589), False, 'from obztak.utils import ortho\n'), ((15970, 15985), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (15983, 15985), False, 'import pdb\n'), ((2906, 2921), 'obztak.utils.date.get_nite', 'get_nite', (['start'], {}), '(start)\n', (2914, 2921), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((2922, 2936), 'obztak.utils.date.datestr', 'datestr', (['start'], {}), '(start)\n', (2929, 2936), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((2937, 2949), 'obztak.utils.date.datestr', 'datestr', (['end'], {}), '(end)\n', (2944, 2949), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((9004, 9024), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (9019, 9024), False, 'import logging\n'), ((10055, 10090), 'numpy.degrees', 'np.degrees', (['self.tactician.moon.alt'], {}), '(self.tactician.moon.alt)\n', (10065, 10090), True, 'import numpy as np\n'), ((13570, 13584), 'obztak.utils.date.datestr', 'datestr', (['start'], {}), '(start)\n', (13577, 13584), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n'), ((13636, 13651), 'obztak.utils.date.datestr', 'datestr', (['finish'], {}), '(finish)\n', (13643, 13651), False, 'from obztak.utils.date import get_nite, datestr, datestring, nitestring, utc2nite\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
def feature_combination(batchsize, mfcc_features, text_features):
    """Concatenate MFCC and text feature arrays along the first axis.

    Parameters
    ----------
    batchsize : unused; kept for backward-compatible call signatures.
    mfcc_features : array of acoustic (MFCC) features.
    text_features : array of text features.

    Returns
    -------
    numpy array with `text_features` appended after `mfcc_features`.
    """
    combined = np.concatenate([mfcc_features, text_features])
    return combined
| [
"numpy.concatenate"
] | [((248, 294), 'numpy.concatenate', 'np.concatenate', (['(mfcc_features, text_features)'], {}), '((mfcc_features, text_features))\n', (262, 294), True, 'import numpy as np\n')] |
import gzip
import os
from urllib.request import urlretrieve
import numpy as np
import tensorflow as tf
from odin.fuel.dataset_base import IterableDataset, get_partition
from odin.utils import md5_checksum, one_hot
from odin.utils.net_utils import download_and_extract
# ===========================================================================
# Helpers
# ===========================================================================
class ImageDataset(IterableDataset):
    """Base class for image datasets: sampling/plotting helpers and
    pixel normalization shared by concrete datasets."""

    def sample_images(self,
                      save_path=None,
                      dpi=120,
                      n_samples=25,
                      partition='train',
                      seed=1):
        r"""Sample a subset of images from the given partition.

        Draws up to 10 batches of `n_samples` images (50% labelled via
        `inc_labels=0.5`), randomly keeps one batch, and optionally saves
        an n-by-n grid of them to `save_path`.

        Arguments:
            save_path : if given, save the plotted grid to this file.
            dpi : figure resolution for the saved file.
            n_samples : number of images per batch; must be a perfect square.
            partition : dataset partition to sample from.
            seed : RNG seed for choosing which batch to keep.

        Returns:
            The sampled batch of images as a numpy array.
        """
        n = int(np.sqrt(n_samples))
        assert n * n == n_samples, "Sqrt of n_samples is not an integer"
        train = self.create_dataset(batch_size=n_samples,
                                    partition=str(partition),
                                    inc_labels=0.5)
        # prepare the data
        images = []
        labels = []
        mask = []
        for data in train.take(10):
            if isinstance(data, dict):
                X, y = data['inputs']
                mask.append(data['mask'])
            elif isinstance(data, (tuple, list)):
                if len(data) >= 2:
                    X, y = data[:2]
                else:
                    X = data[0]
                    y = None
            else:
                X = data
                y = None
            images.append(X)
            if y is not None:
                labels.append(y)
        rand = np.random.RandomState(seed=seed)
        # Fix: pick among the batches actually collected; the dataset may
        # yield fewer than 10 batches, and rand.choice(10) could then
        # index out of range.
        idx = rand.choice(len(images))
        images = images[idx].numpy()
        labels = labels[idx].numpy() if len(labels) > 0 else None
        mask = mask[idx].numpy().ravel() if len(mask) > 0 else None
        # check labels type
        labels_type = 'multinomial'
        # Fix: np.array_equal instead of `np.all(unique == [0., 1.])` --
        # the elementwise compare breaks (shape mismatch) whenever the
        # number of unique label values is not exactly 2.
        if labels is not None and np.array_equal(np.unique(labels), [0., 1.]):
            labels_type = 'binary'
        # plot and save the figure
        if save_path is not None:
            plot_images = images
            if plot_images.shape[-1] == 1:
                plot_images = np.squeeze(plot_images, axis=-1)
            from matplotlib import pyplot as plt
            fig = plt.figure(figsize=(16, 16))
            for i in range(n_samples):
                plt.subplot(n, n, i + 1)
                img = plot_images[i]
                plt.imshow(img, cmap='gray' if img.ndim == 2 else None)
                plt.axis('off')
                if labels is not None:
                    if labels_type == 'binary':
                        # Fix: np.bool was removed in NumPy 1.24; use the
                        # builtin bool for the boolean mask dtype.
                        y = [
                            str(j) for j in self.labels[np.array(labels[i], dtype=bool)]
                        ]
                        lab = ('\n'.join(y) + '\n') if len(y) > 1 else (y[0] + ' ')
                    else:
                        lab = '\n'.join(
                            ["%s=%s" % (l, str(j)) for l, j in zip(self.labels, labels[i])])
                        lab += '\n'
                    m = True if mask is None else mask[i]
                    plt.title("%s[Mask:%s]" % (lab, m), fontsize=6)
            plt.tight_layout()
            fig.savefig(save_path, dpi=int(dpi))
            plt.close(fig)
        return images

    def normalize_255(self, image):
        """Map [0, 255] pixel values into the open interval (0, 1),
        clipped away from exact 0/1 for numerical stability."""
        return tf.clip_by_value(image / 255., 1e-6, 1. - 1e-6)
# ===========================================================================
# Dataset
# ===========================================================================
class BinarizedMNIST(ImageDataset):
r""" BinarizedMNIST """
def __init__(self):
import tensorflow_datasets as tfds
self.train, self.valid, self.test = tfds.load(
name='binarized_mnist',
split=['train', 'validation', 'test'],
as_supervised=False)
@property
def is_binary(self):
return True
@property
def shape(self):
return (28, 28, 1)
def create_dataset(self,
batch_size=64,
drop_remainder=False,
shuffle=1000,
prefetch=tf.data.experimental.AUTOTUNE,
cache='',
parallel=None,
partition='train',
inc_labels=False,
seed=1) -> tf.data.Dataset:
r"""
Arguments:
partition : {'train', 'valid', 'test'}
inc_labels : a Boolean or Scalar. If True, return both image and label,
otherwise, only image is returned.
If a scalar is provided, it indicate the percent of labelled data
in the mask.
Return :
tensorflow.data.Dataset :
image - `(tf.float32, (None, 28, 28, 1))`
label - `(tf.float32, (None, 10))`
mask - `(tf.bool, (None, 1))` if 0. < inc_labels < 1.
where, `mask=1` mean labelled data, and `mask=0` for unlabelled data
"""
ds = get_partition(partition,
train=self.train,
valid=self.valid,
test=self.test)
struct = tf.data.experimental.get_structure(ds)
if len(struct) == 1:
inc_labels = False
ids = tf.range(self.n_labels, dtype=tf.float32)
inc_labels = float(inc_labels)
gen = tf.random.experimental.Generator.from_seed(seed=seed)
def _process_dict(data):
image = tf.cast(data['image'], tf.float32)
if not self.is_binary:
image = self.normalize_255(image)
if inc_labels:
label = tf.cast(data['label'], tf.float32)
if len(label.shape) == 0: # covert to one-hot
label = tf.cast(ids == label, tf.float32)
if 0. < inc_labels < 1.: # semi-supervised mask
mask = gen.uniform(shape=(1,)) < inc_labels
return dict(inputs=(image, label), mask=mask)
return image, label
return image
def _process_tuple(*data):
image = tf.cast(data[0], tf.float32)
if not self.is_binary:
image = self.normalize_255(image)
if inc_labels:
label = tf.cast(data[1], tf.float32)
if len(label.shape) == 0: # covert to one-hot
label = tf.cast(ids == label, tf.float32)
if 0. < inc_labels < 1.: # semi-supervised mask
mask = gen.uniform(shape=(1,)) < inc_labels
return dict(inputs=(image, label), mask=mask)
return image, label
return image
ds = ds.map(_process_dict if isinstance(struct, dict) else _process_tuple,
parallel)
if cache is not None:
ds = ds.cache(str(cache))
# shuffle must be called after cache
if shuffle is not None and shuffle > 0:
ds = ds.shuffle(int(shuffle))
ds = ds.batch(batch_size, drop_remainder)
if prefetch is not None:
ds = ds.prefetch(prefetch)
return ds
class MNIST(BinarizedMNIST):
r""" MNIST
55000 examples for train, 5000 for valid, and 10000 for test
"""
URL = dict(
X_train=r"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
y_train=r"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz",
X_test=r"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
y_test=r"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz",
)
MD5 = r"8ba71f60dccd53a0b68bfe41ed4cdf9c"
def __init__(self, path='~/tensorflow_datasets/mnist'):
path = os.path.abspath(os.path.expanduser(path))
save_path = os.path.join(path, 'mnist.npz')
if not os.path.exists(path):
os.makedirs(path)
assert os.path.isdir(path)
## check exist processed file
all_data = None
if os.path.exists(save_path):
if not os.path.isfile(save_path):
raise ValueError("path to %s must be a file" % save_path)
if md5_checksum(save_path) != MNIST.MD5:
print("Miss match MD5 remove file at: ", save_path)
os.remove(save_path)
else:
all_data = np.load(save_path)
## download and extract
if all_data is None:
from tqdm import tqdm
def dl_progress(count, block_size, total_size):
kB = block_size * count / 1024.
prog.update(kB - prog.n)
read32 = lambda b: np.frombuffer(
b, dtype=np.dtype(np.uint32).newbyteorder('>'))[0]
all_data = {}
for name, url in MNIST.URL.items():
basename = os.path.basename(url)
zip_path = os.path.join(path, basename)
prog = tqdm(desc="Downloading %s" % basename, unit='kB')
urlretrieve(url, zip_path, dl_progress)
prog.clear()
prog.close()
with gzip.open(zip_path, "rb") as f:
magic = read32(f.read(4))
if magic not in (2051, 2049):
raise ValueError('Invalid magic number %d in MNIST image file: %s' %
(magic, zip_path))
n = read32(f.read(4))
# images
if 'X_' in name:
rows = read32(f.read(4))
cols = read32(f.read(4))
buf = f.read(rows * cols * n)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(n, rows, cols, 1)
# labels
else:
buf = f.read(n)
data = np.frombuffer(buf, dtype=np.uint8)
data = one_hot(data, 10)
all_data[name] = data
np.savez_compressed(save_path, **all_data)
## split train, valid, test
rand = np.random.RandomState(seed=1)
ids = rand.permutation(all_data['X_train'].shape[0])
X_train = all_data['X_train'][ids]
y_train = all_data['y_train'][ids]
X_valid = X_train[:5000]
y_valid = y_train[:5000]
X_train = X_train[5000:]
y_train = y_train[5000:]
X_test = all_data['X_test']
y_test = all_data['y_test']
to_ds = lambda images, labels: tf.data.Dataset.zip(
(tf.data.Dataset.from_tensor_slices(images),
tf.data.Dataset.from_tensor_slices(labels)))
self.train = to_ds(X_train, y_train)
self.valid = to_ds(X_valid, y_valid)
self.test = to_ds(X_test, y_test)
@property
def labels(self):
return np.array([str(i) for i in range(10)])
@property
def is_binary(self):
return False
@property
def shape(self):
return (28, 28, 1)
class BinarizedAlphaDigits(BinarizedMNIST):
r""" Binary 20x16 digits of '0' through '9' and capital 'A' through 'Z'.
39 examples of each class. """
def __init__(self):
import tensorflow_datasets as tfds
self.train, self.valid, self.test = tfds.load(
name='binary_alpha_digits',
split=['train[:70%]', 'train[70%:80%]', 'train[80%:]'],
as_supervised=True,
shuffle_files=True,
)
@property
def shape(self):
return (20, 16, 1)
| [
"numpy.sqrt",
"gzip.open",
"numpy.array",
"odin.utils.md5_checksum",
"tensorflow.random.experimental.Generator.from_seed",
"tensorflow.cast",
"numpy.random.RandomState",
"os.remove",
"matplotlib.pyplot.imshow",
"os.path.exists",
"urllib.request.urlretrieve",
"tensorflow.data.Dataset.from_tenso... | [((1466, 1498), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (1487, 1498), True, 'import numpy as np\n'), ((2924, 2975), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(image / 255.0)', '(1e-06)', '(1.0 - 1e-06)'], {}), '(image / 255.0, 1e-06, 1.0 - 1e-06)\n', (2940, 2975), True, 'import tensorflow as tf\n'), ((3304, 3401), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': '"""binarized_mnist"""', 'split': "['train', 'validation', 'test']", 'as_supervised': '(False)'}), "(name='binarized_mnist', split=['train', 'validation', 'test'],\n as_supervised=False)\n", (3313, 3401), True, 'import tensorflow_datasets as tfds\n'), ((4507, 4583), 'odin.fuel.dataset_base.get_partition', 'get_partition', (['partition'], {'train': 'self.train', 'valid': 'self.valid', 'test': 'self.test'}), '(partition, train=self.train, valid=self.valid, test=self.test)\n', (4520, 4583), False, 'from odin.fuel.dataset_base import IterableDataset, get_partition\n'), ((4666, 4704), 'tensorflow.data.experimental.get_structure', 'tf.data.experimental.get_structure', (['ds'], {}), '(ds)\n', (4700, 4704), True, 'import tensorflow as tf\n'), ((4765, 4806), 'tensorflow.range', 'tf.range', (['self.n_labels'], {'dtype': 'tf.float32'}), '(self.n_labels, dtype=tf.float32)\n', (4773, 4806), True, 'import tensorflow as tf\n'), ((4852, 4905), 'tensorflow.random.experimental.Generator.from_seed', 'tf.random.experimental.Generator.from_seed', ([], {'seed': 'seed'}), '(seed=seed)\n', (4894, 4905), True, 'import tensorflow as tf\n'), ((7001, 7032), 'os.path.join', 'os.path.join', (['path', '"""mnist.npz"""'], {}), "(path, 'mnist.npz')\n", (7013, 7032), False, 'import os\n'), ((7101, 7120), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (7114, 7120), False, 'import os\n'), ((7183, 7208), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (7197, 7208), False, 'import os\n'), ((8946, 
8975), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(1)'}), '(seed=1)\n', (8967, 8975), True, 'import numpy as np\n'), ((10020, 10157), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': '"""binary_alpha_digits"""', 'split': "['train[:70%]', 'train[70%:80%]', 'train[80%:]']", 'as_supervised': '(True)', 'shuffle_files': '(True)'}), "(name='binary_alpha_digits', split=['train[:70%]',\n 'train[70%:80%]', 'train[80%:]'], as_supervised=True, shuffle_files=True)\n", (10029, 10157), True, 'import tensorflow_datasets as tfds\n'), ((738, 756), 'numpy.sqrt', 'np.sqrt', (['n_samples'], {}), '(n_samples)\n', (745, 756), True, 'import numpy as np\n'), ((2050, 2078), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (2060, 2078), True, 'from matplotlib import pyplot as plt\n'), ((2777, 2795), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2793, 2795), True, 'from matplotlib import pyplot as plt\n'), ((2845, 2859), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2854, 2859), True, 'from matplotlib import pyplot as plt\n'), ((4950, 4984), 'tensorflow.cast', 'tf.cast', (["data['image']", 'tf.float32'], {}), "(data['image'], tf.float32)\n", (4957, 4984), True, 'import tensorflow as tf\n'), ((5495, 5523), 'tensorflow.cast', 'tf.cast', (['data[0]', 'tf.float32'], {}), '(data[0], tf.float32)\n', (5502, 5523), True, 'import tensorflow as tf\n'), ((6959, 6983), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (6977, 6983), False, 'import os\n'), ((7044, 7064), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7058, 7064), False, 'import os\n'), ((7072, 7089), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (7083, 7089), False, 'import os\n'), ((8860, 8902), 'numpy.savez_compressed', 'np.savez_compressed', (['save_path'], {}), '(save_path, **all_data)\n', (8879, 8902), True, 'import numpy as np\n'), ((1754, 1771), 
'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1763, 1771), True, 'import numpy as np\n'), ((1962, 1994), 'numpy.squeeze', 'np.squeeze', (['plot_images'], {'axis': '(-1)'}), '(plot_images, axis=-1)\n', (1972, 1994), True, 'import numpy as np\n'), ((2120, 2144), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n', 'n', '(i + 1)'], {}), '(n, n, i + 1)\n', (2131, 2144), True, 'from matplotlib import pyplot as plt\n'), ((2182, 2237), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': "('gray' if img.ndim == 2 else None)"}), "(img, cmap='gray' if img.ndim == 2 else None)\n", (2192, 2237), True, 'from matplotlib import pyplot as plt\n'), ((2246, 2261), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2254, 2261), True, 'from matplotlib import pyplot as plt\n'), ((5093, 5127), 'tensorflow.cast', 'tf.cast', (["data['label']", 'tf.float32'], {}), "(data['label'], tf.float32)\n", (5100, 5127), True, 'import tensorflow as tf\n'), ((5632, 5660), 'tensorflow.cast', 'tf.cast', (['data[1]', 'tf.float32'], {}), '(data[1], tf.float32)\n', (5639, 5660), True, 'import tensorflow as tf\n'), ((7223, 7248), 'os.path.isfile', 'os.path.isfile', (['save_path'], {}), '(save_path)\n', (7237, 7248), False, 'import os\n'), ((7325, 7348), 'odin.utils.md5_checksum', 'md5_checksum', (['save_path'], {}), '(save_path)\n', (7337, 7348), False, 'from odin.utils import md5_checksum, one_hot\n'), ((7431, 7451), 'os.remove', 'os.remove', (['save_path'], {}), '(save_path)\n', (7440, 7451), False, 'import os\n'), ((7483, 7501), 'numpy.load', 'np.load', (['save_path'], {}), '(save_path)\n', (7490, 7501), True, 'import numpy as np\n'), ((7895, 7916), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (7911, 7916), False, 'import os\n'), ((7936, 7964), 'os.path.join', 'os.path.join', (['path', 'basename'], {}), '(path, basename)\n', (7948, 7964), False, 'import os\n'), ((7980, 8029), 'tqdm.tqdm', 'tqdm', ([], {'desc': "('Downloading %s' % 
basename)", 'unit': '"""kB"""'}), "(desc='Downloading %s' % basename, unit='kB')\n", (7984, 8029), False, 'from tqdm import tqdm\n'), ((8038, 8077), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'zip_path', 'dl_progress'], {}), '(url, zip_path, dl_progress)\n', (8049, 8077), False, 'from urllib.request import urlretrieve\n'), ((2723, 2770), 'matplotlib.pyplot.title', 'plt.title', (["('%s[Mask:%s]' % (lab, m))"], {'fontsize': '(6)'}), "('%s[Mask:%s]' % (lab, m), fontsize=6)\n", (2732, 2770), True, 'from matplotlib import pyplot as plt\n'), ((5201, 5234), 'tensorflow.cast', 'tf.cast', (['(ids == label)', 'tf.float32'], {}), '(ids == label, tf.float32)\n', (5208, 5234), True, 'import tensorflow as tf\n'), ((5734, 5767), 'tensorflow.cast', 'tf.cast', (['(ids == label)', 'tf.float32'], {}), '(ids == label, tf.float32)\n', (5741, 5767), True, 'import tensorflow as tf\n'), ((8133, 8158), 'gzip.open', 'gzip.open', (['zip_path', '"""rb"""'], {}), "(zip_path, 'rb')\n", (8142, 8158), False, 'import gzip\n'), ((9356, 9398), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['images'], {}), '(images)\n', (9390, 9398), True, 'import tensorflow as tf\n'), ((9409, 9451), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['labels'], {}), '(labels)\n', (9443, 9451), True, 'import tensorflow as tf\n'), ((8583, 8617), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.uint8'}), '(buf, dtype=np.uint8)\n', (8596, 8617), True, 'import numpy as np\n'), ((8750, 8784), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.uint8'}), '(buf, dtype=np.uint8)\n', (8763, 8784), True, 'import numpy as np\n'), ((8804, 8821), 'odin.utils.one_hot', 'one_hot', (['data', '(10)'], {}), '(data, 10)\n', (8811, 8821), False, 'from odin.utils import md5_checksum, one_hot\n'), ((2393, 2427), 'numpy.array', 'np.array', (['labels[i]'], {'dtype': 'np.bool'}), '(labels[i], dtype=np.bool)\n', (2401, 2427), True, 'import 
numpy as np\n'), ((7771, 7790), 'numpy.dtype', 'np.dtype', (['np.uint32'], {}), '(np.uint32)\n', (7779, 7790), True, 'import numpy as np\n')] |
"""
Example on how to specify a color for each
individual cell or point of a Mesh.
Last example also shows the usage of addScalarBar3D().
"""
print(__doc__)
from vtkplotter import *
import numpy as np
##################################### addPointScalars
man1 = load(datadir+"man.vtk")
nv = man1.N() # nr. of vertices
scals = np.linspace(0, 1, nv) # coloring by index nr of vertex
man1.addPointScalars(scals, "mypointscalars") # add a vtkArray to mesh
# print(man1.getPointArray('mypointscalars')) # info can be retrieved this way
man1.addScalarBar() # add a default scalarbar
show(man1, at=0, N=3, axes=4, elevation=-60)
##################################### pointColors
man2 = load(datadir+"man.vtk")
scals = man2.points()[:, 1] + 37 # pick y coordinates of vertices
man2.pointColors(scals, cmap="bone", vmin=36.2, vmax=36.7) # right dark arm
man2.addScalarBar(horizontal=True)
show(man2, at=1)
##################################### cellColors
man3 = load(datadir+"man.vtk")
scals = man3.cellCenters()[:, 2] + 37 # pick z coordinates of cells
man3.cellColors(scals, cmap="afmhot")
# print(man3.getPointArray('cellColors_afmhot')) # info can be retrieved this way
# add some oriented 3D text
txt = Text("Floor temperature is 35C", s=0.1).rotateZ(90).pos(1,-0.9,-1.7)
# add a fancier 3D scalar bar embedded in the scene
man3.addScalarBar3D(pos=(-1, 0, -1.7))
show(man3, txt, at=2, interactive=1)
# N.B. in the above example one can also do:
# import matplotlib.cm as cm
# man2.pointColors(scals, cmap=cm.bone)
| [
"numpy.linspace"
] | [((329, 350), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nv'], {}), '(0, 1, nv)\n', (340, 350), True, 'import numpy as np\n')] |
import decimal
import numpy as np
from collections import deque
import torch
from config import cfg
from utils.timer import Timer
from utils.logger import logger_info
import utils.distributed as dist
from utils.distributed import sum_tensor
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:k].reshape(-1).float().sum(0) * 1.0 for k in topk]
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def time_string(seconds):
"""Converts time in seconds to a fixed-width string format."""
days, rem = divmod(int(seconds), 24 * 3600)
hrs, rem = divmod(rem, 3600)
mins, secs = divmod(rem, 60)
return "{0:02},{1:02}:{2:02}:{3:02}".format(days, hrs, mins, secs)
def gpu_mem_usage():
"""Computes the GPU memory usage for the current device (MB)."""
mem_usage_bytes = torch.cuda.max_memory_allocated()
return mem_usage_bytes / 1024 / 1024
def float_to_decimal(data, prec=4):
"""Convert floats to decimals which allows for fixed width json."""
if isinstance(data, dict):
return {k: float_to_decimal(v, prec) for k, v in data.items()}
if isinstance(data, float):
return decimal.Decimal(("{:." + str(prec) + "f}").format(data))
else:
return data
class ScalarMeter(object):
"""Measures a scalar value (adapted from Detectron)."""
def __init__(self, window_size):
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
def reset(self):
self.deque.clear()
self.total = 0.0
self.count = 0
def add_value(self, value):
self.deque.append(value)
self.count += 1
self.total += value
def get_win_median(self):
return np.median(self.deque)
def get_win_avg(self):
return np.mean(self.deque)
def get_global_avg(self):
return self.total / self.count
class TrainMeter(object):
"""Measures training stats."""
def __init__(self, start_epoch, num_epochs, epoch_iters):
self.epoch_iters = epoch_iters
self.max_iter = (num_epochs - start_epoch) * epoch_iters
self.iter_timer = Timer()
self.loss = ScalarMeter(cfg.solver.log_interval)
self.loss_total = 0.0
self.lr = None
self.num_samples = 0
self.max_epoch = num_epochs
self.start_epoch = start_epoch
def reset(self, timer=False):
if timer:
self.iter_timer.reset()
self.loss.reset()
self.loss_total = 0.0
self.lr = None
self.num_samples = 0
def iter_tic(self):
self.iter_timer.tic()
def iter_toc(self):
self.iter_timer.toc()
def update_stats(self, loss, lr, mb_size):
self.loss.add_value(loss)
self.lr = lr
self.loss_total += loss * mb_size
self.num_samples += mb_size
def get_iter_stats(self, cur_epoch, cur_iter):
cur_iter_total = (cur_epoch - self.start_epoch) * self.epoch_iters + cur_iter + 1
eta_sec = self.iter_timer.average_time * (self.max_iter - cur_iter_total)
mem_usage = gpu_mem_usage()
stats = {
"epoch": "{}/{}".format(cur_epoch + 1, self.max_epoch),
"iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
"time_avg": self.iter_timer.average_time,
"eta": time_string(eta_sec),
"loss": self.loss.get_win_avg(),
"lr": self.lr,
"mem": int(np.ceil(mem_usage)),
}
return stats
def log_iter_stats(self, cur_epoch, cur_iter):
if (cur_iter + 1) % cfg.solver.log_interval != 0:
return
stats = self.get_iter_stats(cur_epoch, cur_iter)
info = "Epoch: {:s}, Iter: {:s}, loss: {:.4f}, lr: {:s}, time_avg: {:.4f}, eta: {:s}, mem: {:d}".format(\
stats["epoch"], stats["iter"], stats["loss"], stats["lr"], stats["time_avg"], stats["eta"], stats["mem"])
logger_info(info)
class TestMeter(object):
def __init__(self):
self.num_top1 = 0
self.num_top5 = 0
self.num_samples = 0
def reset(self):
self.num_top1 = 0
self.num_top5 = 0
self.num_samples = 0
def update_stats(self, num_top1, num_top5, mb_size):
self.num_top1 += num_top1
self.num_top5 += num_top5
self.num_samples += mb_size
def log_iter_stats(self, cur_epoch):
if cfg.distributed:
tensor_reduce = torch.tensor([self.num_top1 * 1.0, self.num_top5 * 1.0, self.num_samples * 1.0], device="cuda")
tensor_reduce = sum_tensor(tensor_reduce)
tensor_reduce = tensor_reduce.data.cpu().numpy()
num_top1 = tensor_reduce[0]
num_top5 = tensor_reduce[1]
num_samples = tensor_reduce[2]
else:
num_top1 = self.num_top1
num_top5 = self.num_top5
num_samples = self.num_samples
top1_acc = num_top1 * 1.0 / num_samples
top5_acc = num_top5 * 1.0 / num_samples
info = "Epoch: {:d}, top1_acc = {:.2%}, top5_acc = {:.2%} in {:d}".format(cur_epoch + 1, top1_acc, top5_acc, int(num_samples))
logger_info(info)
return top1_acc, top5_acc
| [
"numpy.mean",
"numpy.ceil",
"numpy.median",
"collections.deque",
"utils.timer.Timer",
"utils.logger.logger_info",
"torch.tensor",
"torch.cuda.max_memory_allocated",
"utils.distributed.sum_tensor"
] | [((1405, 1438), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (1436, 1438), False, 'import torch\n'), ((1972, 1997), 'collections.deque', 'deque', ([], {'maxlen': 'window_size'}), '(maxlen=window_size)\n', (1977, 1997), False, 'from collections import deque\n'), ((2307, 2328), 'numpy.median', 'np.median', (['self.deque'], {}), '(self.deque)\n', (2316, 2328), True, 'import numpy as np\n'), ((2372, 2391), 'numpy.mean', 'np.mean', (['self.deque'], {}), '(self.deque)\n', (2379, 2391), True, 'import numpy as np\n'), ((2717, 2724), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (2722, 2724), False, 'from utils.timer import Timer\n'), ((4513, 4530), 'utils.logger.logger_info', 'logger_info', (['info'], {}), '(info)\n', (4524, 4530), False, 'from utils.logger import logger_info\n'), ((5731, 5748), 'utils.logger.logger_info', 'logger_info', (['info'], {}), '(info)\n', (5742, 5748), False, 'from utils.logger import logger_info\n'), ((5025, 5125), 'torch.tensor', 'torch.tensor', (['[self.num_top1 * 1.0, self.num_top5 * 1.0, self.num_samples * 1.0]'], {'device': '"""cuda"""'}), "([self.num_top1 * 1.0, self.num_top5 * 1.0, self.num_samples * \n 1.0], device='cuda')\n", (5037, 5125), False, 'import torch\n'), ((5149, 5174), 'utils.distributed.sum_tensor', 'sum_tensor', (['tensor_reduce'], {}), '(tensor_reduce)\n', (5159, 5174), False, 'from utils.distributed import sum_tensor\n'), ((4035, 4053), 'numpy.ceil', 'np.ceil', (['mem_usage'], {}), '(mem_usage)\n', (4042, 4053), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import random
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from itertools import chain
from sklearn.metrics import accuracy_score
from utils import DataInput
class deepFM_tf1:
def __init__(self,parameters):
super().__init__()
#col names used in input dataset
self.fm_cols=parameters['fm_cols']
#label name used in input dataset
self.label_name=parameters['label_name']
#embedding dimension
self.fm_emb_dim=parameters['fm_emb_dim']
#hidden layers structure
self.hidden_units=parameters['hidden_units']
#dropout probability
self.dropprob=parameters['dropprob']
#batch_size
self.batch_size=parameters['batch_size']
#epoch_size
self.epoch_size=parameters['epoch_size']
#learning_rate
self.lr=parameters['learning_rate']
def build_graph(self):
graph=tf.Graph()
with graph.as_default():
with tf.name_scope('ModelInput'):
self.fm_col_vals=tf.placeholder(dtype=tf.float32,shape=[self.batch_size,len(self.fm_cols)],name='features')
self.labels=tf.placeholder(dtype=tf.float32,shape=[self.batch_size,1],name='labels')
self.training=tf.placeholder(dtype=tf.bool,shape=[],name='training_flag')
with tf.name_scope('Embedding'):
self.fm_emb=tf.Variable(tf.random.normal([len(self.fm_cols),self.fm_emb_dim],0,0.01),
name='fm_embed_matrix')
for i in range(len(self.fm_cols)):
fm_col_emb=tf.tile(tf.gather(self.fm_emb,[i],axis=0),[self.batch_size,1]) #[B,H]
fm_col_emb=fm_col_emb*tf.expand_dims(self.fm_col_vals[:,i],axis=1) #[B,H]
if i==0:
fm_col_embs=tf.expand_dims(fm_col_emb,axis=1) #[B,1,H]
else:
fm_col_embs=tf.concat([fm_col_embs,tf.expand_dims(fm_col_emb,axis=1)],axis=1)
with tf.name_scope('LowOrder'):
summed_ft_emb=tf.reduce_sum(fm_col_embs,axis=1) #[B,H]
summed_ft_emb_square=tf.square(summed_ft_emb) #[B,H]
squared_ft_emb=tf.square(fm_col_embs) #[B,F,H]
squared_ft_emb_sum = tf.reduce_sum(squared_ft_emb, axis=1) # [B,H]
second_orders=0.5*tf.subtract(summed_ft_emb_square,squared_ft_emb_sum) # [B,H]
with tf.name_scope('HighOrder'):
self.hidden_layers=[]
for i,unit in enumerate(self.hidden_units):
self.hidden_layers+=[
tf.keras.layers.Dense(unit,activation=tf.nn.relu,name='dnn_layer_%d'%i),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dropout(rate=self.dropprob)
]
high_orders=tf.reshape(fm_col_embs,[-1,len(self.fm_cols)*self.fm_emb_dim])
for i in range(len(self.hidden_layers)//3):
high_orders=self.hidden_layers[3*i](high_orders)
high_orders = self.hidden_layers[3*i+1](high_orders)
high_orders = self.hidden_layers[3*i+2](high_orders,training=self.training)
with tf.name_scope('ModelOutput'):
self.final_bn=tf.keras.layers.BatchNormalization()
self.final_do=tf.keras.layers.Dropout(rate=self.dropprob)
self.final_output_logits=tf.keras.layers.Dense(1,activation=None,name='output_layer')
all_i=tf.concat([self.fm_col_vals,second_orders,high_orders],axis=1)
all_i=self.final_bn(all_i)
all_i=self.final_do(all_i)
output_logits=self.final_output_logits(all_i)
self.output_prob=1/(1+tf.exp(-output_logits))
with tf.name_scope('Loss'):
self.loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=output_logits, labels=self.labels)
)
# Optimizer
with tf.name_scope('Optimizer'):
self.opt = tf.train.GradientDescentOptimizer(self.lr)
self.update = self.opt.minimize(self.loss)
return graph
def train(self,train_points,eval_points,epoch_num,ob_step=5,save_path=None,load_path=None): #[B,T,H]
if load_path is None:
self.graph=self.build_graph()
else:
self.graph=tf.Graph()
with self.graph.as_default():
with tf.Session() as sess:
if load_path is None:
saver=tf.train.Saver()
tf.initialize_all_variables().run()
else:
saver=tf.train.import_meta_graph(load_path+'.meta')
saver.restore(sess,load_path)
# get weights and ops
self.fm_col_vals=self.graph.get_operation_by_name("ModelInput/features").outputs[0]
self.labels=self.graph.get_tensor_by_name("ModelInput/labels:0")
self.training=self.graph.get_tensor_by_name("ModelInput/training_flag:0")
self.update=self.graph.get_operation_by_name("Optimizer/GradientDescent")
self.loss=self.graph.get_operation_by_name("Loss/Mean").outputs[0]
self.output_prob = self.graph.get_operation_by_name("ModelOutput/truediv").outputs[0]
### 训练
print('Start Training')
step=0
cnt=0
metric_train_loss=0
train_preds=[]
train_labels=[]
for ep in range(epoch_num):
print("############## epoch %d###############" % ep)
random.shuffle(train_points)
for batch_id,(ft,labels) in DataInput(train_points,self.batch_size,self.fm_cols,self.label_name):
feed_vals={}
feed_vals[self.fm_col_vals]=ft
feed_vals[self.labels]=np.expand_dims(np.array(labels),axis=1)
feed_vals[self.training]=True
_, l, predictions = sess.run(
[self.update, self.loss, self.output_prob],
feed_dict=feed_vals
)
metric_train_loss+=l*len(labels)
cnt+=len(labels)
train_preds.append(predictions)
train_labels.append(labels)
#if ob_step steps have passed,we print current training result
if step>0 and step%ob_step==0:
accuracy = self.accuracy(np.concatenate(train_preds,axis=0),
np.expand_dims(np.array(list(chain(*train_labels))),axis=1))
print('Minibatch loss at step %d: %f' % (step, metric_train_loss/cnt))
print('Minibatch accuracy: %.1f%%\n' % (100*accuracy))
train_preds=[]
train_labels=[]
metric_train_loss=0
cnt=0
###
#if one epoch finishes, we start evaluation on test set
if step==self.epoch_size-1:
step=0
eval_cnt=0
eval_loss=0
eval_preds=[]
eval_labels=[]
for batch_id,(ft,labels) in DataInput(eval_points,self.batch_size,self.fm_cols,self.label_name):
feed_vals={}
feed_vals[self.fm_col_vals]=ft
feed_vals[self.training]=False
feed_vals[self.labels]=np.expand_dims(np.array(labels),axis=1)
l,predictions = sess.run([self.loss,self.output_prob],feed_dict=feed_vals)
eval_loss+=l*len(labels)
cnt+=len(labels)
eval_preds.append(predictions)
eval_labels.append(labels)
accuracy = self.accuracy(np.concatenate(eval_preds,axis=0),
np.expand_dims(np.array(list(chain(*eval_labels))),axis=1))
print('DEV_SET loss at step %d: %f' % (step, eval_loss/cnt))
print('DEV_SET accuracy: %.1f%%\n' % (100*accuracy))
else:
step+=1
#保存
if save_path is not None:
saver.save(sess,save_path)
# make predictions on new data
# existed model would be loaded and used to predict new data
def predict(self, data_points, load_path):
res=[]
self.graph=tf.Graph()
with self.graph.as_default():
with tf.Session() as sess:
saver=tf.train.import_meta_graph(load_path+'.meta')
saver.restore(sess,load_path)
# get weights and ops
graph=tf.get_default_graph()
# get weights and ops
self.fm_col_vals=self.graph.get_operation_by_name("ModelInput/features").outputs[0]
self.training=self.graph.get_tensor_by_name("ModelInput/training_flag:0")
self.output_prob = self.graph.get_operation_by_name("ModelOutput/truediv").outputs[0]
###
print('Start Predict.')
for batch_id,(ft,labels) in DataInput(data_points, self.batch_size,self.fm_cols,self.label_name):
feed_vals={}
feed_vals[self.fm_col_vals]=ft
feed_vals[self.training]=False
predictions = sess.run(self.output_prob,feed_dict=feed_vals)
res.append(predictions)
return np.concatenate(res)
def get_graph(self):
return self.graph
def print_graph(self):
tensor_name_list = [tensor.name for tensor in self.graph.as_graph_def().node]
for tensor_name in tensor_name_list:
print(tensor_name,'\n')
def accuracy(self, predictions, labels, need_confusion_matrix=False):
_predictions = np.where(predictions>0.5, 1,0)
return accuracy_score(_predictions,labels) | [
"itertools.chain",
"tensorflow.compat.v1.exp",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.array",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.train.GradientDescentOptimizer",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.subtract",
... | [((91, 115), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (113, 115), True, 'import tensorflow.compat.v1 as tf\n'), ((1074, 1084), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (1082, 1084), True, 'import tensorflow.compat.v1 as tf\n'), ((10053, 10063), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (10061, 10063), True, 'import tensorflow.compat.v1 as tf\n'), ((11221, 11240), 'numpy.concatenate', 'np.concatenate', (['res'], {}), '(res)\n', (11235, 11240), True, 'import numpy as np\n'), ((11633, 11666), 'numpy.where', 'np.where', (['(predictions > 0.5)', '(1)', '(0)'], {}), '(predictions > 0.5, 1, 0)\n', (11641, 11666), True, 'import numpy as np\n'), ((11682, 11718), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['_predictions', 'labels'], {}), '(_predictions, labels)\n', (11696, 11718), False, 'from sklearn.metrics import accuracy_score\n'), ((4936, 4946), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (4944, 4946), True, 'import tensorflow.compat.v1 as tf\n'), ((1137, 1164), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""ModelInput"""'], {}), "('ModelInput')\n", (1150, 1164), True, 'import tensorflow.compat.v1 as tf\n'), ((1320, 1395), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[self.batch_size, 1]', 'name': '"""labels"""'}), "(dtype=tf.float32, shape=[self.batch_size, 1], name='labels')\n", (1334, 1395), True, 'import tensorflow.compat.v1 as tf\n'), ((1424, 1485), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.bool', 'shape': '[]', 'name': '"""training_flag"""'}), "(dtype=tf.bool, shape=[], name='training_flag')\n", (1438, 1485), True, 'import tensorflow.compat.v1 as tf\n'), ((1508, 1534), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""Embedding"""'], {}), "('Embedding')\n", (1521, 1534), True, 'import tensorflow.compat.v1 as tf\n'), ((2239, 2264), 
'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""LowOrder"""'], {}), "('LowOrder')\n", (2252, 2264), True, 'import tensorflow.compat.v1 as tf\n'), ((2297, 2331), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['fm_col_embs'], {'axis': '(1)'}), '(fm_col_embs, axis=1)\n', (2310, 2331), True, 'import tensorflow.compat.v1 as tf\n'), ((2376, 2400), 'tensorflow.compat.v1.square', 'tf.square', (['summed_ft_emb'], {}), '(summed_ft_emb)\n', (2385, 2400), True, 'import tensorflow.compat.v1 as tf\n'), ((2446, 2468), 'tensorflow.compat.v1.square', 'tf.square', (['fm_col_embs'], {}), '(fm_col_embs)\n', (2455, 2468), True, 'import tensorflow.compat.v1 as tf\n'), ((2516, 2553), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['squared_ft_emb'], {'axis': '(1)'}), '(squared_ft_emb, axis=1)\n', (2529, 2553), True, 'import tensorflow.compat.v1 as tf\n'), ((2689, 2715), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""HighOrder"""'], {}), "('HighOrder')\n", (2702, 2715), True, 'import tensorflow.compat.v1 as tf\n'), ((3571, 3599), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""ModelOutput"""'], {}), "('ModelOutput')\n", (3584, 3599), True, 'import tensorflow.compat.v1 as tf\n'), ((3632, 3668), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3666, 3668), True, 'import tensorflow.compat.v1 as tf\n'), ((3700, 3743), 'tensorflow.compat.v1.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'self.dropprob'}), '(rate=self.dropprob)\n', (3723, 3743), True, 'import tensorflow.compat.v1 as tf\n'), ((3786, 3848), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'None', 'name': '"""output_layer"""'}), "(1, activation=None, name='output_layer')\n", (3807, 3848), True, 'import tensorflow.compat.v1 as tf\n'), ((3888, 3953), 'tensorflow.compat.v1.concat', 'tf.concat', (['[self.fm_col_vals, second_orders, high_orders]'], {'axis': '(1)'}), 
'([self.fm_col_vals, second_orders, high_orders], axis=1)\n', (3897, 3953), True, 'import tensorflow.compat.v1 as tf\n'), ((4211, 4232), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""Loss"""'], {}), "('Loss')\n", (4224, 4232), True, 'import tensorflow.compat.v1 as tf\n'), ((4480, 4506), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""Optimizer"""'], {}), "('Optimizer')\n", (4493, 4506), True, 'import tensorflow.compat.v1 as tf\n'), ((4536, 4578), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self.lr'], {}), '(self.lr)\n', (4569, 4578), True, 'import tensorflow.compat.v1 as tf\n'), ((5018, 5030), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (5028, 5030), True, 'import tensorflow.compat.v1 as tf\n'), ((10121, 10133), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (10131, 10133), True, 'import tensorflow.compat.v1 as tf\n'), ((10180, 10227), 'tensorflow.compat.v1.train.import_meta_graph', 'tf.train.import_meta_graph', (["(load_path + '.meta')"], {}), "(load_path + '.meta')\n", (10206, 10227), True, 'import tensorflow.compat.v1 as tf\n'), ((10354, 10376), 'tensorflow.compat.v1.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (10374, 10376), True, 'import tensorflow.compat.v1 as tf\n'), ((10846, 10916), 'utils.DataInput', 'DataInput', (['data_points', 'self.batch_size', 'self.fm_cols', 'self.label_name'], {}), '(data_points, self.batch_size, self.fm_cols, self.label_name)\n', (10855, 10916), False, 'from utils import DataInput\n'), ((2604, 2657), 'tensorflow.compat.v1.subtract', 'tf.subtract', (['summed_ft_emb_square', 'squared_ft_emb_sum'], {}), '(summed_ft_emb_square, squared_ft_emb_sum)\n', (2615, 2657), True, 'import tensorflow.compat.v1 as tf\n'), ((4318, 4404), 'tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'output_logits', 'labels': 'self.labels'}), '(logits=output_logits, 
labels=self.\n labels)\n', (4357, 4404), True, 'import tensorflow.compat.v1 as tf\n'), ((5106, 5122), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5120, 5122), True, 'import tensorflow.compat.v1 as tf\n'), ((5230, 5277), 'tensorflow.compat.v1.train.import_meta_graph', 'tf.train.import_meta_graph', (["(load_path + '.meta')"], {}), "(load_path + '.meta')\n", (5256, 5277), True, 'import tensorflow.compat.v1 as tf\n'), ((6348, 6376), 'random.shuffle', 'random.shuffle', (['train_points'], {}), '(train_points)\n', (6362, 6376), False, 'import random\n'), ((6426, 6497), 'utils.DataInput', 'DataInput', (['train_points', 'self.batch_size', 'self.fm_cols', 'self.label_name'], {}), '(train_points, self.batch_size, self.fm_cols, self.label_name)\n', (6435, 6497), False, 'from utils import DataInput\n'), ((1810, 1845), 'tensorflow.compat.v1.gather', 'tf.gather', (['self.fm_emb', '[i]'], {'axis': '(0)'}), '(self.fm_emb, [i], axis=0)\n', (1819, 1845), True, 'import tensorflow.compat.v1 as tf\n'), ((1915, 1961), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['self.fm_col_vals[:, i]'], {'axis': '(1)'}), '(self.fm_col_vals[:, i], axis=1)\n', (1929, 1961), True, 'import tensorflow.compat.v1 as tf\n'), ((2034, 2068), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['fm_col_emb'], {'axis': '(1)'}), '(fm_col_emb, axis=1)\n', (2048, 2068), True, 'import tensorflow.compat.v1 as tf\n'), ((2885, 2960), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', (['unit'], {'activation': 'tf.nn.relu', 'name': "('dnn_layer_%d' % i)"}), "(unit, activation=tf.nn.relu, name='dnn_layer_%d' % i)\n", (2906, 2960), True, 'import tensorflow.compat.v1 as tf\n'), ((2983, 3019), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3017, 3019), True, 'import tensorflow.compat.v1 as tf\n'), ((3046, 3089), 'tensorflow.compat.v1.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 
'self.dropprob'}), '(rate=self.dropprob)\n', (3069, 3089), True, 'import tensorflow.compat.v1 as tf\n'), ((4151, 4173), 'tensorflow.compat.v1.exp', 'tf.exp', (['(-output_logits)'], {}), '(-output_logits)\n', (4157, 4173), True, 'import tensorflow.compat.v1 as tf\n'), ((5144, 5173), 'tensorflow.compat.v1.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (5171, 5173), True, 'import tensorflow.compat.v1 as tf\n'), ((6679, 6695), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (6687, 6695), True, 'import numpy as np\n'), ((8510, 8580), 'utils.DataInput', 'DataInput', (['eval_points', 'self.batch_size', 'self.fm_cols', 'self.label_name'], {}), '(eval_points, self.batch_size, self.fm_cols, self.label_name)\n', (8519, 8580), False, 'from utils import DataInput\n'), ((2164, 2198), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['fm_col_emb'], {'axis': '(1)'}), '(fm_col_emb, axis=1)\n', (2178, 2198), True, 'import tensorflow.compat.v1 as tf\n'), ((7461, 7496), 'numpy.concatenate', 'np.concatenate', (['train_preds'], {'axis': '(0)'}), '(train_preds, axis=0)\n', (7475, 7496), True, 'import numpy as np\n'), ((9277, 9311), 'numpy.concatenate', 'np.concatenate', (['eval_preds'], {'axis': '(0)'}), '(eval_preds, axis=0)\n', (9291, 9311), True, 'import numpy as np\n'), ((8824, 8840), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (8832, 8840), True, 'import numpy as np\n'), ((7581, 7601), 'itertools.chain', 'chain', (['*train_labels'], {}), '(*train_labels)\n', (7586, 7601), False, 'from itertools import chain\n'), ((9396, 9415), 'itertools.chain', 'chain', (['*eval_labels'], {}), '(*eval_labels)\n', (9401, 9415), False, 'from itertools import chain\n')] |
import numpy as np
from silx.gui.plot import PlotWidget, PlotWindow
from silx.gui.plot.ComplexImageView import ComplexImageView
from silx.gui import qt
import nmutils
import scipy.interpolate
class ProbeManager(object):
    """
    Class which coordinates the various probe plots and widgets, propagates and so on.

    Holds the 2d reconstructed probe (``probe2d``), its pixel size and photon
    energy, and a stack of nearfield-propagated slices (``probe3d``) over the
    distances in ``zdist``.  All plot axes are labeled in micrometers.
    """
    def __init__(self, ui):
        # ui is the enclosing widget container; wire up the interactive controls
        self.ui = ui
        self.ui.focusButton.clicked.connect(self.autofocus)
        self.ui.propagateButton.clicked.connect(self.propagate)
        self.ui.focusSlider.valueChanged.connect(self.updatePlane)
        label = 'micrometers'
        self.ui.probePlot.getPlot().setGraphYLabel(label)
        self.ui.probePlot.getPlot().setGraphXLabel(label)
        self.ui.probePlot2.getPlot().setGraphYLabel(label)
        self.ui.probePlot2.getPlot().setGraphXLabel(label)
        self.ui.probePlot.setGraphTitle('Plane of interest')
        self.ui.probePlot2.setGraphTitle('Sample plane')
        self.ui.probeHist.setGraphTitle('Probe histogram')
        self.ui.probeHist.setGraphXLabel('micrometers')
        self.ui.probeHist.setGraphYLabel(' ')
        self.ui.probeHist.chooserMenu.currentIndexChanged.connect(self.updatePlane)
        self.ui.verticalFocusView.setGraphTitle('Vertical focus (M1)')
        self.ui.horizontalFocusView.setGraphTitle('Horizontal focus (M2)')
    def set_data(self, probe, psize, energy):
        """
        Install a new 2d probe and immediately propagate it.

        probe:  complex 2d array in the sample plane
        psize:  pixel size (presumably in meters, given the *1e6 scaling
                to micrometer axes -- TODO confirm against caller)
        energy: photon energy passed through to the propagator
        """
        self.psize = psize
        self.probe2d = probe
        self.energy = energy
        # scale plot axes from pixels to micrometers
        self.ui.probePlot.setScale(psize * 1e6)
        self.ui.probePlot2.setScale(psize * 1e6)
        self.ui.probePlot2.set_data(self.probe2d)
        # mark the sample plane (z = 0) in both side views
        self.ui.verticalFocusView.addXMarker(0., legend='sample', text='sample', color='g')
        self.ui.horizontalFocusView.addXMarker(0., legend='sample', text='', color='g')
        lims = [-1e6*probe.shape[0] * psize / 2, 1e6*probe.shape[0] * psize / 2] # um
        # transverse coordinate axis for the histogram curves, in micrometers
        self.xtrans = np.linspace(lims[0], lims[1], probe.shape[0])
        self.propagate()
    def calculateFWHM(self):
        """
        Estimate vertical/horizontal FWHM at the slider's current plane by
        finding the half-maximum crossings of the summed intensity profiles
        via spline roots, then annotate the histogram plot with the result.
        """
        z = self.ui.focusSlider.value()
        # pick the propagated slice closest to the slider position (slider in um)
        zslice = np.argmin(np.abs(self.zdist*1e6 - z))
        data = self.probe3d[zslice]
        # intensity profiles: sum |field|^2 over rows / columns
        yh = np.sum(np.abs(data)**2, axis=0)
        yv = np.sum(np.abs(data)**2, axis=1)
        # roots of (profile - half max) give the half-maximum crossings
        edges = scipy.interpolate.UnivariateSpline(self.xtrans, yh-yh.max()/2).roots()
        fwhmh = abs(edges[0] - edges[-1])
        edges = scipy.interpolate.UnivariateSpline(self.xtrans, yv-yv.max()/2).roots()
        fwhmv = abs(edges[0] - edges[-1])
        # place the annotation near the top-left of the current view
        x0, x1 = self.ui.probeHist.getXAxis().getLimits()
        y0, y1 = self.ui.probeHist.getYAxis().getLimits()
        self.ui.probeHist.addMarker(x0, y0 + .9 * (y1 - y0), legend='fwhm_h',
                        text='FWHM\n %.0f x %.0f nm\n (v x h)' % (fwhmv*1000, fwhmh*1000),
                        color='k', selectable=True, draggable=True, symbol=',')
    def updatePlane(self):
        """
        Refresh the plane-of-interest image, the z markers in the side views,
        and the histogram curves for the slider's current plane.
        """
        # blank the FWHM annotation; it is recomputed only on autofocus
        self.ui.probeHist.addMarker(0, 0, legend='fwhm_h', text=' ', color='w')
        z = self.ui.focusSlider.value()
        try:
            # plane of interest image
            zslice = np.argmin(np.abs(self.zdist*1e6 - z))
            data = self.probe3d[zslice]
            zz = self.zdist[zslice]*1e6
            self.ui.probePlot.set_data(data)
            self.ui.verticalFocusView.addXMarker(zz, legend='zslice',
                                                 text='\n\n %d um'%int(np.round(zz)), color='m')
            self.ui.horizontalFocusView.addXMarker(zz, legend='zslice',
                                                   text='', color='m')
            # histograms
            if self.ui.probeHist.chooserMenu.currentIndex() == 0:
                # histogram at plane of interest
                intensity = np.abs(data)**2
                yh = np.sum(intensity, axis=0)
                yv = np.sum(intensity, axis=1)
            else:
                # histogram at reconstruction plane
                intensity = np.abs(self.probe2d)**2
                yh = np.sum(intensity, axis=0)
                yv = np.sum(intensity, axis=1)
            self.ui.probeHist.addCurve(self.xtrans, yh, legend='horizontal')
            self.ui.probeHist.addCurve(self.xtrans, yv, legend='vertical')
        except AttributeError:
            # probe3d/xtrans not set yet -- nothing to draw before set_data()
            pass # no data yet
    def propagate(self):
        """
        Propagate the 2d probe to a range of planes around the sample, update
        the side views, mark the per-axis foci, and trigger autofocus.
        """
        # get the parameters
        nn = self.ui.numberBox.value()
        fw = self.ui.forwardBox.value()
        bw = self.ui.backwardBox.value()
        # update the range
        self.ui.focusSlider.setMaximum(fw)
        self.ui.focusSlider.setMinimum(-bw)
        # define distances and propagate
        self.zdist = np.linspace(-bw, fw, nn) * 1e-6
        dist = self.zdist
        dx = dist[1] - dist[0]
        print("propagating to %d positions separated by %.1f um..."\
                % (len(dist), dx*1e6))
        self.probe3d = nmutils.utils.propagateNearfield(self.probe2d, self.psize, -dist, self.energy)
        # get intensities and focii
        power3d = np.abs(self.probe3d)**2
        power_vertical = np.sum(power3d, axis=2).T
        power_horizontal = np.sum(power3d, axis=1).T
        # sharpest plane per axis: maximize the summed squared intensity
        focus_vertical_ind = np.argmax(np.sum(power_vertical**2, axis=0))
        focus_vertical_x = dist[focus_vertical_ind]
        focus_horizontal_ind = np.argmax(np.sum(power_horizontal**2, axis=0))
        focus_horizontal_x = dist[focus_horizontal_ind]
        # show top and side views
        scale = [(self.zdist[1]-self.zdist[0])*1e6, self.psize*1e6]
        origin = [-bw, -self.psize*self.probe2d.shape[0]/2.0*1e6]
        self.ui.verticalFocusView.addImage(power_vertical, replace=True,
            xlabel='beamline z axis (micrometers)', ylabel='micrometers', scale=scale, origin=origin)
        self.ui.horizontalFocusView.addImage(power_horizontal, replace=True,
            xlabel='beamline z axis (micrometers)', ylabel='micrometers', scale=scale, origin=origin)
        # indicate vertical and horizontal foci
        # NOTE(review): 'y' below is assigned but never used
        y = self.ui.verticalFocusView.getYAxis().getLimits()
        self.ui.verticalFocusView.addXMarker(focus_vertical_x*1e6, legend='local', text='\n %d um'%int(np.round(focus_vertical_x*1e6)), color='c')
        self.ui.horizontalFocusView.addXMarker(focus_horizontal_x*1e6, legend='local', text='\n %d um'%int(np.round(focus_horizontal_x*1e6)), color='c')
        # autofocus
        focus_ind = np.argmax(np.sum(power3d**2, axis=(1,2)))
        self.realFocus = dist[focus_ind] * 1e6   # best overall focus, in um
        self.autofocus()
    def autofocus(self):
        """Jump the slider to the precomputed best-focus plane and refresh."""
        try:
            self.ui.focusSlider.setValue(int(np.round(self.realFocus)))
            self.updatePlane()
            self.calculateFWHM()
        except AttributeError:
            # realFocus not computed yet -- propagate() has not run
            pass # no data yet
class PropagationView(PlotWidget):
    """Minimal plot widget hosting one side view of the propagated probe."""

    def __init__(self, parent=None):
        super().__init__(parent=parent)
class ProbeView(ComplexImageView):
    """View of the complex 2d probe, with a toolbar spin box that applies a
    constant phase shift to the whole array before display."""

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        # log-amplitude/phase rendering with square pixels and no color bar
        self.setComplexMode(self.Mode.LOG10_AMPLITUDE_PHASE)
        self.setKeepDataAspectRatio(True)
        self.getPlot().getColorBarWidget().setVisible(False)
        # spin box for a global phase offset, added to the plot toolbar
        box = qt.QDoubleSpinBox(toolTip='Phase shift everything')
        box.setRange(-3.14, 3.14)
        box.setSingleStep(.1)
        box.setValue(0.)
        box.setPrefix('phase shift: ')
        box.valueChanged.connect(self._update)
        self.getPlot().toolBar().addWidget(box)
        self.phaseShiftBox = box

    def set_data(self, data):
        """Store the complex probe array and redraw it."""
        self.data = data
        self._update()

    def _update(self):
        """Redraw the stored array with the current phase offset applied."""
        phi = self.phaseShiftBox.value()
        self.setData(self.data * np.exp(1j * phi), copy=False)
class Histogram(PlotWindow):
    """Plot window for the probe intensity histograms, with a toolbar menu for
    choosing between the plane of interest and the reconstruction plane."""

    def __init__(self, parent=None):
        # enable only the tools that make sense for 1d curves
        tools = dict(resetzoom=True, autoScale=True,
                     logScale=True, grid=True,
                     curveStyle=True, colormap=False,
                     aspectRatio=False, yInverted=False,
                     copy=True, save=True, print_=True,
                     control=True, position=True,
                     roi=False, mask=False, fit=False)
        super().__init__(parent=parent, **tools)
        # add a POI/reconstruction chooser
        self.chooserToolbar = self.addToolBar('Interpolation')
        self.chooserMenu = qt.QComboBox(
            toolTip='Choose whether to display probe at the plane of interest of in the sample plane')
        self.chooserMenu.insertItems(1, ['Plane of interest', 'Sample plane'])
        self.chooserToolbar.addWidget(self.chooserMenu)
| [
"numpy.abs",
"silx.gui.qt.QDoubleSpinBox",
"numpy.exp",
"numpy.sum",
"numpy.linspace",
"nmutils.utils.propagateNearfield",
"silx.gui.qt.QComboBox",
"numpy.round"
] | [((1905, 1950), 'numpy.linspace', 'np.linspace', (['lims[0]', 'lims[1]', 'probe.shape[0]'], {}), '(lims[0], lims[1], probe.shape[0])\n', (1916, 1950), True, 'import numpy as np\n'), ((4747, 4825), 'nmutils.utils.propagateNearfield', 'nmutils.utils.propagateNearfield', (['self.probe2d', 'self.psize', '(-dist)', 'self.energy'], {}), '(self.probe2d, self.psize, -dist, self.energy)\n', (4779, 4825), False, 'import nmutils\n'), ((7207, 7258), 'silx.gui.qt.QDoubleSpinBox', 'qt.QDoubleSpinBox', ([], {'toolTip': '"""Phase shift everything"""'}), "(toolTip='Phase shift everything')\n", (7224, 7258), False, 'from silx.gui import qt\n'), ((8569, 8682), 'silx.gui.qt.QComboBox', 'qt.QComboBox', ([], {'toolTip': '"""Choose whether to display probe at the plane of interest of in the sample plane"""'}), "(toolTip=\n 'Choose whether to display probe at the plane of interest of in the sample plane'\n )\n", (8581, 8682), False, 'from silx.gui import qt\n'), ((2073, 2107), 'numpy.abs', 'np.abs', (['(self.zdist * 1000000.0 - z)'], {}), '(self.zdist * 1000000.0 - z)\n', (2079, 2107), True, 'import numpy as np\n'), ((4531, 4555), 'numpy.linspace', 'np.linspace', (['(-bw)', 'fw', 'nn'], {}), '(-bw, fw, nn)\n', (4542, 4555), True, 'import numpy as np\n'), ((4881, 4901), 'numpy.abs', 'np.abs', (['self.probe3d'], {}), '(self.probe3d)\n', (4887, 4901), True, 'import numpy as np\n'), ((4930, 4953), 'numpy.sum', 'np.sum', (['power3d'], {'axis': '(2)'}), '(power3d, axis=2)\n', (4936, 4953), True, 'import numpy as np\n'), ((4983, 5006), 'numpy.sum', 'np.sum', (['power3d'], {'axis': '(1)'}), '(power3d, axis=1)\n', (4989, 5006), True, 'import numpy as np\n'), ((5048, 5083), 'numpy.sum', 'np.sum', (['(power_vertical ** 2)'], {'axis': '(0)'}), '(power_vertical ** 2, axis=0)\n', (5054, 5083), True, 'import numpy as np\n'), ((5176, 5213), 'numpy.sum', 'np.sum', (['(power_horizontal ** 2)'], {'axis': '(0)'}), '(power_horizontal ** 2, axis=0)\n', (5182, 5213), True, 'import numpy as np\n'), ((6253, 
6286), 'numpy.sum', 'np.sum', (['(power3d ** 2)'], {'axis': '(1, 2)'}), '(power3d ** 2, axis=(1, 2))\n', (6259, 6286), True, 'import numpy as np\n'), ((7736, 7756), 'numpy.exp', 'np.exp', (['(1.0j * shift)'], {}), '(1.0j * shift)\n', (7742, 7756), True, 'import numpy as np\n'), ((2157, 2169), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (2163, 2169), True, 'import numpy as np\n'), ((2202, 2214), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (2208, 2214), True, 'import numpy as np\n'), ((3058, 3092), 'numpy.abs', 'np.abs', (['(self.zdist * 1000000.0 - z)'], {}), '(self.zdist * 1000000.0 - z)\n', (3064, 3092), True, 'import numpy as np\n'), ((3675, 3700), 'numpy.sum', 'np.sum', (['intensity'], {'axis': '(0)'}), '(intensity, axis=0)\n', (3681, 3700), True, 'import numpy as np\n'), ((3722, 3747), 'numpy.sum', 'np.sum', (['intensity'], {'axis': '(1)'}), '(intensity, axis=1)\n', (3728, 3747), True, 'import numpy as np\n'), ((3891, 3916), 'numpy.sum', 'np.sum', (['intensity'], {'axis': '(0)'}), '(intensity, axis=0)\n', (3897, 3916), True, 'import numpy as np\n'), ((3938, 3963), 'numpy.sum', 'np.sum', (['intensity'], {'axis': '(1)'}), '(intensity, axis=1)\n', (3944, 3963), True, 'import numpy as np\n'), ((3638, 3650), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (3644, 3650), True, 'import numpy as np\n'), ((3846, 3866), 'numpy.abs', 'np.abs', (['self.probe2d'], {}), '(self.probe2d)\n', (3852, 3866), True, 'import numpy as np\n'), ((6441, 6465), 'numpy.round', 'np.round', (['self.realFocus'], {}), '(self.realFocus)\n', (6449, 6465), True, 'import numpy as np\n'), ((6005, 6043), 'numpy.round', 'np.round', (['(focus_vertical_x * 1000000.0)'], {}), '(focus_vertical_x * 1000000.0)\n', (6013, 6043), True, 'import numpy as np\n'), ((6156, 6196), 'numpy.round', 'np.round', (['(focus_horizontal_x * 1000000.0)'], {}), '(focus_horizontal_x * 1000000.0)\n', (6164, 6196), True, 'import numpy as np\n'), ((3327, 3339), 'numpy.round', 'np.round', (['zz'], {}), '(zz)\n', 
(3335, 3339), True, 'import numpy as np\n')] |
import argparse
import json
from mimic import log
from mimic.utils.BaseFlags import parser as parser
from mimic.utils.filehandling import expand_paths
import os
import numpy as np
from typing import Union
def str2bool(v):
    """Convert a boolean-ish command line string to a bool.

    Booleans pass through unchanged; recognized true/false spellings are
    mapped accordingly; anything else raises argparse.ArgumentTypeError.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# Experiment bookkeeping and config loading.
parser.add_argument('--exp_str_prefix', type=str, default='Mimic', help="prefix of the experiment directory.")
parser.add_argument('--dataset', type=str, default='Mimic', help="name of the dataset")
parser.add_argument('--config_path', type=str, default=None, help="path to the json config")
parser.add_argument('--verbose', type=int, default=0, help="global verbosity level")
parser.add_argument('--load_flags', type=str, default=None, help="overwrite all values with parameters from an old "
                                                                 "experiment. Give the path to the flags.rar "
                                                                 "file as input.")
# Image dependent
parser.add_argument('--fixed_image_extractor', type=str2bool, default=True,
                    help="If the feature extraction layers of the "
                         "pretrained densenet are frozen. "
                         "Only works when img_clf_type classifier "
                         "is densenet.")
# DATA DEPENDENT
parser.add_argument('--only_text_modality', type=str2bool, default=False,
                    help="flag to indicate if only the text modality is to be used")
parser.add_argument('--undersample_dataset', type=str2bool, default=False,
                    help="flag to indicate if the dataset should be undersampled such that there are "
                         "the same number of datapoints that have no label than datapoints that have a label")
parser.add_argument('--weighted_sampler', type=str2bool, default=False,
                    help="If a weighted sampler should be used for the dataloader.")
parser.add_argument('--binary_labels', type=str2bool, default=False,
                    help="If True, label 'Finding' with classes 0 and 1 will be used for the classification evaluation.")
# Text Dependent
parser.add_argument('--text_encoding', type=str, default='char',
                    help="encoding of the text, either character or wordwise")
# NOTE: setup_flags() later overrides len_sequence (128 for word encoding, 1024 for char).
parser.add_argument('--len_sequence', type=int, default=1024, help="length of sequence")
parser.add_argument('--word_min_occ', type=int, default=3,
                    help="min occurence of a word in the dataset such that it is added to the vocabulary.")
parser.add_argument('--text_gen_lastlayer', type=str, default='softmax',
                    help="Last layer of the text generator. Chose between none, softmax and sigmoid.")
# Latent space dimensions for the modality-specific style factors.
parser.add_argument('--style_pa_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--style_lat_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--style_text_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--image_channels', type=int, default=1, help="number of classes on which the data set trained")
parser.add_argument('--img_size', type=int, default=128, help="size of the images on which the model is trained")
# Architecture sizes, output distributions and loading.
parser.add_argument('--DIM_img', type=int, default=128, help="number of classes on which the data set trained")
parser.add_argument('--DIM_text', type=int, default=128, help="number of classes on which the data set trained")
parser.add_argument('--likelihood_m1', type=str, default='laplace', help="output distribution")
parser.add_argument('--likelihood_m2', type=str, default='laplace', help="output distribution")
parser.add_argument('--likelihood_m3', type=str, default='categorical', help="output distribution")
parser.add_argument('--dataloader_workers', type=int, default=8, help="number of workers used for the Dataloader")
# BUGFIX: type=bool is an argparse pitfall -- bool('False') is True, so any
# value given on the command line enabled the toy dataset. Use the str2bool
# converter (defined above) like the other boolean flags in this file.
parser.add_argument('--use_toy_dataset', type=str2bool, default=False, help="if true uses small toy dataset")
# paths to save models
# NOTE(review): the help text of --encoder_save_m3 says "decoder" -- looks like
# a copy-paste slip; the flag itself names an encoder checkpoint.
parser.add_argument('--encoder_save_m1', type=str, default='encoderM1', help="model save for encoder")
parser.add_argument('--encoder_save_m2', type=str, default='encoderM2', help="model save for encoder")
parser.add_argument('--encoder_save_m3', type=str, default='encoderM3', help="model save for decoder")
parser.add_argument('--decoder_save_m1', type=str, default='decoderM1', help="model save for decoder")
parser.add_argument('--decoder_save_m2', type=str, default='decoderM2', help="model save for decoder")
parser.add_argument('--decoder_save_m3', type=str, default='decoderM3', help="model save for decoder")
# classifiers
parser.add_argument('--text_clf_type', type=str, default='word',
                    help="text classifier type, implemented are 'word' and 'char'")
parser.add_argument('--img_clf_type', type=str, default='resnet',
                    help="image classifier type, implemented are 'resnet' and 'densenet'")
parser.add_argument('--clf_save_m1', type=str, default='clf_m1', help="model save for clf")
parser.add_argument('--clf_save_m2', type=str, default='clf_m2', help="model save for clf")
parser.add_argument('--clf_save_m3', type=str, default='clf_m3', help="model save for clf")
# BUGFIX: the help string was a copy-paste of "model save for clf"; this flag
# selects the classifier training loss.
parser.add_argument('--clf_loss', type=str, default='binary_crossentropy',
                    choices=['binary_crossentropy', 'crossentropy', 'bce_with_logits'],
                    help="loss function used to train the classifiers")
# Callbacks
# BUGFIX: type=bool parses any non-empty string (including 'False') as True;
# use the str2bool converter defined above, as the other boolean flags do.
parser.add_argument('--reduce_lr_on_plateau', type=str2bool, default=False,
                    help="boolean indicating if callback 'reduce lr on plateau' is used")
parser.add_argument('--max_early_stopping_index', type=int, default=5,
                    help="patience of the early stopper. If the target metric did not improve "
                         "for that amount of epochs, training is stopped")
parser.add_argument('--start_early_stopping_epoch', type=int, default=0,
                    help="epoch on which to start the early stopping callback")
# LOSS TERM WEIGHTS
parser.add_argument('--beta_m1_style', type=float, default=1.0, help="default weight divergence term style modality 1")
parser.add_argument('--beta_m2_style', type=float, default=1.0, help="default weight divergence term style modality 2")
# BUGFIX: help text said "modality 2" (copy-paste); this flag weighs modality 3.
parser.add_argument('--beta_m3_style', type=float, default=1.0, help="default weight divergence term style modality 3")
parser.add_argument('--div_weight_m1_content', type=float, default=0.25,
                    help="default weight divergence term content modality 1")
parser.add_argument('--div_weight_m2_content', type=float, default=0.25,
                    help="default weight divergence term content modality 2")
parser.add_argument('--div_weight_m3_content', type=float, default=0.25,
                    help="default weight divergence term content modality 3")
parser.add_argument('--div_weight_uniform_content', type=float, default=0.25,
                    help="default weight divergence term prior")
parser.add_argument('--rec_weight_m1', default=0.33, type=float,
                    help="weight of the m1 modality for the log probs. Type should be either float or string.")
parser.add_argument('--rec_weight_m2', default=0.33, type=float,
                    help="weight of the m2 modality for the log probs. Type should be either float or string.")
parser.add_argument('--rec_weight_m3', default=0.33, type=float,
                    help="weight of the m3 modality for the log probs. Type should be either float or string.")
def update_flags_with_config(config_path: str, additional_args=None, testing=False):
    """Parse flags, overriding defaults with a json config and additional_args.

    If testing is true, no cli arguments will be read.
    """
    # BUGFIX: the default was a shared mutable dict ({}); use the None sentinel.
    if additional_args is None:
        additional_args = {}
    with open(config_path, 'rt') as json_file:
        json_config = json.load(json_file)
    t_args = argparse.Namespace()
    # additional_args wins over values from the json config
    t_args.__dict__.update({**json_config, **additional_args})
    if testing:
        # parse an empty argv so test-runner arguments are not picked up
        return parser.parse_args([], namespace=t_args)
    return parser.parse_args(namespace=t_args)
def get_freer_gpu():
    """
    Returns the index of the gpu with the most free memory.
    Taken from https://discuss.pytorch.org/t/it-there-anyway-to-let-program-select-free-gpu-automatically/17560/6
    """
    # NOTE: writes a scratch file 'tmp' into the current working directory
    os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
    # BUGFIX: the original leaked the file handle; close it with a context manager
    with open('tmp', 'r') as tmp_file:
        memory_available = [int(x.split()[2]) for x in tmp_file.readlines()]
    return np.argmax(memory_available)
def setup_flags(flags, testing: bool = False):
    """
    If testing is true, no cli arguments will be read.

    Finalizes the parsed flags: merges the json config, expands paths,
    selects the compute device, and sets derived values and the random seed.
    """
    # local imports keep heavy dependencies out of module import time
    import torch
    from pathlib import Path
    import numpy as np
    if flags.config_path:
        flags = update_flags_with_config(config_path=flags.config_path, testing=testing)
    flags = expand_paths(flags)
    use_cuda = torch.cuda.is_available()
    flags.device = torch.device('cuda' if use_cuda else 'cpu')
    if str(flags.device) == 'cuda':
        # pick the gpu with the most free memory
        torch.cuda.set_device(get_freer_gpu())
    flags = flags_set_alpha_modalities(flags)
    # assumes the second root log handler is a FileHandler -- TODO confirm
    flags.log_file = log.manager.root.handlers[1].baseFilename
    # word-level encoding uses shorter sequences than character-level
    flags.len_sequence = 128 if flags.text_encoding == 'word' else 1024
    if flags.load_flags:
        old_flags = torch.load(Path(flags.load_flags).expanduser())
        # create param dict from all the params of old_flags that are not paths
        # NOTE(review): old_flags.item() looks wrong -- a dict would need
        # .items() and a Namespace vars(old_flags).items(); also ('dir' not
        # in v) assumes every value is a string. Verify this branch runs.
        params = {k: v for k, v in old_flags.item() if ('dir' not in v) and ('path' not in v)}
        flags.__dict__.update(params)
    if not flags.seed:
        # set a random seed
        flags.seed = np.random.randint(0, 10000)
    return flags
def flags_set_alpha_modalities(flags):
    """Collect the four divergence weights on flags.alpha_modalities,
    with the uniform/prior weight first, then modalities 1-3."""
    weights = [flags.div_weight_uniform_content,
               flags.div_weight_m1_content,
               flags.div_weight_m2_content,
               flags.div_weight_m3_content]
    flags.alpha_modalities = weights
    return flags
| [
"pathlib.Path",
"numpy.argmax",
"mimic.utils.BaseFlags.parser.add_argument",
"argparse.ArgumentTypeError",
"json.load",
"numpy.random.randint",
"torch.cuda.is_available",
"argparse.Namespace",
"mimic.utils.BaseFlags.parser.parse_args",
"mimic.utils.filehandling.expand_paths",
"os.system",
"tor... | [((497, 612), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--exp_str_prefix"""'], {'type': 'str', 'default': '"""Mimic"""', 'help': '"""prefix of the experiment directory."""'}), "('--exp_str_prefix', type=str, default='Mimic', help=\n 'prefix of the experiment directory.')\n", (516, 612), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((608, 700), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--dataset"""'], {'type': 'str', 'default': '"""Mimic"""', 'help': '"""name of the dataset"""'}), "('--dataset', type=str, default='Mimic', help=\n 'name of the dataset')\n", (627, 700), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((696, 793), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--config_path"""'], {'type': 'str', 'default': 'None', 'help': '"""path to the json config"""'}), "('--config_path', type=str, default=None, help=\n 'path to the json config')\n", (715, 793), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((789, 878), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--verbose"""'], {'type': 'int', 'default': '(0)', 'help': '"""global verbosity level"""'}), "('--verbose', type=int, default=0, help=\n 'global verbosity level')\n", (808, 878), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((874, 1058), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--load_flags"""'], {'type': 'str', 'default': 'None', 'help': '"""overwrite all values with parameters from an old experiment. Give the path to the flags.rar file as input."""'}), "('--load_flags', type=str, default=None, help=\n 'overwrite all values with parameters from an old experiment. 
Give the path to the flags.rar file as input.'\n )\n", (893, 1058), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((1203, 1425), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--fixed_image_extractor"""'], {'type': 'str2bool', 'default': '(True)', 'help': '"""If the feature extraction layers of the pretrained densenet are frozen. Only works when img_clf_type classifier is densenet."""'}), "('--fixed_image_extractor', type=str2bool, default=True,\n help=\n 'If the feature extraction layers of the pretrained densenet are frozen. Only works when img_clf_type classifier is densenet.'\n )\n", (1222, 1425), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((1533, 1675), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--only_text_modality"""'], {'type': 'str2bool', 'default': '(False)', 'help': '"""flag to indicate if only the text modality is to be used"""'}), "('--only_text_modality', type=str2bool, default=False,\n help='flag to indicate if only the text modality is to be used')\n", (1552, 1675), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((1692, 1946), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--undersample_dataset"""'], {'type': 'str2bool', 'default': '(False)', 'help': '"""flag to indicate if the dataset should be undersampled such that there are the same number of datapoints that have no label than datapoints that have a label"""'}), "('--undersample_dataset', type=str2bool, default=False,\n help=\n 'flag to indicate if the dataset should be undersampled such that there are the same number of datapoints that have no label than datapoints that have a label'\n )\n", (1711, 1946), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((1981, 2121), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--weighted_sampler"""'], {'type': 'str2bool', 'default': '(False)', 'help': '"""If a weighted sampler 
should be used for the dataloader."""'}), "('--weighted_sampler', type=str2bool, default=False,\n help='If a weighted sampler should be used for the dataloader.')\n", (2000, 2121), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((2138, 2318), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--binary_labels"""'], {'type': 'str2bool', 'default': '(False)', 'help': '"""If True, label \'Finding\' with classes 0 and 1 will be used for the classification evaluation."""'}), '(\'--binary_labels\', type=str2bool, default=False, help=\n "If True, label \'Finding\' with classes 0 and 1 will be used for the classification evaluation."\n )\n', (2157, 2318), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((2347, 2475), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--text_encoding"""'], {'type': 'str', 'default': '"""char"""', 'help': '"""encoding of the text, either character or wordwise"""'}), "('--text_encoding', type=str, default='char', help=\n 'encoding of the text, either character or wordwise')\n", (2366, 2475), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((2491, 2584), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--len_sequence"""'], {'type': 'int', 'default': '(1024)', 'help': '"""length of sequence"""'}), "('--len_sequence', type=int, default=1024, help=\n 'length of sequence')\n", (2510, 2584), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((2580, 2736), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--word_min_occ"""'], {'type': 'int', 'default': '(3)', 'help': '"""min occurence of a word in the dataset such that it is added to the vocabulary."""'}), "('--word_min_occ', type=int, default=3, help=\n 'min occurence of a word in the dataset such that it is added to the vocabulary.'\n )\n", (2599, 2736), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((2747, 2916), 
'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--text_gen_lastlayer"""'], {'type': 'str', 'default': '"""softmax"""', 'help': '"""Last layer of the text generator. Chose between none, softmax and sigmoid."""'}), "('--text_gen_lastlayer', type=str, default='softmax',\n help=\n 'Last layer of the text generator. Chose between none, softmax and sigmoid.'\n )\n", (2766, 2916), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((2924, 3036), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--style_pa_dim"""'], {'type': 'int', 'default': '(0)', 'help': '"""dimension of varying factor latent space"""'}), "('--style_pa_dim', type=int, default=0, help=\n 'dimension of varying factor latent space')\n", (2943, 3036), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((3032, 3145), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--style_lat_dim"""'], {'type': 'int', 'default': '(0)', 'help': '"""dimension of varying factor latent space"""'}), "('--style_lat_dim', type=int, default=0, help=\n 'dimension of varying factor latent space')\n", (3051, 3145), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((3141, 3255), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--style_text_dim"""'], {'type': 'int', 'default': '(0)', 'help': '"""dimension of varying factor latent space"""'}), "('--style_text_dim', type=int, default=0, help=\n 'dimension of varying factor latent space')\n", (3160, 3255), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((3251, 3372), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--image_channels"""'], {'type': 'int', 'default': '(1)', 'help': '"""number of classes on which the data set trained"""'}), "('--image_channels', type=int, default=1, help=\n 'number of classes on which the data set trained')\n", (3270, 3372), True, 'from mimic.utils.BaseFlags import parser as 
parser\n'), ((3368, 3486), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--img_size"""'], {'type': 'int', 'default': '(128)', 'help': '"""size of the images on which the model is trained"""'}), "('--img_size', type=int, default=128, help=\n 'size of the images on which the model is trained')\n", (3387, 3486), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((3482, 3598), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--DIM_img"""'], {'type': 'int', 'default': '(128)', 'help': '"""number of classes on which the data set trained"""'}), "('--DIM_img', type=int, default=128, help=\n 'number of classes on which the data set trained')\n", (3501, 3598), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((3594, 3711), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--DIM_text"""'], {'type': 'int', 'default': '(128)', 'help': '"""number of classes on which the data set trained"""'}), "('--DIM_text', type=int, default=128, help=\n 'number of classes on which the data set trained')\n", (3613, 3711), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((3707, 3807), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--likelihood_m1"""'], {'type': 'str', 'default': '"""laplace"""', 'help': '"""output distribution"""'}), "('--likelihood_m1', type=str, default='laplace', help=\n 'output distribution')\n", (3726, 3807), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((3803, 3903), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--likelihood_m2"""'], {'type': 'str', 'default': '"""laplace"""', 'help': '"""output distribution"""'}), "('--likelihood_m2', type=str, default='laplace', help=\n 'output distribution')\n", (3822, 3903), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((3899, 4002), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--likelihood_m3"""'], 
{'type': 'str', 'default': '"""categorical"""', 'help': '"""output distribution"""'}), "('--likelihood_m3', type=str, default='categorical',\n help='output distribution')\n", (3918, 4002), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((3999, 4118), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--dataloader_workers"""'], {'type': 'int', 'default': '(8)', 'help': '"""number of workers used for the Dataloader"""'}), "('--dataloader_workers', type=int, default=8, help=\n 'number of workers used for the Dataloader')\n", (4018, 4118), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((4114, 4224), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--use_toy_dataset"""'], {'type': 'bool', 'default': '(False)', 'help': '"""if true uses small toy dataset"""'}), "('--use_toy_dataset', type=bool, default=False, help=\n 'if true uses small toy dataset')\n", (4133, 4224), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((4244, 4350), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--encoder_save_m1"""'], {'type': 'str', 'default': '"""encoderM1"""', 'help': '"""model save for encoder"""'}), "('--encoder_save_m1', type=str, default='encoderM1',\n help='model save for encoder')\n", (4263, 4350), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((4347, 4453), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--encoder_save_m2"""'], {'type': 'str', 'default': '"""encoderM2"""', 'help': '"""model save for encoder"""'}), "('--encoder_save_m2', type=str, default='encoderM2',\n help='model save for encoder')\n", (4366, 4453), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((4450, 4556), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--encoder_save_m3"""'], {'type': 'str', 'default': '"""encoderM3"""', 'help': '"""model save for decoder"""'}), "('--encoder_save_m3', type=str, 
default='encoderM3',\n help='model save for decoder')\n", (4469, 4556), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((4553, 4659), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--decoder_save_m1"""'], {'type': 'str', 'default': '"""decoderM1"""', 'help': '"""model save for decoder"""'}), "('--decoder_save_m1', type=str, default='decoderM1',\n help='model save for decoder')\n", (4572, 4659), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((4656, 4762), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--decoder_save_m2"""'], {'type': 'str', 'default': '"""decoderM2"""', 'help': '"""model save for decoder"""'}), "('--decoder_save_m2', type=str, default='decoderM2',\n help='model save for decoder')\n", (4675, 4762), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((4759, 4865), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--decoder_save_m3"""'], {'type': 'str', 'default': '"""decoderM3"""', 'help': '"""model save for decoder"""'}), "('--decoder_save_m3', type=str, default='decoderM3',\n help='model save for decoder')\n", (4778, 4865), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((4877, 5010), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--text_clf_type"""'], {'type': 'str', 'default': '"""word"""', 'help': '"""text classifier type, implemented are \'word\' and \'char\'"""'}), '(\'--text_clf_type\', type=str, default=\'word\', help=\n "text classifier type, implemented are \'word\' and \'char\'")\n', (4896, 5010), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((5026, 5167), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--img_clf_type"""'], {'type': 'str', 'default': '"""resnet"""', 'help': '"""image classifier type, implemented are \'resnet\' and \'densenet\'"""'}), '(\'--img_clf_type\', type=str, default=\'resnet\', help=\n "image classifier type, 
implemented are \'resnet\' and \'densenet\'")\n', (5045, 5167), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((5183, 5279), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--clf_save_m1"""'], {'type': 'str', 'default': '"""clf_m1"""', 'help': '"""model save for clf"""'}), "('--clf_save_m1', type=str, default='clf_m1', help=\n 'model save for clf')\n", (5202, 5279), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((5275, 5371), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--clf_save_m2"""'], {'type': 'str', 'default': '"""clf_m2"""', 'help': '"""model save for clf"""'}), "('--clf_save_m2', type=str, default='clf_m2', help=\n 'model save for clf')\n", (5294, 5371), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((5367, 5463), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--clf_save_m3"""'], {'type': 'str', 'default': '"""clf_m3"""', 'help': '"""model save for clf"""'}), "('--clf_save_m3', type=str, default='clf_m3', help=\n 'model save for clf')\n", (5386, 5463), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((5459, 5636), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--clf_loss"""'], {'type': 'str', 'default': '"""binary_crossentropy"""', 'choices': "['binary_crossentropy', 'crossentropy', 'bce_with_logits']", 'help': '"""model save for clf"""'}), "('--clf_loss', type=str, default='binary_crossentropy',\n choices=['binary_crossentropy', 'crossentropy', 'bce_with_logits'],\n help='model save for clf')\n", (5478, 5636), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((5662, 5807), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--reduce_lr_on_plateau"""'], {'type': 'bool', 'default': '(False)', 'help': '"""boolean indicating if callback \'reduce lr on plateau\' is used"""'}), '(\'--reduce_lr_on_plateau\', type=bool, default=False,\n help="boolean 
indicating if callback \'reduce lr on plateau\' is used")\n', (5681, 5807), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((5824, 6032), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--max_early_stopping_index"""'], {'type': 'int', 'default': '(5)', 'help': '"""patience of the early stopper. If the target metric did not improve for that amount of epochs, training is stopepd"""'}), "('--max_early_stopping_index', type=int, default=5, help\n =\n 'patience of the early stopper. If the target metric did not improve for that amount of epochs, training is stopepd'\n )\n", (5843, 6032), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((6066, 6202), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--start_early_stopping_epoch"""'], {'type': 'int', 'default': '(0)', 'help': '"""epoch on which to start the early stopping callback"""'}), "('--start_early_stopping_epoch', type=int, default=0,\n help='epoch on which to start the early stopping callback')\n", (6085, 6202), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((6240, 6364), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--beta_m1_style"""'], {'type': 'float', 'default': '(1.0)', 'help': '"""default weight divergence term style modality 1"""'}), "('--beta_m1_style', type=float, default=1.0, help=\n 'default weight divergence term style modality 1')\n", (6259, 6364), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((6360, 6484), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--beta_m2_style"""'], {'type': 'float', 'default': '(1.0)', 'help': '"""default weight divergence term style modality 2"""'}), "('--beta_m2_style', type=float, default=1.0, help=\n 'default weight divergence term style modality 2')\n", (6379, 6484), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((6480, 6604), 'mimic.utils.BaseFlags.parser.add_argument', 
'parser.add_argument', (['"""--beta_m3_style"""'], {'type': 'float', 'default': '(1.0)', 'help': '"""default weight divergence term style modality 2"""'}), "('--beta_m3_style', type=float, default=1.0, help=\n 'default weight divergence term style modality 2')\n", (6499, 6604), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((6600, 6734), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--div_weight_m1_content"""'], {'type': 'float', 'default': '(0.25)', 'help': '"""default weight divergence term content modality 1"""'}), "('--div_weight_m1_content', type=float, default=0.25,\n help='default weight divergence term content modality 1')\n", (6619, 6734), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((6751, 6885), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--div_weight_m2_content"""'], {'type': 'float', 'default': '(0.25)', 'help': '"""default weight divergence term content modality 2"""'}), "('--div_weight_m2_content', type=float, default=0.25,\n help='default weight divergence term content modality 2')\n", (6770, 6885), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((6902, 7036), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--div_weight_m3_content"""'], {'type': 'float', 'default': '(0.25)', 'help': '"""default weight divergence term content modality 3"""'}), "('--div_weight_m3_content', type=float, default=0.25,\n help='default weight divergence term content modality 3')\n", (6921, 7036), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((7053, 7180), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--div_weight_uniform_content"""'], {'type': 'float', 'default': '(0.25)', 'help': '"""default weight divergence term prior"""'}), "('--div_weight_uniform_content', type=float, default=\n 0.25, help='default weight divergence term prior')\n", (7072, 7180), True, 'from mimic.utils.BaseFlags import 
parser as parser\n'), ((7196, 7362), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--rec_weight_m1"""'], {'default': '(0.33)', 'type': 'float', 'help': '"""weight of the m1 modality for the log probs. Type should be either float or string."""'}), "('--rec_weight_m1', default=0.33, type=float, help=\n 'weight of the m1 modality for the log probs. Type should be either float or string.'\n )\n", (7215, 7362), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((7373, 7539), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--rec_weight_m2"""'], {'default': '(0.33)', 'type': 'float', 'help': '"""weight of the m2 modality for the log probs. Type should be either float or string."""'}), "('--rec_weight_m2', default=0.33, type=float, help=\n 'weight of the m2 modality for the log probs. Type should be either float or string.'\n )\n", (7392, 7539), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((7550, 7716), 'mimic.utils.BaseFlags.parser.add_argument', 'parser.add_argument', (['"""--rec_weight_m3"""'], {'default': '(0.33)', 'type': 'float', 'help': '"""weight of the m3 modality for the log probs. Type should be either float or string."""'}), "('--rec_weight_m3', default=0.33, type=float, help=\n 'weight of the m3 modality for the log probs. 
Type should be either float or string.'\n )\n", (7569, 7716), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((8423, 8488), 'os.system', 'os.system', (['"""nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp"""'], {}), "('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n", (8432, 8488), False, 'import os\n'), ((8581, 8608), 'numpy.argmax', 'np.argmax', (['memory_available'], {}), '(memory_available)\n', (8590, 8608), True, 'import numpy as np\n'), ((8917, 8936), 'mimic.utils.filehandling.expand_paths', 'expand_paths', (['flags'], {}), '(flags)\n', (8929, 8936), False, 'from mimic.utils.filehandling import expand_paths\n'), ((8952, 8977), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8975, 8977), False, 'import torch\n'), ((8997, 9040), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (9009, 9040), False, 'import torch\n'), ((7947, 7967), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '()\n', (7965, 7967), False, 'import argparse\n'), ((7990, 8010), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (7999, 8010), False, 'import json\n'), ((8105, 8144), 'mimic.utils.BaseFlags.parser.parse_args', 'parser.parse_args', (['[]'], {'namespace': 't_args'}), '([], namespace=t_args)\n', (8122, 8144), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((8170, 8205), 'mimic.utils.BaseFlags.parser.parse_args', 'parser.parse_args', ([], {'namespace': 't_args'}), '(namespace=t_args)\n', (8187, 8205), True, 'from mimic.utils.BaseFlags import parser as parser\n'), ((9685, 9712), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (9702, 9712), True, 'import numpy as np\n'), ((441, 494), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (467, 494), False, 'import argparse\n'), ((9362, 9384), 'pathlib.Path', 'Path', 
(['flags.load_flags'], {}), '(flags.load_flags)\n', (9366, 9384), False, 'from pathlib import Path\n')] |
import os,sys
import pandas as pd
import numpy as np
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
# La corrección para llevar los indicadores a 100 000 habitantes.
_n=115.131
#datos casos diarios
df_muertes=pd.read_csv('covid19-bolivia-udape/decesos_diarios.csv',sep=',',header=0,index_col=0).fillna(0)
muertes = df_muertes.iloc[:,:].values.T
y=df_muertes.index.values #con el indice dado por las fechas del reporte
mdf_muertes = df_muertes.rolling(7,min_periods=1).mean()
mean_muertes = mdf_muertes.iloc[:,:].values.T
nac_muertes = muertes[0]+muertes[1]+muertes[2]+muertes[3]+muertes[4]+muertes[5]+muertes[6]+muertes[7]+muertes[8]
mean_nac_muertes = mean_muertes[0]+mean_muertes[1]+mean_muertes[2]+mean_muertes[3]+mean_muertes[4]+mean_muertes[5]+mean_muertes[6]+mean_muertes[7]+mean_muertes[8]
import matplotlib.pyplot as plt
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
import matplotlib.image as mpimg
from matplotlib import font_manager as fm, rcParams
fpath = os.path.join(r'MonoLisaSimpson-Regular.ttf')
prop = fm.FontProperties(fname=fpath)
fname = os.path.split(fpath)[1]
# These are the "Tableau 20" colors as RGB.
tableau20 = [(48,48,48), (240,240,240), (59,170,6), (61,167,249),
(230,0,0)]
#1[0] fondo plomo
#2[1] blanco de titulos
#3[2] rojo neon puntos
#4[3] verdes
#5[4] ROJO
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
bol = mpimg.imread('bol.jpg')
imagebox = OffsetImage(bol,zoom=1)
firma = AnnotationBbox(imagebox,(len(y)/2,1))
fig = plt.figure(figsize=(50,25))
#Color del fondo
fig.patch.set_facecolor(tableau20[0])
plt.axes().patch.set_facecolor(tableau20[0])
plt.subplots_adjust(top=0.80)
plt.title('\nFallecimientos/día por 100\'000 Hab a nivel Nacional'+'\n(último reporte en fuente: '+y[-1]+')\n',fontsize=70,fontproperties=prop,color=tableau20[1])
plt.plot(y,nac_muertes/_n,label='Nuevos Casos/día',linewidth=3,color=tableau20[3],linestyle='-',marker='.',markersize=5,markeredgecolor='yellow' ,markerfacecolor='y')
plt.plot(y,mean_nac_muertes/_n,label='Promedio 7 días',linewidth=8,color=tableau20[4],linestyle='-')
plt.legend(loc='upper left',fontsize=50)
plt.yticks(fontsize=50,fontproperties=prop,color=tableau20[1])
plt.xticks(y[::30],fontsize=35,rotation=45,fontproperties=prop,color=tableau20[1])
plt.ylabel('Casos/día',fontsize=60,fontproperties=prop,color=tableau20[1])
plt.gca().yaxis.grid(linestyle='--',linewidth=1,dashes=(5,15))
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["bottom"].set_visible(False)
plt.gca().spines["right"].set_visible(False)
plt.gca().spines["left"].set_visible(False)
plt.gca().get_xaxis().tick_bottom()
plt.gca().get_yaxis().tick_left()
plt.gca().add_artist(firma)
plt.subplots_adjust(bottom=0.2)
plt.text(0,-1*np.max(nac_muertes/_n)/2.8,"Data source: https://github.com/mauforonda/covid19-bolivia"
"\nAutor: Telegram Bot: @Bolivian_Bot"
"\nNota: Curva de fallecimientos/día ajustada a 100 000 hab",fontsize=35,fontproperties=prop,color=tableau20[1]);
plt.savefig('imagenes/muertesNac.png')
| [
"matplotlib.offsetbox.OffsetImage",
"matplotlib.pyplot.ylabel",
"pandas.read_csv",
"matplotlib.image.imread",
"matplotlib.pyplot.plot",
"os.path.split",
"numpy.max",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"numpy.warnings.fi... | [((53, 128), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {'category': 'np.VisibleDeprecationWarning'}), "('ignore', category=np.VisibleDeprecationWarning)\n", (79, 128), True, 'import numpy as np\n'), ((1055, 1098), 'os.path.join', 'os.path.join', (['"""MonoLisaSimpson-Regular.ttf"""'], {}), "('MonoLisaSimpson-Regular.ttf')\n", (1067, 1098), False, 'import os, sys\n'), ((1107, 1137), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'fname': 'fpath'}), '(fname=fpath)\n', (1124, 1137), True, 'from matplotlib import font_manager as fm, rcParams\n'), ((1712, 1735), 'matplotlib.image.imread', 'mpimg.imread', (['"""bol.jpg"""'], {}), "('bol.jpg')\n", (1724, 1735), True, 'import matplotlib.image as mpimg\n'), ((1747, 1771), 'matplotlib.offsetbox.OffsetImage', 'OffsetImage', (['bol'], {'zoom': '(1)'}), '(bol, zoom=1)\n', (1758, 1771), False, 'from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox\n'), ((1826, 1854), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(50, 25)'}), '(figsize=(50, 25))\n', (1836, 1854), True, 'import matplotlib.pyplot as plt\n'), ((1956, 1984), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.8)'}), '(top=0.8)\n', (1975, 1984), True, 'import matplotlib.pyplot as plt\n'), ((1986, 2170), 'matplotlib.pyplot.title', 'plt.title', (['("""\nFallecimientos/día por 100\'000 Hab a nivel Nacional""" +\n """\n(último reporte en fuente: """ + y[-1] + \')\\n\')'], {'fontsize': '(70)', 'fontproperties': 'prop', 'color': 'tableau20[1]'}), '("""\nFallecimientos/día por 100\'000 Hab a nivel Nacional""" +\n """\n(último reporte en fuente: """ + y[-1] + \')\\n\', fontsize=70,\n fontproperties=prop, color=tableau20[1])\n', (1995, 2170), True, 'import matplotlib.pyplot as plt\n'), ((2149, 2335), 'matplotlib.pyplot.plot', 'plt.plot', (['y', '(nac_muertes / _n)'], {'label': '"""Nuevos Casos/día"""', 'linewidth': '(3)', 
'color': 'tableau20[3]', 'linestyle': '"""-"""', 'marker': '"""."""', 'markersize': '(5)', 'markeredgecolor': '"""yellow"""', 'markerfacecolor': '"""y"""'}), "(y, nac_muertes / _n, label='Nuevos Casos/día', linewidth=3, color=\n tableau20[3], linestyle='-', marker='.', markersize=5, markeredgecolor=\n 'yellow', markerfacecolor='y')\n", (2157, 2335), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2427), 'matplotlib.pyplot.plot', 'plt.plot', (['y', '(mean_nac_muertes / _n)'], {'label': '"""Promedio 7 días"""', 'linewidth': '(8)', 'color': 'tableau20[4]', 'linestyle': '"""-"""'}), "(y, mean_nac_muertes / _n, label='Promedio 7 días', linewidth=8,\n color=tableau20[4], linestyle='-')\n", (2324, 2427), True, 'import matplotlib.pyplot as plt\n'), ((2417, 2458), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(50)'}), "(loc='upper left', fontsize=50)\n", (2427, 2458), True, 'import matplotlib.pyplot as plt\n'), ((2458, 2522), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(50)', 'fontproperties': 'prop', 'color': 'tableau20[1]'}), '(fontsize=50, fontproperties=prop, color=tableau20[1])\n', (2468, 2522), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2612), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y[::30]'], {'fontsize': '(35)', 'rotation': '(45)', 'fontproperties': 'prop', 'color': 'tableau20[1]'}), '(y[::30], fontsize=35, rotation=45, fontproperties=prop, color=\n tableau20[1])\n', (2531, 2612), True, 'import matplotlib.pyplot as plt\n'), ((2604, 2681), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Casos/día"""'], {'fontsize': '(60)', 'fontproperties': 'prop', 'color': 'tableau20[1]'}), "('Casos/día', fontsize=60, fontproperties=prop, color=tableau20[1])\n", (2614, 2681), True, 'import matplotlib.pyplot as plt\n'), ((3036, 3067), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (3055, 3067), True, 'import matplotlib.pyplot as plt\n'), ((3346, 
3384), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""imagenes/muertesNac.png"""'], {}), "('imagenes/muertesNac.png')\n", (3357, 3384), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1166), 'os.path.split', 'os.path.split', (['fpath'], {}), '(fpath)\n', (1159, 1166), False, 'import os, sys\n'), ((248, 340), 'pandas.read_csv', 'pd.read_csv', (['"""covid19-bolivia-udape/decesos_diarios.csv"""'], {'sep': '""","""', 'header': '(0)', 'index_col': '(0)'}), "('covid19-bolivia-udape/decesos_diarios.csv', sep=',', header=0,\n index_col=0)\n", (259, 340), True, 'import pandas as pd\n'), ((3008, 3017), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3015, 3017), True, 'import matplotlib.pyplot as plt\n'), ((1910, 1920), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1918, 1920), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2688), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2686, 2688), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3106), 'numpy.max', 'np.max', (['(nac_muertes / _n)'], {}), '(nac_muertes / _n)\n', (3088, 3106), True, 'import numpy as np\n'), ((2742, 2751), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2749, 2751), True, 'import matplotlib.pyplot as plt\n'), ((2789, 2798), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2796, 2798), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2848), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2846, 2848), True, 'import matplotlib.pyplot as plt\n'), ((2888, 2897), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2895, 2897), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2943), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2941, 2943), True, 'import matplotlib.pyplot as plt\n'), ((2974, 2983), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2981, 2983), True, 'import matplotlib.pyplot as plt\n')] |
import os
import itertools
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils as tutils
import torchvision.transforms as transforms
import numpy as np
from tqdm import tqdm
from fsgan.utils.obj_factory import obj_factory
from fsgan.utils.tensorboard_logger import TensorBoardLogger
from fsgan.utils import utils, img_utils
from fsgan.utils.seg_utils import blend_seg_pred, blend_seg_label
from fsgan.utils.iou_metric import IOUMetric
from fsgan.datasets import img_landmarks_transforms
class IOUBenchmark(IOUMetric):
def __init__(self, num_classes, normalized=False, ignore_index=None):
super(IOUBenchmark, self).__init__(num_classes, normalized, ignore_index)
def to(self, device):
return self
def __call__(self, pred, target):
self.add(pred, target)
_, miou = self.value()
return {'iou': miou}
def main(
        # General arguments
        exp_dir, resume_dir=None, start_epoch=None, epochs=(90,), iterations=None, resolutions=(128, 256),
        learning_rate=(1e-1,), gpus=None, workers=4, batch_size=(64,), seed=None, log_freq=20,
        # Data arguments
        train_dataset='fsgan.image_seg_dataset.ImageSegDataset', val_dataset=None, numpy_transforms=None,
        tensor_transforms=('img_landmarks_transforms.ToTensor()',
                           'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),
        # Training arguments
        optimizer='optim.SGD(momentum=0.9,weight_decay=1e-4)', scheduler='lr_scheduler.StepLR(step_size=30,gamma=0.1)',
        criterion='nn.CrossEntropyLoss', model='fsgan.models.simple_unet.UNet(n_classes=3,feature_scale=1)',
        pretrained=False, benchmark='fsgan.train_segmentation.IOUBenchmark(3)'
):
    """ Train a segmentation network progressively over a list of resolutions.
    String arguments (train_dataset, optimizer, scheduler, criterion, model,
    benchmark) are object-factory expressions that are instantiated via
    obj_factory. Per-resolution arguments (epochs, learning_rate, batch_size,
    iterations) may be given as a single value, which is then replicated for
    every resolution. Checkpoints are written to exp_dir and training resumes
    from 'model_latest.pth' when found in exp_dir (or resume_dir, if given).
    """
    def proces_epoch(dataset_loader, train=True):
        """ Run one epoch over dataset_loader; returns the average total loss.
        Closes over main's loop state (epoch, res, res_epochs, model, logger,
        optimizer, criterion, benchmark, device, scheduler).
        """
        stage = 'TRAINING' if train else 'VALIDATION'
        total_iter = len(dataset_loader) * dataset_loader.batch_size * epoch
        pbar = tqdm(dataset_loader, unit='batches')
        # Set networks training mode
        model.train(train)
        # Reset logger
        logger.reset(prefix='{} {}X{}: Epoch: {} / {}; LR: {:.0e}; '.format(
            stage, res, res, epoch + 1, res_epochs, scheduler.get_lr()[0]))
        # For each batch in the training data
        for i, (input, target) in enumerate(pbar):
            # Prepare input
            input = input.to(device)
            target = target.to(device)
            with torch.no_grad():
                # Targets arrive one-hot encoded; convert to class indices.
                target = target.argmax(dim=1)
            # Execute model
            pred = model(input)
            # Calculate loss
            loss_total = criterion(pred, target)
            # Run benchmark
            benchmark_res = benchmark(pred, target) if benchmark is not None else {}
            if train:
                # Update generator weights
                optimizer.zero_grad()
                loss_total.backward()
                optimizer.step()
            logger.update('losses', total=loss_total)
            logger.update('bench', **benchmark_res)
            total_iter += dataset_loader.batch_size
            # Batch logs
            pbar.set_description(str(logger))
            if train and i % log_freq == 0:
                logger.log_scalars_val('%dx%d/batch' % (res, res), total_iter)
        # Epoch logs
        logger.log_scalars_avg('%dx%d/epoch/%s' % (res, res, 'train' if train else 'val'), epoch)
        if not train:
            # Log images (input/pred/target of the last validation batch)
            seg_pred = blend_seg_pred(input, pred)
            seg_gt = blend_seg_label(input, target)
            grid = img_utils.make_grid(input, seg_pred, seg_gt)
            logger.log_image('%dx%d/vis' % (res, res), grid, epoch)
        return logger.log_dict['losses']['total'].avg
    #################
    # Main pipeline #
    #################
    # Validation: normalize scalar arguments to per-resolution lists.
    resolutions = resolutions if isinstance(resolutions, (list, tuple)) else [resolutions]
    learning_rate = learning_rate if isinstance(learning_rate, (list, tuple)) else [learning_rate]
    epochs = epochs if isinstance(epochs, (list, tuple)) else [epochs]
    batch_size = batch_size if isinstance(batch_size, (list, tuple)) else [batch_size]
    iterations = iterations if iterations is None or isinstance(iterations, (list, tuple)) else [iterations]
    learning_rate = learning_rate * len(resolutions) if len(learning_rate) == 1 else learning_rate
    epochs = epochs * len(resolutions) if len(epochs) == 1 else epochs
    batch_size = batch_size * len(resolutions) if len(batch_size) == 1 else batch_size
    if iterations is not None:
        iterations = iterations * len(resolutions) if len(iterations) == 1 else iterations
        iterations = utils.str2int(iterations)
    if not os.path.isdir(exp_dir):
        raise RuntimeError('Experiment directory was not found: \'' + exp_dir + '\'')
    assert len(learning_rate) == len(resolutions)
    assert len(epochs) == len(resolutions)
    assert len(batch_size) == len(resolutions)
    assert iterations is None or len(iterations) == len(resolutions)
    # Seed
    utils.set_seed(seed)
    # Check CUDA device availability
    device, gpus = utils.set_device(gpus)
    # Initialize loggers
    logger = TensorBoardLogger(log_dir=exp_dir)
    # Initialize datasets
    numpy_transforms = obj_factory(numpy_transforms) if numpy_transforms is not None else []
    tensor_transforms = obj_factory(tensor_transforms) if tensor_transforms is not None else []
    img_transforms = img_landmarks_transforms.Compose(numpy_transforms + tensor_transforms)
    train_dataset = obj_factory(train_dataset, transform=img_transforms)
    if val_dataset is not None:
        val_dataset = obj_factory(val_dataset, transform=img_transforms)
    # Create networks. 'arch' keeps the factory string so it can be stored in checkpoints.
    arch = utils.get_arch(model, num_classes=len(train_dataset.classes))
    model = obj_factory(model, num_classes=len(train_dataset.classes)).to(device)
    # Resume from a checkpoint or initialize the networks weights randomly
    checkpoint_dir = exp_dir if resume_dir is None else resume_dir
    model_path = os.path.join(checkpoint_dir, 'model_latest.pth')
    best_loss = 1e6
    curr_res = resolutions[0]
    optimizer_state = None
    if os.path.isfile(model_path):
        print("=> loading checkpoint from '{}'".format(checkpoint_dir))
        # model
        checkpoint = torch.load(model_path)
        if 'resolution' in checkpoint:
            curr_res = checkpoint['resolution']
            start_epoch = checkpoint['epoch'] if start_epoch is None else start_epoch
        # else:
        #     curr_res = resolutions[1] if len(resolutions) > 1 else resolutions[0]
        best_loss_key = 'best_loss_%d' % curr_res
        best_loss = checkpoint[best_loss_key] if best_loss_key in checkpoint else best_loss
        model.apply(utils.init_weights)
        # strict=False so checkpoints with extra/missing keys still load.
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        optimizer_state = checkpoint['optimizer']
    else:
        print("=> no checkpoint found at '{}'".format(checkpoint_dir))
        if not pretrained:
            print("=> randomly initializing networks...")
            model.apply(utils.init_weights)
    # Lossess
    criterion = obj_factory(criterion).to(device)
    # Benchmark
    benchmark = obj_factory(benchmark).to(device)
    # Support multiple GPUs
    if gpus and len(gpus) > 1:
        model = nn.DataParallel(model, gpus)
    # For each resolution. Resolutions are assumed to be powers of 2, so the
    # log2 difference gives the index of the resolution to resume from.
    start_res_ind = int(np.log2(curr_res)) - int(np.log2(resolutions[0]))
    start_epoch = 0 if start_epoch is None else start_epoch
    for ri in range(start_res_ind, len(resolutions)):
        res = resolutions[ri]
        res_lr = learning_rate[ri]
        res_epochs = epochs[ri]
        res_iterations = iterations[ri] if iterations is not None else None
        res_batch_size = batch_size[ri]
        # Optimizer and scheduler
        # NOTE(review): 'optimizer'/'scheduler' are rebound to objects on the
        # first iteration; obj_factory is presumably a no-op / re-wrap for
        # non-string inputs on later resolutions -- TODO confirm.
        optimizer = obj_factory(optimizer, model.parameters(), lr=res_lr)
        scheduler = obj_factory(scheduler, optimizer)
        if optimizer_state is not None:
            # NOTE(review): the checkpointed optimizer state is re-applied at
            # every resolution, not just the resumed one -- confirm intended.
            optimizer.load_state_dict(optimizer_state)
        # Initialize data loaders
        if res_iterations is None:
            train_sampler = tutils.data.sampler.WeightedRandomSampler(train_dataset.weights, len(train_dataset))
        else:
            train_sampler = tutils.data.sampler.WeightedRandomSampler(train_dataset.weights, res_iterations)
        train_loader = tutils.data.DataLoader(train_dataset, batch_size=res_batch_size, sampler=train_sampler,
                                               num_workers=workers, pin_memory=True, drop_last=True, shuffle=False)
        if val_dataset is not None:
            if res_iterations is None:
                val_sampler = tutils.data.sampler.WeightedRandomSampler(val_dataset.weights, len(val_dataset))
            else:
                # Scale validation iterations proportionally to dataset sizes.
                val_iterations = (res_iterations * len(val_dataset)) // len(train_dataset)
                val_sampler = tutils.data.sampler.WeightedRandomSampler(val_dataset.weights, val_iterations)
            val_loader = tutils.data.DataLoader(val_dataset, batch_size=res_batch_size, sampler=val_sampler,
                                                 num_workers=workers, pin_memory=True, drop_last=True, shuffle=False)
        else:
            val_loader = None
        # For each epoch
        for epoch in range(start_epoch, res_epochs):
            total_loss = proces_epoch(train_loader, train=True)
            if val_loader is not None:
                with torch.no_grad():
                    total_loss = proces_epoch(val_loader, train=False)
            if hasattr(benchmark, 'reset'):
                benchmark.reset()
            # Schedulers step (in PyTorch 1.1.0+ it must follow after the epoch training and validation steps)
            if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                scheduler.step(total_loss)
            else:
                scheduler.step()
            # Save models checkpoints
            is_best = total_loss < best_loss
            best_loss = min(best_loss, total_loss)
            utils.save_checkpoint(exp_dir, 'model', {
                'resolution': res,
                'epoch': epoch + 1,
                'state_dict': model.module.state_dict() if gpus and len(gpus) > 1 else model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_loss_%d' % res: best_loss,
                'arch': arch,
            }, is_best)
        # Reset start epoch to 0 because it's should only effect the first training resolution
        start_epoch = 0
        best_loss = 1e6
if __name__ == "__main__":
    # Parse program arguments. Defaults must stay in sync with main()'s
    # signature; parsed args are forwarded verbatim via main(**vars(...)).
    import argparse
    parser = argparse.ArgumentParser('train_segmentation_ces')
    # General options: experiment bookkeeping, schedule, and hardware.
    general = parser.add_argument_group('general')
    general.add_argument('exp_dir', metavar='DIR',
                         help='path to experiment directory')
    general.add_argument('-rd', '--resume_dir', metavar='DIR',
                         help='path to resume directory (default: None)')
    general.add_argument('-se', '--start-epoch', metavar='N',
                         help='manual epoch number (useful on restarts)')
    general.add_argument('-e', '--epochs', default=90, type=int, nargs='+', metavar='N',
                         help='number of total epochs to run')
    general.add_argument('-i', '--iterations', nargs='+', metavar='N',
                         help='number of iterations per resolution to run')
    general.add_argument('-r', '--resolutions', default=(128, 256), type=int, nargs='+', metavar='N',
                         help='the training resolutions list (must be power of 2)')
    general.add_argument('-lr', '--learning-rate', default=(1e-1,), type=float, nargs='+', metavar='F',
                         help='initial learning rate per resolution')
    general.add_argument('--gpus', nargs='+', type=int, metavar='N',
                         help='list of gpu ids to use (default: all)')
    general.add_argument('-w', '--workers', default=4, type=int, metavar='N',
                         help='number of data loading workers (default: 4)')
    general.add_argument('-b', '--batch-size', default=(64,), type=int, nargs='+', metavar='N',
                         help='mini-batch size (default: 64)')
    general.add_argument('--seed', type=int, metavar='N',
                         help='random seed')
    general.add_argument('-lf', '--log_freq', default=20, type=int, metavar='N',
                         help='number of steps between each loss plot')
    # Data options: dataset factory strings and transform expressions.
    data = parser.add_argument_group('data')
    data.add_argument('-td', '--train_dataset', default='fsgan.image_seg_dataset.ImageSegDataset',
                      help='train dataset object')
    data.add_argument('-vd', '--val_dataset',
                      help='val dataset object')
    data.add_argument('-nt', '--numpy_transforms', nargs='+',
                      help='Numpy transforms')
    data.add_argument('-tt', '--tensor_transforms', nargs='+', help='tensor transforms',
                      default=('img_landmarks_transforms.ToTensor()',
                               'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'))
    # Training options: obj_factory expressions for the training components.
    training = parser.add_argument_group('training')
    training.add_argument('-o', '--optimizer', default='optim.SGD(momentum=0.9,weight_decay=1e-4)',
                          help='network\'s optimizer object')
    training.add_argument('-s', '--scheduler', default='lr_scheduler.StepLR(step_size=30,gamma=0.1)',
                          help='scheduler object')
    training.add_argument('-c', '--criterion', default='nn.CrossEntropyLoss',
                          help='criterion object')
    training.add_argument('-m', '--model', default='fsgan.models.simple_unet.UNet(n_classes=3,feature_scale=1)',
                          help='model object')
    training.add_argument('-p', '--pretrained', dest='pretrained', action='store_true',
                          help='use pre-trained model')
    training.add_argument('-be', '--benchmark', default='fsgan.train_segmentation.IOUBenchmark(3)',
                          help='benchmark object')
    main(**vars(parser.parse_args()))
| [
"fsgan.utils.seg_utils.blend_seg_label",
"fsgan.utils.seg_utils.blend_seg_pred",
"fsgan.utils.utils.str2int",
"argparse.ArgumentParser",
"fsgan.utils.utils.set_device",
"fsgan.datasets.img_landmarks_transforms.Compose",
"os.path.isdir",
"fsgan.utils.img_utils.make_grid",
"torch.utils.data.sampler.We... | [((5021, 5041), 'fsgan.utils.utils.set_seed', 'utils.set_seed', (['seed'], {}), '(seed)\n', (5035, 5041), False, 'from fsgan.utils import utils, img_utils\n'), ((5099, 5121), 'fsgan.utils.utils.set_device', 'utils.set_device', (['gpus'], {}), '(gpus)\n', (5115, 5121), False, 'from fsgan.utils import utils, img_utils\n'), ((5161, 5195), 'fsgan.utils.tensorboard_logger.TensorBoardLogger', 'TensorBoardLogger', ([], {'log_dir': 'exp_dir'}), '(log_dir=exp_dir)\n', (5178, 5195), False, 'from fsgan.utils.tensorboard_logger import TensorBoardLogger\n'), ((5433, 5503), 'fsgan.datasets.img_landmarks_transforms.Compose', 'img_landmarks_transforms.Compose', (['(numpy_transforms + tensor_transforms)'], {}), '(numpy_transforms + tensor_transforms)\n', (5465, 5503), False, 'from fsgan.datasets import img_landmarks_transforms\n'), ((5525, 5577), 'fsgan.utils.obj_factory.obj_factory', 'obj_factory', (['train_dataset'], {'transform': 'img_transforms'}), '(train_dataset, transform=img_transforms)\n', (5536, 5577), False, 'from fsgan.utils.obj_factory import obj_factory\n'), ((6021, 6069), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model_latest.pth"""'], {}), "(checkpoint_dir, 'model_latest.pth')\n", (6033, 6069), False, 'import os\n'), ((6154, 6180), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (6168, 6180), False, 'import os\n'), ((10644, 10693), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""train_segmentation_ces"""'], {}), "('train_segmentation_ces')\n", (10667, 10693), False, 'import argparse\n'), ((1904, 1940), 'tqdm.tqdm', 'tqdm', (['dataset_loader'], {'unit': '"""batches"""'}), "(dataset_loader, unit='batches')\n", (1908, 1940), False, 'from tqdm import tqdm\n'), ((4648, 4673), 'fsgan.utils.utils.str2int', 'utils.str2int', (['iterations'], {}), '(iterations)\n', (4661, 4673), False, 'from fsgan.utils import utils, img_utils\n'), ((4686, 4708), 'os.path.isdir', 'os.path.isdir', 
(['exp_dir'], {}), '(exp_dir)\n', (4699, 4708), False, 'import os\n'), ((5246, 5275), 'fsgan.utils.obj_factory.obj_factory', 'obj_factory', (['numpy_transforms'], {}), '(numpy_transforms)\n', (5257, 5275), False, 'from fsgan.utils.obj_factory import obj_factory\n'), ((5340, 5370), 'fsgan.utils.obj_factory.obj_factory', 'obj_factory', (['tensor_transforms'], {}), '(tensor_transforms)\n', (5351, 5370), False, 'from fsgan.utils.obj_factory import obj_factory\n'), ((5632, 5682), 'fsgan.utils.obj_factory.obj_factory', 'obj_factory', (['val_dataset'], {'transform': 'img_transforms'}), '(val_dataset, transform=img_transforms)\n', (5643, 5682), False, 'from fsgan.utils.obj_factory import obj_factory\n'), ((6291, 6313), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (6301, 6313), False, 'import torch\n'), ((7307, 7335), 'torch.nn.DataParallel', 'nn.DataParallel', (['model', 'gpus'], {}), '(model, gpus)\n', (7322, 7335), True, 'import torch.nn as nn\n'), ((7893, 7926), 'fsgan.utils.obj_factory.obj_factory', 'obj_factory', (['scheduler', 'optimizer'], {}), '(scheduler, optimizer)\n', (7904, 7926), False, 'from fsgan.utils.obj_factory import obj_factory\n'), ((8351, 8516), 'torch.utils.data.DataLoader', 'tutils.data.DataLoader', (['train_dataset'], {'batch_size': 'res_batch_size', 'sampler': 'train_sampler', 'num_workers': 'workers', 'pin_memory': '(True)', 'drop_last': '(True)', 'shuffle': '(False)'}), '(train_dataset, batch_size=res_batch_size, sampler=\n train_sampler, num_workers=workers, pin_memory=True, drop_last=True,\n shuffle=False)\n', (8373, 8516), True, 'import torch.utils as tutils\n'), ((3438, 3465), 'fsgan.utils.seg_utils.blend_seg_pred', 'blend_seg_pred', (['input', 'pred'], {}), '(input, pred)\n', (3452, 3465), False, 'from fsgan.utils.seg_utils import blend_seg_pred, blend_seg_label\n'), ((3487, 3517), 'fsgan.utils.seg_utils.blend_seg_label', 'blend_seg_label', (['input', 'target'], {}), '(input, target)\n', (3502, 3517), False, 'from 
fsgan.utils.seg_utils import blend_seg_pred, blend_seg_label\n'), ((3537, 3581), 'fsgan.utils.img_utils.make_grid', 'img_utils.make_grid', (['input', 'seg_pred', 'seg_gt'], {}), '(input, seg_pred, seg_gt)\n', (3556, 3581), False, 'from fsgan.utils import utils, img_utils\n'), ((7130, 7152), 'fsgan.utils.obj_factory.obj_factory', 'obj_factory', (['criterion'], {}), '(criterion)\n', (7141, 7152), False, 'from fsgan.utils.obj_factory import obj_factory\n'), ((7197, 7219), 'fsgan.utils.obj_factory.obj_factory', 'obj_factory', (['benchmark'], {}), '(benchmark)\n', (7208, 7219), False, 'from fsgan.utils.obj_factory import obj_factory\n'), ((7387, 7404), 'numpy.log2', 'np.log2', (['curr_res'], {}), '(curr_res)\n', (7394, 7404), True, 'import numpy as np\n'), ((7412, 7435), 'numpy.log2', 'np.log2', (['resolutions[0]'], {}), '(resolutions[0])\n', (7419, 7435), True, 'import numpy as np\n'), ((8247, 8332), 'torch.utils.data.sampler.WeightedRandomSampler', 'tutils.data.sampler.WeightedRandomSampler', (['train_dataset.weights', 'res_iterations'], {}), '(train_dataset.weights, res_iterations\n )\n', (8288, 8332), True, 'import torch.utils as tutils\n'), ((8983, 9144), 'torch.utils.data.DataLoader', 'tutils.data.DataLoader', (['val_dataset'], {'batch_size': 'res_batch_size', 'sampler': 'val_sampler', 'num_workers': 'workers', 'pin_memory': '(True)', 'drop_last': '(True)', 'shuffle': '(False)'}), '(val_dataset, batch_size=res_batch_size, sampler=\n val_sampler, num_workers=workers, pin_memory=True, drop_last=True,\n shuffle=False)\n', (9005, 9144), True, 'import torch.utils as tutils\n'), ((2402, 2417), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2415, 2417), False, 'import torch\n'), ((8879, 8957), 'torch.utils.data.sampler.WeightedRandomSampler', 'tutils.data.sampler.WeightedRandomSampler', (['val_dataset.weights', 'val_iterations'], {}), '(val_dataset.weights, val_iterations)\n', (8920, 8957), True, 'import torch.utils as tutils\n'), ((9431, 9446), 'torch.no_grad', 
'torch.no_grad', ([], {}), '()\n', (9444, 9446), False, 'import torch\n')] |
""" Class ScenarioCategory
Creation date: 2018 10 30
Author(s): <NAME>
To do:
Add "comprises" method based on the "fall_into" method that is defined for Scenario.
Modifications:
2018 11 05: Make code PEP8 compliant.
2018 11 07: Change use of models.
2018 11 22: Enable instantiation using JSON code.
2018 11 29: Add functionality to return the derived Tags.
2018 12 06: Make it possible to return full JSON code (incl. attributes' JSON code).
2019 05 22: Make use of type_checking.py to shorten the initialization.
2019 10 11: Update of terminology.
2019 11 04: Add options to automatically assign unique ids to actor/activities.
2020 07 30: Update conversion of scenario category to a string.
2020 07 31: Add the includes method.
2020 08 15: Remove static_environment and add static_physical_things.
2020 08 16: Add dynamic_physical_thing_categories.
2020 08 23: Remove the update_uid options because uid is automatically generated by ScenarioElement.
2020 08 24: Enable instantiation of ScenarioCategory from json without needing full json code.
2020 08 25: Add comprises() function.
2020 10 04: Change way of creating object from JSON code.
2020 10 12: Remove Dynamic/StaticPhysicalThing and use PhysicalElement instead.
"""
from __future__ import annotations
from typing import List, Tuple, Union
import fnmatch
import numpy as np
from .activity import Activity
from .activity_category import ActivityCategory, _activity_category_from_json
from .actor import Actor
from .actor_category import ActorCategory, _actor_category_from_json
from .physical_element_category import PhysicalElementCategory, _physical_element_category_from_json
from .qualitative_element import QualitativeElement, _qualitative_element_props_from_json
from .scenario_element import DMObjects, _attributes_from_json, _object_from_json
from .type_checking import check_for_type, check_for_list, check_for_tuple
class ScenarioCategory(QualitativeElement):
    """ ScenarioCategory - A qualitative description
    Although a scenario is a quantitative description, there also exists a
    qualitative description of a scenario. We refer to the qualitative
    description of a scenario as a scenario category. The qualitative
    description can be regarded as an abstraction of the quantitative scenario.
    Scenario categories comprise scenarios. A scenario category may comprise
    multiple scenarios. On the other hand, multiple scenario categories may
    comprise the same scenario.
    A scenario category can include another scenario category.
    When instantiating the ScenarioCategory object, the name, description,
    image, unique id (uid), and tags are passed. To set the static physical
    things, activities, actors, and acts, use the corresponding methods, i.e.,
    set_physical_elements(), set_activities(), set_actors(), and set_acts(),
    respectively.
    Attributes:
        description (str): A description of the scenario class. The objective of
            the description is to make the scenario class human interpretable.
        image (str): Path to image that schematically shows the class.
        activities (List[ActivityCategory]): List of activities that
            are used for this ScenarioCategory.
        physical_elements (List[PhysicalElementCategory]): List of physical
            things that participate in the Scenario. Could be both static and
            dynamic.
        actors (List[ActorCategory]): List of actors that participate in the
            Scenario.
        acts (List[Tuple[ActorCategory, ActivityCategory]]): The acts describe
            which actors perform which activities.
        name (str): A name that serves as a short description of the scenario
            category.
        uid (int): A unique ID.
        tags (List[Tag]): A list of tags that formally defines the scenario
            category. These tags determine whether scenarios fall into this
            scenario category or not.
    """
    def __init__(self, image: str, description: str = "", **kwargs):
        # Check the types of the inputs
        check_for_type("image", image, str)
        # Assign the attributes
        QualitativeElement.__init__(self, description=description, **kwargs)
        self.image = image
        self.activities = []  # Type: List[ActivityCategory]
        self.physical_elements = []  # Type: List[PhysicalElementCategory]
        self.actors = []  # Type: List[ActorCategory]
        self.acts = []  # Type: List[Tuple[ActorCategory, ActivityCategory]]
        # Set attributes if provided by kwargs. The setters validate the types,
        # so they are used instead of assigning the attributes directly.
        if "physical_element_categories" in kwargs:
            self.set_physical_elements(kwargs["physical_element_categories"])
        if "actor_categories" in kwargs:
            self.set_actors(kwargs["actor_categories"])
        if "activity_categories" in kwargs:
            self.set_activities(kwargs["activity_categories"])
        if "acts" in kwargs:
            self.set_acts(kwargs["acts"])
        # Some parameters
        # Maximum number of characters that are used when printing the general description
        self.maxprintlength = 80
    def set_physical_elements(self, physical_elements: List[PhysicalElementCategory]) -> None:
        """ Set the physical elements
        Check whether the physical things are correctly defined.
        :param physical_elements: List of physical thing categories that define
            the static environment and part of the dynamic environment
            qualitatively.
        """
        # Check whether the static physical things are correctly defined.
        check_for_list("physical_elements", physical_elements, PhysicalElementCategory,
                       can_be_none=False)
        # Assign static physical thing categories to an attribute.
        self.physical_elements = physical_elements
    def set_activities(self, activity_categories: List[ActivityCategory]) -> None:
        """ Set the activities
        Check whether the activities are correctly defined. Activities should be
        a list with instantiations of ActivityCategory.
        :param activity_categories: List of activities that are used for this
            ScenarioCategory.
        """
        # Check whether the activities are correctly defined.
        check_for_list("activities", activity_categories, ActivityCategory, can_be_none=False)
        # Assign activity categories to an attribute.
        self.activities = activity_categories  # Type: List[ActivityCategory]
    def set_actors(self, actor_categories: List[ActorCategory]) -> None:
        """ Set the actors
        Check whether the actors are correctly defined. Actors should be a list
        with instantiations of ActorCategory.
        :param actor_categories: List of actors that participate in the
            Scenario.
        """
        # Check whether the actors are correctly defined.
        check_for_list("actors", actor_categories, ActorCategory, can_be_none=False)
        # Assign actor categories to an attribute.
        self.actors = actor_categories  # Type: List[ActorCategory]
    def set_acts(self, acts_scenario_category: List[Tuple[ActorCategory, ActivityCategory]],
                 verbose: bool = True) -> None:
        """ Set the acts
        Check whether the acts are correctly defined. Each act should be a tuple
        with an actor category and an activity category, i.e.,
        (ActorCategory, ActivityCategory). Acts is a list
        containing multiple tuples (ActorCategory, ActivityCategory).
        :param acts_scenario_category: The acts describe which actors perform
            which activities. The actors and activities that are used in acts
            should also be passed with the actors and activities arguments. If
            not, a warning will be shown and the corresponding actor/activity
            will be added to the list of actors/activities.
        :param verbose: Set to False if warning should be suppressed.
        """
        check_for_list("acts", acts_scenario_category, tuple)
        for act in acts_scenario_category:
            check_for_tuple("act", act, (ActorCategory, ActivityCategory))
        # Set the acts.
        self.acts = acts_scenario_category
        # Note: _check_acts may append actors/activities that appear in the
        # acts but are missing from self.actors/self.activities.
        _check_acts(self.acts, self.actors, self.activities, verbose=verbose)
    def derived_tags(self) -> dict:
        """ Return all tags, including the tags of the attributes.
        The ScenarioCategory has tags, but also its attributes can have tags.
        More specifically, each PhysicalElementCategory, ActorCategory, and
        ActivityCategory might have tags. A dictionary will be returned. Each
        item of the dictionary contains a list of tags corresponding to either
        the own object (i.e., ScenarioCategory), a PhysicalElementCategory, or
        an ActorCategory.
        The tags that might be associated with the ActivityCategory are returned
        with the ActorCategory if the corresponding ActorCategory is performing
        that ActivityCategory according to the defined acts.
        :return: Dictionary of tags, keyed by "<name>::<class name>".
        """
        # Instantiate the dictionary.
        tags = {}
        # Provide the tags of the very own object (ScenarioCategory).
        if self.tags:
            tags["{:s}::ScenarioCategory".format(self.name)] = self.tags
        # Provide the tags for each ActorCategory.
        tags = derive_actor_tags(self.actors, self.acts, tags=tags)
        # Provide the tags for each PhysicalElementCategory.
        for physical_element in self.physical_elements:
            if physical_element.tags:
                tags["{:s}::PhysicalElementCategory".format(physical_element.name)] = \
                    physical_element.tags
        # Return the tags.
        return tags
    def includes(self, scenario_category: ScenarioCategory) -> bool:
        """ Check if this scenario category includes the given scenario category
        It is checked whether the passed ScenarioCategory is included in this
        scenario category. To determine whether this is the case, the derived
        tags are used. The derived tags from this scenario category should be at
        least present (or subtags of the tags) in the provided ScenarioCategory.
        :param scenario_category: The potential ScenarioCategory that is
            included in this scenario category.
        :return: Whether or not the ScenarioCategory is included.
        """
        # Determine the derived tags of this and the other scenario category.
        own_tags = self.derived_tags()
        other_tags = scenario_category.derived_tags()
        # Check for tags directly related to the ScenarioCategory. These tags should be directly
        # present for the scenario.
        if not _check_tags(own_tags, other_tags, "ScenarioCategory", "ScenarioCategory"):
            return False
        # Check for the actors, dynamic physical things, and static physical things.
        if not _check_multiple_tags(own_tags, other_tags, "ActorCategory") or \
                not _check_multiple_tags(own_tags, other_tags, "PhysicalElementCategory"):
            return False
        return True
    def comprises(self, scenario) -> bool:
        """ Check if this scenario category comprises the given scenario
        It is checked whether the passed Scenario is comprised in this scenario
        category. To determine whether this is the case, the derived tags are
        used. The derived tags from this scenario category should be at
        least present (or subtags of the tags) in the provided Scenario.
        :param scenario: The potential ScenarioCategory that is
            included in this scenario category.
        :return: Whether or not the ScenarioCategory is included.
        """
        # Determine the derived tags of this and the other scenario category.
        own_tags = self.derived_tags()
        other_tags = scenario.derived_tags()
        # Check for tags directly related to the ScenarioCategory. These tags should be directly
        # present for the scenario.
        if not _check_tags(own_tags, other_tags, "ScenarioCategory", "Scenario"):
            return False
        # Check for the actors, dynamic physical things, and static physical things.
        if not _check_multiple_tags(own_tags, other_tags, "ActorCategory", "Actor") or \
                not _check_multiple_tags(own_tags, other_tags, "PhysicalElementCategory",
                                         "PhysicalElement"):
            return False
        return True
    def __str__(self) -> str:
        """ Method that will be called when printing the scenario category.
        :return: string to print.
        """
        # Show the name
        string = "Name: {:s}\n".format(self.name)
        # Show the description of the scenario class
        string += "Description:\n"
        words = self.description.split(' ')
        line = ""
        for word in words:
            # Simple greedy word wrap at maxprintlength characters. Note that
            # each word is appended with a leading space.
            if len(line) + len(word) <= self.maxprintlength:
                line += " {:s}".format(word)
            else:
                string += "{:s}\n".format(line)
                line = " {:s}".format(word)
        if line:
            string += "{:s}\n".format(line)
        # Show the tags
        string += _print_tags(self.derived_tags())
        return string
    def to_json(self) -> dict:
        """ Return JSON code of this object; attributes stored by reference.
        The attributes (physical elements, actors, activities) are stored with
        only their name and uid. Use to_json_full() for full definitions.
        :return: Dictionary that can be converted to a json file.
        """
        scenario_category = QualitativeElement.to_json(self)
        scenario_category["image"] = self.image
        scenario_category["physical_element_categories"] = \
            [dict(name=element.name, uid=element.uid) for element in self.physical_elements]
        scenario_category["actor_categories"] = [dict(name=actor.name, uid=actor.uid)
                                                 for actor in self.actors]
        scenario_category["activity_categories"] = [dict(name=activity.name, uid=activity.uid)
                                                    for activity in self.activities]
        scenario_category["acts"] = []
        for actor, activity in self.acts:
            scenario_category["acts"].append({"actor": actor.uid,
                                              "activity": activity.uid})
        # Serialize the derived tags in place (replacing values during
        # iteration is safe because no keys are added or removed).
        scenario_category["derived_tags"] = self.derived_tags()
        for key, tags in scenario_category["derived_tags"].items():
            scenario_category["derived_tags"][key] = [tag.to_json() for tag in tags]
        return scenario_category
    def to_json_full(self) -> dict:
        """ Return JSON code of this object with fully defined attributes.
        :return: Dictionary that can be converted to a json file.
        """
        scenario_category = self.to_json()
        scenario_category["physical_element_categories"] = [element.to_json_full() for element in
                                                            self.physical_elements]
        scenario_category["actor_categories"] = [actor.to_json_full() for actor in
                                                 self.actors]
        scenario_category["activity_categories"] = [activity.to_json_full() for activity in
                                                    self.activities]
        return scenario_category
def _check_acts(acts: Union[List[Tuple[ActorCategory, ActivityCategory]],
List[Tuple[Actor, Activity]]],
actors: Union[List[ActorCategory], List[Actor]],
activities: Union[List[ActivityCategory], List[Activity]],
verbose: bool = True):
# Check whether the actors/activities defined with the acts are already listed. If not,
# the corresponding actor/activity will be added and a warning will be shown.
for thing, activity in acts:
if thing not in actors:
if verbose:
print("Actor with name '{:s}' ".format(thing.name) +
"is used with acts but not defined in the list of actors.")
print("Therefore, the actor is added to the list of actors.")
actors.append(thing)
if activity not in activities:
if verbose:
print("Activity with name '{:s}' is used with acts but".format(activity.name) +
" not defined in the list of activities.")
print("Therefore, the activity is added to the list of activities.")
activities.append(activity)
def _scenario_category_props_from_json(json: dict, attribute_objects: DMObjects, **kwargs) -> dict:
    """ Collect the keyword arguments for instantiating a ScenarioCategory.
    :param json: JSON code of the scenario category.
    :param attribute_objects: A structure for storing all objects.
    :return: Dictionary with the constructor arguments.
    """
    props = _qualitative_element_props_from_json(json)
    props["image"] = json["image"]
    # Parsers for the attributes that are themselves scenario elements.
    attribute_parsers = dict(
        physical_element_categories=(_physical_element_category_from_json,
                                     "physical_element_category"),
        actor_categories=(_actor_category_from_json, "actor_category"),
        activity_categories=(_activity_category_from_json, "activity_category"))
    props.update(_attributes_from_json(json, attribute_objects, attribute_parsers, **kwargs))
    # The acts refer to the just-parsed actor and activity categories.
    props["acts"] = _get_acts(json, props["actor_categories"], props["activity_categories"])
    return props
def _scenario_category_from_json(json: dict, attribute_objects: DMObjects, **kwargs) \
        -> ScenarioCategory:
    """ Instantiate a ScenarioCategory from fully defined JSON code. """
    props = _scenario_category_props_from_json(json, attribute_objects, **kwargs)
    return ScenarioCategory(**props)
def scenario_category_from_json(json: dict, attribute_objects: DMObjects = None, **kwargs) -> \
        ScenarioCategory:
    """ Get ScenarioCategory object from JSON code.
    It is assumed that all the attributes are fully defined. Hence, all
    PhysicalElementCategory, ActorCategory, and ActivityCategory objects need
    to be defined, instead of only a reference to their IDs.
    Alternatively, the physical element categories, actor categories, and
    activity categories can be passed as arguments.
    Further optional arguments (provided by kwargs) are:
    - physical_element_categories (List[PhysicalElementCategory]): The physical
      element categories describing the (static and dynamic) environment.
    - actor_categories (List[ActorCategory]): The actor categories.
    - activity_categories (List[ActivityCategory]): The activity categories.
    For all these arguments: If given, it will not be based on the JSON code.
    :param json: JSON code of ScenarioCategory.
    :param attribute_objects: A structure for storing all objects (optional).
    :return: ScenarioCategory object.
    """
    return _object_from_json(json, _scenario_category_from_json, "scenario_category",
                             attribute_objects, **kwargs)
def derive_actor_tags(actors: List, acts: List, tags: dict = None) -> dict:
    """ Derive the tags that are associated with the actors.

    The tags of an Actor(Category) are added to the dictionary "tags". The key
    equals <name of actor>::<class>, where class is either Actor or
    ActorCategory; the value is the list of tags associated with the actor.

    :param actors: The actors of the Scenario(Category).
    :param acts: The acts of the Scenario(Category).
    :param tags: Initial tags that will be amended with tags of the actors.
    :return: Dictionary mapping each actor key to its tags.
    """
    if tags is None:
        tags = {}
    for actor in actors:
        # Collect the actor's own tags plus the tags of every activity it performs.
        collected_tags = actor.get_tags()
        collected_tags += [tag for performer, activity in acts if performer == actor
                           for tag in activity.get_tags()]
        if not collected_tags:
            continue
        if isinstance(actor, ActorCategory):
            class_name = "ActorCategory"
        elif isinstance(actor, Actor):
            class_name = "Actor"
        else:
            raise TypeError("Actor is of type '{}' while it should be ".format(type(actor)) +
                            "of type ActorCategory, or Actor.")
        key = "{:s}::{:s}".format(actor.name, class_name)
        # Disambiguate the key by appending a counter when the name repeats.
        counter = 1
        while key in tags:
            counter += 1
            key = "{:s}{:d}::{:s}".format(actor.name, counter, class_name)
        tags[key] = list(set(collected_tags))  # Deduplicate the tags.
    return tags
def _check_tags(tags: dict, subtags: dict, tags_class: str = "ScenarioCategory",
subtags_class: str = "ScenarioCategory") -> bool:
""" Check whether (sub)tags of <tags> are present in <subtags>.
The tags are provided as dictionaries, where each item corresponds to the
tags of one object that is part of the scenario (category). This function
checks whether all tags in <tags> of the class <tags_class> are present in
the tags in <subtags> of the class <subtags_class> (or subtags of the
<tags>).
:param tags: Dictionary of the derived tags of the ScenarioCategory.
:param subtags: Dictionary of the derived tags of the Scenario.
:param tags_class: Specify attribute to be used of the ScenarioCategory.
:param subtags_class: Specify attribute to be used of the Scenario.
:return: Whether the tags of the ScenarioCategory are found in the tags
of the Scenario.
"""
sc_keys = fnmatch.filter(tags, "*::{:s}".format(tags_class))
if sc_keys: # In this case, there are tags in <tags> related to <tags_class>.
s_keys = fnmatch.filter(subtags, "*::{:s}".format(subtags_class))
if s_keys: # There are tags in <subtags> related to <subtags_class>.
for tag in tags[sc_keys[0]]:
if not any(map(tag.is_supertag_of, subtags[s_keys[0]])):
return False # A tag of <tags> is not found in the <subtags>.
else: # There are no tags in <subtags> related to <subtags_class>.
return False
return True
def _check_multiple_tags(own_tags: dict, other_tags: dict, tags_class: str,
                         subtags_class: str = None) -> bool:
    """ Check if all tags in `own_tags` are present in `other_tags`.

    This is done for a specific attribute (e.g., actor_categories). For each
    object of the given class in <own_tags>, there needs to be a (different)
    object in <other_tags> that has the same tags (or corresponding subtags).

    :param own_tags: The tags of the own scenario category.
    :param other_tags: The tags of the scenario category that is potentially
        'included' in the former.
    :param tags_class: Check for which attribute we need to check.
    :param subtags_class: If specified, this class is used for the `other_tags`.
    :return: True if all tags in `own_tags` are present in `other_tags`.
    """
    if subtags_class is None:
        subtags_class = tags_class
    own_objects = fnmatch.filter(own_tags, "*::{:s}".format(tags_class))
    other_objects = fnmatch.filter(other_tags, "*::{:s}".format(subtags_class))
    if len(own_objects) > len(other_objects):  # There must be equal or more objects in other SC.
        return False
    # Create a boolean matrix, where the (i,j)-th element is True if the i-th object of the
    # other objects might correspond to the j-th object of our own objects.
    # Bug fix: the `np.bool` alias was deprecated in NumPy 1.20 and removed in
    # NumPy 1.24; use the builtin `bool` instead.
    match = np.zeros((len(other_objects), len(own_objects)), dtype=bool)
    for i, other_object in enumerate(other_objects):
        for j, own_object in enumerate(own_objects):
            match[i, j] = all(any(map(tag.is_supertag_of, other_tags[other_object]))
                              for tag in own_tags[own_object])
    # Check if all of our own objects can be matched with the objects in the other scenario.
    return _check_match_matrix(match)
def _check_match_matrix(match: np.array) -> bool:
    """ Resolve the boolean match matrix into a yes/no answer.

    match[i, j] is True when the i-th object of the other scenario category may
    correspond to the j-th object of our own scenario category. The matrix is
    greedily reduced: whenever a row/column with a minimal number of candidate
    matches is found, the pair is fixed and both row and column are removed.

    :param match: 2D boolean array (other objects x own objects).
    :return: Whether all objects can be paired off this way.
    """
    # The matching of the actors need to be done. If a match is found, the corresponding
    # row and column will be removed from the match matrix.
    n_matches = 1  # Number of matches to look for.
    while match.size:
        # If there is at least one ActorCategory left that has no match, a False will be
        # returned.
        if not all(np.any(match, axis=1)):
            return False
        # First look for a column (own object) with exactly n_matches candidates.
        sum_match_actor = np.sum(match, axis=0)
        j = next((j for j in range(match.shape[1]) if sum_match_actor[j] == n_matches), -1)
        if j >= 0:  # We found an actor of our own scenario category with n matches.
            i = next(i for i in range(match.shape[0]) if match[i, j])
        else:
            # Otherwise look for a row (other object) with exactly n_matches candidates.
            sum_match_actor = np.sum(match, axis=1)
            i = next((i for i in range(match.shape[0]) if sum_match_actor[i] == n_matches), -1)
            if i >= 0:  # We found an actor of the ScenarioCategory with n matches
                j = next(j for j in range(match.shape[1]) if match[i, j])
            else:
                # Try again for higher n (number of matches)
                n_matches = n_matches + 1
                continue
        # Fix the (i, j) pairing and drop both from the matrix.
        match = np.delete(np.delete(match, i, axis=0), j, axis=1)
        n_matches = 1
    return True
def _print_tags(derived_tags: dict) -> str:
string = "Tags:\n"
for i, (key, tags) in enumerate(derived_tags.items(), start=1):
string += u"{}\u2500 {:s}\n".format(u"\u2514" if i == len(derived_tags) else u"\u251C", key)
for j, tag in enumerate(tags, start=1):
string += "{} {}\u2500 {:s}\n".format(" " if i == len(derived_tags) else u"\u2502",
"\u2514" if j == len(tags) else "\u251C",
tag)
return string
def _get_acts(json, actors, activities):
actor_uids = [actor.uid for actor in actors]
activity_uids = [activity.uid for activity in activities]
acts = []
for act in json["acts"]:
acts.append((actors[actor_uids.index(act["actor"])],
activities[activity_uids.index(act["activity"])]))
return acts
| [
"numpy.sum",
"numpy.any",
"numpy.delete"
] | [((24177, 24198), 'numpy.sum', 'np.sum', (['match'], {'axis': '(0)'}), '(match, axis=0)\n', (24183, 24198), True, 'import numpy as np\n'), ((24490, 24511), 'numpy.sum', 'np.sum', (['match'], {'axis': '(1)'}), '(match, axis=1)\n', (24496, 24511), True, 'import numpy as np\n'), ((24937, 24964), 'numpy.delete', 'np.delete', (['match', 'i'], {'axis': '(0)'}), '(match, i, axis=0)\n', (24946, 24964), True, 'import numpy as np\n'), ((24101, 24122), 'numpy.any', 'np.any', (['match'], {'axis': '(1)'}), '(match, axis=1)\n', (24107, 24122), True, 'import numpy as np\n')] |
# Expensive test - not run by nose.
from mhcflurry import train_pan_allele_models_command
from mhcflurry.downloads import get_path
from mhcflurry.allele_encoding import AlleleEncoding
import pandas
import numpy
# Random-peptide predictions used as pre-training data.
PRETRAIN_DATA_PATH = get_path(
    "random_peptide_predictions", "predictions.csv.bz2")
# Curated quantitative training data (no mass-spec hits).
FULL_TRAIN_DF = pandas.read_csv(
    get_path(
        "data_curated",
        "curated_training_data.no_mass_spec.csv.bz2"))
# Keep only peptides of length 8-15.
TRAIN_DF = FULL_TRAIN_DF.loc[
    (FULL_TRAIN_DF.peptide.str.len() >= 8) &
    (FULL_TRAIN_DF.peptide.str.len() <= 15)
]
# Per-allele sequences, indexed by allele name; restrict alleles and training
# data to the intersection of the two tables.
ALLELE_SEQUENCES = pandas.read_csv(
    get_path("allele_sequences", "allele_sequences.csv"),
    index_col=0).sequence
ALLELE_SEQUENCES = ALLELE_SEQUENCES.loc[
    ALLELE_SEQUENCES.index.isin(TRAIN_DF.allele)
]
TRAIN_DF = TRAIN_DF.loc[
    TRAIN_DF.allele.isin(ALLELE_SEQUENCES.index)
]
# Single cross-validation fold containing all of the training data.
FOLDS_DF = pandas.DataFrame(index=TRAIN_DF.index)
FOLDS_DF["fold_0"] = True
# NOTE(review): the name has a typo ("HYPERPARAMTERS"); kept as-is because it
# is referenced below. With 'max_epochs': 0, presumably only the pre-training
# phase ('train_data' settings) performs substantial fitting — confirm.
HYPERPARAMTERS = {
    'activation': 'tanh', 'allele_dense_layer_sizes': [],
    'batch_normalization': False,
    'dense_layer_l1_regularization': 0.0,
    'dense_layer_l2_regularization': 0.0, 'dropout_probability': 0.5,
    'early_stopping': True, 'init': 'glorot_uniform',
    'layer_sizes': [1024, 512], 'learning_rate': None,
    'locally_connected_layers': [], 'loss': 'custom:mse_with_inequalities',
    'max_epochs': 0, 'min_delta': 0.0, 'minibatch_size': 128,
    'optimizer': 'rmsprop', 'output_activation': 'sigmoid', 'patience': 20,
    'peptide_allele_merge_activation': '',
    'peptide_allele_merge_method': 'concatenate',
    'peptide_amino_acid_encoding': 'BLOSUM62', 'peptide_dense_layer_sizes': [],
    'peptide_encoding': {'alignment_method': 'left_pad_centered_right_pad',
                         'max_length': 15, 'vector_encoding_name': 'BLOSUM62'},
    'random_negative_affinity_max': 50000.0,
    'random_negative_affinity_min': 20000.0, 'random_negative_constant': 25,
    'random_negative_distribution_smoothing': 0.0,
    'random_negative_match_distribution': True, 'random_negative_rate': 0.2,
    'train_data': {'pretrain': True,
                   'pretrain_max_epochs': 30,
                   'pretrain_min_epochs': 5,
                   'pretrain_patience': 3,
                   'pretrain_peptides_per_step': 8,
                   'pretrain_steps_per_epoch': 256},
    'validation_split': 0.1,
    'data_dependent_initialization_method': "lsuv",
}
def verify_optimizable():
    """Train a single pan-allele model and check the pre-training converges.

    Runs ``train_model`` on one work item / one architecture / one fold /
    one replicate, and asserts that the final pre-training validation loss
    is below 0.1.
    """
    predictor = train_pan_allele_models_command.train_model(
        work_item_name="work-item0",
        work_item_num=0,
        num_work_items=1,
        architecture_num=0,
        num_architectures=1,
        fold_num=0,
        num_folds=1,
        replicate_num=0,
        num_replicates=1,
        hyperparameters=HYPERPARAMTERS,
        pretrain_data_filename=PRETRAIN_DATA_PATH,
        verbose=1,
        progress_print_interval=5.0,
        predictor=None,
        save_to=None,
        constant_data={
            'train_data': TRAIN_DF,
            'folds_df': FOLDS_DF,
            'allele_encoding': AlleleEncoding(
                alleles=ALLELE_SEQUENCES.index.values,
                allele_to_sequence=ALLELE_SEQUENCES.to_dict()),
        },
    )
    # Exactly one network is expected to have been trained.
    (network,) = predictor.neural_networks
    print(predictor, network)
    print(network.fit_info)
    # fit_info[0] holds the first (pre-training) fit history; take the last
    # recorded validation loss.
    pretrain_val_loss = network.fit_info[0]["val_loss"][-1]
    print(pretrain_val_loss)
    numpy.testing.assert_array_less(pretrain_val_loss, 0.1)
if __name__ == "__main__":
    verify_optimizable()
| [
"pandas.DataFrame",
"mhcflurry.downloads.get_path",
"numpy.testing.assert_array_less"
] | [((235, 296), 'mhcflurry.downloads.get_path', 'get_path', (['"""random_peptide_predictions"""', '"""predictions.csv.bz2"""'], {}), "('random_peptide_predictions', 'predictions.csv.bz2')\n", (243, 296), False, 'from mhcflurry.downloads import get_path\n'), ((861, 899), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'index': 'TRAIN_DF.index'}), '(index=TRAIN_DF.index)\n', (877, 899), False, 'import pandas\n'), ((344, 414), 'mhcflurry.downloads.get_path', 'get_path', (['"""data_curated"""', '"""curated_training_data.no_mass_spec.csv.bz2"""'], {}), "('data_curated', 'curated_training_data.no_mass_spec.csv.bz2')\n", (352, 414), False, 'from mhcflurry.downloads import get_path\n'), ((3401, 3456), 'numpy.testing.assert_array_less', 'numpy.testing.assert_array_less', (['pretrain_val_loss', '(0.1)'], {}), '(pretrain_val_loss, 0.1)\n', (3432, 3456), False, 'import numpy\n'), ((602, 654), 'mhcflurry.downloads.get_path', 'get_path', (['"""allele_sequences"""', '"""allele_sequences.csv"""'], {}), "('allele_sequences', 'allele_sequences.csv')\n", (610, 654), False, 'from mhcflurry.downloads import get_path\n')] |
import cmath
import numpy as np
import scipy.integrate
class Heston():
    """ European option pricing under the Heston stochastic-volatility model.

    Prices are computed semi-analytically from the characteristic functions of
    the logarithmic terminal stock price, using Albrecher's "little trap"
    formulation for numerical stability, and Fourier/Gil-Pelaez inversion
    evaluated with scipy.integrate.quad.
    """
    def __init__(self, r, v0, kappa, theta, eta, rho, sigma):
        """
        Parameters
        ----------
        r : float
            Risk-free rate
        v0 : float
            Initial volatility level
        kappa : float
            Volatility mean-reversion rate
        theta : float
            Long-term volatility level
        eta : float
            Volatility risk parameter
        rho : float
            Correlation coefficient of Brownian motions
        sigma : float
            Vol-of-vol
        """
        self.r = r
        self.v0 = v0
        self.kappa = kappa
        self.theta = theta
        self.eta = eta
        self.rho = rho
        self.sigma = sigma
        # (b_k, u_k) coefficient pairs used by the two characteristic
        # functions (k = 0, 1); see makeCharacteristicFunctions.
        self.b = [kappa + eta - rho * sigma, kappa + eta]
        self.u = [0.5, -0.5]
    def makeCharacteristicFunctions(self, S, tau, q = 0.):
        """
        Creates the characteristic functions of the logarithmic terminal stock price
        under the two measures appearing in the option prices.
        Parameters
        ----------
        tau : float
            Time to maturity
        S : float
            Stock spot price
        q : float, optional
            Continuous stock dividend rate (per year)
        Returns
        -------
        list
            Two characteristic functions
        """
        def _makeKthCharacteristicFunction(k):
            """
            Constructs the characteristic function under the kth measure (k = 0, 1).
            Parameters
            ----------
            k : int
                Characteristic function index
            Returns
            -------
            callable
                Characteristic function of the kth measure.
            """
            def _psi(phi):
                """
                By Albrecher's little trap formulation which produces a slightly
                more stable integrand for numerical integration when compared to
                Heston's original formulation
                """
                q1 = self.b[k] - self.rho * self.sigma * phi * 1j
                q2 = 2 * self.u[k] * phi * 1j - phi**2
                d = cmath.sqrt(q1 * q1 - self.sigma**2 * q2)
                # "Little trap": use (q1 - d) / (q1 + d) rather than its inverse.
                c = (q1 - d) / (q1 + d)
                c1 = (self.r - q) * phi * tau * 1j
                c2 = self.kappa * self.theta / (self.sigma ** 2)
                c3 = (q1 - d) * tau
                c4 = cmath.log((1. - c * cmath.exp(-d * tau))/(1. - c))
                C = c1 + c2 * (c3 - 2 * c4)
                d1 = 1./ self.sigma**2
                d2 = q1 - d
                d3 = (1. - cmath.exp(-d * tau)) / (1 - c * cmath.exp(-d * tau))
                D = d1 * d2 * d3
                # psi(phi) = exp(C + D * v0 + i * phi * ln(S))
                return cmath.exp(C + D * self.v0 + phi * np.log(S) * 1j)
            return _psi
        return [_makeKthCharacteristicFunction(k) for k in range(2)]
    def getDensity(self, S, tau, xT, lower, upper):
        """
        Computes the value of the pdf of the log terminal stock price equalling xT.
        The pdf is obtained by a Fourier transform of the characteristic function.
        Since the imaginary part must vanish, we focus only on the real part.
        Note that the characteristic function, and hence pdf, changes depending on
        the spot price S and time to maturity tau and so we need to additionally
        specify the paramters of the pdf that is being used.
        Parameters
        ----------
        S : float
            Spot price
        tau : float
            Time to maturity (years)
        xT : float
            Log-terminal stock price
        lower : float
            Lower limit of Fourier integral
        upper : float
            Upper limit of Fourier integral
        Returns
        -------
        float
            Value of the pdf of the terminal stock price, given spot price S,
            evaluated at xT
        """
        def _makeFourierIntegrand(xT):
            """
            Helper function to return the integrand of the Fourier transform
            Returns
            -------
            callable
                Integrand of the Fourier transform of the characteristic function
            """
            def _fourierIntegrand(phi):
                # Uses the second characteristic function (k = 1).
                _, characteristic_function = self.makeCharacteristicFunctions(S, tau)
                return (np.exp( -phi * xT * 1j) * characteristic_function(phi)).real
            return _fourierIntegrand
        # f is a callable representing the integrand of the Fourier transform.
        f = _makeFourierIntegrand(xT)
        return 1. / np.pi * scipy.integrate.quad(f, lower, upper)[0]
    def getItmProbabilities(self, K, S, tau, q, lower, upper):
        """
        Computes the probabilities of the terminal price exceeding the strike
        under the two measures. Note that the characteristic function and CDF
        are related by the Gil-Pelaez theorem.
        Parameters
        ----------
        K : float
            Strike price
        S : float
            Stock spot price
        tau : float
            Time to maturity (years)
        q : float
            Continuous stock dividend rate (per year)
        lower : float
            Lower limit of integration
        upper : float
            Upper limit of integration
        Returns
        -------
        list
            List of the two probabilties used in pricing European options.
        """
        def makeGilPelaezIntegrands():
            """
            Helper function to construct the integrands appearing in the
            Gil-Pelaez theorem.
            Returns
            -------
            list
                List of two functions, as a function of phi, representing
                the two integrands in the Gil-Pelaez theorem.
            """
            psi = self.makeCharacteristicFunctions(S, tau, q)
            i_log_K = np.log(K) * 1j
            def _makeKthGilPelaezIntegrand(k):
                """
                Constructs the Gil-Pelaez integrand for the kth characteristic function (k = 0, 1)
                Returns
                -------
                callable
                    The Gil-Pelaez integrand as a function of phi.
                """
                def _gilPelaezIntegrand(phi):
                    return ((cmath.exp(- phi * i_log_K) * psi[k](phi))/ (phi * 1j)).real
                return _gilPelaezIntegrand
            return [_makeKthGilPelaezIntegrand(k) for k in range(2)]
        # `integrands` is a list of the callables representing the integrands of
        # the Gil-Pelaez theorem.
        integrands = makeGilPelaezIntegrands()
        integrals = [scipy.integrate.quad(integrands[k], lower, upper)[0] for k in range(2)]
        # Gil-Pelaez: P_k = 1/2 + (1/pi) * integral_k.
        return [0.5 + 1/np.pi * I for I in integrals]
    def getCallPrice(self, K, S, tau, q = 0., lower = 1e-8, upper = 1e2):
        """
        Computes the price of a European call option under the Heston model.
        Parameters
        ----------
        K : float
            Strike price
        S : float
            Stock spot price
        tau : float
            Time to maturity (years)
        q : float, optional
            Continuous stock dividend rate (per year)
        lower : float, optional
            Lower limit of integration
        upper : float, optional
            Upper limit of integration
        Returns
        -------
        float
            Call price
        """
        P1, P2 = self.getItmProbabilities(K, S, tau, q, lower, upper)
        # C = S * exp(-q*tau) * P1 - K * exp(-r*tau) * P2
        return S * np.exp(- q * tau) * P1 - K * np.exp(-self.r * tau) * P2
    def getPutPrice(self, K, S, tau, q = 0., lower = 1e-8, upper = 1e2):
        """
        Computes the price of a European put option under the Heston model.
        Uses put-call parity.
        Parameters
        ----------
        K : float
            Strike
        S : float
            Stock spot price
        tau : float
            Time to maturity (years)
        q : float, optional
            Continuous stock dividend rate (per year)
        lower : float, optional
            Lower limit of integration
        upper : float, optional
            Upper limit of integration
        Returns
        -------
        float
            Put price
        """
        # Put-call parity: P = C + K * exp(-r*tau) - S * exp(-q*tau)
        return self.getCallPrice(K, S, tau, q, lower, upper) + K * np.exp(-self.r * tau) - S * np.exp(-q * tau)
| [
"cmath.sqrt",
"numpy.exp",
"numpy.log",
"cmath.exp"
] | [((2261, 2303), 'cmath.sqrt', 'cmath.sqrt', (['(q1 * q1 - self.sigma ** 2 * q2)'], {}), '(q1 * q1 - self.sigma ** 2 * q2)\n', (2271, 2303), False, 'import cmath\n'), ((6141, 6150), 'numpy.log', 'np.log', (['K'], {}), '(K)\n', (6147, 6150), True, 'import numpy as np\n'), ((8386, 8402), 'numpy.exp', 'np.exp', (['(-q * tau)'], {}), '(-q * tau)\n', (8392, 8402), True, 'import numpy as np\n'), ((7686, 7702), 'numpy.exp', 'np.exp', (['(-q * tau)'], {}), '(-q * tau)\n', (7692, 7702), True, 'import numpy as np\n'), ((7715, 7736), 'numpy.exp', 'np.exp', (['(-self.r * tau)'], {}), '(-self.r * tau)\n', (7721, 7736), True, 'import numpy as np\n'), ((8358, 8379), 'numpy.exp', 'np.exp', (['(-self.r * tau)'], {}), '(-self.r * tau)\n', (8364, 8379), True, 'import numpy as np\n'), ((2722, 2741), 'cmath.exp', 'cmath.exp', (['(-d * tau)'], {}), '(-d * tau)\n', (2731, 2741), False, 'import cmath\n'), ((4541, 4565), 'numpy.exp', 'np.exp', (['(-phi * xT * 1.0j)'], {}), '(-phi * xT * 1.0j)\n', (4547, 4565), True, 'import numpy as np\n'), ((2754, 2773), 'cmath.exp', 'cmath.exp', (['(-d * tau)'], {}), '(-d * tau)\n', (2763, 2773), False, 'import cmath\n'), ((2552, 2571), 'cmath.exp', 'cmath.exp', (['(-d * tau)'], {}), '(-d * tau)\n', (2561, 2571), False, 'import cmath\n'), ((2882, 2891), 'numpy.log', 'np.log', (['S'], {}), '(S)\n', (2888, 2891), True, 'import numpy as np\n'), ((6583, 6608), 'cmath.exp', 'cmath.exp', (['(-phi * i_log_K)'], {}), '(-phi * i_log_K)\n', (6592, 6608), False, 'import cmath\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from edibles import DATADIR
from edibles import PYTHONDIR
from edibles.utils.edibles_spectrum import EdiblesSpectrum
class EdiblesOracle:
"""
This class will process the EDIBLES obs log and target info files.
Users can then query the oracle for observations matching specific criteria.
"""
def __init__(self):
print(DATADIR)
folder = Path(PYTHONDIR+"/data")
filename=folder /"DR4_ObsLog.csv"
self.obslog = pd.read_csv(filename)
filename=folder /"sightline_data"/"Formatted_EBV.csv"
self.ebvlog = pd.read_csv(filename)
filename=folder /"sightline_data"/"Formatted_SpType.csv"
self.sptypelog = pd.read_csv(filename)
filename = folder /"sightline_data"/"Formatted_LogN(HI).csv"
self.nhilog = pd.read_csv(filename)
filename = folder /"sightline_data"/"Formatted_LogN(H2).csv"
self.nhiilog = pd.read_csv(filename)
filename = folder /"sightline_data"/"Formatted_f(H2).csv"
self.fh2log = pd.read_csv(filename)
filename = folder /"sightline_data"/"Formatted_RV.csv"
self.rvlog = pd.read_csv(filename)
filename = folder /"sightline_data"/"Formatted_AV.csv"
self.avlog = pd.read_csv(filename)
filename = folder /"sightline_data"/"ObservedObjects.csv"
self.object_log = pd.read_csv(filename,names=["object"],header=0)
#print(self.sptypelog.dtypes)
# total_rows = len(self.ebvlog.index)
# print(total_rows)
def _getObsListFilteredByObsLogParameters(self, object=None, Wave=None, WaveMin=None, WaveMax=None, MergedOnly=False, OrdersOnly=False):
'''Filter all the observations in the ObsLog by the parameters
contained in the obslog, i.e. by object (if specified), wavelength
range or merged versus specific orders. '''
# We will use Boolean matches for all filter criteria.
#print('Inside the function: object is', object)
bool_object_matches = np.zeros(len(self.obslog.index),dtype=bool)
#print(object.dtype)
if object is None:
bool_object_matches = np.ones(len(self.ebvlog.index),dtype=bool)
elif (isinstance(object, np.ndarray) | isinstance(object, list)):
for thisobject in object:
#print("Object in loop:", thisobject)
#print(self.obslog.Object == thisobject)
bool_object_matches = (self.obslog.Object == thisobject) | (bool_object_matches)
#print(bool_object_matches.sum())
else:
bool_object_matches = self.ebvlog.object == object
#print('Inside the function: number of matches is ', bool_object_matches.sum())
# Do we have to filter out merged or single-order spectra? Note that if both
# MergedOnly and OrdersOnly are True, only the Merged spectra will be returned.
if MergedOnly and OrdersOnly:
print("EDIBLES Oracle WARNING: ONLY RETURNING MERGED SPECTRA")
bool_order_matches = self.obslog.Order != "Z"
if OrdersOnly is True:
bool_order_matches = self.obslog.Order != "ALL"
if MergedOnly is True:
bool_order_matches = self.obslog.Order == "ALL"
#print(bool_order_matches)
bool_wave_matches = np.ones(len(self.obslog.index),dtype=bool)
if Wave:
bool_wave_matches = (self.obslog.WaveMin < Wave) & (self.obslog.WaveMax > Wave)
if WaveMin:
bool_wave_matches = (self.obslog.WaveMax > WaveMin) & (bool_wave_matches)
if WaveMax:
bool_wave_matches = (self.obslog.WaveMin < WaveMax) & (bool_wave_matches)
ind = np.where(bool_object_matches & bool_order_matches & bool_wave_matches)
#print(ind)
print("**Filtered File List**")
print(self.obslog.iloc[ind].Filename)
return self.obslog.iloc[ind].Filename
def FilterEngine(self, object, log, value, unc_lower, unc_upper, reference_id):
# Generic function to filter through the list of objects.
# Note: object should be a list or a numpy array type!
# First, find all the objects in our log that match the specified objects.
bool_object_matches = np.zeros(len(log.index),dtype=bool)
if object is None:
bool_object_matches = np.ones(len(log.index),dtype=bool)
elif (isinstance(object, np.ndarray) | isinstance(object, list)):
for thisobject in object:
bool_object_matches = (log.object == thisobject) | (bool_object_matches)
#print(bool_object_matches.sum())
else:
print("EDIBLES Oracle is Panicking in FilterEngine: don't know what I'm dealing with!")
# Next, find all the matches with the parameters -- but only if they are specified!
# Initialize a boolean array to match all entries in the sightline file.
# Then work through each of the criteria and add the corresponding filter criterion.
bool_value_matches = np.ones(len(log.index),dtype=bool)
#print(bool_value_matches)
if value is not None:
# Only keep sightline if the value is an exact match.
bool_value_matches = (log.value == value)
if unc_lower is not None:
bool_value_matches = (log.value > unc_lower) & bool_value_matches
if unc_upper is not None:
print(value)
print(unc_upper)
bool_value_matches = (log.value < unc_upper) & bool_value_matches
# Now process the references or "preferred" values.
# If reference is "All", we should not apply an additional filter.
# If reference is specified, filter on that reference.
# If no reference is specified, use the preferred value.
if reference_id is None:
bool_value_matches = (log.preferred_flag == 1) & bool_value_matches
elif reference_id=='All':
pass
else:
#check if proper ref. is given [1,2] for EBV, [3,4] fpr SpT.
bool_value_matches = (log.reference_id == reference_id) & bool_value_matches
bool_combined_matches = bool_object_matches & bool_value_matches
#ind = np.where(bool_combined_matches)
#matching_objects = log.object.values[ind]
matching_objects_df = log.loc[bool_combined_matches, ['object','value']]
print('getFilteredObslist: Found a total of ', bool_object_matches.sum(), ' object match(es).')
print('getFilteredObslist: Found a total of ', bool_value_matches.sum(), ' parameter match(es).')
print('getFilteredObslist: Found a total of ', bool_combined_matches.sum(), ' combined match(es).')
return matching_objects_df
def getFilteredObjects(self,object=None, Wave=None, \
EBV=None, EBV_min=None, EBV_max=None, EBV_reference=None, \
SpType=None, SpType_min=None, SpType_max=None, SpType_reference=None, \
WaveMin=None, WaveMax=None, LogNHI=None,LogNHI_min=None,LogNHI_max=None,\
LogNHI_reference=None,LogNHII=None,LogNHII_min=None,LogNHII_max=None, \
LogNHII_reference=None, fH2=None,fH2_min=None,fH2_max=None, \
fH2_reference=None, RV=None,RV_min=None,RV_max=None, \
RV_reference=None, AV=None,AV_min=None,AV_max=None, \
AV_reference=None):
'''This method will provide a filtered list of objects that match
the specified criteria on sightline/target parameters as well as
on observational criteria (e.g. wavelength range). This function consists
of two steps:
| 1. Find all targets that match specified target parameters. This is done
for each parameter using the FilterEngine function.
| 2. Find the objects that match all target specifications. '''
# STEP 1: Filter objects for each of the parameters -- but only if parameters are specified!
if (EBV or EBV_min or EBV_max or EBV_reference) is not None:
print("EBV")
matching_objects_ebv = self.FilterEngine(object, self.ebvlog, EBV, EBV_min, EBV_max, EBV_reference)
else:
matching_objects_ebv = self.object_log
if (SpType or SpType_min or SpType_max or SpType_reference) is not None:
print("SP_TYPE")
matching_objects_sptype = self.FilterEngine(object, self.sptypelog, SpType, SpType_min, SpType_max, SpType_reference)
else:
matching_objects_sptype = self.object_log
if (LogNHI or LogNHI_min or LogNHI_max or LogNHI_reference) is not None:
print("LogN(HI)")
matching_objects_lognhi = self.FilterEngine(object, self.nhilog, LogNHI, LogNHI_min, LogNHI_max, LogNHI_reference)
else:
matching_objects_lognhi = self.object_log
if (LogNHII or LogNHII_min or LogNHII_max or LogNHII_reference) is not None:
print("LogN(HII)")
matching_objects_lognhii = self.FilterEngine(object, self.nhiilog, LogNHII, LogNHII_min, LogNHII_max, LogNHII_reference)
else:
matching_objects_lognhii = self.object_log
if (fH2 or fH2_min or fH2_max or fH2_reference) is not None:
print("fH2")
matching_objects_fh2 = self.FilterEngine(object, self.fh2log, fH2, fH2_min, fH2_max, fH2_reference)
else:
matching_objects_fh2 = self.object_log
if (RV or RV_min or RV_max or RV_reference) is not None:
print("RV")
matching_objects_rv = self.FilterEngine(object, self.rvlog, RV, RV_min, RV_max, RV_reference)
else:
matching_objects_rv = self.object_log
if (AV or AV_min or AV_max or AV_reference) is not None:
print("AV")
matching_objects_av = self.FilterEngine(object, self.avlog, AV, AV_min, AV_max, AV_reference)
else:
matching_objects_av = self.object_log
# STEP 2: Find the common objects
ebv_objects = matching_objects_ebv['object']
sptype_objects = matching_objects_sptype['object']
lognhi_objects = matching_objects_lognhi['object']
lognhii_objects = matching_objects_lognhii['object']
fh2_objects = matching_objects_fh2['object']
rv_objects = matching_objects_rv['object']
av_objects = matching_objects_av['object']
#print(lognhi_objects.tolist())
#print(ebv_objects.tolist())
#print(sptype_objects.tolist())
##################
if object is None:
search_list = self.object_log["object"].to_list()
else:
search_list = object
common_objects_set = set(search_list).intersection(ebv_objects.to_list(),sptype_objects.to_list(),lognhi_objects.to_list(),lognhii_objects.to_list(),fh2_objects.to_list(),rv_objects.to_list(),av_objects.to_list())
###################
common_objects_list= list(common_objects_set)
print("***Common Objects***")
if len(common_objects_list) == 0:
print("None")
else:
print(common_objects_list)
return (common_objects_list)
def getFilteredObsList(self,object=None, Wave=None, MergedOnly=False, OrdersOnly=False,\
EBV=None, EBV_min=None, EBV_max=None, EBV_reference=None, \
SpType=None, SpType_min=None, SpType_max=None, SpType_reference=None, \
WaveMin=None, WaveMax=None, LogNHI=None,LogNHI_min=None,LogNHI_max=None,\
LogNHI_reference=None,LogNHII=None,LogNHII_min=None,LogNHII_max=None, \
LogNHII_reference=None, fH2=None,fH2_min=None,fH2_max=None, \
fH2_reference=None, RV=None,RV_min=None,RV_max=None, \
RV_reference=None, AV=None,AV_min=None,AV_max=None, \
AV_reference=None):
'''This method will provide a filtered list of observations that match
the specified criteria on sightline/target parameters as well as
on observational criteria (e.g. wavelength range). This function consists
of three steps:
| 1. Find all targets that match specified target parameters. This is done
for each parameter using the FilterEngine function.
| 2. Find the objects that match all target specifications.
| 3. Find the observations that match specified parameters for only these targets. '''
#print(getFilteredObslist.__dict__)
# STEP 1: Filter objects for each of the parameters -- but only if parameters are specified!
if (EBV or EBV_min or EBV_max or EBV_reference) is not None:
print("EBV")
matching_objects_ebv = self.FilterEngine(object, self.ebvlog, EBV, EBV_min, EBV_max, EBV_reference)
else:
matching_objects_ebv = self.object_log
if (SpType or SpType_min or SpType_max or SpType_reference) is not None:
print("SP_TYPE")
matching_objects_sptype = self.FilterEngine(object, self.sptypelog, SpType, SpType_min, SpType_max, SpType_reference)
else:
matching_objects_sptype = self.object_log
if (LogNHI or LogNHI_min or LogNHI_max or LogNHI_reference) is not None:
print("LogN(HI)")
matching_objects_lognhi = self.FilterEngine(object, self.nhilog, LogNHI, LogNHI_min, LogNHI_max, LogNHI_reference)
else:
matching_objects_lognhi = self.object_log
if (LogNHII or LogNHII_min or LogNHII_max or LogNHII_reference) is not None:
print("LogN(HII)")
matching_objects_lognhii = self.FilterEngine(object, self.nhiilog, LogNHII, LogNHII_min, LogNHII_max, LogNHII_reference)
else:
matching_objects_lognhii = self.object_log
if (fH2 or fH2_min or fH2_max or fH2_reference) is not None:
print("fH2")
matching_objects_fh2 = self.FilterEngine(object, self.fh2log, fH2, fH2_min, fH2_max, fH2_reference)
else:
matching_objects_fh2 = self.object_log
if (RV or RV_min or RV_max or RV_reference) is not None:
print("RV")
matching_objects_rv = self.FilterEngine(object, self.rvlog, RV, RV_min, RV_max, RV_reference)
else:
matching_objects_rv = self.object_log
if (AV or AV_min or AV_max or AV_reference) is not None:
print("AV")
matching_objects_av = self.FilterEngine(object, self.avlog, AV, AV_min, AV_max, AV_reference)
else:
matching_objects_av = self.object_log
# STEP 2: Find the common objects
ebv_objects = matching_objects_ebv['object']
sptype_objects = matching_objects_sptype['object']
lognhi_objects = matching_objects_lognhi['object']
lognhii_objects = matching_objects_lognhii['object']
fh2_objects = matching_objects_fh2['object']
rv_objects = matching_objects_rv['object']
av_objects = matching_objects_av['object']
#print(lognhi_objects.tolist())
#print(ebv_objects.tolist())
#print(sptype_objects.tolist())
##################
if object is None:
search_list = self.object_log["object"].to_list()
else:
search_list = object
common_objects_set = set(search_list).intersection(ebv_objects.to_list(),sptype_objects.to_list(),lognhi_objects.to_list(),lognhii_objects.to_list(),fh2_objects.to_list(),rv_objects.to_list(),av_objects.to_list())
###################
common_objects_list= list(common_objects_set)
print("***Common Objects***")
if len(common_objects_list) == 0:
print("None")
else:
print(common_objects_list)
# STEP 3
# Now push this list of objects through for further filtering based on obs log
FilteredObsList = self._getObsListFilteredByObsLogParameters(object=common_objects_list, Wave=Wave, WaveMin=WaveMin, WaveMax=WaveMax, MergedOnly=MergedOnly, OrdersOnly=OrdersOnly)
print(len(FilteredObsList))
return (FilteredObsList)
def getObsListByWavelength(self, wave=None, MergedOnly=False, OrdersOnly=False):
"""
This function filters the list of Observations to return only those
that include the requested wavelength.
We will create a set of boolean arrays that we will then combined
as the filter.
:param wave: Wavelength that the returned files will include
:type wave: float
:param MergedOnly: Only include spectra from merged orders
:type MergedOnly: bool
:param OrdersOnly: Only include individual spectrum orders
:type OrdersOnly: bool
"""
# Boolean matches for wavelength.
if wave is None:
wave = 5000
bool_wave_matches = (self.obslog.WaveMin < wave) & (self.obslog.WaveMax > wave)
# Do we have to filter out merged or single-order spectra? Note that if both
# MergedOnly and OrdersOnly are True, only the Merged spectra will be returned.
if MergedOnly and OrdersOnly:
print("EDIBLES Oracle: ONLY RETURNING MERGED SPECTRA")
bool_order = self.obslog.Order != "Z"
if OrdersOnly is True:
bool_order = self.obslog.Order != "ALL"
if MergedOnly is True:
bool_order = self.obslog.Order == "ALL"
ind = np.where(bool_wave_matches & bool_order)
# print(ind)
return self.obslog.iloc[ind].Filename
def getObsListByTarget(self, target=None, MergedOnly=False, OrdersOnly=False):
"""
This function filters the list of Observations to return only those
of the requested target.
We will create a set of boolean arrays that we will then combined
as the filter.
:param target: Target name that the returned files will include
:type target: object
:param MergedOnly: Only include spectra from merged orders
:type MergedOnly: bool
:param OrdersOnly: Only include individual spectrum orders
:type OrdersOnly: bool
"""
# Boolean matches for wavelength.
if target is None:
target = 'HD164073'
bool_target_matches = (self.obslog.Object == target)
# Do we have to filter out merged or single-order spectra? Note that if both
# MergedOnly and OrdersOnly are True, only the Merged spectra will be returned.
if MergedOnly and OrdersOnly:
print("EDIBLES Oracle: ONLY RETURNING MERGED SPECTRA")
bool_order = self.obslog.Order != "Z"
if OrdersOnly is True:
bool_order = self.obslog.Order != "ALL"
if MergedOnly is True:
bool_order = self.obslog.Order == "ALL"
ind = np.where(bool_target_matches & bool_order)
return self.obslog.iloc[ind].Filename
if __name__ == "__main__":
    # Smoke-test driver: exercises the oracle's public filtering entry points
    # when the module is run as a script. Kept examples below (commented out)
    # document other supported filter combinations.
    # print("Main")
    pythia = EdiblesOracle()
    # EXAMPLE 1: Get all observations for a single object.
    List=pythia.getFilteredObsList(object=["HD 183143"], MergedOnly=True, Wave=3302.0)
    print("1. Results from getFilteredObsList: ")
    print(List)
    # EXAMPLE 2: Find all objects that match certain criteria.
    List=pythia.getFilteredObjects(object=["HD 145502"], EBV_min=0.5, fH2_max=.3)
    print("2. Results from getFilteredObjects: ")
    print(List)
    #List=pythia.getFilteredObsList(object=["HD 103779"],MergedOnly=True,EBV_min=0.2,EBV_max=0.8,EBV_reference=3)
    #List=pythia.getFilteredObsList(EBV_min=0.2,EBV_max=0.8,EBV_reference=1)
    #List=pythia.getFilteredObsList(MergedOnly=True,EBV_min=0.2,EBV_max=0.8,EBV_reference=1)
    #print("1. Results from getFilteredObsList: ")
    #List=pythia.getFilteredObsList(MergedOnly=True,EBV_min=0.7,EBV_max=0.8, SpType='B0.5 III')
    #List=pythia.getFilteredObsList(MergedOnly=True,EBV_min=0.2,EBV_max=0.8, object=['HD 145502'])
    ##List = pd.DataFrame(List).T
    ##List.columns = ['Object', 'EBV']
    ##print("Results from getFilteredObsList: ")
    ##print(List)
    #List=pythia.getFilteredObsList(object=['HD 145502', 'HD 149757'], MergedOnly=True, Wave=6614)
    ##List = pd.DataFrame(List).T
    ##List.columns = ['Object', 'EBV']
    ##print("Results from getFilteredObsList: ")
    ##print(List)
    # print("2. Results from getFilteredObsList: ")
    # List=pythia.getFilteredObsList(MergedOnly=True,EBV=0.6,EBV_max=0.9)
    # print(List)
    #
    # print("3. Results from getFilteredObsList: ")
    # List=pythia.getFilteredObsList(object=['HD 145502', 'HD 149757'], MergedOnly=True, Wave=6614)
    # print(List)
    # The triple-quoted string below is a disabled plotting snippet (it is a
    # bare string expression, so it has no runtime effect).
    '''
    for filename in List:
        sp = EdiblesSpectrum(filename)
        plt.figure()
        plt.title(filename)
        plt.xlabel("Wavelength (" + r"$\AA$" + ")")
        plt.xlim(5000, 5100)
        plt.plot(sp.wave, sp.flux)
        plt.show()
    '''
| [
"numpy.where",
"pandas.read_csv",
"pathlib.Path"
] | [((479, 504), 'pathlib.Path', 'Path', (["(PYTHONDIR + '/data')"], {}), "(PYTHONDIR + '/data')\n", (483, 504), False, 'from pathlib import Path\n'), ((567, 588), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (578, 588), True, 'import pandas as pd\n'), ((673, 694), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (684, 694), True, 'import pandas as pd\n'), ((785, 806), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (796, 806), True, 'import pandas as pd\n'), ((898, 919), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (909, 919), True, 'import pandas as pd\n'), ((1012, 1033), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1023, 1033), True, 'import pandas as pd\n'), ((1122, 1143), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1133, 1143), True, 'import pandas as pd\n'), ((1228, 1249), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1239, 1249), True, 'import pandas as pd\n'), ((1334, 1355), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1345, 1355), True, 'import pandas as pd\n'), ((1457, 1506), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'names': "['object']", 'header': '(0)'}), "(filename, names=['object'], header=0)\n", (1468, 1506), True, 'import pandas as pd\n'), ((3860, 3930), 'numpy.where', 'np.where', (['(bool_object_matches & bool_order_matches & bool_wave_matches)'], {}), '(bool_object_matches & bool_order_matches & bool_wave_matches)\n', (3868, 3930), True, 'import numpy as np\n'), ((18161, 18201), 'numpy.where', 'np.where', (['(bool_wave_matches & bool_order)'], {}), '(bool_wave_matches & bool_order)\n', (18169, 18201), True, 'import numpy as np\n'), ((19583, 19625), 'numpy.where', 'np.where', (['(bool_target_matches & bool_order)'], {}), '(bool_target_matches & bool_order)\n', (19591, 19625), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 16:18:13 2020
@author: Dropex
"""
import sys
import requests
import pandas as pd
import numpy as np
class TradeAsset:
    """
    A tradable asset plus helpers to download candlestick (kline) data and
    compute common technical indicators on it.

    symbol: BTCUSDT, XAUUSD, AMZN, etc.
    interval: 1h, 4h, 1d, etc.
    exchange: Binance, BitMex, ByBit, etc. (only 'Binance' is implemented)
    """
    def __init__(self, symbol, interval, exchange):
        self.symbol = symbol
        self.interval = interval
        self.exchange = exchange
    #Use exchange API to get market information
    def getklines(self):
        """Fetch raw klines from the exchange REST API into self.klines.

        On any request/JSON failure, self.klines becomes a one-element list
        holding the exception type (NOTE(review): the bare except also
        swallows KeyboardInterrupt -- candidate for narrowing).
        """
        if self.exchange == 'Binance':
            url_base='https://api.binance.com/api/v1/klines'
            PARAMS = {'symbol':self.symbol, 'interval':self.interval}
            myheaders = {}
            try:
                myrequest = requests.get(url = url_base, headers = myheaders, params = PARAMS)
                self.klines = myrequest.json()
            except:
                self.klines = [sys.exc_info()[0]]
        else:
            self.klines = ['Exchange not defined']
    #Generate pandas dataframe for further analysis
    def dataframe(self):
        """Build self.df from self.klines.

        For Binance responses with >= 12 columns, names the columns, keeps
        the OHLCV subset, casts numeric columns to float and derives
        SellVolume and PriceAverage. Otherwise self.df is an empty frame.
        """
        if len(self.klines) > 2:
            df = pd.DataFrame(self.klines)
        else:
            df = pd.DataFrame()
        dfcolumns=len(df.columns) >= 12
        if dfcolumns and self.exchange == 'Binance':
            df.columns=['OpenTime','Open','High','Low','Close','Volume','CloseTime','QuoteAssetVol','NoOfTrades','BuyVolume','TakerbuyquoteassetVol','Nothing']
            df=df[['OpenTime','Open','High','Low','Close','Volume','CloseTime','BuyVolume']]
            df[['Open','High','Low','Close','Volume','BuyVolume']] = df[['Open','High','Low','Close','Volume','BuyVolume']].astype(float)
            df['SellVolume']=df['Volume']-df['BuyVolume']
            #Average from Open and Close values, used for Volume Profile
            df['PriceAverage']=(df['Open']+df['Close'])/2
            self.df = df
        else:
            self.df = pd.DataFrame()
    #EMA of last n periods for Close column
    # df dataframe with column Close
    def ema(self, df, n, sma):
        """Exponential moving average of the last n Close values, seeded
        with `sma` as the previous EMA.

        NOTE(review): when the length guard (2n+1 <= len(df) <= 500) fails
        the function falls through and implicitly returns None, not np.nan.
        """
        alpha=2/(n+1)
        ema=np.nan
        lastema=sma
        if 500 >= len(df) >= 2*n+1 > 0 and 'Close' in df.columns:
            for i in range(n):
                ema=alpha*df.loc[len(df['Close'])-n+i,'Close']+(1-alpha)*lastema
                lastema=ema
            return ema
    #Add EMA of n periods column
    # df dataframe with column Close
    def addema(self,df,n):
        """Append an 'ema<n>' column to self.df, computing the EMA row by
        row over expanding prefixes of df (seeded with the prior SMA)."""
        ema_list=[]
        for i in df.index.values:
            df_slice=df[0:i+1]
            sma=self.sma(df_slice[0:len(df_slice)-n],n)
            ema_list.append(self.ema(df_slice,n,sma))
        self.df['ema'+str(n)]=ema_list
    #Simple Media Average of last n periods for Close column
    # df dataframe with column Close
    def sma(self, df, n):
        """Simple moving average of the last n Close values; np.nan when the
        length guard (n <= len(df) <= 500) fails."""
        if 500 >= len(df) >= n > 0 and 'Close' in df.columns:
            return df['Close'][len(df['Close'])-n:len(df['Close'])].mean()
        else:
            return np.nan
    #Add SMA of n periods column
    # df dataframe with column Close
    def addsma(self, df, n):
        """Append an 'sma<n>' column to self.df, one value per expanding
        prefix of df."""
        sma_list=[]
        for i in df.index.values:
            df_slice=df[0:i+1]
            sma_list.append(self.sma(df_slice,n))
        self.df['sma'+str(n)]=sma_list
    #RSI of n periods
    # df dataframe with column Close
    def rsi(self, df, n):
        """Relative Strength Index over n periods.

        Seeds average gain/loss with a plain mean over the first n deltas,
        then applies RMA-style smoothing (alpha = 1/n) over the next n.
        Returns np.nan when the length guard (2n+1 <= len(df) <= 500) fails.
        """
        if 500 >= len(df) >= 2*n+1 > 0 and 'Close' in df.columns:
            #RMA for avgain and avloss
            avgain=0
            avloss=0
            alpha=(1.0/n)
            gainlist=[]
            losslist=[]
            for i in range(n,0,-1):
                closedelta=df.loc[len(df)-n-i,'Close']-df.loc[len(df)-n-i-1,'Close']
                if closedelta > 0:
                    gainlist.append(closedelta)
                else:
                    losslist.append(abs(closedelta))
            avgain=np.array(gainlist).sum()/n
            avloss=np.array(losslist).sum()/n
            for i in range(n,0,-1):
                closedelta=df.loc[len(df)-i,'Close']-df.loc[len(df)-i-1,'Close']
                if closedelta > 0:
                    avgain=closedelta*alpha+(1-alpha)*avgain
                    avloss=(1-alpha)*avloss
                else:
                    avloss=abs(closedelta)*alpha+(1-alpha)*avloss
                    avgain=(1-alpha)*avgain
            rs=avgain/avloss
            rsi=100-(100/(1+rs))
            return rsi
        else:
            return np.nan
    #Add RSI of 14 periods column
    # df dataframe with column Close
    def addrsi(self, df):
        """Append an 'rsi14' column to self.df (fixed 14-period RSI), one
        value per expanding prefix of df."""
        rsi_list=[]
        n=14
        for i in df.index.values:
            df_slice=df[0:i+1]
            rsi_list.append(self.rsi(df_slice,n))
        self.df['rsi'+str(n)]=rsi_list
    #MACD
    # df dataframe with column Close
    def macd(self, df):
        """MACD using EMA(12)-EMA(26), with a 9-period signal line built
        from the last 18 MACD values (SMA seed on the first 9, EMA on the
        last 9). Returns (macd, signal, histogram); all np.nan when the
        length guard fails.
        """
        if 500 >= len(df) >= 2*26+17+1 > 0 and 'Close' in df.columns:
            macd_list=[]
            for i in range(0,18):
                firstsma12=self.sma(df[0:len(df)-17+i-12],12)
                firstsma26=self.sma(df[0:len(df)-17+i-26],26)
                macd_list.append(self.ema(df[0:len(df)-17+i],12,firstsma12)-self.ema(df[0:len(df)-17+i],26,firstsma26))
            signal=self.emaoflist(macd_list[9:18],self.smaoflist(macd_list[0:9]))
            hist=macd_list[-1]-signal
            return macd_list[-1],signal,hist
        else:
            return np.nan,np.nan,np.nan
    #Add macd,signal and histogram columns
    # df dataframe with column Close
    def addmacd(self, df):
        """Append 'macd', 'signal' and 'histogram' columns to self.df, one
        triple per expanding prefix of df."""
        macd_list=[]
        signal_list=[]
        hist_list=[]
        for i in df.index.values:
            df_slice=df[0:i+1]
            macd,signal,hist=self.macd(df_slice)
            macd_list.append(macd)
            signal_list.append(signal)
            hist_list.append(hist)
        self.df['macd']=macd_list
        self.df['signal']=signal_list
        self.df['histogram']=hist_list
    #Calculate simple media average of a list
    def smaoflist(self,list):
        """Arithmetic mean of a plain Python list.

        NOTE(review): the parameter shadows the builtin `list`; raises
        ZeroDivisionError on an empty list.
        """
        n=len(list)
        sma=0
        for i in list:
            sma=sma+i
        sma=sma/n
        return sma
    #Calculate exponential media average of a list
    # sma is simple media average of last n values before the initial value in list
    def emaoflist(self,list, sma):
        """EMA over a plain list, seeded with `sma` as the previous EMA and
        alpha = 2/(len(list)+1)."""
        n=len(list)
        lastema=sma
        ema=0
        alpha = 2 / (n + 1)
        for i in list:
            ema=(1-alpha)*lastema+alpha*i
            lastema=ema
        return ema
    def di(self,df):
        """Directional indicators (+DI, -DI) over a fixed 28-bar window.

        Builds directional movement and true-range series for the last 28
        bars, smooths each with an SMA seed over the first 14 and an EMA
        over the last 14, and scales by ATR. Returns (0, 0) when the length
        guard fails.
        NOTE(review): the second zeroing test reads the possibly already
        zeroed upDM, so ties resolve in downDM's favour -- preserved as-is.
        """
        if 500 >= len(df) >= 2*14+1 > 0 and 'Close' in df.columns and 'High' in df.columns and 'Low' in df.columns:
            upDMlist=[]
            downDMlist=[]
            trlist=[]
            upDIlist=[]
            downDIlist=[]
            for i in range(0,28):
                upDM=df.loc[len(df)-28+i,'High']-df.loc[len(df)-28+i-1,'High']
                downDM=df.loc[len(df)-28+i-1,'Low']-df.loc[len(df)-28+i,'Low']
                if not (upDM > 0 and upDM > downDM):
                    upDM=0
                if not (downDM > 0 and downDM > upDM):
                    downDM=0
                upDMlist.append(upDM)
                downDMlist.append(downDM)
                tr=max(df.loc[len(df)-28+i,'High']-df.loc[len(df)-28+i,'Low'], abs(df.loc[len(df)-28+i,'High']-df.loc[len(df)-28+i-1,'Close']), abs(df.loc[len(df)-28+i,'Low']-df.loc[len(df)-28+i-1,'Close']))
                trlist.append(tr)
            atrsma=self.smaoflist(trlist[0:14])
            atr=self.emaoflist(trlist[14:28],atrsma)
            sma=self.smaoflist(upDMlist[0:14])
            ema=self.emaoflist(upDMlist[14:28],sma)
            upDI=100*ema/atr
            sma=self.smaoflist(downDMlist[0:14])
            ema=self.emaoflist(downDMlist[14:28],sma)
            downDI=100*ema/atr
            return upDI,downDI
        else:
            return 0,0
    def adx(self,df):
        """Average Directional Index from 28 rolling di() evaluations.

        Returns (upDI, downDI, adx) for the latest window; all np.nan when
        the length guard fails. A (0, 0) di() result is replaced by
        (0.5, 0.5) to avoid division by zero.
        """
        if 500 >= len(df) >= 2*14+1 > 0 and 'Close' in df.columns and 'High' in df.columns and 'Low' in df.columns:
            adxlist=[]
            for i in range(0,28):
                upDI,downDI=self.di(df[0:len(df)-28+i+1])
                if upDI+downDI == 0:
                    upDI=downDI=0.5
                adxlist.append(abs((upDI-downDI)/(upDI+downDI)))
            sma=self.smaoflist(adxlist[0:14])
            ema=self.emaoflist(adxlist[14:28],sma)
            adx=100*ema
            return upDI,downDI,adx
        else:
            return np.nan,np.nan,np.nan
    def addadx(self,df):
        """Append 'upDI', 'downDI' and 'adx' columns to self.df, one triple
        per expanding prefix of df."""
        upDI_list=[]
        downDI_list=[]
        adx_list=[]
        for i in df.index.values:
            df_slice=df[0:i+1]
            upDI,downDI,adx=self.adx(df_slice)
            upDI_list.append(upDI)
            downDI_list.append(downDI)
            adx_list.append(adx)
        self.df['upDI']=upDI_list
        self.df['downDI']=downDI_list
        self.df['adx']=adx_list
| [
"pandas.DataFrame",
"numpy.array",
"sys.exc_info",
"requests.get"
] | [((1152, 1177), 'pandas.DataFrame', 'pd.DataFrame', (['self.klines'], {}), '(self.klines)\n', (1164, 1177), True, 'import pandas as pd\n'), ((1209, 1223), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1221, 1223), True, 'import pandas as pd\n'), ((1958, 1972), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1970, 1972), True, 'import pandas as pd\n'), ((771, 831), 'requests.get', 'requests.get', ([], {'url': 'url_base', 'headers': 'myheaders', 'params': 'PARAMS'}), '(url=url_base, headers=myheaders, params=PARAMS)\n', (783, 831), False, 'import requests\n'), ((3925, 3943), 'numpy.array', 'np.array', (['gainlist'], {}), '(gainlist)\n', (3933, 3943), True, 'import numpy as np\n'), ((3971, 3989), 'numpy.array', 'np.array', (['losslist'], {}), '(losslist)\n', (3979, 3989), True, 'import numpy as np\n'), ((936, 950), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (948, 950), False, 'import sys\n')] |
# Licensed under the MIT License - https://opensource.org/licenses/MIT
import unittest
import numpy as np
from pycobra.cobra import Cobra
from pycobra.ewa import Ewa
from pycobra.kernelcobra import KernelCobra
import logging
from sklearn.utils.estimator_checks import check_estimator
class TestPrediction(unittest.TestCase):
    """Regression tests pinning the predictions of Cobra, Ewa and
    KernelCobra on a fixed synthetic regression problem."""

    def setUp(self):
        """Build a deterministic data set and fit all three machines on its
        training half."""
        rng = np.random.RandomState(42)
        n_features = 20
        # Four equal partitions: D1 = train machines; D2 = create COBRA;
        # D3 = calibrate epsilon, alpha; D4 = testing.
        D1 = D2 = D3 = D4 = 200
        total = D1 + D2 + D3 + D4
        X = rng.uniform(-1, 1, total * n_features).reshape(total, n_features)
        Y = np.power(X[:, 1], 2) + np.power(X[:, 3], 3) + np.exp(X[:, 10])
        train_end = D1 + D2
        test_start = D1 + D2 + D3
        # Training and testing splits.
        X_train = X[:train_end]
        Y_train = Y[:train_end]
        X_test = X[test_start:test_start + D4]
        Y_test = Y[test_start:test_start + D4]
        # Fit each aggregation machine with a fixed seed and expose it as an
        # attribute for the individual tests.
        for attr, machine in (("cobra", Cobra(random_state=0, epsilon=0.5)),
                              ("ewa", Ewa(random_state=0)),
                              ("kernelcobra", KernelCobra(random_state=0))):
            machine.fit(X_train, Y_train)
            setattr(self, attr, machine)
        self.test_data = X_test

    def test_cobra_predict(self):
        """COBRA's prediction on the first test point matches the pinned value."""
        point = self.test_data[0].reshape(1, -1)
        self.assertAlmostEqual(2.7310842344617035, self.cobra.predict(point))

    def test_ewa_predict(self):
        """EWA's prediction on the first test point matches the pinned value."""
        point = self.test_data[0].reshape(1, -1)
        self.assertAlmostEqual(2.7656847636961603, self.ewa.predict(point)[0])

    def test_kernel_predict(self):
        """KernelCobra's prediction on the first test point matches the pinned value."""
        point = self.test_data[0].reshape(1, -1)
        self.assertAlmostEqual(2.613685190585763, self.kernelcobra.predict(point)[0])

    def test_estimators(self):
        """All three classes satisfy the scikit-learn estimator contract."""
        for estimator_class in (Cobra, Ewa, KernelCobra):
            check_estimator(estimator_class)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main() | [
"logging.basicConfig",
"pycobra.ewa.Ewa",
"numpy.power",
"pycobra.cobra.Cobra",
"numpy.exp",
"pycobra.kernelcobra.KernelCobra",
"sklearn.utils.estimator_checks.check_estimator",
"unittest.main",
"numpy.random.RandomState"
] | [((2100, 2196), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.DEBUG)\n", (2119, 2196), False, 'import logging\n'), ((2197, 2212), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2210, 2212), False, 'import unittest\n'), ((407, 432), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (428, 432), True, 'import numpy as np\n'), ((999, 1033), 'pycobra.cobra.Cobra', 'Cobra', ([], {'random_state': '(0)', 'epsilon': '(0.5)'}), '(random_state=0, epsilon=0.5)\n', (1004, 1033), False, 'from pycobra.cobra import Cobra\n'), ((1093, 1112), 'pycobra.ewa.Ewa', 'Ewa', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1096, 1112), False, 'from pycobra.ewa import Ewa\n'), ((1165, 1192), 'pycobra.kernelcobra.KernelCobra', 'KernelCobra', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1176, 1192), False, 'from pycobra.kernelcobra import KernelCobra\n'), ((1979, 2001), 'sklearn.utils.estimator_checks.check_estimator', 'check_estimator', (['Cobra'], {}), '(Cobra)\n', (1994, 2001), False, 'from sklearn.utils.estimator_checks import check_estimator\n'), ((2010, 2030), 'sklearn.utils.estimator_checks.check_estimator', 'check_estimator', (['Ewa'], {}), '(Ewa)\n', (2025, 2030), False, 'from sklearn.utils.estimator_checks import check_estimator\n'), ((2039, 2067), 'sklearn.utils.estimator_checks.check_estimator', 'check_estimator', (['KernelCobra'], {}), '(KernelCobra)\n', (2054, 2067), False, 'from sklearn.utils.estimator_checks import check_estimator\n'), ((752, 768), 'numpy.exp', 'np.exp', (['X[:, 10]'], {}), '(X[:, 10])\n', (758, 768), True, 'import numpy as np\n'), ((708, 728), 'numpy.power', 'np.power', (['X[:, 1]', '(2)'], {}), '(X[:, 1], 2)\n', (716, 728), True, 'import numpy as np\n'), ((730, 750), 'numpy.power', 'np.power', (['X[:, 3]', '(3)'], {}), '(X[:, 3], 3)\n', (738, 
750), True, 'import numpy as np\n')] |
# pylint: disable=invalid-name,no-self-use,protected-access
import numpy
from numpy.testing import assert_almost_equal
import torch
from torch.nn import Parameter
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import LinearMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestLinearMatrixAttention(AllenNlpTestCase):
    """Tests for LinearMatrixAttention: construction from Params and the
    similarity values it computes on small hand-checked tensors."""

    def test_can_init_dot(self):
        """A MatrixAttention built from 'linear' Params is a LinearMatrixAttention."""
        legacy_attention = MatrixAttention.from_params(Params({"type": "linear",
                                                       "tensor_1_dim": 3,
                                                       "tensor_2_dim": 3}))
        # BUG FIX: the original called isinstance() without asserting, so the
        # type check was computed and silently discarded.
        assert isinstance(legacy_attention, LinearMatrixAttention)

    def test_linear_similarity(self):
        """With hand-set weights and bias, the attention scores match the
        hand-computed values (to 2 decimals)."""
        linear = LinearMatrixAttention(3, 3)
        linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5, 2.0, -1.0, 1, 1]))
        linear._bias = Parameter(torch.FloatTensor([.1]))
        output = linear(torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
                        torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))

        assert_almost_equal(output.data.numpy(), numpy.array([[[4.1000, 7.1000], [17.4000, 20.4000]],
                                                              [[-9.8000, -6.8000], [36.6000, 39.6000]]]),
                            decimal=2)

    def test_bidaf_trilinear_similarity(self):
        """The 'x,y,x*y' (BiDAF trilinear) combination reproduces the term-by-term
        hand computation (to 2 decimals)."""
        linear = LinearMatrixAttention(2, 2, combination='x,y,x*y')
        linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5, 2.0, -1.0, 1, 1]))
        linear._bias = Parameter(torch.FloatTensor([.0]))
        output = linear(torch.FloatTensor([[[0, 0], [4, 5]], [[-7, -8], [10, 11]]]),
                        torch.FloatTensor([[[1, 2], [4, 5]], [[7, 8], [10, 11]]]))

        assert_almost_equal(output.data.numpy(),
                            numpy.array([[[0 + 0 + 2 + -2 + 0 + 0,
                                           0 + 0 + 8 + -5 + 0 + 0],
                                          [-1.2 + 2.5 + 2 + -2 + 4 + 10,
                                           -1.2 + 2.5 + 8 + -5 + 16 + 25]],
                                         [[2.1 + -4 + 14 + -8 + -49 + -64,
                                           2.1 + -4 + 20 + -11 + -70 + -88],
                                          [-3 + 5.5 + 14 + -8 + 70 + 88,
                                           -3 + 5.5 + 20 + -11 + 100 + 121]]]),
                            decimal=2)
| [
"numpy.array",
"allennlp.common.Params",
"allennlp.modules.matrix_attention.LinearMatrixAttention",
"torch.FloatTensor"
] | [((859, 886), 'allennlp.modules.matrix_attention.LinearMatrixAttention', 'LinearMatrixAttention', (['(3)', '(3)'], {}), '(3, 3)\n', (880, 886), False, 'from allennlp.modules.matrix_attention import LinearMatrixAttention\n'), ((1542, 1592), 'allennlp.modules.matrix_attention.LinearMatrixAttention', 'LinearMatrixAttention', (['(2)', '(2)'], {'combination': '"""x,y,x*y"""'}), "(2, 2, combination='x,y,x*y')\n", (1563, 1592), False, 'from allennlp.modules.matrix_attention import LinearMatrixAttention\n'), ((551, 615), 'allennlp.common.Params', 'Params', (["{'type': 'linear', 'tensor_1_dim': 3, 'tensor_2_dim': 3}"], {}), "({'type': 'linear', 'tensor_1_dim': 3, 'tensor_2_dim': 3})\n", (557, 615), False, 'from allennlp.common import Params\n'), ((929, 976), 'torch.FloatTensor', 'torch.FloatTensor', (['[-0.3, 0.5, 2.0, -1.0, 1, 1]'], {}), '([-0.3, 0.5, 2.0, -1.0, 1, 1])\n', (946, 976), False, 'import torch\n'), ((1009, 1033), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.1]'], {}), '([0.1])\n', (1026, 1033), False, 'import torch\n'), ((1058, 1131), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]'], {}), '([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]])\n', (1075, 1131), False, 'import torch\n'), ((1157, 1227), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]'], {}), '([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n', (1174, 1227), False, 'import torch\n'), ((1279, 1350), 'numpy.array', 'numpy.array', (['[[[4.1, 7.1], [17.4, 20.4]], [[-9.8, -6.8], [36.6, 39.6]]]'], {}), '([[[4.1, 7.1], [17.4, 20.4]], [[-9.8, -6.8], [36.6, 39.6]]])\n', (1290, 1350), False, 'import numpy\n'), ((1635, 1682), 'torch.FloatTensor', 'torch.FloatTensor', (['[-0.3, 0.5, 2.0, -1.0, 1, 1]'], {}), '([-0.3, 0.5, 2.0, -1.0, 1, 1])\n', (1652, 1682), False, 'import torch\n'), ((1715, 1739), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.0]'], {}), '([0.0])\n', (1732, 1739), False, 
'import torch\n'), ((1764, 1823), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[0, 0], [4, 5]], [[-7, -8], [10, 11]]]'], {}), '([[[0, 0], [4, 5]], [[-7, -8], [10, 11]]])\n', (1781, 1823), False, 'import torch\n'), ((1849, 1906), 'torch.FloatTensor', 'torch.FloatTensor', (['[[[1, 2], [4, 5]], [[7, 8], [10, 11]]]'], {}), '([[[1, 2], [4, 5]], [[7, 8], [10, 11]]])\n', (1866, 1906), False, 'import torch\n'), ((1986, 2260), 'numpy.array', 'numpy.array', (['[[[0 + 0 + 2 + -2 + 0 + 0, 0 + 0 + 8 + -5 + 0 + 0], [-1.2 + 2.5 + 2 + -2 + \n 4 + 10, -1.2 + 2.5 + 8 + -5 + 16 + 25]], [[2.1 + -4 + 14 + -8 + -49 + -\n 64, 2.1 + -4 + 20 + -11 + -70 + -88], [-3 + 5.5 + 14 + -8 + 70 + 88, -3 +\n 5.5 + 20 + -11 + 100 + 121]]]'], {}), '([[[0 + 0 + 2 + -2 + 0 + 0, 0 + 0 + 8 + -5 + 0 + 0], [-1.2 + 2.5 +\n 2 + -2 + 4 + 10, -1.2 + 2.5 + 8 + -5 + 16 + 25]], [[2.1 + -4 + 14 + -8 +\n -49 + -64, 2.1 + -4 + 20 + -11 + -70 + -88], [-3 + 5.5 + 14 + -8 + 70 +\n 88, -3 + 5.5 + 20 + -11 + 100 + 121]]])\n', (1997, 2260), False, 'import numpy\n')] |
# Side the board is analysed from; flips the direction pawn attacks point
# (see the `direction` computation further down).
playing_as_white = True
import pickle
from matplotlib import pyplot as plt
import sys; import webbrowser; from pandas import read_csv
import pyperclip as clip
import pyautogui, time; pyautogui.size(); pyautogui.FAILSAFE = True; pyautogui.PAUSE = 0
import os
from time import sleep
from os import listdir
from os.path import isfile, join
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from sklearn.preprocessing import StandardScaler
def save_obj(obj, name ):
    """Pickle ``obj`` into ../models/<name>.pkl using the highest protocol."""
    path = '../models/' + name + '.pkl'
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
    """Load and return the object pickled at ../models/<name>.pkl."""
    path = '../models/' + name + '.pkl'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
# Load the previously trained piece classifier and its feature scaler.
model = load_obj("model")
scaler = load_obj("scaler")
# Delete screenshots
# Remove any stale "Screen Shot" files left on the Desktop from earlier runs.
mypath = "/Users/petermyers/Desktop/"
files_to_delete = [f for f in listdir(mypath) if isfile(join(mypath, f)) and "Screen Shot" in f]
for file in files_to_delete:
    os.remove(mypath + file)
# Take Screenshot
sleep(0.1)
pyautogui.hotkey('command', 'shift', '3')  # macOS full-screen capture shortcut
# Read Screenshot
sleep(0.9)  # give macOS time to write the capture to disk
mypath = "/Users/petermyers/Desktop/"
screenshot_file = mypath + [f for f in listdir(mypath) if isfile(join(mypath, f)) and "Screen Shot" in f][0]
data = plt.imread(screenshot_file)
# Iterate
# Slice the screenshot into 64 squares and flatten each into a feature row.
# NOTE(review): 478/140 appear to be the pixel origin of the board and 134
# the square size on this particular screen -- confirm before reuse.
i = 0
j = 0
board = np.zeros([8,8])
X = []
for i in range(8):
    for j in range(8):
        cord_x = 478 + 134*i
        cord_y = 140 + 134*j
        X.append(data[cord_x:cord_x+134,cord_y:cord_y+132,:].flatten())
        # For QA
        # plt.imshow(data[cord_x:cord_x+134,cord_y:cord_y+132,:])
        # plt.savefig('../data/interim/{}_{}.png'.format(i,j))
# Scale the 64 feature rows and classify each square.
X = pd.DataFrame(X).values
rescaledX = scaler.transform(X)
prediction = model.predict(rescaledX)
prediction
# Intialize
white_pawns = np.zeros([8,8])
white_pawns
black_pawns = np.zeros([8,8])
black_pawns
# Fill in white/black pawns
# Column 0 of the prediction marks white pawns, column 1 black pawns.
record = 0
for i in range(8):
    for j in range(8):
        if prediction[record,:][0] >= 0.5:
            white_pawns[i, j] = 1
        if prediction[record,:][1] >= 0.5:
            black_pawns[i, j] = 1
        record+=1
# Direction of pawn attacks
if playing_as_white:
    direction = 1
else:
    direction = -1
# Black xs
# Mark the two diagonal squares each black pawn attacks; the bare excepts
# swallow IndexError at the board edge (negative indices wrap, by design
# or not -- NOTE(review): j-1 == -1 wraps to column 7).
black_xs = np.zeros([8,8])
record = 0
for i in range(8):
    for j in range(8):
        if black_pawns[i, j] == 1:
            try:
                black_xs[i+direction, j-1] = 1
            except:
                pass
            try:
                black_xs[i+direction, j+1] = 1
            except:
                pass
# White xs
# White pawns attack in the opposite row direction.
direction *= -1
white_xs = np.zeros([8,8])
record = 0
for i in range(8):
    for j in range(8):
        if white_pawns[i, j] == 1:
            try:
                white_xs[i+direction, j-1] = 1
            except:
                pass
            try:
                white_xs[i+direction, j+1] = 1
            except:
                pass
# Make the image
# Crop the screenshot back down to the board and draw an X (two crossing
# lines) on every attacked square: red = attacked by black, green = by white.
cord_x = 478 + 134*8
cord_y = 140 + 134*8
new_data = data[478:cord_x,140:cord_y,:]
fig, ax = plt.subplots(ncols=1)
for i in range(8):
    for j in range(8):
        if black_xs[i, j] == 1:
            cord_x1 = 0+134*j
            cord_x2 = 134+134*j
            cord_y1 = 0+134*i
            cord_y2 = 134+134*i
            ax.plot([cord_x1,cord_x2],[cord_y1,cord_y2], color="r")
            ax.plot([cord_x2,cord_x1],[cord_y1,cord_y2], color="r")
for i in range(8):
    for j in range(8):
        if white_xs[i, j] == 1:
            cord_x1 = 0+134*j
            cord_x2 = 134+134*j
            cord_y1 = 0+134*i
            cord_y2 = 134+134*i
            ax.plot([cord_x1,cord_x2],[cord_y1,cord_y2], color="g")
            ax.plot([cord_x2,cord_x1],[cord_y1,cord_y2], color="g")
ax.imshow(new_data)
plt.savefig('/Users/petermyers/Desktop/output.png')
| [
"pyautogui.hotkey",
"os.listdir",
"matplotlib.pyplot.savefig",
"pickle.dump",
"matplotlib.pyplot.imread",
"pickle.load",
"os.path.join",
"time.sleep",
"pyautogui.size",
"numpy.zeros",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"os.remove"
] | [((184, 200), 'pyautogui.size', 'pyautogui.size', ([], {}), '()\n', (198, 200), False, 'import pyautogui, time\n'), ((1009, 1019), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (1014, 1019), False, 'from time import sleep\n'), ((1020, 1061), 'pyautogui.hotkey', 'pyautogui.hotkey', (['"""command"""', '"""shift"""', '"""3"""'], {}), "('command', 'shift', '3')\n", (1036, 1061), False, 'import pyautogui, time\n'), ((1081, 1091), 'time.sleep', 'sleep', (['(0.9)'], {}), '(0.9)\n', (1086, 1091), False, 'from time import sleep\n'), ((1246, 1273), 'matplotlib.pyplot.imread', 'plt.imread', (['screenshot_file'], {}), '(screenshot_file)\n', (1256, 1273), True, 'from matplotlib import pyplot as plt\n'), ((1305, 1321), 'numpy.zeros', 'np.zeros', (['[8, 8]'], {}), '([8, 8])\n', (1313, 1321), True, 'import numpy as np\n'), ((1782, 1798), 'numpy.zeros', 'np.zeros', (['[8, 8]'], {}), '([8, 8])\n', (1790, 1798), True, 'import numpy as np\n'), ((1825, 1841), 'numpy.zeros', 'np.zeros', (['[8, 8]'], {}), '([8, 8])\n', (1833, 1841), True, 'import numpy as np\n'), ((2224, 2240), 'numpy.zeros', 'np.zeros', (['[8, 8]'], {}), '([8, 8])\n', (2232, 2240), True, 'import numpy as np\n'), ((2577, 2593), 'numpy.zeros', 'np.zeros', (['[8, 8]'], {}), '([8, 8])\n', (2585, 2593), True, 'import numpy as np\n'), ((3002, 3023), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)'}), '(ncols=1)\n', (3014, 3023), True, 'from matplotlib import pyplot as plt\n'), ((3713, 3764), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/petermyers/Desktop/output.png"""'], {}), "('/Users/petermyers/Desktop/output.png')\n", (3724, 3764), True, 'from matplotlib import pyplot as plt\n'), ((965, 989), 'os.remove', 'os.remove', (['(mypath + file)'], {}), '(mypath + file)\n', (974, 989), False, 'import os\n'), ((1651, 1666), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (1663, 1666), True, 'import pandas as pd\n'), ((567, 611), 'pickle.dump', 'pickle.dump', (['obj', 'f', 
'pickle.HIGHEST_PROTOCOL'], {}), '(obj, f, pickle.HIGHEST_PROTOCOL)\n', (578, 611), False, 'import pickle\n'), ((705, 719), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (716, 719), False, 'import pickle\n'), ((865, 880), 'os.listdir', 'listdir', (['mypath'], {}), '(mypath)\n', (872, 880), False, 'from os import listdir\n'), ((891, 906), 'os.path.join', 'join', (['mypath', 'f'], {}), '(mypath, f)\n', (895, 906), False, 'from os.path import isfile, join\n'), ((1169, 1184), 'os.listdir', 'listdir', (['mypath'], {}), '(mypath)\n', (1176, 1184), False, 'from os import listdir\n'), ((1195, 1210), 'os.path.join', 'join', (['mypath', 'f'], {}), '(mypath, f)\n', (1199, 1210), False, 'from os.path import isfile, join\n')] |
import cv2
import os
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config import cfg
from core.yolov3 import YOLOV3
# Let TensorFlow grow GPU memory on demand rather than grabbing it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# NOTE(review): this module-level session appears unused in the visible code
# (YoloTest opens its own session); confirm before removing.
sess = tf.Session(config=config)
class YoloTest(object):
    """Runs a trained YOLOv3 model over an annotation list and writes
    high-confidence detections out as pseudo-label training data."""

    def __init__(self):
        """Read all thresholds/paths from cfg, build the YOLOv3 inference
        graph, and restore the EMA (shadow) weights from the checkpoint."""
        self.input_size       = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes          = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes      = len(self.classes)
        self.anchors          = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.score_threshold  = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold    = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.annotation_path  = cfg.PSEUDO.ANNO_PATH
        self.weight_file      = cfg.TEST.WEIGHT_FILE
        self.write_image      = cfg.TEST.WRITE_IMAGE
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        self.show_label       = cfg.TEST.SHOW_LABEL

        with tf.name_scope('input'):
            self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.trainable  = tf.placeholder(dtype=tf.bool,    name='trainable')

        model = YOLOV3(self.input_data, self.trainable)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox

        # Restore the exponential-moving-average copies of the variables,
        # not the raw training weights.
        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)

        self.sess  = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver.restore(self.sess, self.weight_file)

    def predict(self, image):
        """Run one image through the network.

        :param image: BGR image array of shape (H, W, 3) (as read by cv2).
        :return: NMS-filtered boxes in original-image coordinates; each row
                 holds [x1, y1, x2, y2, score, class_id] (per the usage in
                 evaluate()).
        """
        org_image = np.copy(image)
        org_h, org_w, _ = org_image.shape

        image_data = utils.image_preporcess(image, [self.input_size, self.input_size])
        image_data = image_data[np.newaxis, ...]

        # One forward pass produces predictions at three scales.
        pred_sbbox, pred_mbbox, pred_lbbox = self.sess.run(
            [self.pred_sbbox, self.pred_mbbox, self.pred_lbbox],
            feed_dict={
                self.input_data: image_data,
                self.trainable: False
            }
        )

        # Flatten all three scales into one (N, 5 + num_classes) array.
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
        bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
        bboxes = utils.nms(bboxes, self.iou_threshold)

        return bboxes

    def evaluate(self):
        """Predict every image listed in the annotation file and write one
        pseudo-label line per image ("path x1,y1,x2,y2,cls ...") to
        cfg.PSEUDO.TEMP_DATA, keeping only boxes above cfg.PSEUDO.THRESHOLD.
        Optionally dumps annotated images into write_image_path (which is
        wiped and recreated on every run)."""
        pseudo_data = cfg.PSEUDO.TEMP_DATA
        if os.path.exists(self.write_image_path): shutil.rmtree(self.write_image_path)
        os.mkdir(self.write_image_path)
        lines = []
        # counter = 0
        with open(self.annotation_path, 'r') as annotation_file:
            for num, line in enumerate(annotation_file):
                # counter += 1
                # if counter > 100:
                #     break
                # Annotation line format: first token is the image path.
                annotation = line.strip().split()
                image_path = annotation[0]
                image_name = image_path.split('/')[-1]
                image = cv2.imread(image_path)
                bboxes_pr = self.predict(image)

                all_bbox = []
                for bbox in bboxes_pr:
                    coor = ','.join(str(int(x)) for x in bbox[:4])
                    score = bbox[4]
                    class_ind = str(int(bbox[5]))
                    if score > cfg.PSEUDO.THRESHOLD:
                        print('=> predict result of %s:' % image_name)
                        # The image path is prepended only once, before the
                        # first accepted box.
                        if not all_bbox:
                            all_bbox.append(image_path)
                        coor_class = coor + ',' + class_ind
                        all_bbox.append(coor_class)
                        if self.write_image:
                            image = utils.draw_bbox(image, bboxes_pr, show_label=self.show_label)
                            cv2.imwrite(self.write_image_path + image_name, image)
                            print(self.write_image_path + image_name)
                # NOTE(review): an empty all_bbox still appends an empty
                # line, so images with no detections produce blank rows.
                lines.append(' '.join(all_bbox))

        with open(pseudo_data, 'w') as f:
            for line in lines:
                f.write(line)
                f.write('\n')
if __name__ == '__main__':
    # Generate pseudo labels, then concatenate the original training
    # annotations with the fresh pseudo labels into one training file.
    yolotest = YoloTest()
    yolotest.evaluate()

    import shutil  # NOTE(review): redundant -- shutil is already imported at module level
    with open(cfg.PSEUDO.TRAIN_DATA, 'wb') as wfd:
        for f in [cfg.TRAIN.ANNOT_PATH, cfg.PSEUDO.TEMP_DATA]:
            with open(f, 'rb') as fd:
                shutil.copyfileobj(fd, wfd)
| [
"core.utils.read_class_names",
"core.utils.get_anchors",
"os.path.exists",
"core.yolov3.YOLOV3",
"numpy.reshape",
"tensorflow.Session",
"tensorflow.placeholder",
"core.utils.image_preporcess",
"os.mkdir",
"tensorflow.ConfigProto",
"core.utils.draw_bbox",
"shutil.copyfileobj",
"core.utils.nms... | [((173, 189), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (187, 189), True, 'import tensorflow as tf\n'), ((236, 261), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (246, 261), True, 'import tensorflow as tf\n'), ((452, 492), 'core.utils.read_class_names', 'utils.read_class_names', (['cfg.YOLO.CLASSES'], {}), '(cfg.YOLO.CLASSES)\n', (474, 492), True, 'import core.utils as utils\n'), ((1278, 1317), 'core.yolov3.YOLOV3', 'YOLOV3', (['self.input_data', 'self.trainable'], {}), '(self.input_data, self.trainable)\n', (1284, 1317), False, 'from core.yolov3 import YOLOV3\n'), ((1805, 1819), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (1812, 1819), True, 'import numpy as np\n'), ((1884, 1949), 'core.utils.image_preporcess', 'utils.image_preporcess', (['image', '[self.input_size, self.input_size]'], {}), '(image, [self.input_size, self.input_size])\n', (1906, 1949), True, 'import core.utils as utils\n'), ((2547, 2641), 'core.utils.postprocess_boxes', 'utils.postprocess_boxes', (['pred_bbox', '(org_h, org_w)', 'self.input_size', 'self.score_threshold'], {}), '(pred_bbox, (org_h, org_w), self.input_size, self.\n score_threshold)\n', (2570, 2641), True, 'import core.utils as utils\n'), ((2654, 2691), 'core.utils.nms', 'utils.nms', (['bboxes', 'self.iou_threshold'], {}), '(bboxes, self.iou_threshold)\n', (2663, 2691), True, 'import core.utils as utils\n'), ((2794, 2831), 'os.path.exists', 'os.path.exists', (['self.write_image_path'], {}), '(self.write_image_path)\n', (2808, 2831), False, 'import os\n'), ((2878, 2909), 'os.mkdir', 'os.mkdir', (['self.write_image_path'], {}), '(self.write_image_path)\n', (2886, 2909), False, 'import os\n'), ((584, 619), 'core.utils.get_anchors', 'utils.get_anchors', (['cfg.YOLO.ANCHORS'], {}), '(cfg.YOLO.ANCHORS)\n', (601, 619), True, 'import core.utils as utils\n'), ((1074, 1096), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", 
(1087, 1096), True, 'import tensorflow as tf\n'), ((1128, 1179), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'name': '"""input_data"""'}), "(dtype=tf.float32, name='input_data')\n", (1142, 1179), True, 'import tensorflow as tf\n'), ((1210, 1257), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.bool', 'name': '"""trainable"""'}), "(dtype=tf.bool, name='trainable')\n", (1224, 1257), True, 'import tensorflow as tf\n'), ((1445, 1465), 'tensorflow.name_scope', 'tf.name_scope', (['"""ema"""'], {}), "('ema')\n", (1458, 1465), True, 'import tensorflow as tf\n'), ((1489, 1545), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['self.moving_ave_decay'], {}), '(self.moving_ave_decay)\n', (1522, 1545), True, 'import tensorflow as tf\n'), ((2833, 2869), 'shutil.rmtree', 'shutil.rmtree', (['self.write_image_path'], {}), '(self.write_image_path)\n', (2846, 2869), False, 'import shutil\n'), ((1586, 1627), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (1600, 1627), True, 'import tensorflow as tf\n'), ((2293, 2343), 'numpy.reshape', 'np.reshape', (['pred_sbbox', '(-1, 5 + self.num_classes)'], {}), '(pred_sbbox, (-1, 5 + self.num_classes))\n', (2303, 2343), True, 'import numpy as np\n'), ((2381, 2431), 'numpy.reshape', 'np.reshape', (['pred_mbbox', '(-1, 5 + self.num_classes)'], {}), '(pred_mbbox, (-1, 5 + self.num_classes))\n', (2391, 2431), True, 'import numpy as np\n'), ((2469, 2519), 'numpy.reshape', 'np.reshape', (['pred_lbbox', '(-1, 5 + self.num_classes)'], {}), '(pred_lbbox, (-1, 5 + self.num_classes))\n', (2479, 2519), True, 'import numpy as np\n'), ((3340, 3362), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (3350, 3362), False, 'import cv2\n'), ((4735, 4762), 'shutil.copyfileobj', 'shutil.copyfileobj', (['fd', 'wfd'], {}), '(fd, wfd)\n', (4753, 4762), False, 'import shutil\n'), ((4049, 4110), 
'core.utils.draw_bbox', 'utils.draw_bbox', (['image', 'bboxes_pr'], {'show_label': 'self.show_label'}), '(image, bboxes_pr, show_label=self.show_label)\n', (4064, 4110), True, 'import core.utils as utils\n'), ((4139, 4193), 'cv2.imwrite', 'cv2.imwrite', (['(self.write_image_path + image_name)', 'image'], {}), '(self.write_image_path + image_name, image)\n', (4150, 4193), False, 'import cv2\n')] |
# coding=utf-8
# @Time : 2021/1/12 10:12
# @Auto : zzf-jeff
import numpy as np
import cv2
import torch
import pyclipper
from torchocr.datasets.builder import PIPELINES
@PIPELINES.register_module()
class PSEProcessTrain():
def __init__(self, img_size=640, n=6, m=0.5, **kwargs):
self.img_size = img_size
self.n = n
self.m = m
def generate_map(self, im_size, text_polys, text_tags, training_mask, i, n, m):
"""gen pse need map
生成shrink map
:param text_polys:
:param text_tags:
:param training_mask:
:param i:
:param n:
:param m:
:return:
"""
h, w = im_size
score_map = np.zeros((h, w), dtype=np.uint8)
for poly, tag in zip(text_polys, text_tags):
poly = poly.astype(np.int)
# r
r_i = 1 - (1 - m) * (n - i) / (n - 1)
# d
d_i = cv2.contourArea(poly) * (1 - r_i * r_i) / cv2.arcLength(poly, closed=True)
# 采用pyclipper直接求shrink
pco = pyclipper.PyclipperOffset()
pco.AddPath(poly, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
shrinked_poly = np.array(pco.Execute(-d_i))
# draw score_map one
cv2.fillPoly(score_map, shrinked_poly, 1)
# ignore draw zero
if tag:
cv2.fillPoly(training_mask, shrinked_poly, 0)
return score_map, training_mask
def __call__(self, data):
img = data['image']
text_polys = data['polys']
text_tags = data['ignore_tags']
# resize的方式还是有点缺陷的,应该crop
# h, w = img.shape[:2]
# short_edge = min(h, w)
# if short_edge < self.img_size:
# # 保证短边 >= inputsize
# scale = self.img_size / short_edge
# img = cv2.resize(img, dsize=None, fx=scale, fy=scale)
# text_polys *= scale
h, w = img.shape[:2]
training_mask = np.ones((h, w), dtype=np.uint8)
score_maps = []
for i in range(1, self.n + 1):
# s1-->sn ,从小到大
score_map, training_mask = self.generate_map(
(h, w), text_polys, text_tags, training_mask, i, self.n, self.m)
score_maps.append(score_map)
score_maps = np.array(score_maps, dtype=np.float32)
gt_texts = score_maps[-1, :, :]
gt_kernels = score_maps[:-1, :, :]
data['gt_texts'] = torch.from_numpy(gt_texts)
data['gt_kernels'] = torch.from_numpy(gt_kernels)
data['training_masks'] = torch.from_numpy(training_mask)
return data
| [
"cv2.fillPoly",
"numpy.ones",
"cv2.arcLength",
"torch.from_numpy",
"torchocr.datasets.builder.PIPELINES.register_module",
"cv2.contourArea",
"numpy.array",
"numpy.zeros",
"pyclipper.PyclipperOffset"
] | [((191, 218), 'torchocr.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (216, 218), False, 'from torchocr.datasets.builder import PIPELINES\n'), ((737, 769), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (745, 769), True, 'import numpy as np\n'), ((2040, 2071), 'numpy.ones', 'np.ones', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (2047, 2071), True, 'import numpy as np\n'), ((2373, 2411), 'numpy.array', 'np.array', (['score_maps'], {'dtype': 'np.float32'}), '(score_maps, dtype=np.float32)\n', (2381, 2411), True, 'import numpy as np\n'), ((2527, 2553), 'torch.from_numpy', 'torch.from_numpy', (['gt_texts'], {}), '(gt_texts)\n', (2543, 2553), False, 'import torch\n'), ((2584, 2612), 'torch.from_numpy', 'torch.from_numpy', (['gt_kernels'], {}), '(gt_kernels)\n', (2600, 2612), False, 'import torch\n'), ((2647, 2678), 'torch.from_numpy', 'torch.from_numpy', (['training_mask'], {}), '(training_mask)\n', (2663, 2678), False, 'import torch\n'), ((1098, 1125), 'pyclipper.PyclipperOffset', 'pyclipper.PyclipperOffset', ([], {}), '()\n', (1123, 1125), False, 'import pyclipper\n'), ((1309, 1350), 'cv2.fillPoly', 'cv2.fillPoly', (['score_map', 'shrinked_poly', '(1)'], {}), '(score_map, shrinked_poly, 1)\n', (1321, 1350), False, 'import cv2\n'), ((1010, 1042), 'cv2.arcLength', 'cv2.arcLength', (['poly'], {'closed': '(True)'}), '(poly, closed=True)\n', (1023, 1042), False, 'import cv2\n'), ((1421, 1466), 'cv2.fillPoly', 'cv2.fillPoly', (['training_mask', 'shrinked_poly', '(0)'], {}), '(training_mask, shrinked_poly, 0)\n', (1433, 1466), False, 'import cv2\n'), ((968, 989), 'cv2.contourArea', 'cv2.contourArea', (['poly'], {}), '(poly)\n', (983, 989), False, 'import cv2\n')] |
import re
import numpy as np
from pymatgen.core import Structure
from .core import LammpsBox
from .inputs import LammpsData
def fields_view(array, fields):
return array.getfield(np.dtype(
{name: array.dtype.fields[name] for name in fields}
))
class LammpsRun(object):
""" Parse Lammps Run
"""
def __init__(self, lammps_data, lammps_log=None, lammps_dump=None):
# self.lammps_script would be nice to have as well
self.lammps_data = LammpsData.from_file(lammps_data)
self.lammps_log = LammpsLog(lammps_log) if lammps_log else None
self.lammps_dump = LammpsDump(lammps_dump) if lammps_dump else None
self._generate_maps()
def _generate_maps(self):
self._atom_index = []
for atom in self.initial_structure:
self._atom_index.append(atom.specie)
def get_structure(self, index):
if self.lammps_dump is None:
raise ValueError('Requires lammps dump to get structures in md simulation')
positions = self.lammps_dump.get_positions(index)
lammps_box = self.lammps_dump.get_lammps_box(index)
species = self._atom_index
site_properties = {}
try:
site_properties['velocities'] = self.lammps_dump.get_velocities(index)
except ValueError:
pass
return Structure(lammps_box.lattice, species, positions,
coords_are_cartesian=True, site_properties=site_properties)
def get_forces(self, index):
if self.lammps_dump is None:
raise ValueError('Requires lammps dump to get forces in md simulation')
return self.lammps_dump.get_forces(index)
def get_stress(self, index):
if self.lammps_log is None:
raise ValueError('Requires lammps log to get stress in md simulation')
return self.lammps_log.get_stress(index)
def get_energy(self, index):
if self.lammps_log is None:
raise ValueError('Requires lammps log to get stress in md simulation')
return self.lammps_log.get_energy(index)
@property
def final_structure(self):
return self.get_structure(-1)
@property
def final_forces(self):
return self.get_forces(-1)
@property
def final_stress(self):
return self.get_stress(-1)
@property
def initial_structure(self):
return self.lammps_data.structure
class LammpsDump(object):
"""
Parse the lammps dump file to extract useful info about the system.
"""
def __init__(self, filename):
self.filename = filename
self._parse_dump()
@property
def timesteps(self):
return np.array([t['timestep'] for t in self.trajectories])
def get_positions(self, index):
timestep = self.trajectories[index]
if any(p not in timestep['atoms'].dtype.names for p in {'x', 'y', 'z'}):
raise ValueError('Atom dumps must include x y z positions to get positions')
return np.array(fields_view(timestep['atoms'], ['x', 'y', 'z']).tolist())
def get_velocities(self, index):
timestep = self.trajectories[index]
if all(p not in timestep['atoms'].dtype.names for p in {'vx', 'vy', 'vz'}):
raise ValueError('Atom dumps must include vx vy vz velocities to get velocities')
return np.array(fields_view(timestep['atoms'], ['vx', 'vy', 'vz']).tolist())
def get_forces(self, index):
timestep = self.trajectories[index]
if any(p not in timestep['atoms'].dtype.names for p in {'fx', 'fy', 'fz'}):
raise ValueError('Atom dumps must include fx fy fz to get forces')
return np.array(fields_view(timestep['atoms'], ['fx', 'fy', 'fz']).tolist())
def get_lammps_box(self, index):
timestep = self.trajectories[index]
return LammpsBox(**timestep['box'])
def _parse_dump(self):
"""
parse dump file
"""
self.trajectories = []
with open(self.filename) as f:
trajectory = {}
while True:
line = f.readline()
if "ITEM: TIMESTEP" in line:
line = f.readline()
trajectory['timestep'] = int(line)
elif "ITEM: NUMBER OF ATOMS" in line:
line = f.readline()
trajectory['natoms'] = int(line)
elif "ITEM: BOX BOUNDS" in line:
# determine format
if "xy xz yz" in line: # triclinic format
xlo, xhi, xy = list(map(float, f.readline().split()))
ylo, yhi, xz = list(map(float, f.readline().split()))
zlo, zhi, yz = list(map(float, f.readline().split()))
else:
xlo, xhi = list(map(float, f.readline().split()))
ylo, yhi = list(map(float, f.readline().split()))
zlo, zhi = list(map(float, f.readline().split()))
xy, xz, yz = 0, 0, 0
trajectory['box'] = {
'xlo': xlo, 'xhi': xhi,
'ylo': ylo, 'yhi': yhi,
'zlo': zlo, 'zhi': zhi,
'xy': xy, 'xz': xz, 'yz': yz
}
elif "ITEM: ATOMS" in line:
labels = line.split()[2:]
formats = [np.int64] * 2 + [np.float64] * (len(labels) - 2)
atom_items = []
for i in range(trajectory['natoms']):
line_data = f.readline().split()
line_data = [int(_) for _ in line_data[:2]] + [float(_) for _ in line_data[2:]]
atom_items.append(tuple(line_data))
trajectory['atoms'] = np.array(atom_items, dtype={'names': labels, 'formats': formats})
trajectory['atoms'] = np.sort(trajectory['atoms'], order='id')
self.trajectories.append(trajectory)
trajectory = {}
else:
break
class LammpsLog(object):
"""
Parser for LAMMPS log file.
"""
def __init__(self, log_file="lammps.log"):
"""
Args:
log_file (string): path to the loag file
"""
self.log_file = log_file
self._parse_log()
@property
def timesteps(self):
return self.thermo_data['step'].view(np.float)
def get_stress(self, index):
timestep = self.thermo_data[index]
if any(p not in timestep.dtype.names for p in ['Pxy', 'Pxz', 'Pyz', 'Pxx', 'Pyy', 'Pzz']):
raise ValueError('Atom dumps must include Pxy, Pxz, Pyz, Pxx, Pyy, Pzz to get stress')
pxx = timestep['Pxx']
pyy = timestep['Pyy']
pzz = timestep['Pzz']
pxy = timestep['Pxy']
pxz = timestep['Pxz']
pyz = timestep['Pyz']
return np.array([
[pxx, pxy, pxz],
[pxy, pyy, pyz],
[pxz, pyz, pzz]
])
def get_energy(self, index):
timestep = self.thermo_data[index]
if 'TotEng' not in timestep.dtype.names:
raise ValueError('Atom dumps mult include TotEng to get total energy')
return float(timestep['TotEng'])
def _parse_log(self):
"""
Parse the log file for the thermodynamic data.
Sets the thermodynamic data as a structured numpy array with field names
taken from the the thermo_style command.
"""
thermo_int_styles = {
'step', 'elapsed', 'elaplong', 'spcpu',
'part', 'atoms', 'nbuild', 'ndanger'
}
thermo_header = []
thermo_types = []
thermo_data = []
inside_thermo_block = False
read_thermo_header = False
with open(self.log_file, 'r') as logfile:
for line in logfile:
# timestep, the unit depedns on the 'units' command
time = re.search('timestep\s+([0-9]+)', line)
if time and not thermo_data:
self.timestep = float(time.group(1))
# total number md steps
steps = re.search('run\s+([0-9]+)', line)
if steps and not thermo_data:
self.nmdsteps = int(steps.group(1))
# logging interval
thermo = re.search('thermo\s+([0-9]+)', line)
if thermo and not thermo_data:
self.interval = float(thermo.group(1))
# thermodynamic data, set by the thermo_style command
if "Memory usage per processor = " in line or \
"Per MPI rank memory allocation" in line:
inside_thermo_block = True
read_thermo_header = False
elif inside_thermo_block:
if not read_thermo_header:
if len(thermo_header) == 0:
thermo_header = line.split()
thermo_types = [np.int if h.lower() in thermo_int_styles else np.float for h in thermo_header]
else:
if thermo_header != line.split():
raise ValueError('Cannot parse log file where thermo_style changes from one run to next. We suggest doing multiple seperate calculations')
read_thermo_header = True
elif "Loop time of " in line:
inside_thermo_block = False
read_thermo_header = False
else:
thermo_data.append(tuple(t(v) for t, v in zip(thermo_types, line.split())))
thermo_data_dtype = np.dtype([(header, nptype) for header, nptype in zip(thermo_header, thermo_types)])
self.thermo_data = np.array(thermo_data, dtype=thermo_data_dtype)
| [
"numpy.sort",
"pymatgen.core.Structure",
"numpy.array",
"numpy.dtype",
"re.search"
] | [((186, 247), 'numpy.dtype', 'np.dtype', (['{name: array.dtype.fields[name] for name in fields}'], {}), '({name: array.dtype.fields[name] for name in fields})\n', (194, 247), True, 'import numpy as np\n'), ((1347, 1460), 'pymatgen.core.Structure', 'Structure', (['lammps_box.lattice', 'species', 'positions'], {'coords_are_cartesian': '(True)', 'site_properties': 'site_properties'}), '(lammps_box.lattice, species, positions, coords_are_cartesian=True,\n site_properties=site_properties)\n', (1356, 1460), False, 'from pymatgen.core import Structure\n'), ((2687, 2739), 'numpy.array', 'np.array', (["[t['timestep'] for t in self.trajectories]"], {}), "([t['timestep'] for t in self.trajectories])\n", (2695, 2739), True, 'import numpy as np\n'), ((6988, 7049), 'numpy.array', 'np.array', (['[[pxx, pxy, pxz], [pxy, pyy, pyz], [pxz, pyz, pzz]]'], {}), '([[pxx, pxy, pxz], [pxy, pyy, pyz], [pxz, pyz, pzz]])\n', (6996, 7049), True, 'import numpy as np\n'), ((9935, 9981), 'numpy.array', 'np.array', (['thermo_data'], {'dtype': 'thermo_data_dtype'}), '(thermo_data, dtype=thermo_data_dtype)\n', (9943, 9981), True, 'import numpy as np\n'), ((8047, 8086), 're.search', 're.search', (['"""timestep\\\\s+([0-9]+)"""', 'line'], {}), "('timestep\\\\s+([0-9]+)', line)\n", (8056, 8086), False, 'import re\n'), ((8253, 8287), 're.search', 're.search', (['"""run\\\\s+([0-9]+)"""', 'line'], {}), "('run\\\\s+([0-9]+)', line)\n", (8262, 8287), False, 'import re\n'), ((8450, 8487), 're.search', 're.search', (['"""thermo\\\\s+([0-9]+)"""', 'line'], {}), "('thermo\\\\s+([0-9]+)', line)\n", (8459, 8487), False, 'import re\n'), ((5858, 5923), 'numpy.array', 'np.array', (['atom_items'], {'dtype': "{'names': labels, 'formats': formats}"}), "(atom_items, dtype={'names': labels, 'formats': formats})\n", (5866, 5923), True, 'import numpy as np\n'), ((5966, 6006), 'numpy.sort', 'np.sort', (["trajectory['atoms']"], {'order': '"""id"""'}), "(trajectory['atoms'], order='id')\n", (5973, 6006), True, 'import 
numpy as np\n')] |
import random as aleas
import matplotlib.pyplot as plt
from scipy.signal import freqz
import numpy as np
import pandas as pd
import statsmodels.api as sm
"""
random : pour generer des nombres aleatoires
matplotlib.pyplot : pour generer des graphiques et gerer leur construction
scipy.signal : pour avoir le TF de l'autocorrelation
numpy : pour implementer les moyennes et les covariances
"""
###########################################################################
# EXERCICE 3 - Identification de modèle AR
###########################################################################
"""
QUESTION 1 - Creation de trois series temporelles y1, y2, y3 par simulation stohchastique
"""
#Generation des coefficients
a = [- 0.0707, 0.2500]
b = [- 1.6674, 0.9025]
c = [1.7820, 0.8100]
#Donnees
n = 1536
t = range(- 2, n - 1)
y = [k*0 for k in t]
#Creation des series
y1 = []
y2 = []
y3 = []
for k in range(1, int(n/3)):
y[k] = -a[0]*y[k - 1] - a[1]*y[k - 2] + aleas.gauss(0, 1)
y1.append(y[k])
for k in range(int(n/3) + 1, 2*int(n/3)):
y[k] = -b[0]*y[k - 1] - b[1]*y[k - 2] + aleas.gauss(0, 1)
y2.append(y[k])
for k in range(2*int(n/3) + 1, n):
y[k] = -c[0]*y[k - 1] - c[1]*y[k - 2] + aleas.gauss(0, 1)
y3.append(y[k])
#Visualisation de la série 1
plt.plot(t[0 : int(n/3)], y[0 : int(n/3)], color = '#EC3874')
plt.grid()
plt.title("Serie 1")
plt.show()
#Visualisation de la série 2
plt.plot(t[int(n/3) + 1 : 2*int(n/3)], y[int(n/3) + 1 : 2*int(n/3)], y[0:int(n/3)])
plt.grid()
plt.title("Serie 2")
plt.show()
#Visualisation de la série 3
plt.plot(t[2*int(n/3) + 1 : n], y[2*int(n/3) + 1:n], color ='#4CAE58')
plt.grid()
plt.title("Serie 3")
plt.show()
"""
QUESTION 2 - Visualisation des spectres des sous-series
"""
def spectre(*args):
"""
Fonction qui permet de calculer les spectres des sous-series
Np : nombre de points du spectre
f : recuperation des echantillons de frequence (abscisses)
mag : hauteurs des frequences observables correspondantes (ordonnees)
"""
Np = 256
f=freqz(1,args[0],Np)[0]
mag=[]
for arg in args:
mag.append(abs(freqz(1,arg,Np)[1])) # calcul du spectre de chaque sous-serie
return (f,mag)
f,mag=spectre([1]+a,[1]+b,[1]+c)
spectre1 = mag[0]
spectre2 = mag[1]
spectre3 = mag[2]
plt.semilogy(
f,mag[0],'-g',
f,mag[1],':b',
f,mag[2],'--r'
)
plt.grid()
plt.legend(['spectre1', 'spectre2','spectre3'])
plt.title("Spectres")
plt.show()
"""
QUESTION 2 - Visualisation de l'autocorrelation et de la densité spectrale de puissance
pour chaque serie temporelle
"""
#Visualisation de la serie 1
sm.graphics.tsa.plot_acf(y[0:int(n/3)+1], lags = 40, color = '#EC3874')
plt.title("Autocorrelation de la serie 1")
plt.grid()
plt.show()
#Tracé de la densité spectrale de puissance de y1
plt.psd(y1[:])
plt.title("Densité spectrale de puissance de y1")
plt.show()
#Visualisation de la serie 2
sm.graphics.tsa.plot_acf(y[int(n/3)+1:2*int(n/3)], lags = 40)
plt.grid()
plt.title("Autocorrelation de la serie 2")
plt.show()
#Tracé de la densité spectrale de puissance de y2
plt.psd(y2[:])
plt.title("Densité spectrale de puissance de y2")
plt.show()
#Visualisation de la serie 3
sm.graphics.tsa.plot_acf(y[2*int(n/3)+1:n], lags = 40, color = '#4CAE58')
plt.grid()
plt.title("Autocorrelation de la serie 3")
plt.show()
#Tracé de la densité spectrale de puissance de y3
plt.psd(y3[:])
plt.title("Densité spectrale de puissance de y3")
plt.show()
"""
QUESTION 3 - Creation d'une serie temporelle constituee par la somme des series
synthetisees precedemment.
"""
#Visualisation de y
somme = []
for j in range(len(y1)):
somme.append(y1[j] + y2[j] + y3[j])
plt.plot(range(len(y1)),somme[:])
plt.grid()
plt.title("y : somme de y1, y2 et y3")
plt.show()
#Tracé de l'autocorrélation de y
sm.graphics.tsa.plot_acf(somme, lags = 40)
plt.grid()
plt.title("Autocorrelation de y")
plt.show()
#Tracé de la densité spectrale de puissance de y
plt.psd(somme[:])
plt.title("Densité spectrale de puissance de y")
plt.show()
"""
QUESTION 4 - Modélisation de y par un processus AR d'ordre 2.
L'objectif de cette etape est d'estimer les coefficients de ce modele et de comparer
les autocorrélations/densites spectrales de y et du modele estime.
"""
t=range(-2,n-1)
y=[k*0 for k in t]
y1 = []
y2 = []
y3 = []
for k in range(1,int(n/3)):
y[k]=-a[0]*y[k-1]-a[1]*y[k-2]+aleas.gauss(0,1)
y1.append(y[k])
for k in range(int(n/3)+1,2*int(n/3)):
y[k]=-b[0]*y[k-1]-b[1]*y[k-2]+aleas.gauss(0,1)
y2.append(y[k])
for k in range(2*int(n/3)+1,n):
y[k]=-c[0]*y[k-1]-c[1]*y[k-2]+aleas.gauss(0,1)
y3.append(y[k])
def AR_model_somme(debut, fin, serie, vrai_spectre):
"""
: parametre debut : debut de l'intervalle
: parametre fin : fin de l'intervalle
: parametre serie : nom de la serie à modéliser
: parametre vrai_spectre : vrai spectre à comparer aux résultats
: type debut : int
: type fin : int
: type serie : string
: type vrai_spectre : spectre
: return : la serie temporelle et la comparaison entre les spectres
: type return : plt.show
"""
D = np.cov([
y[debut : fin] + [0, 0, 0, 0],
[0] + y[debut : fin] + [0, 0, 0],
[0, 0] + y[debut : fin] + [0, 0],
[0, 0, 0] + y[debut : fin] + [0],
[0, 0, 0, 0] + y[debut : fin]])
E = - np.linalg.inv(D[0:2, 0:2]) @ D[0, 1:3].reshape(2, 1) # car on veut l'avoir à l'ordre 2
H = - np.linalg.inv(D[0:3, 0:3]) @ D[0, 1:4].reshape(3, 1) # car on veut l'avoir à l'ordre 3
E1 = np.append([1], E) # vecteur de coefficients incluant a0(ordre 4)
H1 = np.append([1], H)
#on trace la série entre 0 et le début de l'intervalle
plt.plot(t[debut : fin], y[debut : fin])
plt.title(serie)
plt.show()
#on trace les spectres (estimation)
f, mag = spectre(E1, H1)
#on calcule les valeurs correspondants aux spectres des 3 sous-series
plt.semilogy(
f, mag[0],
f, mag[1],
':r',
f, vrai_spectre,':b',
linewidth = 2,
)
plt.title('Spectre / Calcul sur l intervalle [{} {}]'.format(debut, fin))
plt.legend(['ordre2', 'ordre3',"vrai spectre"])
return plt.show()
AR_model_somme(0,int(n/3),"série 1",spectre1)
AR_model_somme(int(n/3),2*int(n/3),"série 2",spectre2)
AR_model_somme(0,n,"serie 3",spectre3)
#Spectre de la somme de y1, y2, y3
s=[]
for i in range(2):
s.append(a[i]+b[i]+c[i])
f,mag=spectre([1]+s)
spectreS = mag[0]
plt.semilogy(
f,mag[0],
)
plt.grid()
plt.legend('spectre1')
plt.title("Spectre de la somme")
plt.show()
"""
QUESTION 5 - Modèles AR de plusieurs ordres [NOT WORKING YET]
"""
"""
def AR_n(debut, fin, serie, vrai_spectre, ordre1, ordre2):
D = np.cov([
y[debut : fin] + [0, 0, 0, 0],
[0] + y[debut : fin] + [0, 0, 0],
[0, 0] + y[debut : fin] + [0, 0],
[0, 0, 0] + y[debut : fin] + [0],
[0, 0, 0, 0] + y[debut : fin]])
E = - np.linalg.inv(D[0:ordre1, 0:ordre1]) @ D[0, 1:ordre1+1].reshape(ordre1, 1) # ordre
H = - np.linalg.inv(D[0:ordre2, 0:ordre2]) @ D[0, 1:ordre2+1].reshape(ordre2, 1) # ordre
E1 = np.append([1], E) # vecteur de coefficients incluant a0(ordre 4)
H1 = np.append([1], H)
#trace de la serie entre 0 et le debut de l'intervalle
plt.plot(t[debut : fin], y[debut : fin])
plt.title(serie)
plt.show()
#Tracé des spectres (estimation)
f, mag = spectre(E1, H1)
#Calcul des spectres des trois sous-series
plt.semilogy(
f, mag[0],
f, mag[1],
':r',
f, vrai_spectre,':b',
linewidth = 2,
)
plt.title('Spectre / Calcul sur l intervalle [{} {}]'.format(debut, fin))
plt.legend(['ordre' + str(ordre1), 'ordre' + str(ordre2), "Vrai spectre"])
return plt.show()"""
"""debut = 0
fin = n
ordre1 = 5
ordre2 = 6
D = np.cov([
y[debut : fin] + [0, 0, 0, 0],
[0] + y[debut : fin] + [0, 0, 0],
[0, 0] + y[debut : fin] + [0, 0],
[0, 0, 0] + y[debut : fin] + [0],
[0, 0, 0, 0] + y[debut : fin]])
E = - np.linalg.inv(D[0:ordre1, 0:ordre1]) @ D[0, 1:ordre1+1].reshape(ordre1, 1) # ordre
H = - np.linalg.inv(D[0:ordre2, 0:ordre2]) @ D[0, 1:ordre2+1].reshape(ordre2, 1) # ordre
E1 = np.append([1], E) # vecteur de coefficients incluant a0(ordre 4)
H1 = np.append([1], H)
#trace de la serie entre 0 et le debut de l'intervalle
plt.plot(t[debut : fin], y[debut : fin])
plt.title("serie ordre 3 et 4")
plt.show()
#Tracé des spectres (estimation)
f, mag = spectre(E1, H1)
#Calcul des spectres des trois sous-series
plt.semilogy(
f, mag[0],
f, mag[1],
':r',
f, spectre3,':b',
linewidth = 2,
)
plt.title('Spectre / Calcul sur l intervalle [{} {}]'.format(debut, fin))
plt.legend(['ordre' + str(ordre1), 'ordre' + str(ordre2), "Vrai spectre"])
plt.show()
"""
"""
QUESTION 6 - Modélisation de y par un processus AR d'ordre 3 et 4.
L'objectif de cette etape est d'estimer les coefficients de ce modele.
"""
t=range(-2,n-1)
y=[k*0 for k in t]
y1 = []
y2 = []
y3 = []
for k in range(1,int(n/3)):
y[k]=-a[0]*y[k-1]-a[1]*y[k-2]+aleas.gauss(0,1)
y1.append(y[k])
for k in range(int(n/3)+1,2*int(n/3)):
y[k]=-b[0]*y[k-1]-b[1]*y[k-2]+aleas.gauss(0,1)
y2.append(y[k])
for k in range(2*int(n/3)+1,n):
y[k]=-c[0]*y[k-1]-c[1]*y[k-2]+aleas.gauss(0,1)
y3.append(y[k])
def AR_model_somme2(debut, fin, serie, vrai_spectre):
"""
: parametre debut : debut de l'intervalle
: parametre fin : fin de l'intervalle
: parametre serie : nom de la serie à modéliser
: parametre vrai_spectre : vrai spectre à comparer aux résultats
: type debut : int
: type fin : int
: type serie : string
: type vrai_spectre : spectre
: return : la serie temporelle et la comparaison entre les spectres
: type return : plt.show
"""
D = np.cov([
y[debut : fin] + [0, 0, 0, 0],
[0] + y[debut : fin] + [0, 0, 0],
[0, 0] + y[debut : fin] + [0, 0],
[0, 0, 0] + y[debut : fin] + [0],
[0, 0, 0, 0] + y[debut : fin]])
E = - np.linalg.inv(D[0:3, 0:3]) @ D[0, 1:4].reshape(3, 1) # car on veut l'avoir à l'ordre 3
H = - np.linalg.inv(D[0:4, 0:4]) @ D[0, 1:5].reshape(4, 1) # car on veut l'avoir à l'ordre 4
E1 = np.append([1], E) # vecteur de coefficients incluant a0(ordre 4)
H1 = np.append([1], H)
#on trace la série entre 0 et le début de l'intervalle
plt.plot(t[debut : fin], y[debut : fin])
plt.title(serie)
plt.show()
#on trace les spectres (estimation)
f, mag = spectre(E1, H1)
#on calcule les valeurs correspondants aux spectres des 3 sous-series
plt.semilogy(
f, mag[0],
f, mag[1],
':r',
f, vrai_spectre,':b',
linewidth = 2,
)
plt.title('Spectre / Calcul sur l intervalle [{} {}]'.format(debut, fin))
plt.legend(['ordre2', 'ordre3',"vrai spectre"])
return plt.show()
AR_model_somme2(0,int(n/3),"série 1",spectre1)
AR_model_somme2(int(n/3),2*int(n/3),"série 2",spectre2)
AR_model_somme2(0,n,"serie 3",spectre3)
AR_model_somme2(0,n,"y",y[:]) | [
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.grid",
"random.gauss",
"matplotlib.pyplot.psd",
"matplotlib.pyplot.plot",
"numpy.append",
"scipy.signal.freqz",
"numpy.linalg.inv",
"statsmodels.api.graphics.tsa.plot_acf",
"matplotlib.pyplot.title",
"numpy.cov",
"matplotlib.pyplot.legend",
"m... | [((1387, 1397), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1395, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1418), 'matplotlib.pyplot.title', 'plt.title', (['"""Serie 1"""'], {}), "('Serie 1')\n", (1407, 1418), True, 'import matplotlib.pyplot as plt\n'), ((1419, 1429), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1427, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1544, 1554), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1552, 1554), True, 'import matplotlib.pyplot as plt\n'), ((1555, 1575), 'matplotlib.pyplot.title', 'plt.title', (['"""Serie 2"""'], {}), "('Serie 2')\n", (1564, 1575), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1586), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1584, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1688, 1698), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1696, 1698), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1719), 'matplotlib.pyplot.title', 'plt.title', (['"""Serie 3"""'], {}), "('Serie 3')\n", (1708, 1719), True, 'import matplotlib.pyplot as plt\n'), ((1720, 1730), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1728, 1730), True, 'import matplotlib.pyplot as plt\n'), ((2361, 2425), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['f', 'mag[0]', '"""-g"""', 'f', 'mag[1]', '""":b"""', 'f', 'mag[2]', '"""--r"""'], {}), "(f, mag[0], '-g', f, mag[1], ':b', f, mag[2], '--r')\n", (2373, 2425), True, 'import matplotlib.pyplot as plt\n'), ((2425, 2435), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2433, 2435), True, 'import matplotlib.pyplot as plt\n'), ((2437, 2485), 'matplotlib.pyplot.legend', 'plt.legend', (["['spectre1', 'spectre2', 'spectre3']"], {}), "(['spectre1', 'spectre2', 'spectre3'])\n", (2447, 2485), True, 'import matplotlib.pyplot as plt\n'), ((2485, 2506), 'matplotlib.pyplot.title', 'plt.title', (['"""Spectres"""'], {}), "('Spectres')\n", (2494, 2506), True, 'import matplotlib.pyplot as 
plt\n'), ((2507, 2517), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2515, 2517), True, 'import matplotlib.pyplot as plt\n'), ((2754, 2796), 'matplotlib.pyplot.title', 'plt.title', (['"""Autocorrelation de la serie 1"""'], {}), "('Autocorrelation de la serie 1')\n", (2763, 2796), True, 'import matplotlib.pyplot as plt\n'), ((2797, 2807), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2805, 2807), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2819), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2817, 2819), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2885), 'matplotlib.pyplot.psd', 'plt.psd', (['y1[:]'], {}), '(y1[:])\n', (2878, 2885), True, 'import matplotlib.pyplot as plt\n'), ((2886, 2935), 'matplotlib.pyplot.title', 'plt.title', (['"""Densité spectrale de puissance de y1"""'], {}), "('Densité spectrale de puissance de y1')\n", (2895, 2935), True, 'import matplotlib.pyplot as plt\n'), ((2936, 2946), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2944, 2946), True, 'import matplotlib.pyplot as plt\n'), ((3039, 3049), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3047, 3049), True, 'import matplotlib.pyplot as plt\n'), ((3051, 3093), 'matplotlib.pyplot.title', 'plt.title', (['"""Autocorrelation de la serie 2"""'], {}), "('Autocorrelation de la serie 2')\n", (3060, 3093), True, 'import matplotlib.pyplot as plt\n'), ((3094, 3104), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3102, 3104), True, 'import matplotlib.pyplot as plt\n'), ((3156, 3170), 'matplotlib.pyplot.psd', 'plt.psd', (['y2[:]'], {}), '(y2[:])\n', (3163, 3170), True, 'import matplotlib.pyplot as plt\n'), ((3171, 3220), 'matplotlib.pyplot.title', 'plt.title', (['"""Densité spectrale de puissance de y2"""'], {}), "('Densité spectrale de puissance de y2')\n", (3180, 3220), True, 'import matplotlib.pyplot as plt\n'), ((3221, 3231), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3229, 3231), True, 'import 
matplotlib.pyplot as plt\n'), ((3336, 3346), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3344, 3346), True, 'import matplotlib.pyplot as plt\n'), ((3348, 3390), 'matplotlib.pyplot.title', 'plt.title', (['"""Autocorrelation de la serie 3"""'], {}), "('Autocorrelation de la serie 3')\n", (3357, 3390), True, 'import matplotlib.pyplot as plt\n'), ((3391, 3401), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3399, 3401), True, 'import matplotlib.pyplot as plt\n'), ((3453, 3467), 'matplotlib.pyplot.psd', 'plt.psd', (['y3[:]'], {}), '(y3[:])\n', (3460, 3467), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3517), 'matplotlib.pyplot.title', 'plt.title', (['"""Densité spectrale de puissance de y3"""'], {}), "('Densité spectrale de puissance de y3')\n", (3477, 3517), True, 'import matplotlib.pyplot as plt\n'), ((3518, 3528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3526, 3528), True, 'import matplotlib.pyplot as plt\n'), ((3783, 3793), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3791, 3793), True, 'import matplotlib.pyplot as plt\n'), ((3794, 3832), 'matplotlib.pyplot.title', 'plt.title', (['"""y : somme de y1, y2 et y3"""'], {}), "('y : somme de y1, y2 et y3')\n", (3803, 3832), True, 'import matplotlib.pyplot as plt\n'), ((3833, 3843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3841, 3843), True, 'import matplotlib.pyplot as plt\n'), ((3878, 3918), 'statsmodels.api.graphics.tsa.plot_acf', 'sm.graphics.tsa.plot_acf', (['somme'], {'lags': '(40)'}), '(somme, lags=40)\n', (3902, 3918), True, 'import statsmodels.api as sm\n'), ((3921, 3931), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3929, 3931), True, 'import matplotlib.pyplot as plt\n'), ((3933, 3966), 'matplotlib.pyplot.title', 'plt.title', (['"""Autocorrelation de y"""'], {}), "('Autocorrelation de y')\n", (3942, 3966), True, 'import matplotlib.pyplot as plt\n'), ((3967, 3977), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3975, 
3977), True, 'import matplotlib.pyplot as plt\n'), ((4028, 4045), 'matplotlib.pyplot.psd', 'plt.psd', (['somme[:]'], {}), '(somme[:])\n', (4035, 4045), True, 'import matplotlib.pyplot as plt\n'), ((4046, 4094), 'matplotlib.pyplot.title', 'plt.title', (['"""Densité spectrale de puissance de y"""'], {}), "('Densité spectrale de puissance de y')\n", (4055, 4094), True, 'import matplotlib.pyplot as plt\n'), ((4095, 4105), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4103, 4105), True, 'import matplotlib.pyplot as plt\n'), ((6608, 6631), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['f', 'mag[0]'], {}), '(f, mag[0])\n', (6620, 6631), True, 'import matplotlib.pyplot as plt\n'), ((6635, 6645), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6643, 6645), True, 'import matplotlib.pyplot as plt\n'), ((6647, 6669), 'matplotlib.pyplot.legend', 'plt.legend', (['"""spectre1"""'], {}), "('spectre1')\n", (6657, 6669), True, 'import matplotlib.pyplot as plt\n'), ((6670, 6702), 'matplotlib.pyplot.title', 'plt.title', (['"""Spectre de la somme"""'], {}), "('Spectre de la somme')\n", (6679, 6702), True, 'import matplotlib.pyplot as plt\n'), ((6703, 6713), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6711, 6713), True, 'import matplotlib.pyplot as plt\n'), ((5252, 5423), 'numpy.cov', 'np.cov', (['[y[debut:fin] + [0, 0, 0, 0], [0] + y[debut:fin] + [0, 0, 0], [0, 0] + y[\n debut:fin] + [0, 0], [0, 0, 0] + y[debut:fin] + [0], [0, 0, 0, 0] + y[\n debut:fin]]'], {}), '([y[debut:fin] + [0, 0, 0, 0], [0] + y[debut:fin] + [0, 0, 0], [0, 0] +\n y[debut:fin] + [0, 0], [0, 0, 0] + y[debut:fin] + [0], [0, 0, 0, 0] + y\n [debut:fin]])\n', (5258, 5423), True, 'import numpy as np\n'), ((5676, 5693), 'numpy.append', 'np.append', (['[1]', 'E'], {}), '([1], E)\n', (5685, 5693), True, 'import numpy as np\n'), ((5751, 5768), 'numpy.append', 'np.append', (['[1]', 'H'], {}), '([1], H)\n', (5760, 5768), True, 'import numpy as np\n'), ((5837, 5873), 'matplotlib.pyplot.plot', 
'plt.plot', (['t[debut:fin]', 'y[debut:fin]'], {}), '(t[debut:fin], y[debut:fin])\n', (5845, 5873), True, 'import matplotlib.pyplot as plt\n'), ((5882, 5898), 'matplotlib.pyplot.title', 'plt.title', (['serie'], {}), '(serie)\n', (5891, 5898), True, 'import matplotlib.pyplot as plt\n'), ((5903, 5913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5911, 5913), True, 'import matplotlib.pyplot as plt\n'), ((6071, 6147), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['f', 'mag[0]', 'f', 'mag[1]', '""":r"""', 'f', 'vrai_spectre', '""":b"""'], {'linewidth': '(2)'}), "(f, mag[0], f, mag[1], ':r', f, vrai_spectre, ':b', linewidth=2)\n", (6083, 6147), True, 'import matplotlib.pyplot as plt\n'), ((6269, 6317), 'matplotlib.pyplot.legend', 'plt.legend', (["['ordre2', 'ordre3', 'vrai spectre']"], {}), "(['ordre2', 'ordre3', 'vrai spectre'])\n", (6279, 6317), True, 'import matplotlib.pyplot as plt\n'), ((6328, 6338), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6336, 6338), True, 'import matplotlib.pyplot as plt\n'), ((10022, 10193), 'numpy.cov', 'np.cov', (['[y[debut:fin] + [0, 0, 0, 0], [0] + y[debut:fin] + [0, 0, 0], [0, 0] + y[\n debut:fin] + [0, 0], [0, 0, 0] + y[debut:fin] + [0], [0, 0, 0, 0] + y[\n debut:fin]]'], {}), '([y[debut:fin] + [0, 0, 0, 0], [0] + y[debut:fin] + [0, 0, 0], [0, 0] +\n y[debut:fin] + [0, 0], [0, 0, 0] + y[debut:fin] + [0], [0, 0, 0, 0] + y\n [debut:fin]])\n', (10028, 10193), True, 'import numpy as np\n'), ((10446, 10463), 'numpy.append', 'np.append', (['[1]', 'E'], {}), '([1], E)\n', (10455, 10463), True, 'import numpy as np\n'), ((10521, 10538), 'numpy.append', 'np.append', (['[1]', 'H'], {}), '([1], H)\n', (10530, 10538), True, 'import numpy as np\n'), ((10607, 10643), 'matplotlib.pyplot.plot', 'plt.plot', (['t[debut:fin]', 'y[debut:fin]'], {}), '(t[debut:fin], y[debut:fin])\n', (10615, 10643), True, 'import matplotlib.pyplot as plt\n'), ((10652, 10668), 'matplotlib.pyplot.title', 'plt.title', (['serie'], {}), '(serie)\n', 
(10661, 10668), True, 'import matplotlib.pyplot as plt\n'), ((10673, 10683), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10681, 10683), True, 'import matplotlib.pyplot as plt\n'), ((10841, 10917), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['f', 'mag[0]', 'f', 'mag[1]', '""":r"""', 'f', 'vrai_spectre', '""":b"""'], {'linewidth': '(2)'}), "(f, mag[0], f, mag[1], ':r', f, vrai_spectre, ':b', linewidth=2)\n", (10853, 10917), True, 'import matplotlib.pyplot as plt\n'), ((11039, 11087), 'matplotlib.pyplot.legend', 'plt.legend', (["['ordre2', 'ordre3', 'vrai spectre']"], {}), "(['ordre2', 'ordre3', 'vrai spectre'])\n", (11049, 11087), True, 'import matplotlib.pyplot as plt\n'), ((11098, 11108), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11106, 11108), True, 'import matplotlib.pyplot as plt\n'), ((999, 1016), 'random.gauss', 'aleas.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (1010, 1016), True, 'import random as aleas\n'), ((1128, 1145), 'random.gauss', 'aleas.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (1139, 1145), True, 'import random as aleas\n'), ((1250, 1267), 'random.gauss', 'aleas.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (1261, 1267), True, 'import random as aleas\n'), ((2109, 2130), 'scipy.signal.freqz', 'freqz', (['(1)', 'args[0]', 'Np'], {}), '(1, args[0], Np)\n', (2114, 2130), False, 'from scipy.signal import freqz\n'), ((4465, 4482), 'random.gauss', 'aleas.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (4476, 4482), True, 'import random as aleas\n'), ((4576, 4593), 'random.gauss', 'aleas.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (4587, 4593), True, 'import random as aleas\n'), ((4680, 4697), 'random.gauss', 'aleas.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (4691, 4697), True, 'import random as aleas\n'), ((9234, 9251), 'random.gauss', 'aleas.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (9245, 9251), True, 'import random as aleas\n'), ((9345, 9362), 'random.gauss', 'aleas.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (9356, 9362), True, 'import 
random as aleas\n'), ((9449, 9466), 'random.gauss', 'aleas.gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (9460, 9466), True, 'import random as aleas\n'), ((5481, 5507), 'numpy.linalg.inv', 'np.linalg.inv', (['D[0:2, 0:2]'], {}), '(D[0:2, 0:2])\n', (5494, 5507), True, 'import numpy as np\n'), ((5579, 5605), 'numpy.linalg.inv', 'np.linalg.inv', (['D[0:3, 0:3]'], {}), '(D[0:3, 0:3])\n', (5592, 5605), True, 'import numpy as np\n'), ((10251, 10277), 'numpy.linalg.inv', 'np.linalg.inv', (['D[0:3, 0:3]'], {}), '(D[0:3, 0:3])\n', (10264, 10277), True, 'import numpy as np\n'), ((10349, 10375), 'numpy.linalg.inv', 'np.linalg.inv', (['D[0:4, 0:4]'], {}), '(D[0:4, 0:4])\n', (10362, 10375), True, 'import numpy as np\n'), ((2191, 2208), 'scipy.signal.freqz', 'freqz', (['(1)', 'arg', 'Np'], {}), '(1, arg, Np)\n', (2196, 2208), False, 'from scipy.signal import freqz\n')] |
#!/usr/bin/env python3
"""https://scikit-allel.readthedocs.io/
"""
import numpy as np
import allel
msout = '''
000000110000
001000110010
000000110000
001000110000
110111001101
000000010000'''
pyarray = [list(x) for x in msout.strip().split('\n')]
haplotypes = np.array(pyarray).astype(np.int8)
h = allel.HaplotypeArray(haplotypes.transpose())
h.n_haplotypes
h.n_variants
ac1 = h.count_alleles(subpop=np.arange(0, 3))
ac2 = h.count_alleles(subpop=np.arange(3, h.n_haplotypes))
within1 = allel.mean_pairwise_difference(ac1)
within2 = allel.mean_pairwise_difference(ac2)
within = (within1 + within2) / 2
between = allel.mean_pairwise_difference_between(ac1, ac2)
num, den = allel.hudson_fst(ac1, ac2)
np.allclose(num, between - within)
np.allclose(den, between)
fst = np.sum(num) / np.sum(den)
fst
an1 = np.sum(ac1, axis=1)
an2 = np.sum(ac2, axis=1)
np.allclose(2 * ac1[:, 1] * ac1[:, 0] / (an1 * (an1 - 1)), within1)
np.allclose(2 * ac2[:, 1] * ac2[:, 0] / (an2 * (an2 - 1)), within2)
np.allclose((ac1[:, 0] * ac2[:, 1] + ac1[:, 1] * ac2[:, 0]) / (an1 * an2),
between)
p1 = ac1[:, 1] / an1
p2 = ac2[:, 1] / an2
np.allclose(2 * p1 * (1 - p1) * an1 / (an1 - 1), within1)
np.allclose(2 * p2 * (1 - p2) * an2 / (an2 - 1), within2)
np.allclose(p1 * (1 - p2) + (1 - p1) * p2, between)
| [
"numpy.allclose",
"allel.hudson_fst",
"allel.mean_pairwise_difference_between",
"numpy.sum",
"allel.mean_pairwise_difference",
"numpy.array",
"numpy.arange"
] | [((491, 526), 'allel.mean_pairwise_difference', 'allel.mean_pairwise_difference', (['ac1'], {}), '(ac1)\n', (521, 526), False, 'import allel\n'), ((537, 572), 'allel.mean_pairwise_difference', 'allel.mean_pairwise_difference', (['ac2'], {}), '(ac2)\n', (567, 572), False, 'import allel\n'), ((616, 664), 'allel.mean_pairwise_difference_between', 'allel.mean_pairwise_difference_between', (['ac1', 'ac2'], {}), '(ac1, ac2)\n', (654, 664), False, 'import allel\n'), ((677, 703), 'allel.hudson_fst', 'allel.hudson_fst', (['ac1', 'ac2'], {}), '(ac1, ac2)\n', (693, 703), False, 'import allel\n'), ((704, 738), 'numpy.allclose', 'np.allclose', (['num', '(between - within)'], {}), '(num, between - within)\n', (715, 738), True, 'import numpy as np\n'), ((739, 764), 'numpy.allclose', 'np.allclose', (['den', 'between'], {}), '(den, between)\n', (750, 764), True, 'import numpy as np\n'), ((808, 827), 'numpy.sum', 'np.sum', (['ac1'], {'axis': '(1)'}), '(ac1, axis=1)\n', (814, 827), True, 'import numpy as np\n'), ((834, 853), 'numpy.sum', 'np.sum', (['ac2'], {'axis': '(1)'}), '(ac2, axis=1)\n', (840, 853), True, 'import numpy as np\n'), ((854, 921), 'numpy.allclose', 'np.allclose', (['(2 * ac1[:, 1] * ac1[:, 0] / (an1 * (an1 - 1)))', 'within1'], {}), '(2 * ac1[:, 1] * ac1[:, 0] / (an1 * (an1 - 1)), within1)\n', (865, 921), True, 'import numpy as np\n'), ((922, 989), 'numpy.allclose', 'np.allclose', (['(2 * ac2[:, 1] * ac2[:, 0] / (an2 * (an2 - 1)))', 'within2'], {}), '(2 * ac2[:, 1] * ac2[:, 0] / (an2 * (an2 - 1)), within2)\n', (933, 989), True, 'import numpy as np\n'), ((990, 1077), 'numpy.allclose', 'np.allclose', (['((ac1[:, 0] * ac2[:, 1] + ac1[:, 1] * ac2[:, 0]) / (an1 * an2))', 'between'], {}), '((ac1[:, 0] * ac2[:, 1] + ac1[:, 1] * ac2[:, 0]) / (an1 * an2),\n between)\n', (1001, 1077), True, 'import numpy as np\n'), ((1129, 1186), 'numpy.allclose', 'np.allclose', (['(2 * p1 * (1 - p1) * an1 / (an1 - 1))', 'within1'], {}), '(2 * p1 * (1 - p1) * an1 / (an1 - 1), within1)\n', 
(1140, 1186), True, 'import numpy as np\n'), ((1187, 1244), 'numpy.allclose', 'np.allclose', (['(2 * p2 * (1 - p2) * an2 / (an2 - 1))', 'within2'], {}), '(2 * p2 * (1 - p2) * an2 / (an2 - 1), within2)\n', (1198, 1244), True, 'import numpy as np\n'), ((1245, 1296), 'numpy.allclose', 'np.allclose', (['(p1 * (1 - p2) + (1 - p1) * p2)', 'between'], {}), '(p1 * (1 - p2) + (1 - p1) * p2, between)\n', (1256, 1296), True, 'import numpy as np\n'), ((771, 782), 'numpy.sum', 'np.sum', (['num'], {}), '(num)\n', (777, 782), True, 'import numpy as np\n'), ((785, 796), 'numpy.sum', 'np.sum', (['den'], {}), '(den)\n', (791, 796), True, 'import numpy as np\n'), ((262, 279), 'numpy.array', 'np.array', (['pyarray'], {}), '(pyarray)\n', (270, 279), True, 'import numpy as np\n'), ((404, 419), 'numpy.arange', 'np.arange', (['(0)', '(3)'], {}), '(0, 3)\n', (413, 419), True, 'import numpy as np\n'), ((450, 478), 'numpy.arange', 'np.arange', (['(3)', 'h.n_haplotypes'], {}), '(3, h.n_haplotypes)\n', (459, 478), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
import cv2
import io
import time
import pandas as pd
from random import randint
import os
import selenium
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import tensorflow as tf
from tensorflow.keras.models import model_from_json
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import SGD, Adam, Nadam
from tensorflow.keras.callbacks import TensorBoard
from collections import deque
import random
import pickle
import base64
from io import BytesIO
import json
# Path Variables
GAME_URL = "http://wayou.github.io/t-rex-runner/"
CHROME_DRIVER_PATH = "./chromedriver"
LOSS_FILE_PATH = "./objects/loss_df.csv"
ACTIONS_FILE_PATH = "./objects/actions_df.csv"
Q_VALUE_FILE_PATH = "./objects/q_values.csv"
SCORE_FILE_PATH = "./objects/scores_df.csv"
# Script to create id for canvas for faster selections from Document Object MOdel (DOM)
init_script = "document.getElementsByClassName('runner-canvas')[0].id = 'runner-canvas'"
# Script to get image from canvas
getbase64Script = "canvasRunner = document.getElementById('runner-canvas'); \
return canvasRunner.toDataURL().substring(22)"
# Game Parameter Constants
ACTIONS = 2 # Possible actions: "Jump" or "Do Nothing"
GAMMA = 0.9 # Decay rate of past observations, original 0.9
OBSERVATION = 100. # Timesteps to observe before training
EXPLORE = 100000 # Frames over which to anneal epsilon
FINAL_EPSILON = 0.0001 # Final value of epsilon
INITIAL_EPSILON = 0.1 # Initial value of epsilon
REPLAY_MEMORY = 80000 # Number of previous transitions to remember
BATCH = 32 # Size of minibatch
FRAME_PER_ACTION = 1
LEARNING_RATE = 0.0003
img_rows, img_cols = 80, 80
img_channels = 4 # We stack 4 frames
# Initialize log structures from file if they exist or else create new
loss_df = pd.read_csv(LOSS_FILE_PATH) if os.path.isfile(
LOSS_FILE_PATH) else pd.DataFrame(columns=["loss"])
score_df = pd.read_csv(SCORE_FILE_PATH) if os.path.isfile(
SCORE_FILE_PATH) else pd.DataFrame(columns=["Scores"])
actions_df = pd.read_csv(ACTIONS_FILE_PATH) if os.path.isfile(
ACTIONS_FILE_PATH) else pd.DataFrame(columns=["Actions"])
q_values_df = pd.read_csv(Q_VALUE_FILE_PATH) if os.path.isfile(
Q_VALUE_FILE_PATH) else pd.DataFrame(columns=["qvalues"])
# Some basic pre-processing function
def save_object(object, name):
"""
Dump file into objects folder
"""
with open("objects/" + name + ".pkl", "wb") as f:
pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)
def load_object(name):
"""
Loads file Dump
"""
with open("objects/" + name + ".pkl", "rb") as f:
return pickle.load(f)
def process_image(image):
"""
Processes the image to use futher
"""
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # RGB to Gray scale
image = image[:300, :500] # Crop Region of Interest(ROI)
image = cv2.resize(image, (80, 80))
return image
def grab_screen(_driver):
"""
Grabs the screen
"""
image_b64 = _driver.execute_script(getbase64Script)
screen = np.array(Image.open(BytesIO(base64.b64decode(image_b64))))
image = process_image(screen) # Processing image is required
return image
def show_image(graphs=False):
"""
Shows images in new window
"""
while True:
screen = (yield)
window_title = "Logs" if graphs else "Game_play"
cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
image_size = cv2.resize(screen, (800, 400))
cv2.imshow(window_title, screen)
if (cv2.waitKey(1) & 0xFF == ord("q")):
cv2.destroyAllWindows()
break
# Trainig varialbes saed as checkpoints to filesystem to resume training from the same step
class Game():
"""
Selenium interfacing between the python and browser
"""
def __init__(self, custom_config=True):
"""
Launch the broswer window using the attributes in chrome_options
"""
chrome_options = Options()
chrome_options.add_argument("disable-infobars")
chrome_options.add_argument("--mute-audio")
self._driver = webdriver.Chrome(
executable_path=CHROME_DRIVER_PATH, chrome_options=chrome_options)
self._driver.set_window_position(x=-10, y=0)
self._driver.get("chrome://dino")
self._driver.execute_script("Runner.config.ACCELERATION=0")
self._driver.execute_script(init_script)
def get_crashed(self):
"""
return True if the agent as crashed on an obstacles. Gets javascript variable from game decribing the state
"""
return self._driver.execute_script("return Runner.instance_.crashed")
def get_playing(self):
"""
returns True if game in progress, false is crashed or paused
"""
return self._driver.execute_script("return Runner.instance_.playing")
def restart(self):
"""
Sends a signal to browser-javascript to restart the game
"""
self._driver.execute_script("Runner.instance_.restart()")
def press_up(self):
"""
Sends a single to press up get to the browser
"""
self._driver.find_element_by_tag_name("body").send_keys(Keys.ARROW_UP)
def get_score(self):
"""
Gets current game score from javascript variables
"""
score_array = self._driver.execute_script(
"return Runner.instance_.distanceMeter.digits")
# the javascript object is of type array with score in the formate[1,0,0] which is 100.
score = ''.join(score_array)
return int(score)
def pause(self):
"""
Pause the game
"""
return self._driver.execute_script("return Runner.instance_.stop()")
def resume(self):
"""
Resume a paused game if not crashed
"""
return self._driver.execute_script("return Runner.instance_.play()")
def end(self):
"""
Close the browser and end the game
"""
self._driver.close()
class DinoAgent:
"""
Reinforcement Agent
"""
def __init__(self, game): # takes game as input for taking actions
self._game = game
self.jump() # to start the game, we need to jump once
def is_running(self):
return self._game.get_playing()
def is_crashed(self):
return self._game.get_crashed()
def jump(self):
self._game.press_up()
def duck(self):
self._game.press_down()
class Game_State:
def __init__(self, agent, game):
self._agent = agent
self._game = game
# Display the processed image on screen using openCV, implemented using python coroutine
self._display = show_image()
self._display.__next__() # Initilize the display coroutine
def get_state(self, actions):
"""
Returns the Experience of one itereationas a tuple
"""
actions_df.loc[len(actions_df)
] = actions[1] # Storing actions in a dataframe
score = self._game.get_score()
reward = 0.1
is_over = False # Game Over
if actions[1] == 1:
self._agent.jump()
image = grab_screen(self._game._driver)
self._display.send(image) # Display the image on screen
if self._agent.is_crashed():
# Log the score when the game is over
score_df.loc[len(loss_df)] = score
self._game.restart()
reward = -1
is_over = True
return image, reward, is_over
def buildModel():
print("Building Convolutional Neural Network")
model = Sequential()
model.add(Conv2D(32, (8, 8), padding="same", strides=(4, 4), input_shape=(
img_cols, img_rows, img_channels))) # First layer of 80*80*4 with 32 filters
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation("relu"))
# Second layer of 40*40*4 with 64 filters
model.add(Conv2D(64, (4, 4), strides=(2, 2), padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation("relu"))
# Third layer of 30*30*4 with 64 filters
model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation("relu"))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation("relu"))
model.add(Dense(ACTIONS))
#adam = Adam(lr=LEARNING_RATE)
nadam = Nadam(lr=LEARNING_RATE)
model.compile(loss="mse", optimizer=nadam)
# Creating model file if not present
if not os.path.isfile(LOSS_FILE_PATH):
model.save_weights("model.h5")
print("Finished building the Convolutional Neural Network")
return model
def trainNetwork(model, game_state, observe=False):
"""
Main Training module
Parameters:
model => Keras Model to be trained
game_state => Game State module with access to game environment and dino
observe => Flag to indicate if the model is to be trained(weights updates), else just play
"""
last_time = time.time() # Store the previous observations in replay memory
D = load_object("D") # Load from file system
do_nothing = np.zeros(ACTIONS)
do_nothing[0] = 1 # 0 => Do Nothing ; 1 => Jump
# Get next step after performing the action
x_t, r_0, terminal = game_state.get_state(do_nothing)
# Stack 4 images to create a placeholder input
s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2]) # 1*20*40*4
initial_state = s_t
if observe: # We keep observing, never train
OBSERVE = 99999
epsilon = FINAL_EPSILON
print("Loading weights to the CNN")
model.load_weights("model.h5")
#adam = Adam(lr=LEARNING_RATE)
nadam = Nadam(lr=LEARNING_RATE)
model.compile(loss="mse", optimizer=nadam)
print("Loading weights Successful")
else: # We go to training mode
OBSERVE = OBSERVATION
epsilon = load_object("epsilon")
model.load_weights("model.h5")
#adam = Adam(lr=LEARNING_RATE)
nadam = Nadam(lr=LEARNING_RATE)
model.compile(loss="mse", optimizer=nadam)
# Resume from the previous time step stored in the file system
t = load_object("time")
while True: # Endless running
loss = 0
Q_sa = 0
action_index = 0
r_t = 0 # Reward at 4
a_t = np.zeros([ACTIONS]) # Actions at t
# Choose an action epsilon greedy
if t % FRAME_PER_ACTION == 0: # Parameter to skip frames for actions
if random.random() <= epsilon: # Randomly explore an action
print("---------Random Action---------")
action_index = random.randrange(ACTIONS)
a_t[action_index] = 1
else: # Predict the output
# Input a stack of 4 images, get the prediction
q = model.predict(s_t)
max_Q = np.argmax(q) # Choosing index with maximum "q" value
action_index = max_Q
a_t[action_index] = 1 # 0 => Do Nothing, 1 => Jump
# We reduce the epsilon (exploration parameter) gradually
if epsilon > FINAL_EPSILON and t > OBSERVE:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON)/EXPLORE
# Run the selected action and observed next state and reward
x_t1, r_t, terminal = game_state.get_state(a_t)
# FPS of the game
print("FPS: {0}".format(1/(time.time()-last_time)))
last_time = time.time()
x_t1 = x_t1.reshape(1, x_t1.shape[0], x_t1.shape[1], 1) # 1x20x40x1
# Append the new image to input stack and remove the first one
s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3)
# Store the transition in D
D.append((s_t, action_index, r_t, s_t1, terminal))
if len(D) > REPLAY_MEMORY:
D.popleft()
# Only train if done observing
if t > OBSERVE:
# Sample a minibatch to train on
minibatch = random.sample(D, BATCH)
inputs = np.zeros(
(BATCH, s_t.shape[1], s_t.shape[2], s_t.shape[3])) # 32x20x40x4
targets = np.zeros((inputs.shape[0], ACTIONS))
# Now we do the experience replay
for i in range(0, len(minibatch)):
state_t = minibatch[i][0] # 4D stack of images
action_t = minibatch[i][1] # This is the action index
reward_t = minibatch[i][2] # Reward at state_t due to action_t
state_t1 = minibatch[i][3] # Next State
# Wheather the agent died or survided due to the action
terminal = minibatch[i][4]
inputs[i:i+1] = state_t
targets[i] = model.predict(state_t) # Predicted "q" value
# Predict "q" value for next step
Q_sa = model.predict(state_t1)
if terminal:
# If terminated, only equal to reward
targets[i, action_t] = reward_t
else:
targets[i, action_t] = reward_t + GAMMA * np.max(Q_sa)
loss += model.train_on_batch(inputs, targets)
loss_df.loc[len(loss_df)] = loss
q_values_df.loc[len(q_values_df)] = np.max(Q_sa)
# Reset game to initial frame if terminated
s_t = initial_state if terminal else s_t1
t += 1
# Save progress every 500 iterations
if t % 500 == 0:
print("Now we save model during training")
game_state._game.pause() # Pause game while saving to filesystem
model.save_weights("model.h5", overwrite=True)
save_object(D, "D") # Saving episodes
save_object(t, "time") # Caching time steps
# Cache epsilon to avoide repeated randomness in actions
save_object(epsilon, "epsilon")
loss_df.to_csv(LOSS_FILE_PATH, index=False)
score_df.to_csv(SCORE_FILE_PATH, index=False)
actions_df.to_csv(ACTIONS_FILE_PATH, index=False)
q_values_df.to_csv(Q_VALUE_FILE_PATH, index=False)
with open("model.json", "w") as outfile:
json.dump(model.to_json(), outfile)
game_state._game.resume()
# Print Info
state = ""
if t <= OBSERVE:
state = "observe"
elif t > OBSERVE and t <= OBSERVE + EXPLORE:
state = "explore"
else:
state = "train"
print("TIMESTEP", t, "/ STATE", state, "/ EPSILON", epsilon, "/ ACTION",
action_index, "/ REWARD", r_t, "/ Q_MAX ", np.max(Q_sa), "/ Loss ", loss)
print("Episode finished!")
print("-------------------------------")
# Main Function
def playGame(observe=False):
try:
game = Game()
dino = DinoAgent(game)
game_state = Game_State(dino, game)
model = buildModel()
try:
trainNetwork(model, game_state, observe=observe)
except FileNotFoundError:
print("Looks like init_cache.py was not executed ever.\nDoing that for you!!! Sit back and relax.....")
os.system('python init_cache.py')
trainNetwork(model, game_state, observe=observe)
except StopIteration:
game.end()
except selenium.common.exceptions.WebDriverException:
print("No driver")
playGame(observe=False) | [
"selenium.webdriver.chrome.options.Options",
"pandas.read_csv",
"cv2.imshow",
"tensorflow.keras.layers.Dense",
"cv2.destroyAllWindows",
"tensorflow.keras.optimizers.Nadam",
"tensorflow.keras.layers.Conv2D",
"numpy.max",
"numpy.stack",
"random.random",
"pandas.DataFrame",
"tensorflow.keras.mode... | [((2019, 2049), 'os.path.isfile', 'os.path.isfile', (['LOSS_FILE_PATH'], {}), '(LOSS_FILE_PATH)\n', (2033, 2049), False, 'import os\n'), ((1988, 2015), 'pandas.read_csv', 'pd.read_csv', (['LOSS_FILE_PATH'], {}), '(LOSS_FILE_PATH)\n', (1999, 2015), True, 'import pandas as pd\n'), ((2060, 2090), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['loss']"}), "(columns=['loss'])\n", (2072, 2090), True, 'import pandas as pd\n'), ((2134, 2165), 'os.path.isfile', 'os.path.isfile', (['SCORE_FILE_PATH'], {}), '(SCORE_FILE_PATH)\n', (2148, 2165), False, 'import os\n'), ((2102, 2130), 'pandas.read_csv', 'pd.read_csv', (['SCORE_FILE_PATH'], {}), '(SCORE_FILE_PATH)\n', (2113, 2130), True, 'import pandas as pd\n'), ((2176, 2208), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Scores']"}), "(columns=['Scores'])\n", (2188, 2208), True, 'import pandas as pd\n'), ((2256, 2289), 'os.path.isfile', 'os.path.isfile', (['ACTIONS_FILE_PATH'], {}), '(ACTIONS_FILE_PATH)\n', (2270, 2289), False, 'import os\n'), ((2222, 2252), 'pandas.read_csv', 'pd.read_csv', (['ACTIONS_FILE_PATH'], {}), '(ACTIONS_FILE_PATH)\n', (2233, 2252), True, 'import pandas as pd\n'), ((2300, 2333), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Actions']"}), "(columns=['Actions'])\n", (2312, 2333), True, 'import pandas as pd\n'), ((2382, 2415), 'os.path.isfile', 'os.path.isfile', (['Q_VALUE_FILE_PATH'], {}), '(Q_VALUE_FILE_PATH)\n', (2396, 2415), False, 'import os\n'), ((2348, 2378), 'pandas.read_csv', 'pd.read_csv', (['Q_VALUE_FILE_PATH'], {}), '(Q_VALUE_FILE_PATH)\n', (2359, 2378), True, 'import pandas as pd\n'), ((2426, 2459), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['qvalues']"}), "(columns=['qvalues'])\n", (2438, 2459), True, 'import pandas as pd\n'), ((2930, 2969), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2942, 2969), False, 'import cv2\n'), ((3065, 3092), 'cv2.resize', 'cv2.resize', 
(['image', '(80, 80)'], {}), '(image, (80, 80))\n', (3075, 3092), False, 'import cv2\n'), ((7835, 7847), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7845, 7847), False, 'from tensorflow.keras.models import Sequential\n'), ((8640, 8663), 'tensorflow.keras.optimizers.Nadam', 'Nadam', ([], {'lr': 'LEARNING_RATE'}), '(lr=LEARNING_RATE)\n', (8645, 8663), False, 'from tensorflow.keras.optimizers import SGD, Adam, Nadam\n'), ((9267, 9278), 'time.time', 'time.time', ([], {}), '()\n', (9276, 9278), False, 'import time\n'), ((9399, 9416), 'numpy.zeros', 'np.zeros', (['ACTIONS'], {}), '(ACTIONS)\n', (9407, 9416), True, 'import numpy as np\n'), ((9639, 9677), 'numpy.stack', 'np.stack', (['(x_t, x_t, x_t, x_t)'], {'axis': '(2)'}), '((x_t, x_t, x_t, x_t), axis=2)\n', (9647, 9677), True, 'import numpy as np\n'), ((2643, 2690), 'pickle.dump', 'pickle.dump', (['object', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(object, f, pickle.HIGHEST_PROTOCOL)\n', (2654, 2690), False, 'import pickle\n'), ((2821, 2835), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2832, 2835), False, 'import pickle\n'), ((3571, 3619), 'cv2.namedWindow', 'cv2.namedWindow', (['window_title', 'cv2.WINDOW_NORMAL'], {}), '(window_title, cv2.WINDOW_NORMAL)\n', (3586, 3619), False, 'import cv2\n'), ((3641, 3671), 'cv2.resize', 'cv2.resize', (['screen', '(800, 400)'], {}), '(screen, (800, 400))\n', (3651, 3671), False, 'import cv2\n'), ((3680, 3712), 'cv2.imshow', 'cv2.imshow', (['window_title', 'screen'], {}), '(window_title, screen)\n', (3690, 3712), False, 'import cv2\n'), ((4162, 4171), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (4169, 4171), False, 'from selenium.webdriver.chrome.options import Options\n'), ((4303, 4391), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': 'CHROME_DRIVER_PATH', 'chrome_options': 'chrome_options'}), '(executable_path=CHROME_DRIVER_PATH, chrome_options=\n chrome_options)\n', (4319, 4391), False, 
'from selenium import webdriver\n'), ((7862, 7964), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(8, 8)'], {'padding': '"""same"""', 'strides': '(4, 4)', 'input_shape': '(img_cols, img_rows, img_channels)'}), "(32, (8, 8), padding='same', strides=(4, 4), input_shape=(img_cols,\n img_rows, img_channels))\n", (7868, 7964), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8027, 8057), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (8039, 8057), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8073, 8091), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8083, 8091), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8153, 8203), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(64, (4, 4), strides=(2, 2), padding='same')\n", (8159, 8203), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8220, 8250), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (8232, 8250), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8266, 8284), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8276, 8284), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8345, 8395), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""'}), "(64, (3, 3), strides=(1, 1), padding='same')\n", (8351, 8395), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8412, 8442), 
'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (8424, 8442), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8458, 8476), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8468, 8476), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8492, 8501), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8499, 8501), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8517, 8527), 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (8522, 8527), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8543, 8561), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8553, 8561), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8577, 8591), 'tensorflow.keras.layers.Dense', 'Dense', (['ACTIONS'], {}), '(ACTIONS)\n', (8582, 8591), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\n'), ((8764, 8794), 'os.path.isfile', 'os.path.isfile', (['LOSS_FILE_PATH'], {}), '(LOSS_FILE_PATH)\n', (8778, 8794), False, 'import os\n'), ((10029, 10052), 'tensorflow.keras.optimizers.Nadam', 'Nadam', ([], {'lr': 'LEARNING_RATE'}), '(lr=LEARNING_RATE)\n', (10034, 10052), False, 'from tensorflow.keras.optimizers import SGD, Adam, Nadam\n'), ((10350, 10373), 'tensorflow.keras.optimizers.Nadam', 'Nadam', ([], {'lr': 'LEARNING_RATE'}), '(lr=LEARNING_RATE)\n', (10355, 10373), False, 'from tensorflow.keras.optimizers import SGD, Adam, Nadam\n'), ((10661, 10680), 'numpy.zeros', 'np.zeros', (['[ACTIONS]'], {}), '([ACTIONS])\n', (10669, 10680), True, 'import numpy as np\n'), ((11786, 11797), 
'time.time', 'time.time', ([], {}), '()\n', (11795, 11797), False, 'import time\n'), ((11963, 12004), 'numpy.append', 'np.append', (['x_t1', 's_t[:, :, :, :3]'], {'axis': '(3)'}), '(x_t1, s_t[:, :, :, :3], axis=3)\n', (11972, 12004), True, 'import numpy as np\n'), ((3773, 3796), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3794, 3796), False, 'import cv2\n'), ((12293, 12316), 'random.sample', 'random.sample', (['D', 'BATCH'], {}), '(D, BATCH)\n', (12306, 12316), False, 'import random\n'), ((12338, 12397), 'numpy.zeros', 'np.zeros', (['(BATCH, s_t.shape[1], s_t.shape[2], s_t.shape[3])'], {}), '((BATCH, s_t.shape[1], s_t.shape[2], s_t.shape[3]))\n', (12346, 12397), True, 'import numpy as np\n'), ((12451, 12487), 'numpy.zeros', 'np.zeros', (['(inputs.shape[0], ACTIONS)'], {}), '((inputs.shape[0], ACTIONS))\n', (12459, 12487), True, 'import numpy as np\n'), ((13572, 13584), 'numpy.max', 'np.max', (['Q_sa'], {}), '(Q_sa)\n', (13578, 13584), True, 'import numpy as np\n'), ((14931, 14943), 'numpy.max', 'np.max', (['Q_sa'], {}), '(Q_sa)\n', (14937, 14943), True, 'import numpy as np\n'), ((3272, 3299), 'base64.b64decode', 'base64.b64decode', (['image_b64'], {}), '(image_b64)\n', (3288, 3299), False, 'import base64\n'), ((3725, 3739), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3736, 3739), False, 'import cv2\n'), ((10833, 10848), 'random.random', 'random.random', ([], {}), '()\n', (10846, 10848), False, 'import random\n'), ((10979, 11004), 'random.randrange', 'random.randrange', (['ACTIONS'], {}), '(ACTIONS)\n', (10995, 11004), False, 'import random\n'), ((11210, 11222), 'numpy.argmax', 'np.argmax', (['q'], {}), '(q)\n', (11219, 11222), True, 'import numpy as np\n'), ((15457, 15490), 'os.system', 'os.system', (['"""python init_cache.py"""'], {}), "('python init_cache.py')\n", (15466, 15490), False, 'import os\n'), ((11741, 11752), 'time.time', 'time.time', ([], {}), '()\n', (11750, 11752), False, 'import time\n'), ((13407, 13419), 
'numpy.max', 'np.max', (['Q_sa'], {}), '(Q_sa)\n', (13413, 13419), True, 'import numpy as np\n')] |
# MSD post-processing for a two-species (A/B) particle simulation: reads a
# GSD trajectory, identifies liquid clusters, and computes mean squared
# displacement per phase (liquid/gas) and per species.
import sys
# Command-line layout: argv[1]=pe_a, argv[2]=pe_b, argv[3]=percent of A,
# argv[4]=path to local hoomd build, argv[5]=path to local gsd build.
hoomd_path = str(sys.argv[4])
gsd_path = str(sys.argv[5])
# need to extract values from filename (pa, pb, xa) for naming
part_perc_a = int(sys.argv[3])  # percentage of species A (0-100)
part_frac_a = float(part_perc_a) / 100.0  # fraction of species A (0-1)
pe_a = int(sys.argv[1])  # activity of species A (presumably a Peclet number; used in file names)
pe_b = int(sys.argv[2])  # activity of species B
sys.path.append(hoomd_path)  # make the local hoomd build importable
import hoomd
from hoomd import md
from hoomd import deprecated
#initialize system randomly, can specify GPU execution here
my_dt = 0.000001  # simulation timestep size; converts step counts to time (tau)
################################################################################
############################# Begin Data Analysis ##############################
################################################################################
sys.path.append(gsd_path)  # make the local gsd build importable
import gsd
from gsd import hoomd  # NOTE(review): this shadows the hoomd package imported above
from gsd import pygsd
import numpy as np
# Trajectory to analyze, named from the run parameters.
msdfile = "MSD_pa" + str(pe_a) + "_pb" + str(pe_b) + "_xa" + str(part_perc_a) + ".gsd"
f = hoomd.open(name=msdfile, mode='rb')  # NOTE(review): opened only to count frames; never closed
dumps = f.__len__()  # number of frames in the trajectory
size_min = 35 # minimum size of cluster
# Object arrays: one sub-array per frame.
position_array = np.zeros((dumps), dtype=np.ndarray) # array of position arrays
type_array = np.zeros((dumps), dtype=np.ndarray) # particle types
box_data = np.zeros((1), dtype=np.ndarray) # box dimensions
timesteps = np.zeros((dumps), dtype=np.float64) # timesteps
with hoomd.open(name=msdfile, mode='rb') as t: # open for reading
    snap = t[0] # snap 0th snapshot
    box_data = snap.configuration.box # get box dimensions
    for i in range(0,dumps):
        snap = t[i] # take snap of each dump
        type_array[i] = snap.particles.typeid
        position_array[i] = snap.particles.position # store all particle positions
        timesteps[i] = snap.configuration.step # store tstep for plotting purposes
part_num = len(type_array[0])  # total particle count
part_a = part_num * part_frac_a # get the total number of A particles
part_a = int(part_a)
part_b = part_num - part_a # get the total number of B particles
part_b = int(part_b)
timesteps -= timesteps[0]  # measure time relative to the first frame
msd_time = timesteps[1:]  # MSD is accumulated per frame-to-frame interval, so drop frame 0
msd_time *= my_dt  # convert step counts to simulation time (tau)
from freud import parallel, box, density, cluster
parallel.setNumThreads(1) # don't run multiple threads
l_box = box_data[0] # get box dimensions (square here)
f_box = box.Box(Lx=l_box,
                Ly=l_box,
                is2D=True) # initialize freud box
my_clusters = cluster.Cluster(box=f_box,
                               rcut=1.0) # particles closer than rcut join a cluster
cluster_props = cluster.ClusterProperties(box=f_box)
# Per-frame bookkeeping (object arrays hold one sub-array per frame).
ids = np.zeros((dumps), dtype=np.ndarray)  # per-particle cluster id, per frame
size_clusters = np.zeros((dumps), dtype=np.ndarray)  # per-cluster sizes, per frame
tot_size = np.zeros((dumps), dtype=np.ndarray) # number of particles in clusters
percent_A = np.zeros((dumps), dtype=np.ndarray) # composition A at each timestep
# Per-interval MSD accumulators: dumps-1 intervals between consecutive frames.
LIQ_A = np.zeros((dumps - 1), dtype=np.ndarray) # arrays for MSD
LIQ_B = np.zeros((dumps - 1), dtype=np.ndarray)
GAS_A = np.zeros((dumps - 1), dtype=np.ndarray)
GAS_B = np.zeros((dumps - 1), dtype=np.ndarray)
MSD_T = np.zeros((dumps - 1), dtype=np.float64)  # total MSD over all particles
MSD_TL = np.zeros((dumps - 1), dtype=np.ndarray)  # total MSD, liquid phase only
MSD_TG = np.zeros((dumps - 1), dtype=np.ndarray)  # total MSD, gas phase only
disp_x = np.zeros((part_num), dtype=np.ndarray) # displacement vectors
disp_y = np.zeros((part_num), dtype=np.ndarray)
disp_z = np.zeros((part_num), dtype=np.ndarray)
# analyze all particles
# For each frame: cluster the particles, flag each cluster as "liquid"
# (more than size_min members) or "gas", then accumulate the squared net
# displacement of every particle into the phase/species MSD bins.
half_box = l_box / 2.0  # minimum-image threshold for periodic unwrapping
for j in range(0, dumps):
    l_pos = position_array[j]
    my_clusters.computeClusters(l_pos)
    ids = my_clusters.getClusterIdx() # get cluster ids
    cluster_props.computeProperties(l_pos, ids)
    size_clusters[j] = cluster_props.getClusterSizes() # get number of particles in each
    how_many = my_clusters.getNumClusters()
    #########################################################
    ### Find MSD for A, B individually, also total system ###
    #########################################################
    # Run-length count the sorted cluster ids: a cluster whose id appears
    # more than size_min times is marked liquid (q_clust[id] == 1).
    sort_id = np.sort(ids) # array of IDs sorted small to large
    q_clust = np.zeros((how_many), dtype=np.ndarray) # my binary 'is it clustered?' array
    index = 0 # index of the sorted array to look at
    for a in range(0,len(q_clust)):
        add_clust = 0
        while 1:
            add_clust += 1
            if index == part_num: # break if index is too large
                break
            if sort_id[index] != a: # break if ID changes
                break
            if add_clust == 1: # all particles appear once
                q_clust[a] = 0
            if add_clust > size_min: # only clusters appear > size_min times
                q_clust[a] = 1
            index += 1 # increment index
    lq_a_count = 0
    lq_b_count = 0
    gs_a_count = 0
    gs_b_count = 0
    if j > 0:
        numerator_A = 0
        denominator_tot = 0
        for b in range(0,part_num):
            # check instantaneous disp. over last timestep
            dx = position_array[j][b][0] - position_array[j-1][b][0]
            dy = position_array[j][b][1] - position_array[j-1][b][1]
            dz = position_array[j][b][2] - position_array[j-1][b][2]
            # A jump larger than half the box means the particle wrapped
            # across a periodic boundary; undo the wrap.
            # (BUG FIX: was hard-coded +/-50, which is only correct for l_box == 100)
            if dx < -half_box:
                dx += l_box
            if dx > half_box:
                dx -= l_box
            disp_x[b] += dx
            if dy < -half_box:
                dy += l_box
            if dy > half_box:
                dy -= l_box
            disp_y[b] += dy
            if dz < -half_box:
                dz += l_box
            if dz > half_box:
                dz -= l_box
            disp_z[b] += dz
            # squared net displacement since frame 0 (no square root: MSD)
            msd_val = ((disp_x[b])**2) + ((disp_y[b])**2) + ((disp_z[b])**2)
            MSD_T[j-1] += msd_val
            if q_clust[ids[b]] == 1: # check if in liquid
                MSD_TL[j-1] += msd_val # add to tot. lq. msd
                if type_array[j][b] == 0: # type A case
                    LIQ_A[j-1] += msd_val
                    lq_a_count += 1
                else:
                    LIQ_B[j-1] += msd_val
                    lq_b_count += 1
            else: # else, particle is gas
                MSD_TG[j-1] += msd_val # add to tot. gs. msd
                if type_array[j][b] == 0: # type A case
                    GAS_A[j-1] += msd_val
                    gs_a_count += 1
                else:
                    GAS_B[j-1] += msd_val
                    gs_b_count += 1
        # Normalize each bin by the number of contributing particles;
        # if-gating these so a phase with zero members doesn't divide by zero.
        if lq_a_count != 0: LIQ_A[j-1] /= lq_a_count
        if lq_b_count != 0: LIQ_B[j-1] /= lq_b_count
        if gs_a_count != 0: GAS_A[j-1] /= gs_a_count
        if gs_b_count != 0: GAS_B[j-1] /= gs_b_count
        MSD_T[j-1] /= part_num
        if lq_a_count + lq_b_count != 0: MSD_TL[j-1] /= lq_a_count + lq_b_count
        if gs_a_count + gs_b_count != 0: MSD_TG[j-1] /= gs_a_count + gs_b_count
        # Fraction of liquid-phase particles that are species A.
        numerator_A = lq_a_count
        denominator_tot = lq_a_count + lq_b_count
        if denominator_tot != 0:
            percent_A[j] = float(numerator_A) / float(denominator_tot)
################################################################################
#################### Plot the individual and total data ########################
################################################################################
import matplotlib
matplotlib.use('Agg')  # headless backend: write figures to files, no display needed
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
# Base names for all output files, encoding the run parameters.
plt_name = "pa" + str(pe_a) + "_pb" + str(pe_b) + "_xa" + str(part_perc_a)
plt_name1 = "pa" + str(pe_a) + "_pb" + str(pe_b) + "_xa" + str(part_perc_a) + "A"
plt_name2 = "pa" + str(pe_a) + "_pb" + str(pe_b) + "_xa" + str(part_perc_a) + "B"
def half(x, prefactor=50):
    """Power-law guide line with exponent 0.5: sqrt(prefactor * x).

    `prefactor` generalizes the previously hard-coded constant 50; the
    default keeps the original behavior.
    """
    return np.sqrt(prefactor * x)
def one(x, prefactor=50):
    """Power-law guide line with exponent 1.0: prefactor * x.

    `prefactor` generalizes the previously hard-coded constant 50; the
    default keeps the original behavior.
    """
    return prefactor * x
def one_and_half(x, prefactor=50):
    """Power-law guide line with exponent 1.5: (prefactor * x) ** 1.5.

    `prefactor` generalizes the previously hard-coded constant 50; the
    default keeps the original behavior.
    """
    return (prefactor * x) ** 1.5
def two(x, prefactor=50):
    """Power-law guide line with exponent 2.0: (prefactor * x) ** 2.

    `prefactor` generalizes the previously hard-coded constant 50; the
    default keeps the original behavior.
    """
    return (prefactor * x) ** 2
def _plot_guide_lines(t):
    # Overlay power-law reference lines (exponents 0.5, 1, 1.5, 2) so the
    # MSD scaling regime can be read off the log-log plot.
    plt.plot(t, half(t), label='x^0.5', linewidth=1.0)
    plt.plot(t, one(t), label='x^1.0', linewidth=1.0)
    plt.plot(t, one_and_half(t), label='x^1.5', linewidth=1.0)
    plt.plot(t, two(t), label='x^2.0', linewidth=1.0)

def _save_loglog(outname):
    # Shared log-log axis formatting; saves and closes the current figure.
    plt.xlim((10**-6, 10))
    plt.ylim(ymin=10**-6)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel('Time (tau)')
    plt.ylabel('MSD')
    plt.legend(loc='upper left')
    plt.savefig(outname, dpi=1000)
    plt.close()

if part_perc_a != 0 and part_perc_a != 100:
    # Mixed system: composition plot plus species-resolved MSDs.
    plt.plot(percent_A, color="r")
    plt.savefig('A_comp_' + plt_name + '.png', dpi=1000)
    plt.close()
    # Gas-phase MSD, per species.
    _plot_guide_lines(msd_time)
    plt.plot(msd_time, GAS_A, color="r", marker='o', markersize=1, linestyle='None', label='Gas_A')
    plt.plot(msd_time, GAS_B, color="b", marker='o', markersize=1, linestyle='None', label='Gas_B')
    _save_loglog('MSD_GAS_AB_' + plt_name + '.png')
    # Liquid-phase MSD, per species.
    _plot_guide_lines(msd_time)
    plt.plot(msd_time, LIQ_A, color="r", marker='o', markersize=1, linestyle='None', label='Liq_A')
    plt.plot(msd_time, LIQ_B, color="b", marker='o', markersize=1, linestyle='None', label='Liq_B')
    _save_loglog('MSD_LIQ_AB_' + plt_name + '.png')
    # Total MSD over all particles.
    _plot_guide_lines(msd_time)
    plt.plot(msd_time, MSD_T, color="g", marker='o', markersize=1, linestyle='None', label='MSD')
    _save_loglog('MSD_total_' + plt_name + '.png')
    # Liquid vs gas total MSD.
    _plot_guide_lines(msd_time)
    plt.plot(msd_time, MSD_TL, color="b", marker='o', markersize=1, linestyle='None', label='Liq')
    plt.plot(msd_time, MSD_TG, color="r", marker='o', markersize=1, linestyle='None', label='Gas')
    _save_loglog('MSD_LG_' + plt_name + '.png')
    # Raw data for later re-plotting.
    np.savetxt('MSD_typed_' + plt_name + '.txt', np.transpose([msd_time, GAS_A, GAS_B, LIQ_A, LIQ_B]))
    np.savetxt('MSD_totals_' + plt_name + '.txt', np.transpose([msd_time, MSD_T, MSD_TL, MSD_TG]))
else: # if monodisperse plot total values
    _plot_guide_lines(msd_time)
    plt.plot(msd_time, MSD_T, color="g", marker='s', markersize=3, linestyle='None', label='MSD')
    _save_loglog('MSD_total_' + plt_name + '.png')
    _plot_guide_lines(msd_time)
    plt.plot(msd_time, MSD_TL, color="b", marker='s', markersize=3, linestyle='None', label='Liq')
    plt.plot(msd_time, MSD_TG, color="r", marker='o', markersize=3, linestyle='None', label='Gas')
    _save_loglog('MSD_LG_' + plt_name + '.png')
    np.savetxt('MSD_totals_' + plt_name + '.txt', np.transpose([msd_time, MSD_T, MSD_TL, MSD_TG]))
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"sys.path.append",
"freud.cluster.Cluster",
"seaborn.set",
"numpy.sort",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"freud.parallel.setNumThreads",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yscale",
"matplotli... | [((255, 282), 'sys.path.append', 'sys.path.append', (['hoomd_path'], {}), '(hoomd_path)\n', (270, 282), False, 'import sys\n'), ((670, 695), 'sys.path.append', 'sys.path.append', (['gsd_path'], {}), '(gsd_path)\n', (685, 695), False, 'import sys\n'), ((863, 898), 'gsd.hoomd.open', 'hoomd.open', ([], {'name': 'msdfile', 'mode': '"""rb"""'}), "(name=msdfile, mode='rb')\n", (873, 898), False, 'from gsd import hoomd\n'), ((1019, 1052), 'numpy.zeros', 'np.zeros', (['dumps'], {'dtype': 'np.ndarray'}), '(dumps, dtype=np.ndarray)\n', (1027, 1052), True, 'import numpy as np\n'), ((1098, 1131), 'numpy.zeros', 'np.zeros', (['dumps'], {'dtype': 'np.ndarray'}), '(dumps, dtype=np.ndarray)\n', (1106, 1131), True, 'import numpy as np\n'), ((1169, 1198), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.ndarray'}), '(1, dtype=np.ndarray)\n', (1177, 1198), True, 'import numpy as np\n'), ((1243, 1276), 'numpy.zeros', 'np.zeros', (['dumps'], {'dtype': 'np.float64'}), '(dumps, dtype=np.float64)\n', (1251, 1276), True, 'import numpy as np\n'), ((2213, 2238), 'freud.parallel.setNumThreads', 'parallel.setNumThreads', (['(1)'], {}), '(1)\n', (2235, 2238), False, 'from freud import parallel, box, density, cluster\n'), ((2398, 2436), 'freud.box.Box', 'box.Box', ([], {'Lx': 'l_box', 'Ly': 'l_box', 'is2D': '(True)'}), '(Lx=l_box, Ly=l_box, is2D=True)\n', (2405, 2436), False, 'from freud import parallel, box, density, cluster\n'), ((2537, 2573), 'freud.cluster.Cluster', 'cluster.Cluster', ([], {'box': 'f_box', 'rcut': '(1.0)'}), '(box=f_box, rcut=1.0)\n', (2552, 2573), False, 'from freud import parallel, box, density, cluster\n'), ((2655, 2691), 'freud.cluster.ClusterProperties', 'cluster.ClusterProperties', ([], {'box': 'f_box'}), '(box=f_box)\n', (2680, 2691), False, 'from freud import parallel, box, density, cluster\n'), ((2700, 2733), 'numpy.zeros', 'np.zeros', (['dumps'], {'dtype': 'np.ndarray'}), '(dumps, dtype=np.ndarray)\n', (2708, 2733), True, 'import numpy as np\n'), 
((2752, 2785), 'numpy.zeros', 'np.zeros', (['dumps'], {'dtype': 'np.ndarray'}), '(dumps, dtype=np.ndarray)\n', (2760, 2785), True, 'import numpy as np\n'), ((2799, 2832), 'numpy.zeros', 'np.zeros', (['dumps'], {'dtype': 'np.ndarray'}), '(dumps, dtype=np.ndarray)\n', (2807, 2832), True, 'import numpy as np\n'), ((2890, 2923), 'numpy.zeros', 'np.zeros', (['dumps'], {'dtype': 'np.ndarray'}), '(dumps, dtype=np.ndarray)\n', (2898, 2923), True, 'import numpy as np\n'), ((2976, 3013), 'numpy.zeros', 'np.zeros', (['(dumps - 1)'], {'dtype': 'np.ndarray'}), '(dumps - 1, dtype=np.ndarray)\n', (2984, 3013), True, 'import numpy as np\n'), ((3049, 3086), 'numpy.zeros', 'np.zeros', (['(dumps - 1)'], {'dtype': 'np.ndarray'}), '(dumps - 1, dtype=np.ndarray)\n', (3057, 3086), True, 'import numpy as np\n'), ((3097, 3134), 'numpy.zeros', 'np.zeros', (['(dumps - 1)'], {'dtype': 'np.ndarray'}), '(dumps - 1, dtype=np.ndarray)\n', (3105, 3134), True, 'import numpy as np\n'), ((3145, 3182), 'numpy.zeros', 'np.zeros', (['(dumps - 1)'], {'dtype': 'np.ndarray'}), '(dumps - 1, dtype=np.ndarray)\n', (3153, 3182), True, 'import numpy as np\n'), ((3193, 3230), 'numpy.zeros', 'np.zeros', (['(dumps - 1)'], {'dtype': 'np.float64'}), '(dumps - 1, dtype=np.float64)\n', (3201, 3230), True, 'import numpy as np\n'), ((3242, 3279), 'numpy.zeros', 'np.zeros', (['(dumps - 1)'], {'dtype': 'np.ndarray'}), '(dumps - 1, dtype=np.ndarray)\n', (3250, 3279), True, 'import numpy as np\n'), ((3291, 3328), 'numpy.zeros', 'np.zeros', (['(dumps - 1)'], {'dtype': 'np.ndarray'}), '(dumps - 1, dtype=np.ndarray)\n', (3299, 3328), True, 'import numpy as np\n'), ((3341, 3377), 'numpy.zeros', 'np.zeros', (['part_num'], {'dtype': 'np.ndarray'}), '(part_num, dtype=np.ndarray)\n', (3349, 3377), True, 'import numpy as np\n'), ((3420, 3456), 'numpy.zeros', 'np.zeros', (['part_num'], {'dtype': 'np.ndarray'}), '(part_num, dtype=np.ndarray)\n', (3428, 3456), True, 'import numpy as np\n'), ((3468, 3504), 'numpy.zeros', 'np.zeros', 
(['part_num'], {'dtype': 'np.ndarray'}), '(part_num, dtype=np.ndarray)\n', (3476, 3504), True, 'import numpy as np\n'), ((7924, 7945), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (7938, 7945), False, 'import matplotlib\n'), ((8000, 8025), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (8007, 8025), True, 'import seaborn as sns\n'), ((1305, 1340), 'gsd.hoomd.open', 'hoomd.open', ([], {'name': 'msdfile', 'mode': '"""rb"""'}), "(name=msdfile, mode='rb')\n", (1315, 1340), False, 'from gsd import hoomd\n'), ((4104, 4116), 'numpy.sort', 'np.sort', (['ids'], {}), '(ids)\n', (4111, 4116), True, 'import numpy as np\n'), ((4197, 4233), 'numpy.zeros', 'np.zeros', (['how_many'], {'dtype': 'np.ndarray'}), '(how_many, dtype=np.ndarray)\n', (4205, 4233), True, 'import numpy as np\n'), ((8292, 8307), 'numpy.sqrt', 'np.sqrt', (['(50 * x)'], {}), '(50 * x)\n', (8299, 8307), True, 'import numpy as np\n'), ((8463, 8493), 'matplotlib.pyplot.plot', 'plt.plot', (['percent_A'], {'color': '"""r"""'}), "(percent_A, color='r')\n", (8471, 8493), True, 'import matplotlib.pyplot as plt\n'), ((8519, 8571), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('A_comp_' + plt_name + '.png')"], {'dpi': '(1000)'}), "('A_comp_' + plt_name + '.png', dpi=1000)\n", (8530, 8571), True, 'import matplotlib.pyplot as plt\n'), ((8572, 8583), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8581, 8583), True, 'import matplotlib.pyplot as plt\n'), ((8871, 8971), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'GAS_A'], {'color': '"""r"""', 'marker': '"""o"""', 'markersize': '(1)', 'linestyle': '"""None"""', 'label': '"""Gas_A"""'}), "(msd_time, GAS_A, color='r', marker='o', markersize=1, linestyle=\n 'None', label='Gas_A')\n", (8879, 8971), True, 'import matplotlib.pyplot as plt\n'), ((8972, 9072), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'GAS_B'], {'color': '"""b"""', 'marker': '"""o"""', 'markersize': '(1)', 'linestyle': 
'"""None"""', 'label': '"""Gas_B"""'}), "(msd_time, GAS_B, color='b', marker='o', markersize=1, linestyle=\n 'None', label='Gas_B')\n", (8980, 9072), True, 'import matplotlib.pyplot as plt\n'), ((9073, 9097), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(10 ** -6, 10)'], {}), '((10 ** -6, 10))\n', (9081, 9097), True, 'import matplotlib.pyplot as plt\n'), ((9099, 9122), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(10 ** -6)'}), '(ymin=10 ** -6)\n', (9107, 9122), True, 'import matplotlib.pyplot as plt\n'), ((9125, 9142), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (9135, 9142), True, 'import matplotlib.pyplot as plt\n'), ((9147, 9164), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (9157, 9164), True, 'import matplotlib.pyplot as plt\n'), ((9169, 9193), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (tau)"""'], {}), "('Time (tau)')\n", (9179, 9193), True, 'import matplotlib.pyplot as plt\n'), ((9198, 9215), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSD"""'], {}), "('MSD')\n", (9208, 9215), True, 'import matplotlib.pyplot as plt\n'), ((9220, 9248), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (9230, 9248), True, 'import matplotlib.pyplot as plt\n'), ((9253, 9309), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('MSD_GAS_AB_' + plt_name + '.png')"], {'dpi': '(1000)'}), "('MSD_GAS_AB_' + plt_name + '.png', dpi=1000)\n", (9264, 9309), True, 'import matplotlib.pyplot as plt\n'), ((9314, 9325), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9323, 9325), True, 'import matplotlib.pyplot as plt\n'), ((9617, 9717), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'LIQ_A'], {'color': '"""r"""', 'marker': '"""o"""', 'markersize': '(1)', 'linestyle': '"""None"""', 'label': '"""Liq_A"""'}), "(msd_time, LIQ_A, color='r', marker='o', markersize=1, linestyle=\n 'None', label='Liq_A')\n", (9625, 9717), True, 'import matplotlib.pyplot as 
plt\n'), ((9718, 9818), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'LIQ_B'], {'color': '"""b"""', 'marker': '"""o"""', 'markersize': '(1)', 'linestyle': '"""None"""', 'label': '"""Liq_B"""'}), "(msd_time, LIQ_B, color='b', marker='o', markersize=1, linestyle=\n 'None', label='Liq_B')\n", (9726, 9818), True, 'import matplotlib.pyplot as plt\n'), ((9819, 9843), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(10 ** -6, 10)'], {}), '((10 ** -6, 10))\n', (9827, 9843), True, 'import matplotlib.pyplot as plt\n'), ((9845, 9868), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(10 ** -6)'}), '(ymin=10 ** -6)\n', (9853, 9868), True, 'import matplotlib.pyplot as plt\n'), ((9871, 9888), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (9881, 9888), True, 'import matplotlib.pyplot as plt\n'), ((9893, 9910), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (9903, 9910), True, 'import matplotlib.pyplot as plt\n'), ((9915, 9939), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (tau)"""'], {}), "('Time (tau)')\n", (9925, 9939), True, 'import matplotlib.pyplot as plt\n'), ((9944, 9961), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSD"""'], {}), "('MSD')\n", (9954, 9961), True, 'import matplotlib.pyplot as plt\n'), ((9966, 9994), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (9976, 9994), True, 'import matplotlib.pyplot as plt\n'), ((9999, 10055), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('MSD_LIQ_AB_' + plt_name + '.png')"], {'dpi': '(1000)'}), "('MSD_LIQ_AB_' + plt_name + '.png', dpi=1000)\n", (10010, 10055), True, 'import matplotlib.pyplot as plt\n'), ((10060, 10071), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10069, 10071), True, 'import matplotlib.pyplot as plt\n'), ((10359, 10457), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'MSD_T'], {'color': '"""g"""', 'marker': '"""o"""', 'markersize': '(1)', 'linestyle': '"""None"""', 
'label': '"""MSD"""'}), "(msd_time, MSD_T, color='g', marker='o', markersize=1, linestyle=\n 'None', label='MSD')\n", (10367, 10457), True, 'import matplotlib.pyplot as plt\n'), ((10458, 10482), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(10 ** -6, 10)'], {}), '((10 ** -6, 10))\n', (10466, 10482), True, 'import matplotlib.pyplot as plt\n'), ((10484, 10507), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(10 ** -6)'}), '(ymin=10 ** -6)\n', (10492, 10507), True, 'import matplotlib.pyplot as plt\n'), ((10510, 10527), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (10520, 10527), True, 'import matplotlib.pyplot as plt\n'), ((10532, 10549), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (10542, 10549), True, 'import matplotlib.pyplot as plt\n'), ((10554, 10578), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (tau)"""'], {}), "('Time (tau)')\n", (10564, 10578), True, 'import matplotlib.pyplot as plt\n'), ((10583, 10600), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSD"""'], {}), "('MSD')\n", (10593, 10600), True, 'import matplotlib.pyplot as plt\n'), ((10605, 10633), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (10615, 10633), True, 'import matplotlib.pyplot as plt\n'), ((10638, 10693), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('MSD_total_' + plt_name + '.png')"], {'dpi': '(1000)'}), "('MSD_total_' + plt_name + '.png', dpi=1000)\n", (10649, 10693), True, 'import matplotlib.pyplot as plt\n'), ((10698, 10709), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10707, 10709), True, 'import matplotlib.pyplot as plt\n'), ((11001, 11100), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'MSD_TL'], {'color': '"""b"""', 'marker': '"""o"""', 'markersize': '(1)', 'linestyle': '"""None"""', 'label': '"""Liq"""'}), "(msd_time, MSD_TL, color='b', marker='o', markersize=1, linestyle=\n 'None', label='Liq')\n", (11009, 11100), True, 'import 
matplotlib.pyplot as plt\n'), ((11101, 11200), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'MSD_TG'], {'color': '"""r"""', 'marker': '"""o"""', 'markersize': '(1)', 'linestyle': '"""None"""', 'label': '"""Gas"""'}), "(msd_time, MSD_TG, color='r', marker='o', markersize=1, linestyle=\n 'None', label='Gas')\n", (11109, 11200), True, 'import matplotlib.pyplot as plt\n'), ((11201, 11225), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(10 ** -6, 10)'], {}), '((10 ** -6, 10))\n', (11209, 11225), True, 'import matplotlib.pyplot as plt\n'), ((11227, 11250), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(10 ** -6)'}), '(ymin=10 ** -6)\n', (11235, 11250), True, 'import matplotlib.pyplot as plt\n'), ((11253, 11270), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (11263, 11270), True, 'import matplotlib.pyplot as plt\n'), ((11275, 11292), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (11285, 11292), True, 'import matplotlib.pyplot as plt\n'), ((11297, 11321), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (tau)"""'], {}), "('Time (tau)')\n", (11307, 11321), True, 'import matplotlib.pyplot as plt\n'), ((11326, 11343), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSD"""'], {}), "('MSD')\n", (11336, 11343), True, 'import matplotlib.pyplot as plt\n'), ((11348, 11376), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (11358, 11376), True, 'import matplotlib.pyplot as plt\n'), ((11381, 11433), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('MSD_LG_' + plt_name + '.png')"], {'dpi': '(1000)'}), "('MSD_LG_' + plt_name + '.png', dpi=1000)\n", (11392, 11433), True, 'import matplotlib.pyplot as plt\n'), ((11438, 11449), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11447, 11449), True, 'import matplotlib.pyplot as plt\n'), ((12042, 12140), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'MSD_T'], {'color': '"""g"""', 'marker': '"""s"""', 
'markersize': '(3)', 'linestyle': '"""None"""', 'label': '"""MSD"""'}), "(msd_time, MSD_T, color='g', marker='s', markersize=3, linestyle=\n 'None', label='MSD')\n", (12050, 12140), True, 'import matplotlib.pyplot as plt\n'), ((12141, 12165), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(10 ** -6, 10)'], {}), '((10 ** -6, 10))\n', (12149, 12165), True, 'import matplotlib.pyplot as plt\n'), ((12167, 12190), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(10 ** -6)'}), '(ymin=10 ** -6)\n', (12175, 12190), True, 'import matplotlib.pyplot as plt\n'), ((12193, 12210), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (12203, 12210), True, 'import matplotlib.pyplot as plt\n'), ((12215, 12232), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (12225, 12232), True, 'import matplotlib.pyplot as plt\n'), ((12237, 12261), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (tau)"""'], {}), "('Time (tau)')\n", (12247, 12261), True, 'import matplotlib.pyplot as plt\n'), ((12266, 12283), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSD"""'], {}), "('MSD')\n", (12276, 12283), True, 'import matplotlib.pyplot as plt\n'), ((12288, 12316), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (12298, 12316), True, 'import matplotlib.pyplot as plt\n'), ((12321, 12376), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('MSD_total_' + plt_name + '.png')"], {'dpi': '(1000)'}), "('MSD_total_' + plt_name + '.png', dpi=1000)\n", (12332, 12376), True, 'import matplotlib.pyplot as plt\n'), ((12381, 12392), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12390, 12392), True, 'import matplotlib.pyplot as plt\n'), ((12680, 12779), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'MSD_TL'], {'color': '"""b"""', 'marker': '"""s"""', 'markersize': '(3)', 'linestyle': '"""None"""', 'label': '"""Liq"""'}), "(msd_time, MSD_TL, color='b', marker='s', markersize=3, linestyle=\n 'None', 
label='Liq')\n", (12688, 12779), True, 'import matplotlib.pyplot as plt\n'), ((12780, 12879), 'matplotlib.pyplot.plot', 'plt.plot', (['msd_time', 'MSD_TG'], {'color': '"""r"""', 'marker': '"""o"""', 'markersize': '(3)', 'linestyle': '"""None"""', 'label': '"""Gas"""'}), "(msd_time, MSD_TG, color='r', marker='o', markersize=3, linestyle=\n 'None', label='Gas')\n", (12788, 12879), True, 'import matplotlib.pyplot as plt\n'), ((12880, 12904), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(10 ** -6, 10)'], {}), '((10 ** -6, 10))\n', (12888, 12904), True, 'import matplotlib.pyplot as plt\n'), ((12906, 12929), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(10 ** -6)'}), '(ymin=10 ** -6)\n', (12914, 12929), True, 'import matplotlib.pyplot as plt\n'), ((12932, 12949), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (12942, 12949), True, 'import matplotlib.pyplot as plt\n'), ((12954, 12971), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (12964, 12971), True, 'import matplotlib.pyplot as plt\n'), ((12976, 13000), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (tau)"""'], {}), "('Time (tau)')\n", (12986, 13000), True, 'import matplotlib.pyplot as plt\n'), ((13039, 13056), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSD"""'], {}), "('MSD')\n", (13049, 13056), True, 'import matplotlib.pyplot as plt\n'), ((13061, 13089), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (13071, 13089), True, 'import matplotlib.pyplot as plt\n'), ((13094, 13146), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('MSD_LG_' + plt_name + '.png')"], {'dpi': '(1000)'}), "('MSD_LG_' + plt_name + '.png', dpi=1000)\n", (13105, 13146), True, 'import matplotlib.pyplot as plt\n'), ((13151, 13162), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13160, 13162), True, 'import matplotlib.pyplot as plt\n'), ((11500, 11552), 'numpy.transpose', 'np.transpose', (['[msd_time, GAS_A, GAS_B, 
LIQ_A, LIQ_B]'], {}), '([msd_time, GAS_A, GAS_B, LIQ_A, LIQ_B])\n', (11512, 11552), True, 'import numpy as np\n'), ((11604, 11651), 'numpy.transpose', 'np.transpose', (['[msd_time, MSD_T, MSD_TL, MSD_TG]'], {}), '([msd_time, MSD_T, MSD_TL, MSD_TG])\n', (11616, 11651), True, 'import numpy as np\n'), ((13214, 13261), 'numpy.transpose', 'np.transpose', (['[msd_time, MSD_T, MSD_TL, MSD_TG]'], {}), '([msd_time, MSD_T, MSD_TL, MSD_TG])\n', (13226, 13261), True, 'import numpy as np\n')] |
import unittest
from simplex_method import *
import numpy as np
from scipy import optimize
class TestSimplexMethod(unittest.TestCase):
def test_simplex_method(self):
m = np.array([[-2.0, 1.0, -10.0],
[1.0, 1.0, 20.0],
[-5.0, -10.0, 0.0]])
res = {'x0': 10.0, 'x1': 10.0, 'optimum': 150.0}
self.assertEqual(simplex_method(m, dictionary_output=True), res)
m = np.array([[-2.0, -5.0, -30.0],
[3.0, -5.0, -5.0],
[8.0, 3.0, 85.0],
[-9.0, 7.0, 42.0],
[-2.0, -7.0, 0.0]])
res = {'x0': 5.650602409638554, 'x1': 13.265060240963855, 'optimum': 104.1566265060241}
self.assertEqual(simplex_method(m, dictionary_output=True), res)
# y >= -x + 1
# y >= x + 1
m = np.array([[-1, -1, -1], [-1, 1, -1], [1, 0, 0]])
res = 1
self.assertEqual(simplex_method(m, unrestricted=True, dictionary_output=True)['optimum'], res)
# y >= -x + 2
# y >= x - 6
m = np.array([[-1, -1, -2], [-1, 1, 6], [1, 0, 0]])
res = -2
self.assertEqual(simplex_method(m, unrestricted=True, dictionary_output=True)['optimum'], res)
# y >= -x + 6
# y >= x - 2
m = np.array([[-1, -1, -6], [-1, 1, 2], [1, 0, 0]])
res = 2
A = np.array([[-1, -1], [-1, 1]])
b = np.array([-6, 2])
c = [1, 0]
print(optimize.linprog(c, A_ub=A, b_ub=b).get('x'))
print(simplex_method_scipy(m, unrestricted=True))
self.assertEqual(simplex_method(m, unrestricted=True, dictionary_output=True)['optimum'], res)
A = np.array([[-1, -1], [-1, 1]])
b = np.array([-2, 6])
c = [1, 0]
# print(optimize.linprog(c, A_ub=A, b_ub=b).get('x'))
m = np.array([[-1, -1, -1], [-1, 1, -1], [1, 0, 0]])
print(simplex_method_scipy(m, unrestricted=True))
def test_get_column(self):
m = np.array([[-2.0, 1.0, -10.0],
[1.0, 1.0, 20.0],
[-5.0, -10.0, 0.0]])
res = np.array([-10, 20, 0])
self.assertTrue(np.array_equal(get_column(m, -1), res))
def test_make_unrestricted_variables(self):
m = np.array([[-2.0, 1.0, -10.0],
[1.0, 1.0, 20.0],
[-5.0, -10.0, 0.0]])
res = [[-2.0, 2.0, 1.0, -1.0, -10.0],
[1.0, -1.0, 1.0, -1.0, 20.0],
[-1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, -1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, -1.0, 0.0],
[-5.0, 5.0, -10.0, 10.0, 0.0]]
self.assertEqual(make_unrestricted_variables(m).tolist(), res)
def test_reduce_variables(self):
m = [10, 20, 10, 30, 150]
res = [-10, -20, 150]
self.assertEqual(reduce_variables(m, 4), res)
def test_restricted_variables(self):
m = np.array([[-2.0, 1.0, -10.0],
[1.0, 1.0, 20.0],
[-5.0, -10.0, 0.0]])
self.assertTrue(np.array_equal(simplex_method(m, unrestricted=True), simplex_method(m, unrestricted=False)))
m = np.array([[-2.0, -5.0, -30.0],
[3.0, -5.0, -5.0],
[8.0, 3.0, 85.0],
[-9.0, 7.0, 42.0],
[-2.0, -7.0, 0.0]])
self.assertTrue(np.array_equal(simplex_method(m, unrestricted=True), simplex_method(m, unrestricted=False)))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.array",
"scipy.optimize.linprog"
] | [((3562, 3577), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3575, 3577), False, 'import unittest\n'), ((185, 253), 'numpy.array', 'np.array', (['[[-2.0, 1.0, -10.0], [1.0, 1.0, 20.0], [-5.0, -10.0, 0.0]]'], {}), '([[-2.0, 1.0, -10.0], [1.0, 1.0, 20.0], [-5.0, -10.0, 0.0]])\n', (193, 253), True, 'import numpy as np\n'), ((440, 551), 'numpy.array', 'np.array', (['[[-2.0, -5.0, -30.0], [3.0, -5.0, -5.0], [8.0, 3.0, 85.0], [-9.0, 7.0, 42.0\n ], [-2.0, -7.0, 0.0]]'], {}), '([[-2.0, -5.0, -30.0], [3.0, -5.0, -5.0], [8.0, 3.0, 85.0], [-9.0, \n 7.0, 42.0], [-2.0, -7.0, 0.0]])\n', (448, 551), True, 'import numpy as np\n'), ((859, 907), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 1, -1], [1, 0, 0]]'], {}), '([[-1, -1, -1], [-1, 1, -1], [1, 0, 0]])\n', (867, 907), True, 'import numpy as np\n'), ((1082, 1129), 'numpy.array', 'np.array', (['[[-1, -1, -2], [-1, 1, 6], [1, 0, 0]]'], {}), '([[-1, -1, -2], [-1, 1, 6], [1, 0, 0]])\n', (1090, 1129), True, 'import numpy as np\n'), ((1305, 1352), 'numpy.array', 'np.array', (['[[-1, -1, -6], [-1, 1, 2], [1, 0, 0]]'], {}), '([[-1, -1, -6], [-1, 1, 2], [1, 0, 0]])\n', (1313, 1352), True, 'import numpy as np\n'), ((1381, 1410), 'numpy.array', 'np.array', (['[[-1, -1], [-1, 1]]'], {}), '([[-1, -1], [-1, 1]])\n', (1389, 1410), True, 'import numpy as np\n'), ((1423, 1440), 'numpy.array', 'np.array', (['[-6, 2]'], {}), '([-6, 2])\n', (1431, 1440), True, 'import numpy as np\n'), ((1693, 1722), 'numpy.array', 'np.array', (['[[-1, -1], [-1, 1]]'], {}), '([[-1, -1], [-1, 1]])\n', (1701, 1722), True, 'import numpy as np\n'), ((1735, 1752), 'numpy.array', 'np.array', (['[-2, 6]'], {}), '([-2, 6])\n', (1743, 1752), True, 'import numpy as np\n'), ((1846, 1894), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 1, -1], [1, 0, 0]]'], {}), '([[-1, -1, -1], [-1, 1, -1], [1, 0, 0]])\n', (1854, 1894), True, 'import numpy as np\n'), ((1997, 2065), 'numpy.array', 'np.array', (['[[-2.0, 1.0, -10.0], [1.0, 1.0, 20.0], [-5.0, -10.0, 0.0]]'], 
{}), '([[-2.0, 1.0, -10.0], [1.0, 1.0, 20.0], [-5.0, -10.0, 0.0]])\n', (2005, 2065), True, 'import numpy as np\n'), ((2124, 2146), 'numpy.array', 'np.array', (['[-10, 20, 0]'], {}), '([-10, 20, 0])\n', (2132, 2146), True, 'import numpy as np\n'), ((2272, 2340), 'numpy.array', 'np.array', (['[[-2.0, 1.0, -10.0], [1.0, 1.0, 20.0], [-5.0, -10.0, 0.0]]'], {}), '([[-2.0, 1.0, -10.0], [1.0, 1.0, 20.0], [-5.0, -10.0, 0.0]])\n', (2280, 2340), True, 'import numpy as np\n'), ((2975, 3043), 'numpy.array', 'np.array', (['[[-2.0, 1.0, -10.0], [1.0, 1.0, 20.0], [-5.0, -10.0, 0.0]]'], {}), '([[-2.0, 1.0, -10.0], [1.0, 1.0, 20.0], [-5.0, -10.0, 0.0]])\n', (2983, 3043), True, 'import numpy as np\n'), ((3217, 3328), 'numpy.array', 'np.array', (['[[-2.0, -5.0, -30.0], [3.0, -5.0, -5.0], [8.0, 3.0, 85.0], [-9.0, 7.0, 42.0\n ], [-2.0, -7.0, 0.0]]'], {}), '([[-2.0, -5.0, -30.0], [3.0, -5.0, -5.0], [8.0, 3.0, 85.0], [-9.0, \n 7.0, 42.0], [-2.0, -7.0, 0.0]])\n', (3225, 3328), True, 'import numpy as np\n'), ((1474, 1509), 'scipy.optimize.linprog', 'optimize.linprog', (['c'], {'A_ub': 'A', 'b_ub': 'b'}), '(c, A_ub=A, b_ub=b)\n', (1490, 1509), False, 'from scipy import optimize\n')] |
# @author : <NAME>
import os
import math
import time
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
from fcn_model import FCN
from fcn_utils import init, read_config_file, get_data, preprocess_images, get_train_test_split
param_config_file_name = os.path.join(os.getcwd(), "fcn_config.json")
# return the softmax layer
def get_softmax_layer(input_tensor, dim=-1, name="softmax"):
prediction = tf.nn.softmax(input_tensor, dim=dim, name=name)
return prediction
# return the sorensen-dice coefficient
def dice_loss(ground_truth, predicted_logits, dim=-1, smooth=1e-5, name="mean_dice_loss"):
predicted_probs = get_softmax_layer(input_tensor=predicted_logits, dim=dim)
intersection = tf.reduce_sum(tf.multiply(ground_truth, predicted_probs), axis=[1, 2, 3])
union = tf.reduce_sum(ground_truth, axis=[1, 2, 3]) + tf.reduce_sum(predicted_probs, axis=[1, 2, 3])
dice_coeff = (2. * intersection + smooth) / (union + smooth)
dice_loss = tf.reduce_mean(-tf.log(dice_coeff), name=name)
return dice_loss
# return cross entropy loss
def cross_entropy_loss(ground_truth, prediction, axis, name="mean_cross_entropy"):
mean_ce = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ground_truth, logits=prediction, dim=axis), name=name)
return mean_ce
# return the optimizer which has to be used to minimize the loss function
def get_optimizer(learning_rate, loss_function):
adam_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_function)
return adam_optimizer
# return the placeholder
def get_placeholders(img_placeholder_shape, mask_placeholder_shape):
# set the image placeholder
img_pl = tf.placeholder(tf.float32, shape=img_placeholder_shape)
# set the mask placeholder
mask_pl = tf.placeholder(tf.float32, shape=mask_placeholder_shape)
return (img_pl, mask_pl)
# save the trained model
def save_model(session, model_directory, model_file, epoch):
saver = tf.train.Saver()
saver.save(session, os.path.join(os.getcwd(), model_directory, model_file), global_step=(epoch + 1))
# start batch training of the network
def batch_train():
print("Reading the config file..................")
config = read_config_file(param_config_file_name)
model_to_use = config["model_to_use"]
print("Reading the config file completed........\n")
print("Initializing.............................")
model_directory = config["model_directory"][model_to_use] + str(config["num_epochs"])
init(model_directory)
print("Initializing completed...................\n")
print("Reading train data.......................")
all_images_list = os.listdir(config["inputs_path"])
train_valid_list, test_list = get_train_test_split(all_images_list, test_size=0.5)
train_list, valid_list = get_train_test_split(train_valid_list, test_size=0.04)
train_images, train_masks = get_data(train_list, config["inputs_path"], config["masks_path"], config["target_image_size"] + [config["num_classes"]])
valid_images, valid_masks = get_data(valid_list, config["inputs_path"], config["masks_path"], config["target_image_size"] + [config["num_classes"]])
print("Reading train data completed.............\n")
print("Preprocessing the data...................")
train_images = preprocess_images(train_images)
valid_images = preprocess_images(valid_images)
print("Preprocessing of the data completed......\n")
print("Building the network.....................")
axis = -1
if config["data_format"] == "channels_last":
img_pl_shape = [None] + config["target_image_size"] + [config["num_channels"]]
mask_pl_shape = [None] + config["target_image_size"] + [config["num_classes"]]
else:
img_pl_shape = [None] + [config["num_channels"]] + config["target_image_size"]
mask_pl_shape = [None] + [config["num_classes"]] + config["target_image_size"]
train_images = np.transpose(train_images, [0, 3, 1, 2])
train_masks = np.transpose(train_masks, [0, 3, 1, 2])
valid_images = np.transpose(valid_images, [0, 3, 1, 2])
valid_masks = np.transpose(valid_masks, [0, 3, 1, 2])
axis = 1
img_pl, mask_pl = get_placeholders(img_placeholder_shape=img_pl_shape, mask_placeholder_shape=mask_pl_shape)
fcn = FCN(config["vgg_path"], config["data_format"], config["num_classes"])
fcn.vgg_encoder(img_pl)
if model_to_use % 3 == 0:
fcn.fcn_8()
logits = fcn.logits
elif model_to_use % 3 == 1:
fcn.fcn_16()
logits = fcn.logits
else:
fcn.fcn_32()
logits = fcn.logits
if model_to_use == 0 or model_to_use == 1 or model_to_use == 2:
loss = cross_entropy_loss(mask_pl, logits, axis = axis)
elif model_to_use == 3 or model_to_use == 4 or model_to_use == 5:
loss = dice_loss(mask_pl, logits, dim = axis)
else:
loss_1 = dice_loss(mask_pl, logits, dim = axis)
loss_2 = cross_entropy_loss(mask_pl, logits, axis = axis)
loss = loss_1 + loss_2
optimizer = get_optimizer(config["learning_rate"], loss)
print("Building the network completed...........\n")
num_epochs = config["num_epochs"]
batch_size = config["batch_size"]
num_batches = int(math.ceil(train_images.shape[0] / float(batch_size)))
print(f"Train data shape : {train_images.shape}")
print(f"Train mask shape : {train_masks.shape}")
print(f"Validation data shape : {valid_images.shape}")
print(f"Validation mask shape : {valid_masks.shape}")
print(f"Number of epochs to train : {num_epochs}")
print(f"Batch size : {batch_size}")
print(f"Number of batches : {num_batches}\n")
print("Training the Network.....................")
ss = tf.Session()
ss.run(tf.global_variables_initializer())
train_loss_per_epoch = list()
valid_loss_per_epoch = list()
for epoch in range(num_epochs):
ti = time.time()
temp_loss_per_epoch = 0
train_images, train_masks = shuffle(train_images, train_masks)
for batch_id in range(num_batches):
batch_images = train_images[batch_id * batch_size : (batch_id + 1) * batch_size]
batch_masks = train_masks[batch_id * batch_size : (batch_id + 1) * batch_size]
_, loss_per_batch = ss.run([optimizer, loss],
feed_dict = {img_pl : batch_images, mask_pl : batch_masks})
temp_loss_per_epoch += (batch_images.shape[0] * loss_per_batch)
ti = time.time() - ti
loss_validation_set = ss.run(loss, feed_dict = {img_pl : valid_images, mask_pl : valid_masks})
train_loss_per_epoch.append(temp_loss_per_epoch)
valid_loss_per_epoch.append(loss_validation_set)
print(f"Epoch : {epoch+1} / {num_epochs} time taken : {ti:.4f} sec.")
print(f"Avg. training loss : {temp_loss_per_epoch / train_images.shape[0]:.4f}")
print(f"Avg. validation loss : {loss_validation_set:.4f}")
if (epoch + 1) % 25 == 0:
save_model(ss, model_directory, config["model_file"][model_to_use % 3], epoch)
print("Training the Network Completed...........\n")
print("Saving the model.........................")
save_model(ss, model_directory, config["model_file"][model_to_use % 3], epoch)
train_loss_per_epoch = np.array(train_loss_per_epoch)
valid_loss_per_epoch = np.array(valid_loss_per_epoch)
train_loss_per_epoch = np.true_divide(train_loss_per_epoch, train_images.shape[0])
losses_dict = dict()
losses_dict["train_loss"] = train_loss_per_epoch
losses_dict["valid_loss"] = valid_loss_per_epoch
np.save(os.path.join(os.getcwd(), model_directory, config["model_metrics"][model_to_use % 3]), (losses_dict))
np.save(os.path.join(os.getcwd(), model_directory, "train_list.npy"), np.array(train_list))
np.save(os.path.join(os.getcwd(), model_directory, "valid_list.npy"), np.array(valid_list))
print("Saving the model Completed...............\n")
ss.close()
def main():
batch_train()
if __name__ == "__main__":
main()
| [
"tensorflow.reduce_sum",
"fcn_model.FCN",
"tensorflow.multiply",
"numpy.array",
"tensorflow.nn.softmax",
"tensorflow.log",
"os.listdir",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.train.AdamOptimizer",
"fcn_utils.init",
"fcn... | [((293, 304), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (302, 304), False, 'import os\n'), ((431, 478), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['input_tensor'], {'dim': 'dim', 'name': 'name'}), '(input_tensor, dim=dim, name=name)\n', (444, 478), True, 'import tensorflow as tf\n'), ((1709, 1764), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'img_placeholder_shape'}), '(tf.float32, shape=img_placeholder_shape)\n', (1723, 1764), True, 'import tensorflow as tf\n'), ((1811, 1867), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'mask_placeholder_shape'}), '(tf.float32, shape=mask_placeholder_shape)\n', (1825, 1867), True, 'import tensorflow as tf\n'), ((1996, 2012), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2010, 2012), True, 'import tensorflow as tf\n'), ((2244, 2284), 'fcn_utils.read_config_file', 'read_config_file', (['param_config_file_name'], {}), '(param_config_file_name)\n', (2260, 2284), False, 'from fcn_utils import init, read_config_file, get_data, preprocess_images, get_train_test_split\n'), ((2534, 2555), 'fcn_utils.init', 'init', (['model_directory'], {}), '(model_directory)\n', (2538, 2555), False, 'from fcn_utils import init, read_config_file, get_data, preprocess_images, get_train_test_split\n'), ((2691, 2724), 'os.listdir', 'os.listdir', (["config['inputs_path']"], {}), "(config['inputs_path'])\n", (2701, 2724), False, 'import os\n'), ((2759, 2811), 'fcn_utils.get_train_test_split', 'get_train_test_split', (['all_images_list'], {'test_size': '(0.5)'}), '(all_images_list, test_size=0.5)\n', (2779, 2811), False, 'from fcn_utils import init, read_config_file, get_data, preprocess_images, get_train_test_split\n'), ((2841, 2895), 'fcn_utils.get_train_test_split', 'get_train_test_split', (['train_valid_list'], {'test_size': '(0.04)'}), '(train_valid_list, test_size=0.04)\n', (2861, 2895), False, 'from fcn_utils import init, read_config_file, get_data, preprocess_images, 
get_train_test_split\n'), ((2929, 3054), 'fcn_utils.get_data', 'get_data', (['train_list', "config['inputs_path']", "config['masks_path']", "(config['target_image_size'] + [config['num_classes']])"], {}), "(train_list, config['inputs_path'], config['masks_path'], config[\n 'target_image_size'] + [config['num_classes']])\n", (2937, 3054), False, 'from fcn_utils import init, read_config_file, get_data, preprocess_images, get_train_test_split\n'), ((3082, 3207), 'fcn_utils.get_data', 'get_data', (['valid_list', "config['inputs_path']", "config['masks_path']", "(config['target_image_size'] + [config['num_classes']])"], {}), "(valid_list, config['inputs_path'], config['masks_path'], config[\n 'target_image_size'] + [config['num_classes']])\n", (3090, 3207), False, 'from fcn_utils import init, read_config_file, get_data, preprocess_images, get_train_test_split\n'), ((3335, 3366), 'fcn_utils.preprocess_images', 'preprocess_images', (['train_images'], {}), '(train_images)\n', (3352, 3366), False, 'from fcn_utils import init, read_config_file, get_data, preprocess_images, get_train_test_split\n'), ((3386, 3417), 'fcn_utils.preprocess_images', 'preprocess_images', (['valid_images'], {}), '(valid_images)\n', (3403, 3417), False, 'from fcn_utils import init, read_config_file, get_data, preprocess_images, get_train_test_split\n'), ((4345, 4414), 'fcn_model.FCN', 'FCN', (["config['vgg_path']", "config['data_format']", "config['num_classes']"], {}), "(config['vgg_path'], config['data_format'], config['num_classes'])\n", (4348, 4414), False, 'from fcn_model import FCN\n'), ((5789, 5801), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5799, 5801), True, 'import tensorflow as tf\n'), ((7352, 7382), 'numpy.array', 'np.array', (['train_loss_per_epoch'], {}), '(train_loss_per_epoch)\n', (7360, 7382), True, 'import numpy as np\n'), ((7410, 7440), 'numpy.array', 'np.array', (['valid_loss_per_epoch'], {}), '(valid_loss_per_epoch)\n', (7418, 7440), True, 'import numpy as np\n'), 
((7469, 7528), 'numpy.true_divide', 'np.true_divide', (['train_loss_per_epoch', 'train_images.shape[0]'], {}), '(train_loss_per_epoch, train_images.shape[0])\n', (7483, 7528), True, 'import numpy as np\n'), ((745, 787), 'tensorflow.multiply', 'tf.multiply', (['ground_truth', 'predicted_probs'], {}), '(ground_truth, predicted_probs)\n', (756, 787), True, 'import tensorflow as tf\n'), ((817, 860), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['ground_truth'], {'axis': '[1, 2, 3]'}), '(ground_truth, axis=[1, 2, 3])\n', (830, 860), True, 'import tensorflow as tf\n'), ((863, 909), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['predicted_probs'], {'axis': '[1, 2, 3]'}), '(predicted_probs, axis=[1, 2, 3])\n', (876, 909), True, 'import tensorflow as tf\n'), ((1201, 1295), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'ground_truth', 'logits': 'prediction', 'dim': 'axis'}), '(labels=ground_truth, logits=\n prediction, dim=axis)\n', (1240, 1295), True, 'import tensorflow as tf\n'), ((3975, 4015), 'numpy.transpose', 'np.transpose', (['train_images', '[0, 3, 1, 2]'], {}), '(train_images, [0, 3, 1, 2])\n', (3987, 4015), True, 'import numpy as np\n'), ((4038, 4077), 'numpy.transpose', 'np.transpose', (['train_masks', '[0, 3, 1, 2]'], {}), '(train_masks, [0, 3, 1, 2])\n', (4050, 4077), True, 'import numpy as np\n'), ((4101, 4141), 'numpy.transpose', 'np.transpose', (['valid_images', '[0, 3, 1, 2]'], {}), '(valid_images, [0, 3, 1, 2])\n', (4113, 4141), True, 'import numpy as np\n'), ((4164, 4203), 'numpy.transpose', 'np.transpose', (['valid_masks', '[0, 3, 1, 2]'], {}), '(valid_masks, [0, 3, 1, 2])\n', (4176, 4203), True, 'import numpy as np\n'), ((5813, 5846), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5844, 5846), True, 'import tensorflow as tf\n'), ((5967, 5978), 'time.time', 'time.time', ([], {}), '()\n', (5976, 5978), False, 'import time\n'), ((6047, 6081), 
'sklearn.utils.shuffle', 'shuffle', (['train_images', 'train_masks'], {}), '(train_images, train_masks)\n', (6054, 6081), False, 'from sklearn.utils import shuffle\n'), ((7850, 7870), 'numpy.array', 'np.array', (['train_list'], {}), '(train_list)\n', (7858, 7870), True, 'import numpy as np\n'), ((7946, 7966), 'numpy.array', 'np.array', (['valid_list'], {}), '(valid_list)\n', (7954, 7966), True, 'import numpy as np\n'), ((1008, 1026), 'tensorflow.log', 'tf.log', (['dice_coeff'], {}), '(dice_coeff)\n', (1014, 1026), True, 'import tensorflow as tf\n'), ((1467, 1518), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1489, 1518), True, 'import tensorflow as tf\n'), ((2050, 2061), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2059, 2061), False, 'import os\n'), ((6534, 6545), 'time.time', 'time.time', ([], {}), '()\n', (6543, 6545), False, 'import time\n'), ((7687, 7698), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7696, 7698), False, 'import os\n'), ((7801, 7812), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7810, 7812), False, 'import os\n'), ((7897, 7908), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7906, 7908), False, 'import os\n')] |
# The MIT License (MIT)
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from matplotlib import pyplot as plt
from collections import deque
from threading import Lock, Thread
import myo
import numpy as np
class EmgCollector(myo.DeviceListener):
"""
Collects EMG data in a queue with *n* maximum number of elements.
"""
def __init__(self, n):
self.n = n
self.lock = Lock()
self.emg_data_queue = deque(maxlen=n)
def get_emg_data(self):
with self.lock:
return list(self.emg_data_queue)
# myo.DeviceListener
def on_connected(self, event):
event.device.stream_emg(True)
def on_emg(self, event):
with self.lock:
self.emg_data_queue.append((event.timestamp, event.emg))
class Plot(object):
def __init__(self, listener):
self.n = listener.n
self.listener = listener
self.fig = plt.figure()
self.axes = [self.fig.add_subplot('81' + str(i)) for i in range(1, 9)]
[(ax.set_ylim([-100, 100])) for ax in self.axes]
self.graphs = [ax.plot(np.arange(self.n), np.zeros(self.n))[0] for ax in self.axes]
plt.ion()
def update_plot(self):
emg_data = self.listener.get_emg_data()
emg_data = np.array([x[1] for x in emg_data]).T
for g, data in zip(self.graphs, emg_data):
if len(data) < self.n:
# Fill the left side with zeroes.
data = np.concatenate([np.zeros(self.n - len(data)), data])
g.set_ydata(data)
plt.draw()
def main(self):
while True:
self.update_plot()
plt.pause(1.0 / 30)
def main():
### enter the path to your own MyoSDK package and .dll file here. Download
# with Nuget @ https://www.nuget.org/packages/MyoSDK/2.1.0 and insert .dll file within
# /bin folder if required.
myo.init(sdk_path="C:\\Users\\dicke\\packages\\MyoSDK.2.1.0")
hub = myo.Hub()
listener = EmgCollector(512)
with hub.run_in_background(listener.on_event):
Plot(listener).main()
if __name__ == '__main__':
main()
| [
"collections.deque",
"threading.Lock",
"numpy.array",
"matplotlib.pyplot.figure",
"myo.Hub",
"numpy.zeros",
"myo.init",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.pause",
"numpy.arange",
"matplotlib.pyplot.ion"
] | [((2773, 2834), 'myo.init', 'myo.init', ([], {'sdk_path': '"""C:\\\\Users\\\\dicke\\\\packages\\\\MyoSDK.2.1.0"""'}), "(sdk_path='C:\\\\Users\\\\dicke\\\\packages\\\\MyoSDK.2.1.0')\n", (2781, 2834), False, 'import myo\n'), ((2843, 2852), 'myo.Hub', 'myo.Hub', ([], {}), '()\n', (2850, 2852), False, 'import myo\n'), ((1424, 1430), 'threading.Lock', 'Lock', ([], {}), '()\n', (1428, 1430), False, 'from threading import Lock, Thread\n'), ((1457, 1472), 'collections.deque', 'deque', ([], {'maxlen': 'n'}), '(maxlen=n)\n', (1462, 1472), False, 'from collections import deque\n'), ((1885, 1897), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1895, 1897), True, 'from matplotlib import pyplot as plt\n'), ((2118, 2127), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2125, 2127), True, 'from matplotlib import pyplot as plt\n'), ((2464, 2474), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2472, 2474), True, 'from matplotlib import pyplot as plt\n'), ((2213, 2247), 'numpy.array', 'np.array', (['[x[1] for x in emg_data]'], {}), '([x[1] for x in emg_data])\n', (2221, 2247), True, 'import numpy as np\n'), ((2541, 2560), 'matplotlib.pyplot.pause', 'plt.pause', (['(1.0 / 30)'], {}), '(1.0 / 30)\n', (2550, 2560), True, 'from matplotlib import pyplot as plt\n'), ((2053, 2070), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (2062, 2070), True, 'import numpy as np\n'), ((2072, 2088), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (2080, 2088), True, 'import numpy as np\n')] |
import math
import numpy as np
from knn_robustness.utils import top_k_min_indices
from knn_robustness.utils import KnnPredictor
from knn_robustness.utils import QpSolver
class ExactSolver:
def __init__(
self, X_train, y_train, qp_solver: QpSolver,
n_pos_for_screen, bounded, upper=1., lower=0.
):
self._X_train = X_train
self._y_train = y_train
self._qp_solver = qp_solver
self._n_pos_for_screen = n_pos_for_screen
self._bounded = bounded
self._upper = upper
self._lower = lower
self._predictor = KnnPredictor(X_train, y_train, n_neighbors=1)
def predict_batch(self, X_eval):
return self._predictor.predict_batch(X_eval)
def predict_individual(self, x_eval):
return self._predictor.predict_individual(x_eval)
def __call__(self, x_eval):
X_pos, X_neg = self._partition(x_eval)
X_screen = self._compute_pos_for_screen(x_eval, X_pos)
inner_product_pos = X_pos @ X_pos.T
best_perturbation = None
min_perturbation_norm = math.inf
for x_neg in self._neg_generator(x_eval, X_neg):
if self._screenable(
x_eval, x_neg, X_screen, min_perturbation_norm
):
continue
else:
perturbation = self._solve_subproblem(
x_eval, x_neg, X_pos, inner_product_pos
)
perturbation_norm = np.linalg.norm(perturbation)
if perturbation_norm < min_perturbation_norm:
min_perturbation_norm = perturbation_norm
best_perturbation = perturbation
return best_perturbation
def _partition(self, x_eval):
y_pred = self.predict_individual(x_eval)
mask = (self._y_train == y_pred)
X_pos = self._X_train[mask]
X_neg = self._X_train[~mask]
return X_pos, X_neg
def _compute_pos_for_screen(self, x_eval, X_pos):
indices = top_k_min_indices(
np.linalg.norm(x_eval - X_pos, axis=1),
self._n_pos_for_screen
)
return X_pos[indices]
def _neg_generator(self, x_eval, X_neg):
indices = np.argsort(
np.linalg.norm(
X_neg - x_eval, axis=1
)
)
for i in indices:
yield X_neg[i]
def _screenable(self, x_eval, x_neg, X_screen, threshold):
return threshold <= np.max(
np.maximum(
np.sum(
np.multiply(
2 * x_eval - X_screen - x_neg, X_screen - x_neg
),
axis=1
),
0
) / (2 * np.linalg.norm(X_screen - x_neg, axis=1))
)
def _solve_subproblem(self, x_eval, x_neg, X_pos, inner_product_pos=None):
A, b, Q = self._compute_qp_params(
x_eval, x_neg, X_pos, inner_product_pos
)
lamda = self._qp_solver(Q, b)
return -A.T @ lamda
def _compute_qp_params(
self, x_eval, x_neg, X_pos, inner_product_pos
):
if inner_product_pos is None:
inner_product_pos = X_pos @ X_pos.T
# A @ u <= b
A = 2 * (X_pos - x_neg)
# test: this one is much more efficient due to less multiplications
b = np.sum(np.multiply(X_pos + x_neg - 2 * x_eval,
X_pos - x_neg), axis=1)
# X @ y
temp = X_pos @ x_neg
# A @ A.T = 4 * (X @ X.T - X @ y - (X @ y).T + y.T @ y)
Q = 4 * (inner_product_pos - temp[np.newaxis, :]
- temp[:, np.newaxis] + x_neg @ x_neg)
# min 0.5 * v.T @ P @ v + v.T @ b, v >= 0
# max - 0.5 * v.T @ P @ v - v.T @ b, v >= 0
if not self._bounded:
return A, b, Q
else:
# upper bound
# A1 @ delta <= b1
# z + delta <= upper
A1 = np.identity(X_pos.shape[1], dtype=X_pos.dtype)
b1 = self._upper - x_eval
# lower bound
# A2 @ delta <= b2
# z + delta >= lower
A2 = -np.identity(X_pos.shape[1], dtype=X_pos.dtype)
b2 = x_eval - self._lower
# A_full @ A_full.T
Q_full = np.block([
[Q, A, -A],
[A.T, A1, A2],
[-A.T, A2, A1],
])
A_full = np.block([
[A],
[A1],
[A2]
])
b_full = np.concatenate([b, b1, b2])
return A_full, b_full, Q_full
| [
"numpy.identity",
"numpy.block",
"numpy.multiply",
"knn_robustness.utils.KnnPredictor",
"numpy.concatenate",
"numpy.linalg.norm"
] | [((599, 644), 'knn_robustness.utils.KnnPredictor', 'KnnPredictor', (['X_train', 'y_train'], {'n_neighbors': '(1)'}), '(X_train, y_train, n_neighbors=1)\n', (611, 644), False, 'from knn_robustness.utils import KnnPredictor\n'), ((2054, 2092), 'numpy.linalg.norm', 'np.linalg.norm', (['(x_eval - X_pos)'], {'axis': '(1)'}), '(x_eval - X_pos, axis=1)\n', (2068, 2092), True, 'import numpy as np\n'), ((2257, 2295), 'numpy.linalg.norm', 'np.linalg.norm', (['(X_neg - x_eval)'], {'axis': '(1)'}), '(X_neg - x_eval, axis=1)\n', (2271, 2295), True, 'import numpy as np\n'), ((3384, 3438), 'numpy.multiply', 'np.multiply', (['(X_pos + x_neg - 2 * x_eval)', '(X_pos - x_neg)'], {}), '(X_pos + x_neg - 2 * x_eval, X_pos - x_neg)\n', (3395, 3438), True, 'import numpy as np\n'), ((3985, 4031), 'numpy.identity', 'np.identity', (['X_pos.shape[1]'], {'dtype': 'X_pos.dtype'}), '(X_pos.shape[1], dtype=X_pos.dtype)\n', (3996, 4031), True, 'import numpy as np\n'), ((4318, 4371), 'numpy.block', 'np.block', (['[[Q, A, -A], [A.T, A1, A2], [-A.T, A2, A1]]'], {}), '([[Q, A, -A], [A.T, A1, A2], [-A.T, A2, A1]])\n', (4326, 4371), True, 'import numpy as np\n'), ((4457, 4484), 'numpy.block', 'np.block', (['[[A], [A1], [A2]]'], {}), '([[A], [A1], [A2]])\n', (4465, 4484), True, 'import numpy as np\n'), ((4569, 4596), 'numpy.concatenate', 'np.concatenate', (['[b, b1, b2]'], {}), '([b, b1, b2])\n', (4583, 4596), True, 'import numpy as np\n'), ((1484, 1512), 'numpy.linalg.norm', 'np.linalg.norm', (['perturbation'], {}), '(perturbation)\n', (1498, 1512), True, 'import numpy as np\n'), ((4179, 4225), 'numpy.identity', 'np.identity', (['X_pos.shape[1]'], {'dtype': 'X_pos.dtype'}), '(X_pos.shape[1], dtype=X_pos.dtype)\n', (4190, 4225), True, 'import numpy as np\n'), ((2750, 2790), 'numpy.linalg.norm', 'np.linalg.norm', (['(X_screen - x_neg)'], {'axis': '(1)'}), '(X_screen - x_neg, axis=1)\n', (2764, 2790), True, 'import numpy as np\n'), ((2557, 2617), 'numpy.multiply', 'np.multiply', (['(2 * x_eval - 
X_screen - x_neg)', '(X_screen - x_neg)'], {}), '(2 * x_eval - X_screen - x_neg, X_screen - x_neg)\n', (2568, 2617), True, 'import numpy as np\n')] |
import numpy as np
from sdcit.hsic import HSIC
from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes
def FCIT_noniid_K(Kx, Ky, cond_Kx, cond_Ky, Kz=None, seed=None):
if seed is not None:
np.random.seed(seed)
RX1 = residual_kernel(Kx, cond_Kx)
RY1 = residual_kernel(Ky, cond_Ky)
return FCIT_K(RX1, RY1, Kz)
def FCIT_noniid(X, Y, Cond_X, Cond_Y, Z=None, seed=None):
"""Flaxman et al. Residualization-based CI Test, X_||_Y | Z
References
----------
<NAME>., <NAME>., & <NAME>. (2016). Gaussian Processes for Independence Tests with Non-iid Data in Causal Inference.
ACM Transactions on Intelligent Systems and Technology, 7(2), 1–23.
"""
if seed is not None:
np.random.seed(seed)
RX1 = residualize(X, Cond_X)
RY1 = residualize(Y, Cond_Y)
return FCIT(RX1, RY1, Z)
def FCIT_K(Kx, Ky, Kz=None, use_expectation=True, with_gp=True, sigma_squared=1e-3, seed=None, hsic_kws=None):
if seed is not None:
np.random.seed(seed)
if hsic_kws is None:
hsic_kws = {}
if Kz is None:
return HSIC(Kx, Ky, **hsic_kws)
RX_Z = residual_kernel(Kx, Kz, use_expectation=use_expectation, with_gp=with_gp, sigma_squared=sigma_squared)
RY_Z = residual_kernel(Ky, Kz, use_expectation=use_expectation, with_gp=with_gp, sigma_squared=sigma_squared)
return HSIC(RX_Z, RY_Z, **hsic_kws)
def FCIT(X, Y, Z=None, kern=rbf_kernel_median, normalize=False, seed=None, hsic_kws=None):
"""Flaxman et al. Residualization-based CI Test, X_||_Y | Z
References
----------
<NAME>., <NAME>., & <NAME>. (2016). Gaussian Processes for Independence Tests with Non-iid Data in Causal Inference.
ACM Transactions on Intelligent Systems and Technology, 7(2), 1–23.
"""
if seed is not None:
np.random.seed(seed)
if hsic_kws is None:
hsic_kws = {}
if normalize:
X, Y, Z = columnwise_normalizes(X, Y, Z)
if Z is None:
return HSIC(kern(X), kern(Y), **hsic_kws)
e_YZ = residualize(Y, Z)
e_XZ = residualize(X, Z)
if normalize:
e_XZ, e_YZ = columnwise_normalizes(e_XZ, e_YZ)
return HSIC(kern(e_XZ), kern(e_YZ), **hsic_kws)
| [
"sdcit.utils.columnwise_normalizes",
"sdcit.utils.residual_kernel",
"numpy.random.seed",
"sdcit.hsic.HSIC",
"sdcit.utils.residualize"
] | [((275, 303), 'sdcit.utils.residual_kernel', 'residual_kernel', (['Kx', 'cond_Kx'], {}), '(Kx, cond_Kx)\n', (290, 303), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes\n'), ((314, 342), 'sdcit.utils.residual_kernel', 'residual_kernel', (['Ky', 'cond_Ky'], {}), '(Ky, cond_Ky)\n', (329, 342), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes\n'), ((797, 819), 'sdcit.utils.residualize', 'residualize', (['X', 'Cond_X'], {}), '(X, Cond_X)\n', (808, 819), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes\n'), ((830, 852), 'sdcit.utils.residualize', 'residualize', (['Y', 'Cond_Y'], {}), '(Y, Cond_Y)\n', (841, 852), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes\n'), ((1170, 1276), 'sdcit.utils.residual_kernel', 'residual_kernel', (['Kx', 'Kz'], {'use_expectation': 'use_expectation', 'with_gp': 'with_gp', 'sigma_squared': 'sigma_squared'}), '(Kx, Kz, use_expectation=use_expectation, with_gp=with_gp,\n sigma_squared=sigma_squared)\n', (1185, 1276), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes\n'), ((1284, 1390), 'sdcit.utils.residual_kernel', 'residual_kernel', (['Ky', 'Kz'], {'use_expectation': 'use_expectation', 'with_gp': 'with_gp', 'sigma_squared': 'sigma_squared'}), '(Ky, Kz, use_expectation=use_expectation, with_gp=with_gp,\n sigma_squared=sigma_squared)\n', (1299, 1390), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes\n'), ((1399, 1427), 'sdcit.hsic.HSIC', 'HSIC', (['RX_Z', 'RY_Z'], {}), '(RX_Z, RY_Z, **hsic_kws)\n', (1403, 1427), False, 'from sdcit.hsic import HSIC\n'), ((2068, 2085), 'sdcit.utils.residualize', 'residualize', (['Y', 'Z'], {}), '(Y, Z)\n', (2079, 2085), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, 
residualize, columnwise_normalizes\n'), ((2097, 2114), 'sdcit.utils.residualize', 'residualize', (['X', 'Z'], {}), '(X, Z)\n', (2108, 2114), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes\n'), ((243, 263), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (257, 263), True, 'import numpy as np\n'), ((765, 785), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (779, 785), True, 'import numpy as np\n'), ((1029, 1049), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1043, 1049), True, 'import numpy as np\n'), ((1133, 1157), 'sdcit.hsic.HSIC', 'HSIC', (['Kx', 'Ky'], {}), '(Kx, Ky, **hsic_kws)\n', (1137, 1157), False, 'from sdcit.hsic import HSIC\n'), ((1850, 1870), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1864, 1870), True, 'import numpy as np\n'), ((1956, 1986), 'sdcit.utils.columnwise_normalizes', 'columnwise_normalizes', (['X', 'Y', 'Z'], {}), '(X, Y, Z)\n', (1977, 1986), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes\n'), ((2155, 2188), 'sdcit.utils.columnwise_normalizes', 'columnwise_normalizes', (['e_XZ', 'e_YZ'], {}), '(e_XZ, e_YZ)\n', (2176, 2188), False, 'from sdcit.utils import rbf_kernel_median, residual_kernel, residualize, columnwise_normalizes\n')] |
"""Contains a batch class to segmentation task."""
import numpy as np
from PIL import Image
from batchflow import ImagesBatch, action, inbatch_parallel
class MnistBatch(ImagesBatch):
"""
Batch can create images with specified size and noisy with parts of other digits.
"""
components = 'images', 'labels', 'masks', 'big_img'
@inbatch_parallel(init='indices')
def _paste_img(self, ix, coord, size, src, dst):
img = self.get(ix, src) if src != 'labels' else Image.new('L', (28, 28), color=1)
img.thumbnail(size, Image.ANTIALIAS)
new_img = Image.new("L", size, "black")
i = self.get_pos(None, src, ix)
new_img.paste(img, [*coord[i]])
getattr(self, dst)[i] = new_img
@action
def create_big_img(self, coord, shape, src='images', dst='big_img'):
"""Creation image with specified size and put the image from batch to a random place.
Parameters
----------
coord : list with size 2
x and y cooridates of image position
shape : int or list
crated image's shape
src : str
component's name from which the data will be obtained
dst : str
component's name into which the data will be stored
Returns
-------
self
"""
shape = [shape]*2 if isinstance(shape, int) else shape
for sour, dest in zip(src, dst):
if getattr(self, dest) is None:
setattr(self, dest, np.array([None] * len(self.index)))
self._paste_img(coord, shape, sour, dest)
return self
@action
@inbatch_parallel(init='indices')
def add_noize(self, ix, num_parts=80, src='big_img', dst='big_img'):
"""Adding the parts of numbers to the image.
Parameters
----------
num_parts : int
The number of the image's parts
src : str
component's name from which the data will be obtained
dst : str
component's name into which the data will be stored
"""
img = self.get(ix, src)
ind = np.random.choice(self.indices, num_parts)
hight, width = np.random.randint(1, 5, 2)
coord_f = np.array([np.random.randint(10, 28-hight, len(ind)), \
np.random.randint(10, 28-width, len(ind))])
coord_t = np.array([np.random.randint(0, img.size[0]-hight, len(ind)), \
np.random.randint(0, img.size[1]-width, len(ind))])
for i, num_img in enumerate(np.array(ind)):
crop_img = self.get(num_img, 'images')
crop_img = crop_img.crop([coord_f[0][i], coord_f[1][i], \
coord_f[0][i]+hight, coord_f[1][i]+width])
img.paste(crop_img, list(coord_t[:, i]))
local_ix = self.get_pos(None, dst, ix)
getattr(self, dst)[local_ix] = img
| [
"numpy.random.choice",
"PIL.Image.new",
"batchflow.inbatch_parallel",
"numpy.array",
"numpy.random.randint"
] | [((350, 382), 'batchflow.inbatch_parallel', 'inbatch_parallel', ([], {'init': '"""indices"""'}), "(init='indices')\n", (366, 382), False, 'from batchflow import ImagesBatch, action, inbatch_parallel\n'), ((1640, 1672), 'batchflow.inbatch_parallel', 'inbatch_parallel', ([], {'init': '"""indices"""'}), "(init='indices')\n", (1656, 1672), False, 'from batchflow import ImagesBatch, action, inbatch_parallel\n'), ((589, 618), 'PIL.Image.new', 'Image.new', (['"""L"""', 'size', '"""black"""'], {}), "('L', size, 'black')\n", (598, 618), False, 'from PIL import Image\n'), ((2129, 2170), 'numpy.random.choice', 'np.random.choice', (['self.indices', 'num_parts'], {}), '(self.indices, num_parts)\n', (2145, 2170), True, 'import numpy as np\n'), ((2194, 2220), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)', '(2)'], {}), '(1, 5, 2)\n', (2211, 2220), True, 'import numpy as np\n'), ((492, 525), 'PIL.Image.new', 'Image.new', (['"""L"""', '(28, 28)'], {'color': '(1)'}), "('L', (28, 28), color=1)\n", (501, 525), False, 'from PIL import Image\n'), ((2565, 2578), 'numpy.array', 'np.array', (['ind'], {}), '(ind)\n', (2573, 2578), True, 'import numpy as np\n')] |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import os
import tensorflow.compat.v2 as tf
tf.compat.v1.enable_v2_behavior()
import pickle
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
import dice_rl.environments.gridworld.navigation as navigation
import dice_rl.environments.gridworld.tree as tree
import dice_rl.environments.gridworld.taxi as taxi
from dice_rl.estimators import estimator as estimator_lib
from dice_rl.google.rl_algos.tabular_saddle_point import TabularSaddlePoint
import dice_rl.utils.common as common_utils
from dice_rl.data.dataset import Dataset, EnvStep, StepType
from dice_rl.data.tf_agents_onpolicy_dataset import TFAgentsOnpolicyDataset
from dice_rl.data.tf_offpolicy_dataset import TFOffpolicyDataset
FLAGS = flags.FLAGS
flags.DEFINE_string('env_name', 'grid', 'Environment name.')
flags.DEFINE_integer('seed', 0, 'Initial random seed.')
flags.DEFINE_integer('num_trajectory', 500,
'Number of trajectories to collect.')
flags.DEFINE_integer('max_trajectory_length', 20,
'Cutoff trajectory at this step.')
flags.DEFINE_float('alpha', -1.0,
'How close to target policy.')
flags.DEFINE_bool('tabular_obs', True,
'Whether to use tabular observations.')
flags.DEFINE_string('load_dir', '/cns/vz-d/home/brain-ofirnachum',
'Directory to load dataset from.')
flags.DEFINE_float('gamma', 0.95,
'Discount factor.')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate.')
flags.DEFINE_integer('num_steps', 100000, 'Number of training steps.')
flags.DEFINE_integer('batch_size', 1024, 'Batch size.')
def get_onpolicy_dataset(env_name, tabular_obs, policy_fn, policy_info_spec):
"""Gets target policy."""
if env_name == 'taxi':
env = taxi.Taxi(tabular_obs=tabular_obs)
elif env_name == 'grid':
env = navigation.GridWalk(tabular_obs=tabular_obs)
elif env_name == 'tree':
env = tree.Tree(branching=2, depth=10)
else:
raise ValueError('Unknown environment: %s.' % env_name)
tf_env = tf_py_environment.TFPyEnvironment(
gym_wrapper.GymWrapper(env))
tf_policy = common_utils.TFAgentsWrappedPolicy(
tf_env.time_step_spec(), tf_env.action_spec(),
policy_fn, policy_info_spec,
emit_log_probability=True)
return TFAgentsOnpolicyDataset(tf_env, tf_policy)
def main(argv):
env_name = FLAGS.env_name
seed = FLAGS.seed
tabular_obs = FLAGS.tabular_obs
num_trajectory = FLAGS.num_trajectory
max_trajectory_length = FLAGS.max_trajectory_length
alpha = FLAGS.alpha
load_dir = FLAGS.load_dir
gamma = FLAGS.gamma
assert 0 <= gamma < 1.
learning_rate = FLAGS.learning_rate
num_steps = FLAGS.num_steps
batch_size = FLAGS.batch_size
hparam_str = ('{ENV_NAME}_tabular{TAB}_alpha{ALPHA}_seed{SEED}_'
'numtraj{NUM_TRAJ}_maxtraj{MAX_TRAJ}').format(
ENV_NAME=env_name,
TAB=tabular_obs,
ALPHA=alpha,
SEED=seed,
NUM_TRAJ=num_trajectory,
MAX_TRAJ=max_trajectory_length)
directory = os.path.join(load_dir, hparam_str)
print('Loading dataset.')
dataset = Dataset.load(directory)
all_steps = dataset.get_all_steps()
print('num loaded steps', dataset.num_steps)
print('num loaded total steps', dataset.num_total_steps)
print('num loaded episodes', dataset.num_episodes)
print('num loaded total episodes', dataset.num_total_episodes)
estimate = estimator_lib.get_fullbatch_average(dataset, gamma=gamma)
print('data per step avg', estimate)
optimizer = tf.keras.optimizers.Adam(learning_rate)
algo = TabularSaddlePoint(
dataset.spec, optimizer,
gamma=gamma)
losses = []
for step in range(num_steps):
init_batch, _ = dataset.get_episode(batch_size, truncate_episode_at=1)
init_batch = tf.nest.map_structure(lambda t: t[:, 0, ...], init_batch)
batch = dataset.get_step(batch_size, num_steps=2)
loss, policy_loss = algo.train_step(init_batch, batch)
losses.append(loss)
if step % 100 == 0 or step == num_steps - 1:
print('step', step, 'loss', np.mean(losses, 0))
losses = []
policy_fn, policy_info_spec = algo.get_policy()
onpolicy_data = get_onpolicy_dataset(env_name, tabular_obs,
policy_fn, policy_info_spec)
onpolicy_episodes, _ = onpolicy_data.get_episode(
10, truncate_episode_at=40)
print('estimated per step avg', np.mean(onpolicy_episodes.reward))
print('Done!')
if __name__ == '__main__':
app.run(main)
| [
"tensorflow.compat.v2.compat.v1.enable_v2_behavior",
"numpy.mean",
"absl.flags.DEFINE_bool",
"tensorflow.compat.v2.keras.optimizers.Adam",
"dice_rl.google.rl_algos.tabular_saddle_point.TabularSaddlePoint",
"absl.flags.DEFINE_integer",
"tf_agents.environments.gym_wrapper.GymWrapper",
"dice_rl.estimator... | [((794, 827), 'tensorflow.compat.v2.compat.v1.enable_v2_behavior', 'tf.compat.v1.enable_v2_behavior', ([], {}), '()\n', (825, 827), True, 'import tensorflow.compat.v2 as tf\n'), ((1510, 1570), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""env_name"""', '"""grid"""', '"""Environment name."""'], {}), "('env_name', 'grid', 'Environment name.')\n", (1529, 1570), False, 'from absl import flags\n'), ((1571, 1626), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""', '(0)', '"""Initial random seed."""'], {}), "('seed', 0, 'Initial random seed.')\n", (1591, 1626), False, 'from absl import flags\n'), ((1627, 1712), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_trajectory"""', '(500)', '"""Number of trajectories to collect."""'], {}), "('num_trajectory', 500,\n 'Number of trajectories to collect.')\n", (1647, 1712), False, 'from absl import flags\n'), ((1730, 1818), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_trajectory_length"""', '(20)', '"""Cutoff trajectory at this step."""'], {}), "('max_trajectory_length', 20,\n 'Cutoff trajectory at this step.')\n", (1750, 1818), False, 'from absl import flags\n'), ((1836, 1900), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""alpha"""', '(-1.0)', '"""How close to target policy."""'], {}), "('alpha', -1.0, 'How close to target policy.')\n", (1854, 1900), False, 'from absl import flags\n'), ((1920, 1998), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""tabular_obs"""', '(True)', '"""Whether to use tabular observations."""'], {}), "('tabular_obs', True, 'Whether to use tabular observations.')\n", (1937, 1998), False, 'from absl import flags\n'), ((2017, 2122), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""load_dir"""', '"""/cns/vz-d/home/brain-ofirnachum"""', '"""Directory to load dataset from."""'], {}), "('load_dir', '/cns/vz-d/home/brain-ofirnachum',\n 'Directory to load dataset from.')\n", (2036, 2122), False, 'from absl 
import flags\n'), ((2139, 2192), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""gamma"""', '(0.95)', '"""Discount factor."""'], {}), "('gamma', 0.95, 'Discount factor.')\n", (2157, 2192), False, 'from absl import flags\n'), ((2212, 2272), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""', '(0.001)', '"""Learning rate."""'], {}), "('learning_rate', 0.001, 'Learning rate.')\n", (2230, 2272), False, 'from absl import flags\n'), ((2273, 2343), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_steps"""', '(100000)', '"""Number of training steps."""'], {}), "('num_steps', 100000, 'Number of training steps.')\n", (2293, 2343), False, 'from absl import flags\n'), ((2344, 2399), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""', '(1024)', '"""Batch size."""'], {}), "('batch_size', 1024, 'Batch size.')\n", (2364, 2399), False, 'from absl import flags\n'), ((3061, 3103), 'dice_rl.data.tf_agents_onpolicy_dataset.TFAgentsOnpolicyDataset', 'TFAgentsOnpolicyDataset', (['tf_env', 'tf_policy'], {}), '(tf_env, tf_policy)\n', (3084, 3103), False, 'from dice_rl.data.tf_agents_onpolicy_dataset import TFAgentsOnpolicyDataset\n'), ((3877, 3911), 'os.path.join', 'os.path.join', (['load_dir', 'hparam_str'], {}), '(load_dir, hparam_str)\n', (3889, 3911), False, 'import os\n'), ((3952, 3975), 'dice_rl.data.dataset.Dataset.load', 'Dataset.load', (['directory'], {}), '(directory)\n', (3964, 3975), False, 'from dice_rl.data.dataset import Dataset, EnvStep, StepType\n'), ((4252, 4309), 'dice_rl.estimators.estimator.get_fullbatch_average', 'estimator_lib.get_fullbatch_average', (['dataset'], {'gamma': 'gamma'}), '(dataset, gamma=gamma)\n', (4287, 4309), True, 'from dice_rl.estimators import estimator as estimator_lib\n'), ((4364, 4403), 'tensorflow.compat.v2.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['learning_rate'], {}), '(learning_rate)\n', (4388, 4403), True, 'import tensorflow.compat.v2 as tf\n'), ((4413, 4469), 
'dice_rl.google.rl_algos.tabular_saddle_point.TabularSaddlePoint', 'TabularSaddlePoint', (['dataset.spec', 'optimizer'], {'gamma': 'gamma'}), '(dataset.spec, optimizer, gamma=gamma)\n', (4431, 4469), False, 'from dice_rl.google.rl_algos.tabular_saddle_point import TabularSaddlePoint\n'), ((5346, 5359), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (5353, 5359), False, 'from absl import app\n'), ((2543, 2577), 'dice_rl.environments.gridworld.taxi.Taxi', 'taxi.Taxi', ([], {'tabular_obs': 'tabular_obs'}), '(tabular_obs=tabular_obs)\n', (2552, 2577), True, 'import dice_rl.environments.gridworld.taxi as taxi\n'), ((2851, 2878), 'tf_agents.environments.gym_wrapper.GymWrapper', 'gym_wrapper.GymWrapper', (['env'], {}), '(env)\n', (2873, 2878), False, 'from tf_agents.environments import gym_wrapper\n'), ((4622, 4679), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[:, 0, ...])', 'init_batch'], {}), '(lambda t: t[:, 0, ...], init_batch)\n', (4643, 4679), True, 'import tensorflow.compat.v2 as tf\n'), ((2615, 2659), 'dice_rl.environments.gridworld.navigation.GridWalk', 'navigation.GridWalk', ([], {'tabular_obs': 'tabular_obs'}), '(tabular_obs=tabular_obs)\n', (2634, 2659), True, 'import dice_rl.environments.gridworld.navigation as navigation\n'), ((2697, 2729), 'dice_rl.environments.gridworld.tree.Tree', 'tree.Tree', ([], {'branching': '(2)', 'depth': '(10)'}), '(branching=2, depth=10)\n', (2706, 2729), True, 'import dice_rl.environments.gridworld.tree as tree\n'), ((4900, 4918), 'numpy.mean', 'np.mean', (['losses', '(0)'], {}), '(losses, 0)\n', (4907, 4918), True, 'import numpy as np\n'), ((5262, 5295), 'numpy.mean', 'np.mean', (['onpolicy_episodes.reward'], {}), '(onpolicy_episodes.reward)\n', (5269, 5295), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal
from landlab import FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY, INACTIVE_LINK, \
FIXED_LINK
from landlab import RasterModelGrid
def setup_grid():
globals().update({
'rmg': RasterModelGrid((4, 5))
})
@with_setup(setup_grid)
def test_link_update_with_nodes_closed():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
inactive_array = np.array([INACTIVE_LINK, ] * 5)
assert_array_equal(rmg.status_at_link[4:9], inactive_array)
@with_setup(setup_grid)
def test_link_update_with_nodes_fixed_grad():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = FIXED_GRADIENT_BOUNDARY
fixed_array = np.array([FIXED_LINK, ] * 3)
assert_array_equal(rmg.status_at_link[5:8], fixed_array)
@with_setup(setup_grid)
def test_bc_set_code_init():
assert_equal(rmg.bc_set_code, 0)
@with_setup(setup_grid)
def test_bc_set_code_change():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
assert_not_equal(rmg.bc_set_code, 0)
| [
"nose.tools.with_setup",
"nose.tools.assert_not_equal",
"landlab.RasterModelGrid",
"numpy.array",
"nose.tools.assert_equal",
"numpy.testing.assert_array_equal"
] | [((366, 388), 'nose.tools.with_setup', 'with_setup', (['setup_grid'], {}), '(setup_grid)\n', (376, 388), False, 'from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal\n'), ((618, 640), 'nose.tools.with_setup', 'with_setup', (['setup_grid'], {}), '(setup_grid)\n', (628, 640), False, 'from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal\n'), ((873, 895), 'nose.tools.with_setup', 'with_setup', (['setup_grid'], {}), '(setup_grid)\n', (883, 895), False, 'from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal\n'), ((965, 987), 'nose.tools.with_setup', 'with_setup', (['setup_grid'], {}), '(setup_grid)\n', (975, 987), False, 'from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal\n'), ((519, 548), 'numpy.array', 'np.array', (['([INACTIVE_LINK] * 5)'], {}), '([INACTIVE_LINK] * 5)\n', (527, 548), True, 'import numpy as np\n'), ((555, 614), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['rmg.status_at_link[4:9]', 'inactive_array'], {}), '(rmg.status_at_link[4:9], inactive_array)\n', (573, 614), False, 'from numpy.testing import assert_array_equal\n'), ((780, 806), 'numpy.array', 'np.array', (['([FIXED_LINK] * 3)'], {}), '([FIXED_LINK] * 3)\n', (788, 806), True, 'import numpy as np\n'), ((813, 869), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['rmg.status_at_link[5:8]', 'fixed_array'], {}), '(rmg.status_at_link[5:8], fixed_array)\n', (831, 869), False, 'from numpy.testing import assert_array_equal\n'), ((929, 961), 'nose.tools.assert_equal', 'assert_equal', (['rmg.bc_set_code', '(0)'], {}), '(rmg.bc_set_code, 0)\n', (941, 961), False, 'from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal\n'), ((1090, 1126), 'nose.tools.assert_not_equal', 'assert_not_equal', (['rmg.bc_set_code', '(0)'], {}), '(rmg.bc_set_code, 0)\n', (1106, 1126), False, 'from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal\n'), 
((332, 355), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(4, 5)'], {}), '((4, 5))\n', (347, 355), False, 'from landlab import RasterModelGrid\n')] |
from typing import Tuple, Union
import taichi as ti
import numpy as np
from doper.sim.jax_geometry import JaxScene
class TaichiRenderer:
def __init__(
self,
scene: JaxScene,
window_size_meters: Tuple[int, int] = (12, 9),
frustrum_left_corner: Tuple[float, float] = (0, 0),
px_per_meter: float = 50,
name: str = "Taichi",
) -> None:
"""Class for scene visualisation
Args:
scene (Scene): current scene representation
window_size_meters (Tuple[int, int], optional): size of the view frustrum in meters.
Defaults to (12, 9).
frustrum_left_corner (Tuple[float, float], optional): left lower world coordinates of
view frustrum. Defaults to (0, 0).
px_per_meter (float, optional): pixels per meter. Defaults to 50.
name (str, optional): window name. Defaults to "Taichi".
"""
self._scene = scene
self._gui = ti.GUI(name, res=[s * px_per_meter for s in window_size_meters])
self._window_size_meters = np.array(window_size_meters)
self._frustrum_left_corner = frustrum_left_corner
self._px_per_meter = px_per_meter
def _render_scene(self) -> None:
"""Renders scene geometry
"""
for poly in self._scene.polygons:
for i in range(len(poly.segments)):
segment = poly.segments[i].copy()
segment = segment / self._window_size_meters
segment_vector = segment[1] - segment[0]
normal = np.array([segment_vector[1], -segment_vector[0]])
normal = normal / np.linalg.norm(normal) / self._window_size_meters
self._gui.line(segment[1], segment[0], color=0xFF00FF)
self._gui.line(
segment[0] + segment_vector / 2,
segment[0] + segment_vector / 2 + normal,
color=0xFF00FF,
)
def _render_sensor(
self,
sensor_pos: Union[Tuple[int, int], np.ndarray],
ray_intersection_points: np.ndarray,
sensor_heading: Union[Tuple[float, float], np.ndarray, None],
):
"""Renders sensor position, direction and observation.
Args:
sensor_pos (Union[Tuple[int, int], np.ndarray]): world coordinates of the sensor.
ray_intersection_points (np.ndarray): [n_rays, 2] sensor ray intersections with geometry
sensor_heading (Union[Tuple[float, float], np.ndarray, None]): heading vector
"""
sensor_pos = np.array(sensor_pos)
sensor_pos = sensor_pos / self._window_size_meters
ray_intersection_points = ray_intersection_points / self._window_size_meters
self._gui.circle(sensor_pos, color=0xFFFF00, radius=self._px_per_meter * 0.2)
self._gui.circles(ray_intersection_points, color=0x008888, radius=self._px_per_meter * 0.05)
if sensor_heading is not None:
sensor_heading = sensor_heading / self._window_size_meters
sensor_heading = np.array(sensor_heading)
self._gui.line(sensor_pos, sensor_pos + sensor_heading, color=0xFF0000, radius=1)
def render(
self,
sensor_pos: Union[Tuple[int, int], np.ndarray],
ray_intersection_points: np.ndarray,
sensor_heading: Union[Tuple[float, float], np.ndarray, None] = None,
) -> None:
"""Renders one frame of simulation
Args:
sensor_pos (Union[Tuple[int, int], np.ndarray]): world coordinates of the sensor.
ray_intersection_points (np.ndarray): [n_rays, 2] sensor ray intersections with geometry
sensor_heading (Union[Tuple[float, float], np.ndarray, None], optional): heading vector.
Defaults to None.
"""
self._render_scene()
self._render_sensor(sensor_pos, ray_intersection_points, sensor_heading)
self._gui.show()
@property
def ti_gui(self) -> ti.GUI:
"""ti.GUI: handle to taichi gui backend
"""
return self._gui
| [
"numpy.array",
"taichi.GUI",
"numpy.linalg.norm"
] | [((993, 1059), 'taichi.GUI', 'ti.GUI', (['name'], {'res': '[(s * px_per_meter) for s in window_size_meters]'}), '(name, res=[(s * px_per_meter) for s in window_size_meters])\n', (999, 1059), True, 'import taichi as ti\n'), ((1093, 1121), 'numpy.array', 'np.array', (['window_size_meters'], {}), '(window_size_meters)\n', (1101, 1121), True, 'import numpy as np\n'), ((2609, 2629), 'numpy.array', 'np.array', (['sensor_pos'], {}), '(sensor_pos)\n', (2617, 2629), True, 'import numpy as np\n'), ((3100, 3124), 'numpy.array', 'np.array', (['sensor_heading'], {}), '(sensor_heading)\n', (3108, 3124), True, 'import numpy as np\n'), ((1589, 1638), 'numpy.array', 'np.array', (['[segment_vector[1], -segment_vector[0]]'], {}), '([segment_vector[1], -segment_vector[0]])\n', (1597, 1638), True, 'import numpy as np\n'), ((1673, 1695), 'numpy.linalg.norm', 'np.linalg.norm', (['normal'], {}), '(normal)\n', (1687, 1695), True, 'import numpy as np\n')] |
import numpy as np
from scipy.interpolate import interp2d
class Chebyshev:
def __init__():
self.Dx, self.x = chebyshevMatrix(self.Nx-1)
self.Dy, self.y = chebyshevMatrix(self.Ny-1)
self.D2x = np.dot(self.Dx, self.Dx)
self.D2y = np.dot(self.Dy, self.Dy)
self.X, self.Y = np.meshgrid(self.x, self.y)
self.X0 = 2 / (self.x_max - self.x_min)
self.Y0 = 2 / (self.y_max - self.y_min)
self.u0 = lambda x, y: self.u0(self.tx(x), self.tx(y))
self.b0 = lambda x, y: self.b0(self.tx(x), self.tx(y))
# Chebyshev approximation in space
def RHS(self, t, y):
"""
Compute right hand side of PDE
"""
# Vector field evaluation
V1 = self.v[0](self.X, self.Y, t)
V2 = self.v[1](self.X, self.Y, t)
Uf = np.copy(y[:self.Ny * self.Nx].reshape((self.Ny, self.Nx)))
Bf = np.copy(y[self.Ny * self.Nx:].reshape((self.Ny, self.Nx)))
U = Uf
B = Bf
# Compute derivatives
if self.sparse:
Ux, Uy = (self.Dx.dot(U.T)).T, self.Dy.dot(U) # grad(U) = (u_x, u_y)
Uxx, Uyy = (self.D2x.dot(U.T)).T, self.D2y.dot(U) #
else:
Ux, Uy = np.dot(U, self.Dx.T), np.dot(self.Dy, U)
Uxx, Uyy = np.dot(U, self.D2x.T), np.dot(self.D2y, U)
lapU = self.X0 ** 2 * Uxx + self.Y0 ** 2 * Uyy
if self.complete:
K = self.K(U)
Kx = self.Ku(U) * Ux #(Dx.dot(K.T)).T
Ky = self.Ku(U) * Uy #Dy.dot(K)
diffusion = Kx * Ux + Ky * Uy + K * lapU
else:
diffusion = self.kap * lapU # k \nabla U
convection = self.X0 * Ux * V1 + self.Y0 * Uy * V2 # v \cdot grad u.
reaction = self.f(U, B) # eval fuel
# Components
diffusion *= self.cmp[0]
convection *= self.cmp[1]
reaction *= self.cmp[2]
Uf = diffusion - convection + reaction
Bf = self.g(U, B)
# Boundary conditions
Uf, Bf = self.boundaryConditions(Uf, Bf)
return np.r_[Uf.flatten(), Bf.flatten()]
# Domain transformations
def tx(self, t):
return (self.x_max - self.x_min) * t / 2 + (self.x_max + self.x_min)/2 # [-1,1] to [xa, xb]
def xt(self, x):
return 2 * x / (self.x_max - self.x_min) - (self.x_max +self.x_min) / (self.x_max - self.x_min) # [xa, xb] to [-1, 1] | [
"numpy.meshgrid",
"numpy.dot"
] | [((222, 246), 'numpy.dot', 'np.dot', (['self.Dx', 'self.Dx'], {}), '(self.Dx, self.Dx)\n', (228, 246), True, 'import numpy as np\n'), ((266, 290), 'numpy.dot', 'np.dot', (['self.Dy', 'self.Dy'], {}), '(self.Dy, self.Dy)\n', (272, 290), True, 'import numpy as np\n'), ((325, 352), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (336, 352), True, 'import numpy as np\n'), ((1258, 1278), 'numpy.dot', 'np.dot', (['U', 'self.Dx.T'], {}), '(U, self.Dx.T)\n', (1264, 1278), True, 'import numpy as np\n'), ((1280, 1298), 'numpy.dot', 'np.dot', (['self.Dy', 'U'], {}), '(self.Dy, U)\n', (1286, 1298), True, 'import numpy as np\n'), ((1322, 1343), 'numpy.dot', 'np.dot', (['U', 'self.D2x.T'], {}), '(U, self.D2x.T)\n', (1328, 1343), True, 'import numpy as np\n'), ((1345, 1364), 'numpy.dot', 'np.dot', (['self.D2y', 'U'], {}), '(self.D2y, U)\n', (1351, 1364), True, 'import numpy as np\n')] |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import shutil
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import _init_paths
from config import cfg
from config import update_config
from core.loss import JointsMSELoss
from core.function import train
from core.function import validate
from utils.utils import get_optimizer
from utils.utils import save_checkpoint
from utils.utils import create_logger
from utils.utils import get_model_summary
from NME import NME
os.environ['CUDA_VISIBLE_DEVICES'] = '1,2'
import dataset
import models
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
# philly
parser.add_argument('--modelDir',
help='model directory',
type=str,
default='')
parser.add_argument('--logDir',
help='log directory',
type=str,
default='')
parser.add_argument('--dataDir',
help='data directory',
type=str,
default='')
parser.add_argument('--prevModelDir',
help='prev Model directory',
type=str,
default='')
args = parser.parse_args()
return args
def copy_prev_models(prev_models_dir, model_dir):
    """Copy previously trained model files from a Philly VC path into model_dir.

    Args:
        prev_models_dir (str): source directory; prefixed with the VC root
            unless it already starts with that prefix.
        model_dir (str): destination directory (must already exist).

    Requires the ``PHILLY_VC`` environment variable.  Entries that already
    exist in the destination are left untouched.  Prints a notice and does
    nothing when either side does not exist.  Returns None.
    """
    vc_folder = '/hdfs/' \
                + '/' + os.environ['PHILLY_VC']
    source = prev_models_dir
    # If path is set as "sys/jobs/application_1533861538020_2366/models" prefix with the location of vc folder
    source = vc_folder + '/' + source if not source.startswith(vc_folder) \
        else source
    destination = model_dir
    if os.path.exists(source) and os.path.exists(destination):
        for entry in os.listdir(source):
            source_file = os.path.join(source, entry)
            destination_file = os.path.join(destination, entry)
            if not os.path.exists(destination_file):
                print("=> copying {0} to {1}".format(
                    source_file, destination_file))
                # Bug fix: shutil.copytree() raises NotADirectoryError for
                # regular files; copy files with copy2, trees with copytree.
                if os.path.isdir(source_file):
                    shutil.copytree(source_file, destination_file)
                else:
                    shutil.copy2(source_file, destination_file)
    else:
        print('=> {} or {} does not exist'.format(source, destination))
def main():
    """Entry point: train an HRNet facial-landmark model, evaluating per epoch.

    Reads the experiment config from --cfg, builds model/datasets/optimizer,
    then for each epoch: trains, runs inference on the validation split,
    sums the NME error against a hard-coded ground-truth file, appends
    (epoch, lr, error) to log_file.csv, and saves a checkpoint.  Final
    weights are written to final_state.pth.
    """
    args = parse_args()
    update_config(cfg, args)
    if args.prevModelDir and args.modelDir:
        # copy pre models for philly
        copy_prev_models(args.prevModelDir, args.modelDir)
    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'train')
    logger.info(pprint.pformat(args))
    logger.info(cfg)
    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
    # The network factory is resolved dynamically from the config model name.
    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
        cfg, is_train=True
    )
    # copy model file (snapshot the network definition next to the outputs)
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'),
        final_output_dir)
    # logger.info(pprint.pformat(model))
    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }
    # Dummy input used only to trace the graph and log a parameter summary.
    dump_input = torch.rand(
        (1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0])
    )
    writer_dict['writer'].add_graph(model, (dump_input,))
    logger.info(get_model_summary(model, dump_input))
    # NOTE(review): device ids are hard-coded to [0, 1] (see the
    # CUDA_VISIBLE_DEVICES='1,2' set at module level) — confirm on new hosts.
    model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()
    # define loss function (criterion) and optimizer
    criterion = JointsMSELoss(
        use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT
    ).cuda()
    # Data loading code
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    train_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )
    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=cfg.TRAIN.SHUFFLE,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY
    )
    best_perf = 0.0
    best_model = False
    last_epoch = -1
    optimizer = get_optimizer(cfg, model)
    begin_epoch = cfg.TRAIN.BEGIN_EPOCH
    checkpoint_file = os.path.join(
        final_output_dir, 'checkpoint.pth'
    )
    # Auto-resume from checkpoint_file is currently disabled; the removed
    # commented-out code here restored epoch/perf/model/optimizer state.
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR,
        last_epoch=last_epoch
    )
    for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
        lr_scheduler.step()
        # train for one epoch
        train(cfg, train_loader, model, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)
        # The stock validate()-based evaluation, and an older manual
        # heatmap-decoding loop, were disabled here (commented-out code
        # removed for readability); the custom NME evaluation below is
        # used instead.
        model.eval()
        import numpy as np
        from core.inference import get_final_preds
        from utils.transforms import flip_back
        import csv
        num_samples = len(valid_dataset)
        # 106 facial landmarks; per landmark: (x, y, confidence).
        all_preds = np.zeros(
            (num_samples, 106, 3),
            dtype=np.float32
        )
        all_boxes = np.zeros((num_samples, 6))
        image_path = []
        filenames = []
        imgnums = []
        idx = 0
        full_result = []
        with torch.no_grad():
            for i, (input, target, target_weight, meta) in enumerate(valid_loader):
                # compute output
                outputs = model(input)
                if isinstance(outputs, list):
                    output = outputs[-1]
                else:
                    output = outputs
                if cfg.TEST.FLIP_TEST:
                    # this part is ugly, because pytorch has not supported negative index
                    # input_flipped = model(input[:, :, :, ::-1])
                    input_flipped = np.flip(input.cpu().numpy(), 3).copy()
                    input_flipped = torch.from_numpy(input_flipped).cuda()
                    outputs_flipped = model(input_flipped)
                    if isinstance(outputs_flipped, list):
                        output_flipped = outputs_flipped[-1]
                    else:
                        output_flipped = outputs_flipped
                    output_flipped = flip_back(output_flipped.cpu().numpy(),
                                              valid_dataset.flip_pairs)
                    output_flipped = torch.from_numpy(output_flipped.copy()).cuda()
                    # feature is not aligned, shift flipped heatmap for higher accuracy
                    if cfg.TEST.SHIFT_HEATMAP:
                        output_flipped[:, :, :, 1:] = \
                            output_flipped.clone()[:, :, :, 0:-1]
                    output = (output + output_flipped) * 0.5
                target = target.cuda(non_blocking=True)
                target_weight = target_weight.cuda(non_blocking=True)
                loss = criterion(output, target, target_weight)
                num_images = input.size(0)
                # measure accuracy and record loss
                c = meta['center'].numpy()
                s = meta['scale'].numpy()
                score = meta['score'].numpy()
                preds, maxvals = get_final_preds(
                    cfg, output.clone().cpu().numpy(), c, s)
                # Flatten predictions to [x0, y0, x1, y1, ...] per image.
                for b in range(input.size(0)):
                    result = []
                    for points in range(106):
                        result.append(float(preds[b][points][0]))
                        result.append(float(preds[b][points][1]))
                    full_result.append(result)
                all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
                all_preds[idx:idx + num_images, :, 2:3] = maxvals
                # double check this all_boxes parts
                all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
                all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
                all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
                all_boxes[idx:idx + num_images, 5] = score
                image_path.extend(meta['image'])
                idx += num_images
        # Load ground truth; each line appears to be "<image> x0 y0 x1 y1 ...".
        # NOTE(review): hard-coded absolute path — machine-specific, confirm.
        gt = []
        with open("/home/sk49/workspace/cy/jd/val.txt") as f:
            for line in f.readlines():
                rows = list(map(float, line.strip().split(' ')[1:]))
                gt.append(rows)
        error = 0
        for i in range(len(gt)):
            error = NME(full_result[i], gt[i]) + error
        print(error)
        # Append (epoch, current lr, summed NME error) to the training log.
        log_file = []
        log_file.append([epoch, optimizer.state_dict()['param_groups'][0]['lr'], error])
        with open('log_file.csv', 'a', newline='') as f:
            writer1 = csv.writer(f)
            writer1.writerows(log_file)
        # logger.close()
        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint({
            'epoch': epoch + 1,
            'model': cfg.MODEL.NAME,
            'state_dict': model.state_dict(),
            'best_state_dict': model.module.state_dict(),
            # 'perf': perf_indicator,
            'optimizer': optimizer.state_dict(),
        }, best_model, final_output_dir)
    final_model_state_file = os.path.join(
        final_output_dir, 'final_state.pth'
    )
    logger.info('=> saving final model state to {}'.format(
        final_model_state_file)
    )
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
# Script entry point: run training when executed directly.
if __name__ == '__main__':
    main()
| [
"numpy.prod",
"torch.optim.lr_scheduler.MultiStepLR",
"NME.NME",
"torch.from_numpy",
"os.path.exists",
"os.listdir",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"config.update_config",
"torchvision.transforms.ToTensor",
"utils.utils.get_optimizer",
"core.function.train",
"csv.wr... | [((1117, 1179), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train keypoints network"""'}), "(description='Train keypoints network')\n", (1140, 1179), False, 'import argparse\n'), ((3223, 3247), 'config.update_config', 'update_config', (['cfg', 'args'], {}), '(cfg, args)\n', (3236, 3247), False, 'from config import update_config\n'), ((3433, 3470), 'utils.utils.create_logger', 'create_logger', (['cfg', 'args.cfg', '"""train"""'], {}), "(cfg, args.cfg, 'train')\n", (3446, 3470), False, 'from utils.utils import create_logger\n'), ((3865, 3890), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3880, 3890), False, 'import os\n'), ((4213, 4281), 'torch.rand', 'torch.rand', (['(1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0])'], {}), '((1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0]))\n', (4223, 4281), False, 'import torch\n'), ((4669, 4744), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (4689, 4744), True, 'import torchvision.transforms as transforms\n'), ((5763, 5788), 'utils.utils.get_optimizer', 'get_optimizer', (['cfg', 'model'], {}), '(cfg, model)\n', (5776, 5788), False, 'from utils.utils import get_optimizer\n'), ((5851, 5899), 'os.path.join', 'os.path.join', (['final_output_dir', '"""checkpoint.pth"""'], {}), "(final_output_dir, 'checkpoint.pth')\n", (5863, 5899), False, 'import os\n'), ((6634, 6749), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer', 'cfg.TRAIN.LR_STEP', 'cfg.TRAIN.LR_FACTOR'], {'last_epoch': 'last_epoch'}), '(optimizer, cfg.TRAIN.LR_STEP, cfg.\n TRAIN.LR_FACTOR, last_epoch=last_epoch)\n', (6670, 6749), False, 'import torch\n'), ((16855, 16904), 'os.path.join', 'os.path.join', (['final_output_dir', '"""final_state.pth"""'], {}), "(final_output_dir, 
'final_state.pth')\n", (16867, 16904), False, 'import os\n'), ((2669, 2691), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (2683, 2691), False, 'import os\n'), ((2696, 2723), 'os.path.exists', 'os.path.exists', (['destination'], {}), '(destination)\n', (2710, 2723), False, 'import os\n'), ((2745, 2763), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (2755, 2763), False, 'import os\n'), ((3497, 3517), 'pprint.pformat', 'pprint.pformat', (['args'], {}), '(args)\n', (3511, 3517), False, 'import pprint\n'), ((3917, 3980), 'os.path.join', 'os.path.join', (['this_dir', '"""../lib/models"""', "(cfg.MODEL.NAME + '.py')"], {}), "(this_dir, '../lib/models', cfg.MODEL.NAME + '.py')\n", (3929, 3980), False, 'import os\n'), ((4088, 4121), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'tb_log_dir'}), '(log_dir=tb_log_dir)\n', (4101, 4121), False, 'from tensorboardX import SummaryWriter\n'), ((4371, 4407), 'utils.utils.get_model_summary', 'get_model_summary', (['model', 'dump_input'], {}), '(model, dump_input)\n', (4388, 4407), False, 'from utils.utils import get_model_summary\n'), ((6893, 7000), 'core.function.train', 'train', (['cfg', 'train_loader', 'model', 'criterion', 'optimizer', 'epoch', 'final_output_dir', 'tb_log_dir', 'writer_dict'], {}), '(cfg, train_loader, model, criterion, optimizer, epoch,\n final_output_dir, tb_log_dir, writer_dict)\n', (6898, 7000), False, 'from core.function import train\n'), ((12160, 12209), 'numpy.zeros', 'np.zeros', (['(num_samples, 106, 3)'], {'dtype': 'np.float32'}), '((num_samples, 106, 3), dtype=np.float32)\n', (12168, 12209), True, 'import numpy as np\n'), ((12264, 12290), 'numpy.zeros', 'np.zeros', (['(num_samples, 6)'], {}), '((num_samples, 6))\n', (12272, 12290), True, 'import numpy as np\n'), ((2791, 2817), 'os.path.join', 'os.path.join', (['source', 'file'], {}), '(source, file)\n', (2803, 2817), False, 'import os\n'), ((2849, 2880), 'os.path.join', 'os.path.join', 
(['destination', 'file'], {}), '(destination, file)\n', (2861, 2880), False, 'import os\n'), ((3052, 3098), 'shutil.copytree', 'shutil.copytree', (['source_file', 'destination_file'], {}), '(source_file, destination_file)\n', (3067, 3098), False, 'import shutil\n'), ((4422, 4469), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': '[0, 1]'}), '(model, device_ids=[0, 1])\n', (4443, 4469), False, 'import torch\n'), ((4547, 4606), 'core.loss.JointsMSELoss', 'JointsMSELoss', ([], {'use_target_weight': 'cfg.LOSS.USE_TARGET_WEIGHT'}), '(use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT)\n', (4560, 4606), False, 'from core.loss import JointsMSELoss\n'), ((12413, 12428), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12426, 12428), False, 'import torch\n'), ((16339, 16352), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (16349, 16352), False, 'import csv\n'), ((2900, 2932), 'os.path.exists', 'os.path.exists', (['destination_file'], {}), '(destination_file)\n', (2914, 2932), False, 'import os\n'), ((4920, 4941), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4939, 4941), True, 'import torchvision.transforms as transforms\n'), ((5144, 5165), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5163, 5165), True, 'import torchvision.transforms as transforms\n'), ((15499, 15518), 'numpy.prod', 'np.prod', (['(s * 200)', '(1)'], {}), '(s * 200, 1)\n', (15506, 15518), True, 'import numpy as np\n'), ((16091, 16117), 'NME.NME', 'NME', (['full_result[i]', 'gt[i]'], {}), '(full_result[i], gt[i])\n', (16094, 16117), False, 'from NME import NME\n'), ((13039, 13070), 'torch.from_numpy', 'torch.from_numpy', (['input_flipped'], {}), '(input_flipped)\n', (13055, 13070), False, 'import torch\n')] |
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2020 <NAME>
#
# This file is part of Monet.
import logging
from typing import Tuple, Union
import sys
import time
from ..core import ExpMatrix
from ..latent import PCAModel
from sklearn.neighbors import NearestNeighbors
import pandas as pd
import numpy as np
_LOGGER = logging.getLogger(__name__)
def correct_mnn(
        pca_model: PCAModel,
        ref_matrix: ExpMatrix, target_matrix: ExpMatrix,
        k: int = 20, num_mnn: int = 5) \
        -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Perform batch correction in latent space using mutual nearest neighbors.
    This function implements a method very similar to the one proposed
    by Haghverdi et al. (Nat Biotech, 2018), except all operations are
    performed after projecting the data into a latent space defined by a PCA
    model.
    => PMID: 29608177
    => DOI: 10.1038/nbt.4091.
    Args:
    =====
    pca_model: Fitted PCA model used to project both matrices.
    ref_matrix: Reference expression matrix (the batch kept fixed).
    target_matrix: Expression matrix whose PC scores are corrected.
    k: Neighborhood size used to determine MNN pairs.
    num_mnn: Number of nearest MNN anchor cells averaged per target cell.
    Returns:
    ========
    1. The batch-corrected PC scores for the target matrix.
    2. The PC scores obtained by applying the PCA model to the
       reference matrix.
    """
    t0 = time.time()
    ### determine all MNN pairs
    _LOGGER.info('Determining all MNN pairs...')
    # transform data using PCA model
    ref_pc_scores = pca_model.transform(ref_matrix)
    target_pc_scores = pca_model.transform(target_matrix)
    # find k nearest neighbors in reference matrix
    # for all points in target matrix
    ref_nn_model = NearestNeighbors(algorithm='kd_tree', n_neighbors=k)
    ref_nn_model.fit(ref_pc_scores.values)
    neigh_ind = ref_nn_model.kneighbors(
        target_pc_scores.values, return_distance=False)
    # test if each point r in ref_matrix is a MNN
    # of point t in target matrix
    target_nn_model = NearestNeighbors(algorithm='kd_tree', n_neighbors=k)
    target_nn_model.fit(target_pc_scores.values)
    mnn = []
    mnn_indices = {}
    # cur_neighbor enumerates target points that have at least one MNN, in
    # increasing order of t — the same order np.unique(mnn[:, 0]) produces
    # below, which is what makes the mnn_indices lookup valid later on.
    cur_neighbor = 0
    # cur_idx is the running row index into the (future) corr_vectors array.
    cur_idx = 0
    for t in range(neigh_ind.shape[0]):
        # use pre-determined nearest neighbors in reference
        ref_neighbors = neigh_ind[t, :]
        indices = []
        # get an adjacency matrix for the ref_neighbors in the target
        A = target_nn_model.kneighbors_graph(
            ref_pc_scores.iloc[ref_neighbors])
        for i in range(A.shape[0]):
            # each i corresponds to one point r in ref_neighbors
            if A[i, t] == 1:
                indices.append(cur_idx)
                mnn.append((t, ref_neighbors[i]))
                cur_idx += 1
        if indices:
            mnn_indices[cur_neighbor] = np.uint32(indices)
            cur_neighbor += 1
    # mnn is an x-by-2 array containing all MNN pairs
    # column 1 = point t index, column 2 = point r index
    mnn = np.uint32(mnn)
    # calculate correction vectors for all MNN pairs
    _LOGGER.info('Calculating batch correction vectors for all MNN pairs...')
    corr_vectors = np.empty((mnn.shape[0], pca_model.num_components_),
                            dtype=np.float32)
    for i in range(mnn.shape[0]):
        # correction = reference partner minus target point (per pair)
        cv = ref_pc_scores.iloc[mnn[i, 1]].values - target_pc_scores.iloc[mnn[i, 0]].values
        corr_vectors[i, :] = cv
    # now we know which points in T have mutual nearest neighbors (=T_mut)
    # next, we will use those for batch correction
    T_mut = np.unique(mnn[:, 0])
    # apply the correction
    _LOGGER.info('Applying batch correction to target PC scores...'); sys.stdout.flush()
    # first, create NN model for T_mut
    mnn_nn_model = NearestNeighbors(algorithm='kd_tree', n_neighbors=num_mnn)
    mnn_nn_model.fit(target_pc_scores.iloc[T_mut])
    # then, find the closest MNNs for all points in target matrix
    # and use the mean of their batch correction vectors
    C = target_pc_scores.values.copy()
    nearest_mnn = mnn_nn_model.kneighbors(C, return_distance=False)
    for t in range(C.shape[0]):
        # determine the indices of all batch correction vectors
        # for point t (nearest_mnn indexes into T_mut / mnn_indices)
        pair_indices = []
        for i in nearest_mnn[t, :]:
            pair_indices.extend(mnn_indices[i])
        pair_indices = np.uint32(pair_indices)
        corr = corr_vectors[pair_indices, :].mean(axis=0)
        C[t, :] = C[t, :] + corr
    corrected_target_pc_scores = pd.DataFrame(
        index=target_pc_scores.index,
        columns=target_pc_scores.columns,
        data=C)
    t1 = time.time()
    _LOGGER.info('Batch correction using mutual nearest neighbors '
                 'took %.1f s.', t1-t0)
    return corrected_target_pc_scores, ref_pc_scores
| [
"logging.getLogger",
"numpy.unique",
"numpy.uint32",
"numpy.empty",
"sklearn.neighbors.NearestNeighbors",
"pandas.DataFrame",
"sys.stdout.flush",
"time.time"
] | [((316, 343), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (333, 343), False, 'import logging\n'), ((1096, 1107), 'time.time', 'time.time', ([], {}), '()\n', (1105, 1107), False, 'import time\n'), ((1451, 1503), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'algorithm': '"""kd_tree"""', 'n_neighbors': 'k'}), "(algorithm='kd_tree', n_neighbors=k)\n", (1467, 1503), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1751, 1803), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'algorithm': '"""kd_tree"""', 'n_neighbors': 'k'}), "(algorithm='kd_tree', n_neighbors=k)\n", (1767, 1803), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((2765, 2779), 'numpy.uint32', 'np.uint32', (['mnn'], {}), '(mnn)\n', (2774, 2779), True, 'import numpy as np\n'), ((2931, 3000), 'numpy.empty', 'np.empty', (['(mnn.shape[0], pca_model.num_components_)'], {'dtype': 'np.float32'}), '((mnn.shape[0], pca_model.num_components_), dtype=np.float32)\n', (2939, 3000), True, 'import numpy as np\n'), ((3367, 3387), 'numpy.unique', 'np.unique', (['mnn[:, 0]'], {}), '(mnn[:, 0])\n', (3376, 3387), True, 'import numpy as np\n'), ((3486, 3504), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3502, 3504), False, 'import sys\n'), ((3568, 3626), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'algorithm': '"""kd_tree"""', 'n_neighbors': 'num_mnn'}), "(algorithm='kd_tree', n_neighbors=num_mnn)\n", (3584, 3626), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((4327, 4415), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'target_pc_scores.index', 'columns': 'target_pc_scores.columns', 'data': 'C'}), '(index=target_pc_scores.index, columns=target_pc_scores.columns,\n data=C)\n', (4339, 4415), True, 'import pandas as pd\n'), ((4451, 4462), 'time.time', 'time.time', ([], {}), '()\n', (4460, 4462), False, 'import time\n'), ((4165, 4188), 'numpy.uint32', 'np.uint32', 
(['pair_indices'], {}), '(pair_indices)\n', (4174, 4188), True, 'import numpy as np\n'), ((2594, 2612), 'numpy.uint32', 'np.uint32', (['indices'], {}), '(indices)\n', (2603, 2612), True, 'import numpy as np\n')] |
import numpy as np
# Toy example of the per-class accuracy bookkeeping used in evaluation loops.
bsize = 3
batch_label = [0, 1, 2]  # ground-truth class per sample
pred_val = [2, 2, 1]     # predicted class per sample
print(len(batch_label))
print(len(pred_val))
total_seen_class = [0 for _ in range(3)]
total_correct_class = [0 for _ in range(3)]
for i in range(0, bsize):
    print('INNER: ', i)
    l = batch_label[i]
    total_seen_class[l] += 1
    total_correct_class[l] += (pred_val[i] == l)
print(total_seen_class)
print(total_correct_class)
# Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24
# (AttributeError on modern NumPy); use the explicit float64 dtype.
print(np.mean(np.array(total_correct_class) /
                np.array(total_seen_class, dtype=np.float64)))
| [
"numpy.array"
] | [((417, 446), 'numpy.array', 'np.array', (['total_correct_class'], {}), '(total_correct_class)\n', (425, 446), True, 'import numpy as np\n'), ((463, 505), 'numpy.array', 'np.array', (['total_seen_class'], {'dtype': 'np.float'}), '(total_seen_class, dtype=np.float)\n', (471, 505), True, 'import numpy as np\n')] |
import xarray as xr
import click
import numpy as np
@click.command()
@click.argument("filename", type=str)
@click.argument("n", type=int)
def convert(filename, n):
    """Resample the "column" dimension of a netCDF file to ``n`` columns.

    Opens FILENAME, rebuilds longitude/latitude as n evenly spaced values
    spanning the original range, tiles every variable that has a "column"
    dimension up to n entries along that axis, and writes the result to
    "<filename>_<n>.nc".
    """
    data = xr.open_dataset(filename)
    out = xr.Dataset()
    new_dims = {key: val for key, val in data.dims.items() if key != "column"}
    out = out.expand_dims(new_dims)
    out = out.expand_dims({"column": n})
    lon = data["longitude"][:]
    lat = data["latitude"][:]
    out["longitude"] = (("column",), np.linspace(np.min(lon), np.max(lon), n))
    out["latitude"] = (("column",), np.linspace(np.min(lat), np.max(lat), n))
    n_repeat = n // data.dims["column"] + 1
    for vn in data.data_vars:
        var = data[vn]
        if vn not in ["longitude", "latitude"] and "column" in var.dims:
            colax = var.dims.index("column")
            tiled = np.repeat(np.asarray(var[:]), n_repeat, axis=colax)
            # Bug fix: truncate along the column axis. The original sliced
            # with [:n], which always cuts axis 0 and is wrong whenever
            # "column" is not the first dimension of the variable.
            slicer = [slice(None)] * tiled.ndim
            slicer[colax] = slice(n)
            out[vn] = (var.dims, tiled[tuple(slicer)])
    out.to_netcdf(filename.replace(".nc", "_{}.nc".format(n)))
    return
if __name__ == "__main__":
convert() | [
"click.argument",
"numpy.repeat",
"xarray.Dataset",
"xarray.open_dataset",
"numpy.max",
"numpy.min",
"click.command"
] | [((54, 69), 'click.command', 'click.command', ([], {}), '()\n', (67, 69), False, 'import click\n'), ((71, 107), 'click.argument', 'click.argument', (['"""filename"""'], {'type': 'str'}), "('filename', type=str)\n", (85, 107), False, 'import click\n'), ((109, 138), 'click.argument', 'click.argument', (['"""n"""'], {'type': 'int'}), "('n', type=int)\n", (123, 138), False, 'import click\n'), ((176, 201), 'xarray.open_dataset', 'xr.open_dataset', (['filename'], {}), '(filename)\n', (191, 201), True, 'import xarray as xr\n'), ((212, 224), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (222, 224), True, 'import xarray as xr\n'), ((495, 506), 'numpy.min', 'np.min', (['lon'], {}), '(lon)\n', (501, 506), True, 'import numpy as np\n'), ((508, 519), 'numpy.max', 'np.max', (['lon'], {}), '(lon)\n', (514, 519), True, 'import numpy as np\n'), ((573, 584), 'numpy.min', 'np.min', (['lat'], {}), '(lat)\n', (579, 584), True, 'import numpy as np\n'), ((586, 597), 'numpy.max', 'np.max', (['lat'], {}), '(lat)\n', (592, 597), True, 'import numpy as np\n'), ((853, 892), 'numpy.repeat', 'np.repeat', (['var[:]', 'n_repeat'], {'axis': 'colax'}), '(var[:], n_repeat, axis=colax)\n', (862, 892), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Data operations, will be used in train.py."""
import json
import pickle
from math import ceil
from pathlib import Path
import mindspore.common.dtype as mstype
import mindspore.dataset as de
import mindspore.dataset.transforms.c_transforms as deC
import numpy as np
from .model_utils.config import config
# Fix the MindSpore dataset-pipeline seed so shuffling is reproducible.
de.config.set_seed(1)
class MsAudioDataset:
    """
    Audio dataset for AISHELL dataset.
    Loads the data-prep JSON manifest, maps transcripts to token ids using
    the character dictionary, and yields fixed-size, padded feature/label
    arrays for a Transformer ASR model.
    Args:
        data_json_path (str|Path): Path to dataset json.
        chars_dict_path (str|Path): Path to dataset character dictionary.
        lfr_m (int): preprocessing param, number of frames to stack. Default: 4.
        lfr_n (int): preprocessing param, number of frames to skip. Default: 3.
    """
    # Label value used to pad decoder targets; masked out downstream.
    IGNORE_ID = -1
    def __init__(self, data_json_path, chars_dict_path, lfr_m=4, lfr_n=3):
        self.data_json_path = Path(data_json_path)
        self.lfr_m = lfr_m
        self.lfr_n = lfr_n
        self.chars_dict_path = Path(chars_dict_path)
        self.char_list, self.sos_id, self.eos_id = self.process_dict(self.chars_dict_path)
        with self.data_json_path.open('r') as file:
            self.data = json.load(file)
        # Dataset-wide maxima so every sample pads to the same static shape.
        self.max_input_len, self.max_output_len = self.get_max_lens()
        self.data_samples = list(self.data.values())
    @staticmethod
    def read_pickle(file_path):
        """read pickle data"""
        # NOTE: pickle.load can execute arbitrary code; feature files are
        # assumed to come from the trusted data-prep step.
        with Path(file_path).open('rb') as file:
            data = pickle.load(file)
        return data
    @staticmethod
    def process_dict(dict_path):
        """process character dict"""
        # Each dictionary line starts with the character; its line position
        # is the token id, so only the first column is kept.
        with Path(dict_path).open('r') as files:
            dictionary = files.readlines()
        char_list = [entry.split(' ')[0] for entry in dictionary]
        sos_id = char_list.index('<sos>')
        eos_id = char_list.index('<eos>')
        return char_list, sos_id, eos_id
    def __getitem__(self, item):
        """get preprocessed data"""
        sample = self.data_samples[item]
        output_tokens = [int(token) for token in sample['output'][0]['tokenid'].split(' ')]
        # Decoder input: <sos> + tokens, padded with <eos> to max_output_len.
        decoder_input_tokens = np.asarray([self.sos_id] + output_tokens)
        padded_decoder_input_tokens = np.full((self.max_output_len,), self.eos_id, dtype=np.int64)
        padded_decoder_input_tokens[:decoder_input_tokens.shape[0]] = decoder_input_tokens
        padded_decoder_input_mask = (padded_decoder_input_tokens != self.eos_id).astype(np.float32)
        # Decoder target: tokens + <eos>, padded with IGNORE_ID.
        decoder_output_tokens = np.asarray(output_tokens + [self.eos_id])
        padded_decoder_output_tokens = np.full((self.max_output_len,), self.IGNORE_ID, dtype=np.int64)
        padded_decoder_output_tokens[:decoder_output_tokens.shape[0]] = decoder_output_tokens
        padded_decoder_output_mask = (padded_decoder_output_tokens != self.IGNORE_ID).astype(np.float32)
        # Encoder input: LFR-stacked features, zero-padded to max_input_len;
        # the int mask marks the valid (non-padding) frames.
        input_features = self.read_pickle(sample['input'][0]['feat'])
        input_features = self.build_lfr_features(input_features, m=self.lfr_m, n=self.lfr_n)
        padded_input_features = np.full((self.max_input_len, input_features.shape[1]), 0, dtype=np.float32)
        padded_input_features[:input_features.shape[0]] = input_features
        padded_input_features_mask = np.full((self.max_input_len,), 0, dtype=np.int32)
        padded_input_features_mask[:input_features.shape[0]] = 1
        result = (
            padded_input_features, padded_input_features_mask,
            padded_decoder_input_tokens, padded_decoder_input_mask,
            padded_decoder_output_tokens, padded_decoder_output_mask,
        )
        return result
    def __len__(self):
        """num of samples"""
        return len(self.data_samples)
    @staticmethod
    def build_lfr_features(inputs, m, n):
        """
        Actually, this implements stacking frames and skipping frames.
        if m = 1 and n = 1, just return the origin features.
        if m = 1 and n > 1, it works like skipping.
        if m > 1 and n = 1, it works like stacking but only support right frames.
        if m > 1 and n > 1, it works like LFR.
        Args:
            inputs_batch: inputs is T x D np.ndarray
            m: number of frames to stack
            n: number of frames to skip
        """
        LFR_inputs = []
        T = inputs.shape[0]
        T_lfr = int(np.ceil(T / n))
        for i in range(T_lfr):
            if m <= T - i * n:
                LFR_inputs.append(np.hstack(inputs[i * n:i * n + m]))
            else:  # process last LFR frame
                num_padding = m - (T - i * n)
                frame = np.hstack(inputs[i * n:])
                # Repeat the final frame to fill the stack at the sequence end.
                for _ in range(num_padding):
                    frame = np.hstack((frame, inputs[-1]))
                LFR_inputs.append(frame)
        return np.vstack(LFR_inputs)
    def get_max_lens(self):
        """get maximum sequence len"""
        # Scans the manifest for the longest input (frames) and output
        # (tokens) sequences. Input length is divided by the LFR skip factor
        # n; the +1 terms presumably leave room for LFR rounding and for the
        # <sos>/<eos> token added in __getitem__ — TODO confirm.
        input_max_len = 0
        output_max_len = 0
        for sample in self.data.values():
            input_cur_len = sample['input'][0]['shape'][0]
            output_cur_len = sample['output'][0]['shape'][0]
            input_max_len = input_cur_len if input_cur_len > input_max_len else input_max_len
            output_max_len = output_cur_len if output_cur_len > output_max_len else output_max_len
        return ceil(input_max_len / self.lfr_n) + 1, output_max_len + 1
def create_transformer_dataset(
        data_json_path,
        chars_dict_path,
        lfr_m=4,
        lfr_n=3,
        do_shuffle='true',
        rank_size=1,
        rank_id=0,
        epoch_count=1,
        batch_size=None,
):
    """
    Build the MindSpore data pipeline over the AISHELL audio dataset.

    Args:
        data_json_path (str|Path): Path to dataset json.
        chars_dict_path (str|Path): Path to dataset character dictionary.
        lfr_m (int): preprocessing param, number of frames to stack. Default: 4.
        lfr_n (int): preprocessing param, number of frames to skip. Default: 3.
        do_shuffle (str): shuffle the dataset when equal to 'true'. Default: 'true'.
        rank_size (int): distributed training rank size. Default: 1.
        rank_id (int): distributed training rank id. Default: 0.
        epoch_count (int): number of dataset repeats. Default: 1.
        batch_size (int): batch size; falls back to config.batch_size when None.
    """
    audio_dataset = MsAudioDataset(
        data_json_path,
        chars_dict_path,
        lfr_m,
        lfr_n,
    )
    column_names = [
        'source_eos_features',
        'source_eos_mask',
        'target_sos_ids',
        'target_sos_mask',
        'target_eos_ids',
        'target_eos_mask',
    ]
    ds = de.GeneratorDataset(
        source=audio_dataset,
        column_names=column_names,
        shuffle=(do_shuffle == "true"),
        num_shards=rank_size,
        shard_id=rank_id,
    )
    # Features are cast to float32; every mask/id column is cast to int32.
    cast_float32 = deC.TypeCast(mstype.float32)
    cast_int32 = deC.TypeCast(mstype.int32)
    ds = ds.map(operations=cast_float32, input_columns="source_eos_features")
    for column in column_names[1:]:
        ds = ds.map(operations=cast_int32, input_columns=column)
    if batch_size is None:
        batch_size = config.batch_size
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds.repeat(epoch_count)
| [
"numpy.ceil",
"math.ceil",
"pathlib.Path",
"mindspore.dataset.GeneratorDataset",
"numpy.hstack",
"mindspore.dataset.transforms.c_transforms.TypeCast",
"numpy.asarray",
"pickle.load",
"mindspore.dataset.config.set_seed",
"numpy.vstack",
"json.load",
"numpy.full"
] | [((978, 999), 'mindspore.dataset.config.set_seed', 'de.config.set_seed', (['(1)'], {}), '(1)\n', (996, 999), True, 'import mindspore.dataset as de\n'), ((7021, 7269), 'mindspore.dataset.GeneratorDataset', 'de.GeneratorDataset', ([], {'source': 'dataset', 'column_names': "['source_eos_features', 'source_eos_mask', 'target_sos_ids',\n 'target_sos_mask', 'target_eos_ids', 'target_eos_mask']", 'shuffle': "(do_shuffle == 'true')", 'num_shards': 'rank_size', 'shard_id': 'rank_id'}), "(source=dataset, column_names=['source_eos_features',\n 'source_eos_mask', 'target_sos_ids', 'target_sos_mask',\n 'target_eos_ids', 'target_eos_mask'], shuffle=do_shuffle == 'true',\n num_shards=rank_size, shard_id=rank_id)\n", (7040, 7269), True, 'import mindspore.dataset as de\n'), ((7415, 7441), 'mindspore.dataset.transforms.c_transforms.TypeCast', 'deC.TypeCast', (['mstype.int32'], {}), '(mstype.int32)\n', (7427, 7441), True, 'import mindspore.dataset.transforms.c_transforms as deC\n'), ((7469, 7497), 'mindspore.dataset.transforms.c_transforms.TypeCast', 'deC.TypeCast', (['mstype.float32'], {}), '(mstype.float32)\n', (7481, 7497), True, 'import mindspore.dataset.transforms.c_transforms as deC\n'), ((1507, 1527), 'pathlib.Path', 'Path', (['data_json_path'], {}), '(data_json_path)\n', (1511, 1527), False, 'from pathlib import Path\n'), ((1613, 1634), 'pathlib.Path', 'Path', (['chars_dict_path'], {}), '(chars_dict_path)\n', (1617, 1634), False, 'from pathlib import Path\n'), ((2737, 2778), 'numpy.asarray', 'np.asarray', (['([self.sos_id] + output_tokens)'], {}), '([self.sos_id] + output_tokens)\n', (2747, 2778), True, 'import numpy as np\n'), ((2817, 2877), 'numpy.full', 'np.full', (['(self.max_output_len,)', 'self.eos_id'], {'dtype': 'np.int64'}), '((self.max_output_len,), self.eos_id, dtype=np.int64)\n', (2824, 2877), True, 'import numpy as np\n'), ((3102, 3143), 'numpy.asarray', 'np.asarray', (['(output_tokens + [self.eos_id])'], {}), '(output_tokens + [self.eos_id])\n', (3112, 
3143), True, 'import numpy as np\n'), ((3183, 3246), 'numpy.full', 'np.full', (['(self.max_output_len,)', 'self.IGNORE_ID'], {'dtype': 'np.int64'}), '((self.max_output_len,), self.IGNORE_ID, dtype=np.int64)\n', (3190, 3246), True, 'import numpy as np\n'), ((3642, 3717), 'numpy.full', 'np.full', (['(self.max_input_len, input_features.shape[1])', '(0)'], {'dtype': 'np.float32'}), '((self.max_input_len, input_features.shape[1]), 0, dtype=np.float32)\n', (3649, 3717), True, 'import numpy as np\n'), ((3828, 3877), 'numpy.full', 'np.full', (['(self.max_input_len,)', '(0)'], {'dtype': 'np.int32'}), '((self.max_input_len,), 0, dtype=np.int32)\n', (3835, 3877), True, 'import numpy as np\n'), ((5355, 5376), 'numpy.vstack', 'np.vstack', (['LFR_inputs'], {}), '(LFR_inputs)\n', (5364, 5376), True, 'import numpy as np\n'), ((1802, 1817), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1811, 1817), False, 'import json\n'), ((2091, 2108), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2102, 2108), False, 'import pickle\n'), ((4907, 4921), 'numpy.ceil', 'np.ceil', (['(T / n)'], {}), '(T / n)\n', (4914, 4921), True, 'import numpy as np\n'), ((5169, 5194), 'numpy.hstack', 'np.hstack', (['inputs[i * n:]'], {}), '(inputs[i * n:])\n', (5178, 5194), True, 'import numpy as np\n'), ((5869, 5901), 'math.ceil', 'ceil', (['(input_max_len / self.lfr_n)'], {}), '(input_max_len / self.lfr_n)\n', (5873, 5901), False, 'from math import ceil\n'), ((2036, 2051), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (2040, 2051), False, 'from pathlib import Path\n'), ((2232, 2247), 'pathlib.Path', 'Path', (['dict_path'], {}), '(dict_path)\n', (2236, 2247), False, 'from pathlib import Path\n'), ((5019, 5053), 'numpy.hstack', 'np.hstack', (['inputs[i * n:i * n + m]'], {}), '(inputs[i * n:i * n + m])\n', (5028, 5053), True, 'import numpy as np\n'), ((5268, 5298), 'numpy.hstack', 'np.hstack', (['(frame, inputs[-1])'], {}), '((frame, inputs[-1]))\n', (5277, 5298), True, 'import 
numpy as np\n')] |
import numpy as np
import h5py
import random
def numWindows(tot, deltaT):
    """Return the number of windows for an induction of total time ``tot``
    when each window spans ``deltaT`` (same time unit): int((tot - deltaT) * 60).
    """
    return int((tot - deltaT) * 60.0)
def maxLengthCal(dataset, metadata):
    '''
    Find the largest number of rows that any 10-minute window contains.

    dataset: 2-D array whose first column is the induction id and second
    column the timestamp; metadata[pre_id][4] holds the total time.
    '''
    delta_t = 1.0 / 6.0
    one_min = 1.0 / 60.0
    longest = 0
    # NOTE: assumes induction ids are exactly 0..99, as in the original.
    for pre_id in range(100):
        # Rows of this induction, with the id column dropped.
        rows = dataset[dataset[:, 0] == pre_id, 1:]
        for w in range(numWindows(metadata[pre_id][4], delta_t)):
            in_window = np.logical_and(rows[:, 0] >= w * one_min,
                                       rows[:, 0] < w * one_min + 10 * one_min)
            longest = np.amax([rows[in_window, 1:].shape[0], longest])
    return longest
def classLabel(name):
    '''
    Encode a class name as an integer label.
    '''
    # Unknown names yield None, mirroring the original if/elif fall-through.
    return {'background': 0, 'banana': 1, 'wine': 2}.get(name)
def vectorizeClass(classLabel):
    '''
    Translate an integer class label into a +/-1 target vector for RNN regression.
    '''
    # +1 at the label's own position, -1 everywhere else; other labels
    # fall through to None as in the original if/elif chain.
    if classLabel in (0, 1, 2):
        target = np.full(3, -1)
        target[classLabel] = 1
        return target
def randomData(file, numSample):
    '''
    Randomly pick numSample entries from the processedData h5py object.

    Each entry is a (2, T, D) array: plane 0 yields the input (last 3
    channels dropped), plane 1 the full target.
    '''
    chosen = random.sample(range(len(file)), k=numSample)
    try:
        shape = file['0'].shape
    except KeyError:
        return np.array([])
    trainX = np.zeros((numSample, shape[1], shape[2] - 3))
    trainY = np.zeros((numSample, shape[1], shape[2]))
    buffer = np.zeros(shape)  # reused scratch buffer for read_direct
    for row, idx in enumerate(chosen):
        file[str(idx)].read_direct(buffer)
        trainX[row, :, :] = buffer[0, :, :-3]
        trainY[row, :, :] = buffer[1, :, :]
    return trainX, trainY
def dataSplit(ratio):
    '''
    Split the stored dataset into training and test sets.

    ratio: fraction of entries that go into the training set.
    Returns (trainX, trainY, testX, testY), or an empty array when the
    file has no entry named '0'.
    '''
    f = h5py.File('processedData.hdf5', 'r')
    perm = np.random.permutation(len(f))
    try:
        shape = f['0'].shape
    except KeyError:
        return np.array([])
    n_train = int(len(f) * ratio)
    buffer = np.zeros(shape)  # reused scratch buffer for read_direct
    trainX = np.zeros((n_train, shape[1], shape[2] - 3))
    trainY = np.zeros((n_train, shape[1], shape[2]))
    testX = np.zeros((len(f) - n_train, shape[1], shape[2] - 3))
    testY = np.zeros((len(f) - n_train, shape[1], shape[2]))
    for pos, idx in enumerate(perm):
        f[str(idx)].read_direct(buffer)
        if pos < n_train:
            trainX[pos, :, :] = buffer[0, :, :-3]
            trainY[pos, :, :] = buffer[1, :, :]
        else:
            testX[pos - n_train, :, :] = buffer[0, :, :-3]
            testY[pos - n_train, :, :] = buffer[1, :, :]
    return trainX, trainY, testX, testY
def categoricalParse(dataY):
    """Return one class index per sample: the last 3 channels are summed
    over the time axis and the largest column wins."""
    class_scores = dataY[:, :, -3:].sum(axis=1)
    return np.argmax(class_scores, axis=1)
def misClassificationRate(testY, predictY):
    """Fraction of samples whose predicted category differs from the gold one."""
    gold = categoricalParse(testY)
    predicted = categoricalParse(predictY)
    return np.sum(gold != predicted) / gold.size
def hdf2np(dataset):
    """Copy an h5py-style dataset into a freshly allocated numpy array."""
    out = np.zeros(dataset.shape)
    dataset.read_direct(out)
    return out
def _read_group_pair(group):
    """Read one group's 'current' and 'future' datasets into numpy arrays.

    Raises KeyError when either dataset is missing, so callers can skip
    malformed groups.
    """
    current = np.zeros(group['current'].shape)
    future = np.zeros(group['future'].shape)
    group['current'].read_direct(current)
    group['future'].read_direct(future)
    return current, future


def _collect_groups(f, perm, start, stop):
    """Concatenate 'current'/'future' arrays for perm[start:stop].

    Groups lacking the expected datasets are skipped, matching the original
    KeyError handling. Raises ValueError when the range yields no valid
    group (the original raised NameError in that case).
    """
    xs, ys = [], []
    for i in range(start, stop):
        try:
            current, future = _read_group_pair(f[str(perm[i])])
        except KeyError:
            continue
        xs.append(current)
        ys.append(future)
    return np.concatenate(xs, axis=0), np.concatenate(ys, axis=0)


def dataSplitGroup(ratio):
    """Split processedDataGroup.hdf5 into train/test sets.

    ratio: fraction of groups that go into the training set.
    Returns (trainX, trainY, testX, testY). The original implementation
    duplicated the whole collection loop for the two partitions; it is now
    shared via _collect_groups.
    """
    f = h5py.File('processedDataGroup.hdf5', 'r')
    perm = np.random.permutation(len(f))
    trainingSize = int(len(f) * ratio)
    trainX, trainY = _collect_groups(f, perm, 0, trainingSize)
    testX, testY = _collect_groups(f, perm, trainingSize, len(f))
    return trainX, trainY, testX, testY
def convert2Classification(dataY):
    """Extract the last 3 channels and map every -1 entry to 0.

    The original sliced a view of `dataY` and assigned into it, silently
    mutating the caller's array; copying first keeps the input intact
    while returning the same values.
    """
    result = dataY[:, :, -3:].copy()
    result[result == -1] = 0
    return result
| [
"numpy.copy",
"numpy.logical_and",
"numpy.argmax",
"h5py.File",
"numpy.array",
"numpy.sum",
"numpy.zeros",
"numpy.concatenate",
"numpy.amax"
] | [((1639, 1684), 'numpy.zeros', 'np.zeros', (['(numSample, shape[1], shape[2] - 3)'], {}), '((numSample, shape[1], shape[2] - 3))\n', (1647, 1684), True, 'import numpy as np\n'), ((1692, 1733), 'numpy.zeros', 'np.zeros', (['(numSample, shape[1], shape[2])'], {}), '((numSample, shape[1], shape[2]))\n', (1700, 1733), True, 'import numpy as np\n'), ((1744, 1759), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1752, 1759), True, 'import numpy as np\n'), ((2106, 2142), 'h5py.File', 'h5py.File', (['"""processedData.hdf5"""', '"""r"""'], {}), "('processedData.hdf5', 'r')\n", (2115, 2142), False, 'import h5py\n'), ((2315, 2330), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2323, 2330), True, 'import numpy as np\n'), ((2342, 2390), 'numpy.zeros', 'np.zeros', (['(trainingSize, shape[1], shape[2] - 3)'], {}), '((trainingSize, shape[1], shape[2] - 3))\n', (2350, 2390), True, 'import numpy as np\n'), ((2400, 2444), 'numpy.zeros', 'np.zeros', (['(trainingSize, shape[1], shape[2])'], {}), '((trainingSize, shape[1], shape[2]))\n', (2408, 2444), True, 'import numpy as np\n'), ((3005, 3037), 'numpy.sum', 'np.sum', (['dataY[:, :, -3:]'], {'axis': '(1)'}), '(dataY[:, :, -3:], axis=1)\n', (3011, 3037), True, 'import numpy as np\n'), ((3046, 3079), 'numpy.argmax', 'np.argmax', (['sampleCategory'], {'axis': '(1)'}), '(sampleCategory, axis=1)\n', (3055, 3079), True, 'import numpy as np\n'), ((3313, 3336), 'numpy.zeros', 'np.zeros', (['dataset.shape'], {}), '(dataset.shape)\n', (3321, 3336), True, 'import numpy as np\n'), ((3423, 3464), 'h5py.File', 'h5py.File', (['"""processedDataGroup.hdf5"""', '"""r"""'], {}), "('processedDataGroup.hdf5', 'r')\n", (3432, 3464), False, 'import h5py\n'), ((1212, 1233), 'numpy.array', 'np.array', (['[-1, 1, -1]'], {}), '([-1, 1, -1])\n', (1220, 1233), True, 'import numpy as np\n'), ((3224, 3263), 'numpy.sum', 'np.sum', (['(testCategory != predictCategory)'], {}), '(testCategory != predictCategory)\n', (3230, 3263), True, 'import 
numpy as np\n'), ((693, 785), 'numpy.logical_and', 'np.logical_and', (['(dataset_[:, 0] >= j * oneMin)', '(dataset_[:, 0] < j * oneMin + 10 * oneMin)'], {}), '(dataset_[:, 0] >= j * oneMin, dataset_[:, 0] < j * oneMin + \n 10 * oneMin)\n', (707, 785), True, 'import numpy as np\n'), ((799, 850), 'numpy.amax', 'np.amax', (['[dataset_[IDX, 1:].shape[0], maxiumLength]'], {}), '([dataset_[IDX, 1:].shape[0], maxiumLength])\n', (806, 850), True, 'import numpy as np\n'), ((1273, 1294), 'numpy.array', 'np.array', (['[-1, -1, 1]'], {}), '([-1, -1, 1])\n', (1281, 1294), True, 'import numpy as np\n'), ((1614, 1626), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1622, 1626), True, 'import numpy as np\n'), ((2254, 2266), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2262, 2266), True, 'import numpy as np\n'), ((3946, 3966), 'numpy.copy', 'np.copy', (['tempCurrent'], {}), '(tempCurrent)\n', (3953, 3966), True, 'import numpy as np\n'), ((3986, 4005), 'numpy.copy', 'np.copy', (['tempFuture'], {}), '(tempFuture)\n', (3993, 4005), True, 'import numpy as np\n'), ((4067, 4112), 'numpy.concatenate', 'np.concatenate', (['(trainX, tempCurrent)'], {'axis': '(0)'}), '((trainX, tempCurrent), axis=0)\n', (4081, 4112), True, 'import numpy as np\n'), ((4131, 4175), 'numpy.concatenate', 'np.concatenate', (['(trainY, tempFuture)'], {'axis': '(0)'}), '((trainY, tempFuture), axis=0)\n', (4145, 4175), True, 'import numpy as np\n'), ((4589, 4609), 'numpy.copy', 'np.copy', (['tempCurrent'], {}), '(tempCurrent)\n', (4596, 4609), True, 'import numpy as np\n'), ((4628, 4647), 'numpy.copy', 'np.copy', (['tempFuture'], {}), '(tempFuture)\n', (4635, 4647), True, 'import numpy as np\n'), ((4707, 4751), 'numpy.concatenate', 'np.concatenate', (['(testX, tempCurrent)'], {'axis': '(0)'}), '((testX, tempCurrent), axis=0)\n', (4721, 4751), True, 'import numpy as np\n'), ((4768, 4811), 'numpy.concatenate', 'np.concatenate', (['(testY, tempFuture)'], {'axis': '(0)'}), '((testY, tempFuture), axis=0)\n', 
(4782, 4811), True, 'import numpy as np\n'), ((1334, 1355), 'numpy.array', 'np.array', (['[1, -1, -1]'], {}), '([1, -1, -1])\n', (1342, 1355), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
import tensorflow as tf
# Dataset for generating sequences, with labels predicting whether the cumulative sum
# is odd/even.
class Dataset:
    """Synthetic dataset of random integer sequences.

    The label of every sequence element is the parity (odd -> True) of the
    cumulative sum of the sequence up to and including that element.
    """

    def __init__(self, sequences_num, sequence_length, sequence_dim, seed, shuffle_batches=True):
        sequences = np.zeros(
            [sequences_num, sequence_length, sequence_dim], np.int32)
        # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `bool` is the documented replacement.
        labels = np.zeros([sequences_num, sequence_length, 1], bool)
        generator = np.random.RandomState(seed)
        for i in range(sequences_num):
            # `random_integers(low, high)` (inclusive bounds) is deprecated;
            # `randint(low, high + 1)` draws the same values.
            sequences[i, :, 0] = generator.randint(
                0, max(1, sequence_dim - 1) + 1, size=[sequence_length])
            labels[i, :, 0] = np.bitwise_and(np.cumsum(sequences[i, :, 0]), 1)
            if sequence_dim > 1:
                # One-hot encode the drawn values.
                sequences[i] = np.eye(sequence_dim)[sequences[i, :, 0]]
        self._data = {"sequences": sequences.astype(
            np.float32), "labels": labels}
        self._size = sequences_num
        self._shuffler = np.random.RandomState(
            seed) if shuffle_batches else None

    @property
    def data(self):
        return self._data

    @property
    def size(self):
        return self._size

    def batches(self, size=None):
        """Yield dicts with batched "sequences"/"labels"; shuffled when requested."""
        permutation = self._shuffler.permutation(
            self._size) if self._shuffler else np.arange(self._size)
        while len(permutation):
            batch_size = min(size or np.inf, len(permutation))
            batch_perm = permutation[:batch_size]
            permutation = permutation[batch_size:]
            batch = {}
            for key in self._data:
                batch[key] = self._data[key][batch_perm]
            yield batch
class Network:
    """RNN classifier predicting, for every sequence element, whether the
    running cumulative sum up to that element is odd."""

    def __init__(self, args):
        sequences = tf.keras.layers.Input(
            shape=[args.sequence_length, args.sequence_dim])
        # Prefer the LSTM/GRU/SimpleRNN layer classes to the generic RNN
        # wrapper: they run transparently on GPU and are faster on CPU.
        if args.rnn_cell == "LSTM":
            rnn = tf.keras.layers.LSTM(
                args.rnn_cell_dim, return_sequences=True)
        elif args.rnn_cell == "GRU":
            rnn = tf.keras.layers.GRU(
                args.rnn_cell_dim, return_sequences=True)
        else:
            rnn = tf.keras.layers.SimpleRNN(
                args.rnn_cell_dim, return_sequences=True)
        processed = rnn(sequences)
        # Optional ReLU dense layer. The original constructed the layer but
        # never applied it to `processed`.
        if args.hidden_layer is not None:
            processed = tf.keras.layers.Dense(
                args.hidden_layer, activation="relu")(processed)
        # Per-element binary prediction. The original never called the layer.
        predictions = tf.keras.layers.Dense(
            1, activation=tf.nn.sigmoid)(processed)
        self.model = tf.keras.Model(inputs=sequences, outputs=predictions)
        # The original had trailing commas here, turning the optimizer and
        # the loss into 1-tuples and breaking every later call.
        self._optimizer = tf.keras.optimizers.Adam()
        self._loss = tf.keras.losses.BinaryCrossentropy()
        self._metrics = {"loss": tf.metrics.Mean(),
                         "accuracy": tf.keras.metrics.BinaryAccuracy(name="accuracy")}
        # Summary writer was referenced by train_batch but never created.
        self._writer = tf.summary.create_file_writer(
            args.logdir, flush_millis=10 * 1000)

    @tf.function
    def train_batch(self, batch, clip_gradient):
        """Run one optimization step on `batch`, optionally clipping gradients,
        and write train/* summaries."""
        with tf.GradientTape() as tape:
            # The original referenced an undefined name `inputs` and never
            # actually evaluated the loss.
            probabilities = self.model(batch["sequences"], training=True)
            loss = self._loss(batch["labels"], probabilities)
        gradients = tape.gradient(loss, self.model.variables)
        if clip_gradient is not None:
            gradients, gradient_norm = tf.clip_by_global_norm(
                gradients, clip_gradient)
        else:
            gradient_norm = tf.linalg.global_norm(gradients)
        self._optimizer.apply_gradients(zip(gradients, self.model.variables))
        # The original misspelled `tf.summary.experimental`.
        tf.summary.experimental.set_step(self._optimizer.iterations)
        with self._writer.as_default():
            for name, metric in self._metrics.items():
                metric.reset_states()
                if name == "loss":
                    metric(loss)
                else:
                    metric(batch["labels"], probabilities)
                tf.summary.scalar("train/" + name, metric.result())
            tf.summary.scalar("train/gradient_norm", gradient_norm)

    def train_epoch(self, dataset, args):
        """One pass over `dataset` in batches of args.batch_size."""
        for batch in dataset.batches(args.batch_size):
            self.train_batch(batch, args.clip_gradient)

    @tf.function
    def predict_batch(self, batch):
        """Return per-element probabilities for one batch (inference mode)."""
        return self.model(batch["sequences"], training=False)

    def evaluate(self, dataset, args):
        """Compute metrics averaged over the whole `dataset`, write test/*
        summaries and return {metric name: float value}."""
        for metric in self._metrics.values():
            metric.reset_states()
        for batch in dataset.batches(args.batch_size):
            probabilities = self.predict_batch(batch)
            loss = self._loss(batch["labels"], probabilities)
            for name, metric in self._metrics.items():
                if name == "loss":
                    metric(loss)
                else:
                    metric(batch["labels"], probabilities)
        metrics = {name: float(metric.result())
                   for name, metric in self._metrics.items()}
        with self._writer.as_default():
            for name, value in metrics.items():
                tf.summary.scalar("test/" + name, value)
        return metrics
if __name__ == "__main__":
import argparse
import datetime
import os
import re
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=16,
type=int, help="Batch size.")
parser.add_argument("--clip_gradient", default=None, type=lambda x: None if x ==
"None" else float(x), help="Gradient clipping norm.")
parser.add_argument("--hidden_layer", default=None, type=lambda x: None if x ==
"None" else int(x), help="Dense layer after RNN.")
parser.add_argument("--epochs", default=20, type=int,
help="Number of epochs.")
parser.add_argument("--rnn_cell", default="LSTM",
type=str, help="RNN cell type.")
parser.add_argument("--rnn_cell_dim", default=10,
type=int, help="RNN cell dimension.")
parser.add_argument("--sequence_dim", default=1, type=int,
help="Sequence element dimension.")
parser.add_argument("--sequence_length", default=50,
type=int, help="Sequence length.")
parser.add_argument("--recodex", default=False,
action="store_true", help="Evaluation in ReCodEx.")
parser.add_argument("--test_sequences", default=1000,
type=int, help="Number of testing sequences.")
parser.add_argument("--threads", default=1, type=int,
help="Maximum number of threads to use.")
parser.add_argument("--train_sequences", default=10000,
type=int, help="Number of training sequences.")
args = parser.parse_args()
# Fix random seeds and number of threads
np.random.seed(42)
tf.random.set_seed(42)
if args.recodex:
tf.keras.utils.get_custom_objects(
)["glorot_uniform"] = lambda: tf.keras.initializers.glorot_uniform(seed=42)
tf.keras.utils.get_custom_objects(
)["orthogonal"] = lambda: tf.keras.initializers.orthogonal(seed=42)
tf.config.threading.set_inter_op_parallelism_threads(args.threads)
tf.config.threading.set_intra_op_parallelism_threads(args.threads)
# Create logdir name
args.logdir = os.path.join("logs", "{}-{}-{}".format(
os.path.basename(__file__),
datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
",".join(("{}={}".format(re.sub(
"(.)[^_]*_?", r"\1", key), value) for key, value in sorted(vars(args).items())))
))
# Create the data
train = Dataset(args.train_sequences, args.sequence_length,
args.sequence_dim, seed=42, shuffle_batches=True)
test = Dataset(args.test_sequences, args.sequence_length,
args.sequence_dim, seed=43, shuffle_batches=False)
# Create the network and train
network = Network(args)
for epoch in range(args.epochs):
network.train_epoch(train, args)
metrics = network.evaluate(test, args)
with open("sequence_classification.out", "w") as out_file:
print("{:.2f}".format(100 * metrics["accuracy"]), file=out_file)
| [
"tensorflow.metrics.Mean",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"numpy.cumsum",
"numpy.arange",
"numpy.random.RandomState",
"tensorflow.keras.layers.Input",
"argparse.ArgumentParser",
"numpy.random.seed",
"tensorflow.config.threading.set_inter_op_parallelism_threads",
"ten... | [((6914, 6939), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6937, 6939), False, 'import argparse\n'), ((8536, 8554), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (8550, 8554), True, 'import numpy as np\n'), ((8559, 8581), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (8577, 8581), True, 'import tensorflow as tf\n'), ((8853, 8919), 'tensorflow.config.threading.set_inter_op_parallelism_threads', 'tf.config.threading.set_inter_op_parallelism_threads', (['args.threads'], {}), '(args.threads)\n', (8905, 8919), True, 'import tensorflow as tf\n'), ((8924, 8990), 'tensorflow.config.threading.set_intra_op_parallelism_threads', 'tf.config.threading.set_intra_op_parallelism_threads', (['args.threads'], {}), '(args.threads)\n', (8976, 8990), True, 'import tensorflow as tf\n'), ((303, 369), 'numpy.zeros', 'np.zeros', (['[sequences_num, sequence_length, sequence_dim]', 'np.int32'], {}), '([sequences_num, sequence_length, sequence_dim], np.int32)\n', (311, 369), True, 'import numpy as np\n'), ((400, 454), 'numpy.zeros', 'np.zeros', (['[sequences_num, sequence_length, 1]', 'np.bool'], {}), '([sequences_num, sequence_length, 1], np.bool)\n', (408, 454), True, 'import numpy as np\n'), ((475, 502), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (496, 502), True, 'import numpy as np\n'), ((1761, 1831), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '[args.sequence_length, args.sequence_dim]'}), '(shape=[args.sequence_length, args.sequence_dim])\n', (1782, 1831), True, 'import tensorflow as tf\n'), ((3091, 3141), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.sigmoid'}), '(1, activation=tf.nn.sigmoid)\n', (3112, 3141), True, 'import tensorflow as tf\n'), ((3164, 3217), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'sequences', 'outputs': 'predictions'}), '(inputs=sequences, 
outputs=predictions)\n', (3178, 3217), True, 'import tensorflow as tf\n'), ((5485, 5550), 'tensorflow.summary.experijjouimental.set_step', 'tf.summary.experijjouimental.set_step', (['self._optimizer.iterations'], {}), '(self._optimizer.iterations)\n', (5522, 5550), True, 'import tensorflow as tf\n'), ((1012, 1039), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1033, 1039), True, 'import numpy as np\n'), ((1336, 1357), 'numpy.arange', 'np.arange', (['self._size'], {}), '(self._size)\n', (1345, 1357), True, 'import numpy as np\n'), ((2392, 2454), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['args.rnn_cell_dim'], {'return_sequences': '(True)'}), '(args.rnn_cell_dim, return_sequences=True)\n', (2412, 2454), True, 'import tensorflow as tf\n'), ((2527, 2588), 'tensorflow.keras.layers.GRU', 'tf.keras.layers.GRU', (['args.rnn_cell_dim'], {'return_sequences': '(True)'}), '(args.rnn_cell_dim, return_sequences=True)\n', (2546, 2588), True, 'import tensorflow as tf\n'), ((2865, 2924), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['args.hidden_layer'], {'activation': '"""relu"""'}), "(args.hidden_layer, activation='relu')\n", (2886, 2924), True, 'import tensorflow as tf\n'), ((3705, 3731), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (3729, 3731), True, 'import tensorflow as tf\n'), ((3754, 3790), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {}), '()\n', (3788, 3790), True, 'import tensorflow as tf\n'), ((3825, 3842), 'tensorflow.metrics.Mean', 'tf.metrics.Mean', ([], {}), '()\n', (3840, 3842), True, 'import tensorflow as tf\n'), ((3865, 3913), 'tensorflow.keras.metrics.BinaryAccuracy', 'tf.keras.metrics.BinaryAccuracy', ([], {'name': '"""accuracy"""'}), "(name='accuracy')\n", (3896, 3913), True, 'import tensorflow as tf\n'), ((4544, 4561), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4559, 4561), True, 'import 
tensorflow as tf\n'), ((8611, 8646), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (8644, 8646), True, 'import tensorflow as tf\n'), ((8684, 8729), 'tensorflow.keras.initializers.glorot_uniform', 'tf.keras.initializers.glorot_uniform', ([], {'seed': '(42)'}), '(seed=42)\n', (8720, 8729), True, 'import tensorflow as tf\n'), ((8738, 8773), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (8771, 8773), True, 'import tensorflow as tf\n'), ((8807, 8848), 'tensorflow.keras.initializers.orthogonal', 'tf.keras.initializers.orthogonal', ([], {'seed': '(42)'}), '(seed=42)\n', (8839, 8848), True, 'import tensorflow as tf\n'), ((9083, 9109), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (9099, 9109), False, 'import os\n'), ((716, 745), 'numpy.cumsum', 'np.cumsum', (['sequences[i, :, 0]'], {}), '(sequences[i, :, 0])\n', (725, 745), True, 'import numpy as np\n'), ((6715, 6756), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('test/' + name)", 'metric'], {}), "('test/' + name, metric)\n", (6732, 6756), True, 'import tensorflow as tf\n'), ((814, 834), 'numpy.eye', 'np.eye', (['sequence_dim'], {}), '(sequence_dim)\n', (820, 834), True, 'import numpy as np\n'), ((9119, 9142), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9140, 9142), False, 'import datetime\n'), ((9205, 9237), 're.sub', 're.sub', (['"""(.)[^_]*_?"""', '"""\\\\1"""', 'key'], {}), "('(.)[^_]*_?', '\\\\1', key)\n", (9211, 9237), False, 'import re\n')] |
import os
import sys
import numpy as np
import math
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from agent import Agent
# Add the network folder to sys.path
# Root of the regraphnet project, resolved relative to this file.
REGRAPHNET_DIR = os.path.join(os.path.dirname(__file__), "..", "regraphnet")
REGRAPHNET_SRC_DIR = os.path.join(os.path.dirname(__file__), "..", "regraphnet", "src")
if REGRAPHNET_SRC_DIR not in sys.path:
    sys.path.append(REGRAPHNET_SRC_DIR)
# Wildcard import from regraphnet/src/train.py; presumably provides
# NodePointer and format_graph_data used below — verify against that module.
from train import *
class AgentSupervised(Agent):
    """Agent that ranks candidate extrusion actions with a pretrained
    NodePointer network."""

    # Checkpoint file, relative to the regraphnet root, per (use_gcn, use_aug).
    _CHECKPOINTS = {
        (True, True): "ckpt/model_mpn_aug.ckpt",
        (True, False): "ckpt/model_mpn.ckpt",
        (False, True): "ckpt/model_mlp_aug.ckpt",
        (False, False): "ckpt/model_mlp.ckpt",
    }

    def __init__(self, use_gcn=True, use_aug=False):
        super().__init__()
        self.model = NodePointer(nfeat=708, nhid=256, Use_GCN=use_gcn)
        checkpoint_file = Path(REGRAPHNET_DIR) / self._CHECKPOINTS[(use_gcn, use_aug)]
        print(f"Using {checkpoint_file.name}")
        assert checkpoint_file.exists()
        # Using CUDA is slower, so we map the weights to cpu
        self.model.load_state_dict(
            torch.load(checkpoint_file, map_location=torch.device("cpu"))
        )

    def get_actions_probabilities(self, current_graph, target_graph):
        """Return (actions, probabilities) arrays for all candidate actions
        that extend `current_graph` towards `target_graph`."""
        super().get_actions_probabilities(current_graph, target_graph)
        pair, node_names = self.load_graph_pair(target_graph, current_graph)
        actions, probs = self.inference(pair, node_names)
        return np.array(actions), np.array(probs)

    def load_graph_pair(self, data_tar, data_cur):
        """Format target/current graphs into model inputs and collect the
        target graph's node ids."""
        adj_tar, feat_tar = format_graph_data(data_tar, self.bounding_box)
        if data_cur["nodes"]:
            adj_cur, feat_cur = format_graph_data(data_cur, self.bounding_box)
        else:
            # Empty current graph: feed zero-sized tensors instead.
            adj_cur, feat_cur = torch.zeros(0), torch.zeros(0)
        node_names = [node["id"] for node in data_tar["nodes"]]
        return [adj_tar, feat_tar, adj_cur, feat_cur], node_names

    def inference(self, graph_pair_formatted, node_names):
        """Score every (start_face, end_face, operation) triple.

        Returns a list of action dicts and the matching list of
        probabilities p(start) * p(end | start) * p(op).
        """
        self.model.eval()
        num_nodes = graph_pair_formatted[1].size()[0]
        end_given_start = np.zeros((num_nodes, num_nodes))
        with torch.no_grad():
            # Slot 4 selects the conditioning start node for the end head.
            graph_pair_formatted.append(0)
            start_logits, _, op_logits = self.model(
                graph_pair_formatted, use_gpu=False)
            p_start = F.softmax(start_logits.view(1, -1), dim=1)
            p_op = F.softmax(op_logits, dim=1)
            for start_idx in range(num_nodes):
                graph_pair_formatted[4] = start_idx
                _, end_logits, _ = self.model(graph_pair_formatted, use_gpu=False)
                end_probs = F.softmax(end_logits.view(1, -1), dim=1)
                end_given_start[start_idx, :] = end_probs.data.numpy()
        p_start = p_start.data.numpy()[0, :]
        p_op = p_op.data.numpy()[0, :]
        # Enumerate every action and its joint probability.
        actions, probs = [], []
        for i, start_face in enumerate(node_names):
            for j, end_face in enumerate(node_names):
                for k, operation in enumerate(self.operations):
                    actions.append({
                        "start_face": start_face,
                        "end_face": end_face,
                        "operation": operation
                    })
                    probs.append(p_start[i] * end_given_start[i, j] * p_op[k])
        return actions, probs
| [
"pathlib.Path",
"numpy.array",
"os.path.dirname",
"numpy.zeros",
"torch.zeros",
"torch.no_grad",
"sys.path.append",
"torch.nn.functional.softmax",
"torch.device"
] | [((237, 262), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (252, 262), False, 'import os\n'), ((318, 343), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (333, 343), False, 'import os\n'), ((415, 450), 'sys.path.append', 'sys.path.append', (['REGRAPHNET_SRC_DIR'], {}), '(REGRAPHNET_SRC_DIR)\n', (430, 450), False, 'import sys\n'), ((681, 701), 'pathlib.Path', 'Path', (['REGRAPHNET_DIR'], {}), '(REGRAPHNET_DIR)\n', (685, 701), False, 'from pathlib import Path\n'), ((2558, 2590), 'numpy.zeros', 'np.zeros', (['(num_nodes, num_nodes)'], {}), '((num_nodes, num_nodes))\n', (2566, 2590), True, 'import numpy as np\n'), ((1743, 1767), 'numpy.array', 'np.array', (['actions_sorted'], {}), '(actions_sorted)\n', (1751, 1767), True, 'import numpy as np\n'), ((1769, 1791), 'numpy.array', 'np.array', (['probs_sorted'], {}), '(probs_sorted)\n', (1777, 1791), True, 'import numpy as np\n'), ((2604, 2619), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2617, 2619), False, 'import torch\n'), ((2864, 2891), 'torch.nn.functional.softmax', 'F.softmax', (['output_op'], {'dim': '(1)'}), '(output_op, dim=1)\n', (2873, 2891), True, 'import torch.nn.functional as F\n'), ((2039, 2053), 'torch.zeros', 'torch.zeros', (['(0)'], {}), '(0)\n', (2050, 2053), False, 'import torch\n'), ((2057, 2071), 'torch.zeros', 'torch.zeros', (['(0)'], {}), '(0)\n', (2068, 2071), False, 'import torch\n'), ((1374, 1393), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1386, 1393), False, 'import torch\n')] |
import numpy as np
import operator
def calcshannonEnt(dataset):
    """Return the Shannon entropy of the class labels in *dataset*.

    The last element of each row is treated as the class label.
    """
    total = len(dataset)
    counts = {}
    for row in dataset:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    entropy = 0.0
    for count in counts.values():
        p = float(count) / total
        entropy -= p * np.log2(p)
    return entropy
def createDataSet():
    """Return the toy 'is it a fish?' dataset and its feature labels."""
    samples = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    feature_labels = ['no surfacing', 'flippers']
    return samples, feature_labels
def splitDataSet(dataSet, axis, value):
    """Return the rows whose feature at *axis* equals *value*,
    with that feature column removed."""
    return [row[:axis] + row[axis + 1:] for row in dataSet if row[axis] == value]
def chooseBestFeatureToSplit(dataSet):
    """Return the feature index whose split yields the highest
    information gain (-1 if no split improves on the base entropy)."""
    n_features = len(dataSet[0]) - 1
    base_entropy = calcshannonEnt(dataSet)
    best_gain, best_feature = 0.0, -1
    for feat in range(n_features):
        # Weighted entropy of the partition induced by this feature.
        distinct_values = {row[feat] for row in dataSet}
        split_entropy = 0.0
        for value in distinct_values:
            subset = splitDataSet(dataSet, feat, value)
            weight = len(subset) / float(len(dataSet))
            split_entropy += weight * calcshannonEnt(subset)
        gain = base_entropy - split_entropy
        if gain > best_gain:
            best_gain, best_feature = gain, feat
    return best_feature
def majorityCnt(classList):
    """Return the most frequent class label (earliest label wins ties)."""
    tally = {}
    for label in classList:
        tally[label] = tally.get(label, 0) + 1
    # max over a dict walks insertion order, so the first label reaching
    # the top count wins -- same tie-break as a stable descending sort.
    return max(tally, key=tally.get)
def creatTree(dataSet, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    Note: *labels* is mutated in place (the chosen feature's label is
    deleted), matching the original calling convention.
    """
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):  # branch is pure
        return classList[0]
    # No features left to split on (rows hold only the label): majority vote.
    # Bug fix: the original tested len(dataSet) == 1, which is unreachable
    # (a single sample is always pure) and left conflicting samples to crash
    # in chooseBestFeatureToSplit once the features ran out.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del (labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        # Copy the label list: the recursive call deletes entries from its copy,
        # and Python lists are shared by reference.
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = creatTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree
def classify(inputTree, featLabels, testVec):
    """Classify *testVec* by walking the nested-dict decision tree.

    *featLabels* maps feature names (tree keys) to positions in *testVec*.
    """
    node = inputTree
    while isinstance(node, dict):
        feature = list(node.keys())[0]
        branches = node[feature]
        node = branches[testVec[featLabels.index(feature)]]
    return node
def storeTree(inputTree, filename):
    """Serialize *inputTree* to *filename* with pickle.

    Bug fix: pickle writes bytes, so the file must be opened in binary
    mode -- the original text-mode open ('w') raises TypeError under
    Python 3.  A context manager also guarantees the file is closed.
    """
    import pickle
    with open(filename, 'wb') as fw:
        pickle.dump(inputTree, fw)
def grabTree(filename):
    """Load a pickled tree from *filename*.

    Bug fix: pickle data is bytes, so the file must be opened in 'rb' --
    text-mode reads fail with UnicodeDecodeError under Python 3.  The
    original also never closed the file; the context manager does.
    """
    import pickle
    with open(filename, 'rb') as fr:
        return pickle.load(fr)
import matplotlib.pyplot as plt
# Text-box and arrow styles shared by the tree-plotting helpers below.
decisionNode = dict(boxstyle='sawtooth',fc= '0.8')  # internal (decision) nodes
leafNode = dict(boxstyle='round4',fc='0.8')  # leaf nodes
arrow_args = dict(arrowstyle='<-')  # parent -> child connector arrows
# Draw an annotated tree node with an arrow from its parent.
def plotNode(nodeTxt,centerPt,parentPt,nodeType):
    # annotate(text, xy=arrow tail, xytext=text position, va/ha=centred,
    # bbox=text-box style, arrowprops=arrow style).
    # NOTE(review): relies on createPlot() having set createPlot.ax1 first.
    createPlot.ax1.annotate(nodeTxt,xy=parentPt,xytext=centerPt,xycoords='axes fraction',va='center',ha='center',bbox=nodeType,arrowprops=arrow_args)
def createPlot():
    """Demo: draw one decision node and one leaf node on a blank figure.

    NOTE(review): this definition is shadowed by the later
    createPlot(inTree) further down the file, so it is only reachable
    before that redefinition executes.
    """
    fig = plt.figure(1,facecolor='white')
    fig.clf()
    createPlot.ax1 = plt.subplot(111,frameon=False)
    plotNode('a decision node',(0.5,0.2),(0.1,0.5),decisionNode)
    plotNode('a leaf node',(0.8,0.1),(0.3,0.8),leafNode)
    plt.show()
def getNumLeafs(myTree):
    """Count the leaf nodes of a nested-dict decision tree."""
    root = list(myTree.keys())[0]
    count = 0
    for child in myTree[root].values():
        count += getNumLeafs(child) if isinstance(child, dict) else 1
    return count
def getTreeDepth(myTree):
    """Return the depth (number of decision levels) of the tree."""
    root = list(myTree.keys())[0]
    depth = 0
    for child in myTree[root].values():
        branch_depth = 1 + getTreeDepth(child) if isinstance(child, dict) else 1
        depth = max(depth, branch_depth)
    return depth
def retrieveTree(i):
    """Return one of two canned trees for quick experimentation."""
    canned = (
        {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
        {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}},
    )
    return canned[i]
def plotMidText(cntrPt, parentPt, txtString):
    """Write *txtString* at the midpoint of the parent-child edge.

    Bug fix: the y midpoint previously mixed x coordinates
    ((parentPt[0]-cntrPt[0])/2 + cntrPt[1]), so the label landed off the
    edge; it now interpolates the y coordinates.
    """
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    # Relies on createPlot() having set createPlot.ax1.
    createPlot.ax1.text(xMid, yMid, txtString)
def plotTree(myTree,parentPt,nodeTxt):
    """Recursively draw *myTree* below *parentPt*.

    Uses function attributes (plotTree.xOff/yOff/totalW/totalD, set by
    createPlot) as mutable layout state shared across the recursion.
    """
    # Width (leaf count) decides the horizontal span of this subtree.
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)  # NOTE(review): computed but unused
    firstStr = list(myTree.keys())[0]
    # Centre this node over its leaves.
    cntrPt = (plotTree.xOff +(1.0 + float(numLeafs))/2.0/plotTree.totalW,plotTree.yOff)
    plotMidText(cntrPt,parentPt,nodeTxt) # label the branch with the split value
    plotNode(firstStr,cntrPt,parentPt,decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff -1.0/plotTree.totalD # step one level down
    for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':
            # Subtree: recurse with this node as the parent.
            plotTree(secondDict[key],cntrPt,str(key))
        else:
            # Leaf: advance the x cursor and draw it.
            plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
            plotNode(secondDict[key],(plotTree.xOff,plotTree.yOff),cntrPt,leafNode)
            plotMidText((plotTree.xOff,plotTree.yOff),cntrPt,str(key))
    plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD  # restore level on the way up
def createPlot(inTree):
    """Render the decision tree *inTree* in a fresh matplotlib figure.

    Initialises the layout state (total width/depth, x/y cursors) that
    plotTree reads and mutates via function attributes.
    """
    fig = plt.figure(1,facecolor='white')
    fig.clf()
    axprops = dict(xticks=[],yticks=[])  # hide both axes
    createPlot.ax1 = plt.subplot(111,frameon=False,**axprops)
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5/plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree,(0.5,1.0),'')
    plt.show()
if __name__=='__main__':
    # Build and print a decision tree from the contact-lens dataset
    # (tab-separated, one sample per line, label last).
    fr = open('lenses.txt')
    lenses = [inst.strip().split('\t') for inst in fr.readlines()]
    lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']
    # NOTE(review): creatTree mutates lensesLabels in place.
    lensesTree = creatTree(lenses,lensesLabels)
    print(lensesTree)
createPlot(lensesTree) | [
"pickle.dump",
"pickle.load",
"matplotlib.pyplot.figure",
"operator.itemgetter",
"numpy.log2",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((3383, 3409), 'pickle.dump', 'pickle.dump', (['inputTree', 'fw'], {}), '(inputTree, fw)\n', (3394, 3409), False, 'import pickle\n'), ((3503, 3518), 'pickle.load', 'pickle.load', (['fr'], {}), '(fr)\n', (3514, 3518), False, 'import pickle\n'), ((4025, 4057), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'facecolor': '"""white"""'}), "(1, facecolor='white')\n", (4035, 4057), True, 'import matplotlib.pyplot as plt\n'), ((4092, 4123), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'frameon': '(False)'}), '(111, frameon=False)\n', (4103, 4123), True, 'import matplotlib.pyplot as plt\n'), ((4249, 4259), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4257, 4259), True, 'import matplotlib.pyplot as plt\n'), ((6379, 6411), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'facecolor': '"""white"""'}), "(1, facecolor='white')\n", (6389, 6411), True, 'import matplotlib.pyplot as plt\n'), ((6486, 6528), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'frameon': '(False)'}), '(111, frameon=False, **axprops)\n', (6497, 6528), True, 'import matplotlib.pyplot as plt\n'), ((6729, 6739), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6737, 6739), True, 'import matplotlib.pyplot as plt\n'), ((464, 477), 'numpy.log2', 'np.log2', (['prob'], {}), '(prob)\n', (471, 477), True, 'import numpy as np\n'), ((2055, 2077), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2074, 2077), False, 'import operator\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Little test to explore the intuition behind UCB.
Plot how the estimated action value evolves over time.
We find that the time between draws of the suboptimal arms becomes larger and
larger (depending on c and noise variance), while the upper bound estimate of
the best arm becomes increasingly accurate.
Author: <NAME>
License: See LICENSE file
Copyright: 2020, <NAME>
"""
import argparse
import json
import tqdm
import matplotlib.pyplot as plt
import numpy as np
def parse_args(argv=None):
    """Parse command-line options for the UCB demo.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse.  ``None`` (the default) falls back to
        ``sys.argv[1:]`` inside argparse, so the original zero-argument
        call keeps working; passing an explicit list makes the function
        testable without touching sys.argv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output", help="JSON file to write to")
    parser.add_argument(
        "-s", "--seed", help="Random seed", default=42, type=int
    )
    return parser.parse_args(argv)
def main():
    """Simulate a UCB1-style bandit on three Gaussian arms and plot or
    dump the evolving upper-confidence action-value estimates."""
    args = parse_args()
    np.random.seed(args.seed)
    T = 1000  # horizon (time steps)
    c = 1.0  # exploration coefficient in the UCB bonus
    n_arms = 3
    noisevar = 0.1  # std-dev of the Gaussian reward noise
    Nt = {k: 0 for k in range(1, n_arms + 1)}  # draw counts per arm
    means = {k: float(k) for k in range(1, n_arms + 1)}  # true means 1.0, 2.0, 3.0
    obs = {k: [] for k in range(1, n_arms + 1)}  # observed rewards per arm
    Av = np.zeros((T, n_arms))  # UCB estimates over time

    # run each arm at least once
    for a in Nt:
        r = means[a] + np.random.normal(0, noisevar)
        obs[a].append(r)
        Av[0, a - 1] = r
        Nt[a] += 1

    for t in tqdm.trange(2, T + 1):
        for i, a in enumerate(means):
            # noisy reward
            # NOTE(review): every arm receives a reward sample each step;
            # textbook UCB samples only the chosen arm -- presumably
            # intentional for this intuition-building demo.
            r = means[a] + np.random.normal(0, noisevar)
            obs[a].append(r)
            # Sample mean plus the UCB exploration bonus.
            estimate = np.mean(obs[a]) + c * np.sqrt(np.log(t) / Nt[a])
            Av[t - 1, i] = estimate

        # "Pull" the arm with the highest upper bound (only Nt is updated).
        amax = np.argmax(Av[t - 1, :]) + 1
        Nt[amax] += 1

    print(f"Number of draws per arm: {Nt}")

    if args.output:
        # Dump the estimate series as JSON for external plotting.
        out = {"meta": {"xlabel": "Time", "ylabel": "Estimated action value"}}
        data = {"X": list(range(1, T + 1))}
        series = []
        for i, a in enumerate(means):
            series.append({"name": f"μ = {a}", "values": list(Av[:, i])})
        data["series"] = series
        out["data"] = data
        with open(args.output, "w") as fp:
            json.dump(out, fp)
    else:
        for i, a in enumerate(means):
            plt.plot(Av[:, i], label=f"$\mu = {a}$")
        plt.ylabel("Estimated action value")
        plt.xlabel("Time")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    main()
| [
"numpy.random.normal",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"json.dump",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.log",
"numpy.zeros",
"numpy.random.seed",
"tqdm.trange",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show... | [((556, 581), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (579, 581), False, 'import argparse\n'), ((823, 848), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (837, 848), True, 'import numpy as np\n'), ((1070, 1091), 'numpy.zeros', 'np.zeros', (['(T, n_arms)'], {}), '((T, n_arms))\n', (1078, 1091), True, 'import numpy as np\n'), ((1279, 1300), 'tqdm.trange', 'tqdm.trange', (['(2)', '(T + 1)'], {}), '(2, T + 1)\n', (1290, 1300), False, 'import tqdm\n'), ((2190, 2226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Estimated action value"""'], {}), "('Estimated action value')\n", (2200, 2226), True, 'import matplotlib.pyplot as plt\n'), ((2235, 2253), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2245, 2253), True, 'import matplotlib.pyplot as plt\n'), ((2262, 2274), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2272, 2274), True, 'import matplotlib.pyplot as plt\n'), ((2283, 2293), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2291, 2293), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1195), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noisevar'], {}), '(0, noisevar)\n', (1182, 1195), True, 'import numpy as np\n'), ((1577, 1600), 'numpy.argmax', 'np.argmax', (['Av[t - 1, :]'], {}), '(Av[t - 1, :])\n', (1586, 1600), True, 'import numpy as np\n'), ((2062, 2080), 'json.dump', 'json.dump', (['out', 'fp'], {}), '(out, fp)\n', (2071, 2080), False, 'import json\n'), ((2141, 2182), 'matplotlib.pyplot.plot', 'plt.plot', (['Av[:, i]'], {'label': 'f"""$\\\\mu = {a}$"""'}), "(Av[:, i], label=f'$\\\\mu = {a}$')\n", (2149, 2182), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1423), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noisevar'], {}), '(0, noisevar)\n', (1410, 1423), True, 'import numpy as np\n'), ((1476, 1491), 'numpy.mean', 'np.mean', (['obs[a]'], {}), '(obs[a])\n', (1483, 1491), True, 'import 
numpy as np\n'), ((1506, 1515), 'numpy.log', 'np.log', (['t'], {}), '(t)\n', (1512, 1515), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Demo: apply a perspective transform to a test image and compare views.
# Flag 1 = load in colour; OpenCV returns BGR channel order.
image = cv2.imread('/home/pi/book/dataset/ruler.512.tiff', 1)
# Convert BGR -> RGB so matplotlib shows the colours correctly.
# NOTE(review): 'input' shadows the builtin of the same name.
input = cv2.cvtColor(image, cv2.COLOR_BGR2RGB )
rows, cols, channels = input.shape
# Four source points and their destinations define the homography.
points1 = np.float32([[0, 0], [400, 0], [0, 400], [400, 400]])
points2 = np.float32([[0,0], [300, 0], [0, 300], [300, 300]])
P = cv2.getPerspectiveTransform(points1, points2)
output = cv2.warpPerspective(input, P, (300, 300))
# Side-by-side display of the original and the warped result.
plt.subplot(121)
plt.imshow(input)
plt.title('Input Image')
plt.subplot(122)
plt.imshow(output)
plt.title('Perspective Transform')
plt.show()
| [
"matplotlib.pyplot.imshow",
"cv2.getPerspectiveTransform",
"matplotlib.pyplot.subplot",
"cv2.warpPerspective",
"cv2.cvtColor",
"matplotlib.pyplot.title",
"cv2.imread",
"numpy.float32",
"matplotlib.pyplot.show"
] | [((75, 128), 'cv2.imread', 'cv2.imread', (['"""/home/pi/book/dataset/ruler.512.tiff"""', '(1)'], {}), "('/home/pi/book/dataset/ruler.512.tiff', 1)\n", (85, 128), False, 'import cv2\n'), ((137, 175), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (149, 175), False, 'import cv2\n'), ((222, 274), 'numpy.float32', 'np.float32', (['[[0, 0], [400, 0], [0, 400], [400, 400]]'], {}), '([[0, 0], [400, 0], [0, 400], [400, 400]])\n', (232, 274), True, 'import numpy as np\n'), ((285, 337), 'numpy.float32', 'np.float32', (['[[0, 0], [300, 0], [0, 300], [300, 300]]'], {}), '([[0, 0], [300, 0], [0, 300], [300, 300]])\n', (295, 337), True, 'import numpy as np\n'), ((341, 386), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['points1', 'points2'], {}), '(points1, points2)\n', (368, 386), False, 'import cv2\n'), ((396, 437), 'cv2.warpPerspective', 'cv2.warpPerspective', (['input', 'P', '(300, 300)'], {}), '(input, P, (300, 300))\n', (415, 437), False, 'import cv2\n'), ((438, 454), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (449, 454), True, 'from matplotlib import pyplot as plt\n'), ((455, 472), 'matplotlib.pyplot.imshow', 'plt.imshow', (['input'], {}), '(input)\n', (465, 472), True, 'from matplotlib import pyplot as plt\n'), ((473, 497), 'matplotlib.pyplot.title', 'plt.title', (['"""Input Image"""'], {}), "('Input Image')\n", (482, 497), True, 'from matplotlib import pyplot as plt\n'), ((498, 514), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (509, 514), True, 'from matplotlib import pyplot as plt\n'), ((515, 533), 'matplotlib.pyplot.imshow', 'plt.imshow', (['output'], {}), '(output)\n', (525, 533), True, 'from matplotlib import pyplot as plt\n'), ((534, 568), 'matplotlib.pyplot.title', 'plt.title', (['"""Perspective Transform"""'], {}), "('Perspective Transform')\n", (543, 568), True, 'from matplotlib import pyplot as plt\n'), ((569, 579), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (577, 579), True, 'from matplotlib import pyplot as plt\n')] |
import numpy
from matplotlib import pyplot
def bisection(f, interval, max_steps=100, tol=1e-10):
    """Locate a root of *f* inside *interval* by repeated halving.

    Iteration stops after *max_steps* halvings, or once |f(mid)| or the
    bracket width drops to *tol*.  Prints the number of steps taken and
    returns the final midpoint.
    """
    lo, hi = interval
    mid = (lo + hi) / 2
    f_lo = f(lo)
    f_hi = f(hi)
    f_mid = f(mid)
    steps = 0
    while steps < max_steps and abs(f_mid) > tol and (hi - lo) > tol:
        steps += 1
        if f_mid * f_hi < 0:
            # Sign change between mid and hi: keep the right half.
            lo, f_lo = mid, f_mid
        else:
            # Otherwise keep the left half.
            hi, f_hi = mid, f_mid
        mid = (lo + hi) / 2
        f_mid = f(mid)
    print("Nsteps", steps)
    return mid
if __name__=="__main__":
    def f(x):
        # Monotone test function with one root of e^x + x = 2 in [0, 1].
        return numpy.exp(x) + x - 2
    def g(x):
        # Oscillatory test function with many roots on [1, 10].
        return numpy.sin(x**2) - 0.1*x
    interval = [0,1]
    s = bisection(f, interval)
    print("s = ", s, "f(s) = ", f(s))
    # Visualise g before root-finding on it.
    x = numpy.linspace(0, 10, 1000)
    pyplot.plot(x, g(x))
    pyplot.show()
    # Different brackets pick out different roots of g.
    s = bisection(g, [1,10])
    print("s = ", s, "g(s) = ", g(s))
    s = bisection(g, [1,9])
    print("s = ", s, "g(s) = ", g(s))
    s = bisection(g, [1,8.5])
    print("s = ", s, "g(s) = ", g(s))
    s = bisection(g, [1,8])
    print("s = ", s, "g(s) = ", g(s))
| [
"numpy.sin",
"numpy.linspace",
"numpy.exp",
"matplotlib.pyplot.show"
] | [((835, 862), 'numpy.linspace', 'numpy.linspace', (['(0)', '(10)', '(1000)'], {}), '(0, 10, 1000)\n', (849, 862), False, 'import numpy\n'), ((892, 905), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (903, 905), False, 'from matplotlib import pyplot\n'), ((699, 716), 'numpy.sin', 'numpy.sin', (['(x ** 2)'], {}), '(x ** 2)\n', (708, 716), False, 'import numpy\n'), ((649, 661), 'numpy.exp', 'numpy.exp', (['x'], {}), '(x)\n', (658, 661), False, 'import numpy\n')] |
"""
Brief: Genetic algorithm demo for eight queens game.
Author: <NAME>
Date: 2020/3/22
"""
import argparse
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpt
def calc_conflict(location_0, location_1):
    """Return True when the two queens do NOT attack each other.

    Despite the name, True means no shared row, column, or diagonal.
    Each location is a [row, column] pair.
    """
    row_gap = abs(location_0[0] - location_1[0])
    col_gap = abs(location_0[1] - location_1[1])
    same_line = location_0[0] == location_1[0] or location_0[1] == location_1[1]
    return row_gap != col_gap and not same_line
def calc_resilience(unity):
    """Count the queen pairs that do not attack each other.

    A perfect n-queens placement scores C(n, 2) (28 for n = 8).
    """
    n = len(unity)
    return sum(
        1
        for a in range(n)
        for b in range(a + 1, n)
        if calc_conflict([a, unity[a]], [b, unity[b]])
    )
def calc_best_unity(group_list):
    """Return the group member with the highest resilience
    (the first one, on ties)."""
    best_index, best_score = 0, -1
    for idx, unity in enumerate(group_list):
        score = calc_resilience(unity)
        if score > best_score:
            best_index, best_score = idx, score
    return group_list[best_index]
def random_group_list(num_unity=16, num_queen=8):
    """Create *num_unity* random permutations of range(num_queen),
    one column index per board row."""
    return [
        list(np.random.choice(num_queen, size=num_queen, replace=False))
        for _ in range(num_unity)
    ]
def calc_resiliences(group_list):
    """Return the resilience score of every group member, in order."""
    return [calc_resilience(unity) for unity in group_list]
def normalize(resiliences):
    """Scale the scores so they sum to (almost) 1.

    The epsilon in the denominator guards against an all-zero total.
    """
    total = sum(resiliences) + 1e-07
    return [score / total for score in resiliences]
def calc_cumulate(probabilities):
    """Return the running (cumulative) sums of *probabilities*."""
    cumulative = []
    running = 0.0
    for p in probabilities:
        running = running + p
        cumulative.append(running)
    return cumulative
def random_match(cumulate):
    """Spin the roulette wheel: return the first index whose cumulative
    probability exceeds a uniform draw (0 if none does)."""
    draw = random.random()
    for idx, bound in enumerate(cumulate):
        if draw < bound:
            return idx
    return 0
def random_select(group_list):
    """Fitness-proportionate (roulette-wheel) selection of a new
    generation, same size as the input; members are copied."""
    wheel = calc_cumulate(normalize(calc_resiliences(group_list)))
    return [group_list[random_match(wheel)][:] for _ in group_list]
def compete_select(group_list):
    """Binary-tournament selection: for each slot, pit two distinct
    random members and copy the fitter one (first wins ties)."""
    scores = calc_resiliences(group_list)
    n = len(group_list)
    next_gen = []
    for _ in range(n):
        a, b = np.random.choice(n, size=2, replace=False)
        winner = a if scores[a] >= scores[b] else b
        next_gen.append(group_list[winner][:])
    return next_gen
def do_with_probability(probability):
    """Return True with the given probability (a single uniform draw)."""
    return random.random() < probability
def crossover(group_list, probability):
    """With probability *probability*, scramble a shared random gene
    segment inside each member of randomly formed pairs.

    NOTE(review): despite the name, no genes move between the two paired
    parents -- each child keeps its own parent's genes, reordered over a
    shared segment by a shared index permutation, so every child remains
    a permutation.  Confirm this "crossover" is the intended operator.
    """
    num = len(group_list)
    sa = len(group_list[0])
    result = [[0] * sa] * num
    # Default outcome: children are plain copies of their parents.
    for k in range(num):
        result[k] = group_list[k][:]
    if do_with_probability(probability):
        # Random pairing of all members.
        index = np.random.choice(num, size=num, replace=False)
        for k in range(num // 2):
            parent_0 = group_list[index[k * 2 + 0]]
            parent_1 = group_list[index[k * 2 + 1]]
            child_0 = parent_0[:]
            child_1 = parent_1[:]
            # Pick the segment [rand[0], rand[1]] (inclusive) to scramble.
            rand = np.random.choice(sa, size=2, replace=False)
            if rand[0] > rand[1]:
                t = rand[0]
                rand[0] = rand[1]
                rand[1] = t
            sz = rand[1] - rand[0] + 1
            # One permutation of the segment positions, applied to both children.
            idx = np.random.choice(sz, size=sz, replace=False)
            for n in range(0, rand[1] - rand[0] + 1):
                child_0[n + rand[0]] = parent_0[idx[n] + rand[0]]
                child_1[n + rand[0]] = parent_1[idx[n] + rand[0]]
            result[k * 2 + 0] = child_0[:]
            result[k * 2 + 1] = child_1[:]
    return result
def mutate(group_list, probability):
    """Return a copy of the population where each member, with the given
    probability, has two randomly chosen genes swapped."""
    mutated = []
    for member in group_list:
        child = member[:]
        if do_with_probability(probability):
            i, j = np.random.choice(len(member), size=2, replace=False)
            child[i], child[j] = member[j], member[i]
        mutated.append(child)
    return mutated
def draw_circle(axes, row, col, color='r'):
    """Draw a filled queen marker in the board cell at (row, col)."""
    center = (0.5, 0.5)  # cell centre in the cell's own axes coordinates
    circle = mpt.Circle(center, radius=0.4, color=color, fill=True)
    axes[row, col].add_patch(circle)
def draw_chessboard(axes, rows=8, cols=8):
    """Paint the checkered background and edge labels onto the grid of axes."""
    for y in range(0, rows):
        for x in range(0, cols):
            # set the background of canvas (alternating dark squares).
            if y % 2 == 0 and x % 2 == 0:
                axes[y, x].set_facecolor('k')
            if y % 2 != 0 and x % 2 != 0:
                axes[y, x].set_facecolor('k')
            # close all the figure axes (no ticks inside the cells).
            axes[y, x].set_xticks([])
            axes[y, x].set_yticks([])
            # set all the edge labels (1-based row/column numbers).
            if x == 0:
                axes[y, x].set_ylabel(y + 1, rotation='horizontal', ha='right', va='center', fontsize=12)
            if y == 0:
                axes[y, x].set_title(x + 1, ha='center', va='center', fontsize=12)
def update_monitor(axes, unity, last_unity, color='r', rows=8, cols=8):
    """Erase the previous generation's queens and draw the new placement."""
    # clear all the last locations (one queen per row).
    for k in range(rows):
        axes[k, last_unity[k]].cla()
    draw_chessboard(axes, rows, cols)
    # redraw all the new locations.
    for k in range(len(unity)):
        draw_circle(axes, row=k, col=unity[k], color=color)
def start_monitor(unities=16, p_crossover=0.5, p_mutate=0.05, iterations=100):
    """Run the genetic algorithm with live visualisation.

    Each iteration shows the current best placement; the marker turns
    blue when a full solution (resilience 28 = C(8,2)) is found, at
    which point the loop stops.
    """
    rows = cols = 8
    fig, axes = plt.subplots(rows, cols, figsize=(5, 5))
    # set window title and figure title.
    fig.canvas.set_window_title('Genetic Algorithm')
    fig.suptitle('Eight Queens', fontsize=16)
    plt.ion()  # interactive mode so the board updates between iterations
    group_list = random_group_list(num_unity=unities, num_queen=rows)
    last_unity = [0] * rows
    for k in range(iterations):
        unity = calc_best_unity(group_list)
        resilience = calc_resilience(unity)
        print('unity: ' + str(unity))
        print('resilience: ' + str(resilience))
        if resilience == 28:
            color = 'b'  # solved: show in blue
        else:
            color = 'r'
        update_monitor(axes, unity, last_unity, color, rows, cols)
        if resilience == 28:
            break
        else:
            # Evolve: tournament selection, then crossover, then mutation.
            group_list = compete_select(group_list)
            group_list = crossover(group_list, p_crossover)
            group_list = mutate(group_list, p_mutate)
        last_unity = unity[:]
        plt.pause(0.25)
    # stay until close the window manually.
    print('Monitor has done.')
    # stop and close the window.
    plt.pause(0)
    # plt.ioff()
if __name__ == '__main__':
    # Parse and range-check the GA hyper-parameters, then run the monitor.
    parser = argparse.ArgumentParser(description='Arguments of genetic algorithm.')
    parser.add_argument('--unities', type=int, default=16, help='Number of group members.')
    parser.add_argument('--p_crossover', type=float, default=0.5, help='Probability of gene crossover.')
    parser.add_argument('--p_mutate', type=float, default=0.05, help='Probability of gene mutation.')
    parser.add_argument('--iterations', type=int, default=100, help='Iterations of algorithm.')
    args = parser.parse_args()
    # Reject out-of-range values up front rather than failing mid-run.
    if args.unities < 1 or args.unities > 1000:
        print('Bad argument --unities, too small or too large')
        exit(0)
    if args.p_crossover < 0.0 or args.p_crossover > 1.0:
        print('Bad argument --p_crossover, too small or too large')
        exit(0)
    if args.p_mutate < 0.0 or args.p_mutate > 1.0:
        print('Bad argument --p_mutate, too small or too large')
        exit(0)
    if args.iterations < 1 or args.iterations > 1000:
        print('Bad argument --iterations, too small or too large')
        exit(0)
    start_monitor(unities=args.unities, p_crossover=args.p_crossover,
                  p_mutate=args.p_mutate, iterations=args.iterations)
| [
"argparse.ArgumentParser",
"numpy.random.choice",
"matplotlib.pyplot.pause",
"random.random",
"matplotlib.patches.Circle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ion"
] | [((1853, 1868), 'random.random', 'random.random', ([], {}), '()\n', (1866, 1868), False, 'import random\n'), ((4638, 4692), 'matplotlib.patches.Circle', 'mpt.Circle', (['center'], {'radius': '(0.4)', 'color': 'color', 'fill': '(True)'}), '(center, radius=0.4, color=color, fill=True)\n', (4648, 4692), True, 'import matplotlib.patches as mpt\n'), ((5901, 5941), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(5, 5)'}), '(rows, cols, figsize=(5, 5))\n', (5913, 5941), True, 'import matplotlib.pyplot as plt\n'), ((6086, 6095), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (6093, 6095), True, 'import matplotlib.pyplot as plt\n'), ((6952, 6964), 'matplotlib.pyplot.pause', 'plt.pause', (['(0)'], {}), '(0)\n', (6961, 6964), True, 'import matplotlib.pyplot as plt\n'), ((7023, 7093), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Arguments of genetic algorithm."""'}), "(description='Arguments of genetic algorithm.')\n", (7046, 7093), False, 'import argparse\n'), ((2644, 2688), 'numpy.random.choice', 'np.random.choice', (['num'], {'size': '(2)', 'replace': '(False)'}), '(num, size=2, replace=False)\n', (2660, 2688), True, 'import numpy as np\n'), ((2909, 2924), 'random.random', 'random.random', ([], {}), '()\n', (2922, 2924), False, 'import random\n'), ((3221, 3267), 'numpy.random.choice', 'np.random.choice', (['num'], {'size': 'num', 'replace': '(False)'}), '(num, size=num, replace=False)\n', (3237, 3267), True, 'import numpy as np\n'), ((6824, 6839), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.25)'], {}), '(0.25)\n', (6833, 6839), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1195), 'numpy.random.choice', 'np.random.choice', (['num_queen'], {'size': 'num_queen', 'replace': '(False)'}), '(num_queen, size=num_queen, replace=False)\n', (1153, 1195), True, 'import numpy as np\n'), ((3493, 3536), 'numpy.random.choice', 'np.random.choice', (['sa'], {'size': '(2)', 'replace': '(False)'}), '(sa, 
size=2, replace=False)\n', (3509, 3536), True, 'import numpy as np\n'), ((3718, 3762), 'numpy.random.choice', 'np.random.choice', (['sz'], {'size': 'sz', 'replace': '(False)'}), '(sz, size=sz, replace=False)\n', (3734, 3762), True, 'import numpy as np\n'), ((4367, 4410), 'numpy.random.choice', 'np.random.choice', (['sa'], {'size': '(2)', 'replace': '(False)'}), '(sa, size=2, replace=False)\n', (4383, 4410), True, 'import numpy as np\n')] |
'''
Birmingham Parallel Genetic Algorithm
A pool genetic algorithm for the
structural characterisation of
nanoalloys.
Please cite -
<NAME> et al, PCCP, 2015, 17, 2104-2112
Authors -
The Johnston Group
10/10/14
--- Roulette Wheel Selection Class ---
'''
import random as ran
import numpy as np
from checkPool import checkPool
class select:
    """Roulette-wheel parent selection over the current cluster pool."""

    def __init__(self,nPool):
        # Pool size; fitness is computed once from the pool energies.
        ran.seed()
        self.nPool = nPool
        self.getFitness()
        # self.getEnergies()
        # self.selectClusters(n)

    def getFitness(self):
        """Map sorted pool energies to fitness values in (0, 1).

        Lower (better) energy gives fitness near 1 via a tanh ramp.
        NOTE(review): energyRange is 0 when all pool energies are equal,
        which would make the division below raise ZeroDivisionError.
        """
        self.fitness = []
        self.energies = sorted(checkPool().energies)
        energyRange = self.energies[len(self.energies)-1] - self.energies[0]
        for energy in self.energies:
            fit = 0.5*(1-np.tanh(2.*((energy-self.energies[0])/energyRange)-1.))
            self.fitness.append(fit)

    def roulette(self):
        """Pick two distinct pool indices, each accepted with probability
        equal to its fitness, and return them as a pair."""
        self.pair = []
        while len(self.pair) < 2:
            ranPos = ran.randrange(0,self.nPool)
            ranFit = ran.uniform(0,1)
            if ranFit < self.fitness[ranPos] and ranPos not in self.pair:
                self.pair.append(ranPos)
        return self.pair

    # def selectClusters(self,n):
    #     '''
    #     Selects random
    #     pair from pool
    #     for crossover.
    #     '''
    #     clust1 = self.tournament()
    #     clust2 = self.tournament()
    #     while clust2 == clust1:
    #         clust2 = self.tournament()
    #     self.pair = [clust1,clust2]

    # def tournament(self):
    #     '''
    #     Randomly select
    #     tournament.
    #     '''
    #     size = 4
    #     tournEn = []
    #     tourn = ran.sample(range(0,self.nPool),size)
    #     for i in tourn:
    #         tournEn.append(self.energies[i])
    #     return self.energies.index(min(tournEn))
| [
"random.uniform",
"random.randrange",
"numpy.tanh",
"checkPool.checkPool",
"random.seed"
] | [((381, 391), 'random.seed', 'ran.seed', ([], {}), '()\n', (389, 391), True, 'import random as ran\n'), ((863, 891), 'random.randrange', 'ran.randrange', (['(0)', 'self.nPool'], {}), '(0, self.nPool)\n', (876, 891), True, 'import random as ran\n'), ((903, 920), 'random.uniform', 'ran.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (914, 920), True, 'import random as ran\n'), ((555, 566), 'checkPool.checkPool', 'checkPool', ([], {}), '()\n', (564, 566), False, 'from checkPool import checkPool\n'), ((697, 761), 'numpy.tanh', 'np.tanh', (['(2.0 * ((energy - self.energies[0]) / energyRange) - 1.0)'], {}), '(2.0 * ((energy - self.energies[0]) / energyRange) - 1.0)\n', (704, 761), True, 'import numpy as np\n')] |
from torch.utils.data import Sampler
import numpy as np
from random import shuffle
import pandas as pd
class BySequenceLengthRegressionSampler(Sampler):
    """Batch sampler that groups rows of similar tokenized length.

    Rows are bucketed by clipped token length (bucket edges are octiles
    of the observed lengths), so each yielded batch contains texts of
    similar length and padding is minimised.
    """

    def __init__(self, tokenizer, text_col, data_source, max_length=256, batch_size=64,):
        self.tokenizer = tokenizer
        self.text_col = text_col  # column of data_source holding the text
        self.data_source = data_source
        self.max_length = max_length  # token lengths are clipped to this
        self.ind_n_len = self.__get_lengths(data_source)
        self.bucket_boundaries = self.__get_bucket_boundaries()
        self.batch_size = batch_size

    def __get_lengths(self, data_source):
        """Return [(row_index, clipped_token_length), ...] for every row."""
        ind_n_len = []
        data_source = data_source[self.text_col].apply(
            lambda x: min(len(self.tokenizer.tokenize(x)), self.max_length)
        )
        for i, p in enumerate(data_source):
            ind_n_len.append( (i, p) )
        return ind_n_len

    def __get_bucket_boundaries(self):
        """Compute bucket edges as octiles of the observed lengths."""
        lenghts = np.array(self.ind_n_len)[:, 1]
        # duplicates='drop' tolerates many rows sharing the same length.
        bucket_boundaries = pd.qcut(lenghts, 8, retbins = True, duplicates = 'drop')[1]
        return bucket_boundaries

    def __iter__(self):
        """Yield lists of row indices, one list per shuffled batch."""
        data_buckets = dict()
        # where p is the id number and seq_len is the length of this id number.
        for p, seq_len in self.ind_n_len:
            pid = self.element_to_bucket_id(p,seq_len)
            if pid in data_buckets.keys():
                data_buckets[pid].append(p)
            else:
                data_buckets[pid] = [p]

        for k in data_buckets.keys():
            data_buckets[k] = np.asarray(data_buckets[k])

        iter_list = []
        for k in data_buckets.keys():
            try:
                np.random.shuffle(data_buckets[k])
                iter_list += (np.array_split(data_buckets[k]
                    , int(data_buckets[k].shape[0]/self.batch_size)))
            except:
                # NOTE(review): silently drops buckets smaller than one batch
                # (array_split raises on 0 sections) -- deliberate best-effort,
                # but the bare except also hides any other error; consider
                # narrowing it to ValueError.
                pass
        shuffle(iter_list) # shuffle all the batches so they arent ordered by bucket
        # size
        for i in iter_list:
            yield i.tolist() # as it was stored in an array

    def __len__(self):
        return len(self.data_source)

    def element_to_bucket_id(self, x, seq_length):
        """Map *seq_length* to its bucket index (parameter x is unused)."""
        boundaries = list(self.bucket_boundaries)
        buckets_min = [np.iinfo(np.int32).min] + boundaries
        buckets_max = boundaries + [np.iinfo(np.int32).max]
        conditions_c = np.logical_and(
            np.less_equal(buckets_min, seq_length),
            np.less(seq_length, buckets_max))
        bucket_id = np.min(np.where(conditions_c))
        return bucket_id
class BySequenceLengthPairedSampler(Sampler):
    """Length-bucketing batch sampler for paired-text rows.

    Same strategy as BySequenceLengthRegressionSampler, but each row has
    two text columns; the bucketing length is the max of the two clipped
    token lengths.
    """

    def __init__(
        self,
        tokenizer,
        text1_col,
        text2_col,
        data_source,
        max_length1=256,
        max_length2=256,
        batch_size=64,):
        self.tokenizer = tokenizer
        self.text1_col = text1_col
        self.text2_col = text2_col
        self.max_length1 = max_length1  # clip length for the first text
        self.max_length2 = max_length2  # clip length for the second text
        self.data_source = data_source
        self.ind_n_len = self.__get_lengths(data_source)
        self.bucket_boundaries = self.__get_bucket_boundaries()
        self.batch_size = batch_size

    def __get_lengths(self, data_source):
        """Return [(row_index, max clipped length of the two texts), ...]."""
        text1 = data_source[self.text1_col].apply(
            lambda x: min(len(self.tokenizer.tokenize(x)), self.max_length1)
        )
        text2 = data_source[self.text2_col].apply(
            lambda x: min(len(self.tokenizer.tokenize(x)), self.max_length2)
        )
        ind_n_len = []
        # Bucket by the longer of the two texts in each row.
        data_source = list(map(lambda x: max(x), zip(text1, text2)))
        for i, p in enumerate(data_source):
            ind_n_len.append( (i, p) )
        return ind_n_len

    def __get_bucket_boundaries(self):
        """Compute bucket edges as octiles of the observed lengths."""
        lenghts = np.array(self.ind_n_len)[:, 1]
        bucket_boundaries = pd.qcut(lenghts, 8, retbins = True, duplicates = 'drop')[1]
        return bucket_boundaries

    def __iter__(self):
        """Yield lists of row indices, one list per shuffled batch."""
        data_buckets = dict()
        # where p is the id number and seq_len is the length of this id number.
        for p, seq_len in self.ind_n_len:
            pid = self.element_to_bucket_id(p,seq_len)
            if pid in data_buckets.keys():
                data_buckets[pid].append(p)
            else:
                data_buckets[pid] = [p]

        for k in data_buckets.keys():
            data_buckets[k] = np.asarray(data_buckets[k])

        iter_list = []
        for k in data_buckets.keys():
            try:
                np.random.shuffle(data_buckets[k])
                iter_list += (np.array_split(data_buckets[k]
                    , int(data_buckets[k].shape[0]/self.batch_size)))
            except:
                # NOTE(review): bare except drops buckets smaller than one
                # batch but also hides other errors; consider ValueError only.
                pass
        shuffle(iter_list) # shuffle all the batches so they arent ordered by bucket
        # size
        for i in iter_list:
            yield i.tolist() # as it was stored in an array

    def __len__(self):
        return len(self.data_source)

    def element_to_bucket_id(self, x, seq_length):
        """Map *seq_length* to its bucket index (parameter x is unused)."""
        boundaries = list(self.bucket_boundaries)
        buckets_min = [np.iinfo(np.int32).min] + boundaries
        buckets_max = boundaries + [np.iinfo(np.int32).max]
        conditions_c = np.logical_and(
            np.less_equal(buckets_min, seq_length),
            np.less(seq_length, buckets_max))
        bucket_id = np.min(np.where(conditions_c))
        return bucket_id
| [
"numpy.less",
"random.shuffle",
"pandas.qcut",
"numpy.where",
"numpy.less_equal",
"numpy.asarray",
"numpy.iinfo",
"numpy.array",
"numpy.random.shuffle"
] | [((1863, 1881), 'random.shuffle', 'shuffle', (['iter_list'], {}), '(iter_list)\n', (1870, 1881), False, 'from random import shuffle\n'), ((4701, 4719), 'random.shuffle', 'shuffle', (['iter_list'], {}), '(iter_list)\n', (4708, 4719), False, 'from random import shuffle\n'), ((912, 936), 'numpy.array', 'np.array', (['self.ind_n_len'], {}), '(self.ind_n_len)\n', (920, 936), True, 'import numpy as np\n'), ((969, 1021), 'pandas.qcut', 'pd.qcut', (['lenghts', '(8)'], {'retbins': '(True)', 'duplicates': '"""drop"""'}), "(lenghts, 8, retbins=True, duplicates='drop')\n", (976, 1021), True, 'import pandas as pd\n'), ((1516, 1543), 'numpy.asarray', 'np.asarray', (['data_buckets[k]'], {}), '(data_buckets[k])\n', (1526, 1543), True, 'import numpy as np\n'), ((2384, 2422), 'numpy.less_equal', 'np.less_equal', (['buckets_min', 'seq_length'], {}), '(buckets_min, seq_length)\n', (2397, 2422), True, 'import numpy as np\n'), ((2434, 2466), 'numpy.less', 'np.less', (['seq_length', 'buckets_max'], {}), '(seq_length, buckets_max)\n', (2441, 2466), True, 'import numpy as np\n'), ((2495, 2517), 'numpy.where', 'np.where', (['conditions_c'], {}), '(conditions_c)\n', (2503, 2517), True, 'import numpy as np\n'), ((3738, 3762), 'numpy.array', 'np.array', (['self.ind_n_len'], {}), '(self.ind_n_len)\n', (3746, 3762), True, 'import numpy as np\n'), ((3795, 3847), 'pandas.qcut', 'pd.qcut', (['lenghts', '(8)'], {'retbins': '(True)', 'duplicates': '"""drop"""'}), "(lenghts, 8, retbins=True, duplicates='drop')\n", (3802, 3847), True, 'import pandas as pd\n'), ((4342, 4369), 'numpy.asarray', 'np.asarray', (['data_buckets[k]'], {}), '(data_buckets[k])\n', (4352, 4369), True, 'import numpy as np\n'), ((5222, 5260), 'numpy.less_equal', 'np.less_equal', (['buckets_min', 'seq_length'], {}), '(buckets_min, seq_length)\n', (5235, 5260), True, 'import numpy as np\n'), ((5272, 5304), 'numpy.less', 'np.less', (['seq_length', 'buckets_max'], {}), '(seq_length, buckets_max)\n', (5279, 5304), True, 'import 
numpy as np\n'), ((5333, 5355), 'numpy.where', 'np.where', (['conditions_c'], {}), '(conditions_c)\n', (5341, 5355), True, 'import numpy as np\n'), ((1639, 1673), 'numpy.random.shuffle', 'np.random.shuffle', (['data_buckets[k]'], {}), '(data_buckets[k])\n', (1656, 1673), True, 'import numpy as np\n'), ((4465, 4499), 'numpy.random.shuffle', 'np.random.shuffle', (['data_buckets[k]'], {}), '(data_buckets[k])\n', (4482, 4499), True, 'import numpy as np\n'), ((2238, 2256), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (2246, 2256), True, 'import numpy as np\n'), ((2311, 2329), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (2319, 2329), True, 'import numpy as np\n'), ((5076, 5094), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (5084, 5094), True, 'import numpy as np\n'), ((5149, 5167), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (5157, 5167), True, 'import numpy as np\n')] |
from simulation import Physic_simulation
from simulation import act
import numpy as np
import actuator_array
import time
import copy
import actuator_array as act
from init import action_dim,state_dim,col_num,Parcel_number,state_per_dim
Additional_parcel = 6 ## 输入进网络的只有前6个包裹,实际上有11个包裹,提高网络的泛化性
class Environment(Physic_simulation):
    """RL environment wrapping the parcel-conveyor physics simulation."""
    def __init__(self):
        # Bookkeeping counters (mutually independent).
        self.cal_dist = 0
        self.T = 0
        self.finish_tmp = 0
        # Populate flags, parcel list and the actuator array.
        self._build()
    def _build(self):
        """Initialize simulation state: flags, parcel list and actuator array."""
        # self.finish_time = []
        self.finish_flag = 0 # declare the attribute inherited from Physic_simulation
        self.evl_flag = 0 # used to track the time difference during evaluation
        self.delta_t = 0
        self.Parcels = []
        self.act_array = act.Actuator_array()
        self.Generate_parcels()  # presumably provided by Physic_simulation — TODO confirm
    def reset(self):
        """Reset the simulation and return the initial (flattened) state vector."""
        # self.finish_time = []
        self.finish_flag = 0
        self.delta_t = 0
        self.Parcels = []
        self.act_array = act.Actuator_array()
        self.Generate_parcels()
        # Return each parcel's position info plus its x-direction velocity
        loc = []
        i = 0
        for parcel in self.Parcels: ## data preprocessing: normalize each feature to roughly [0, 1]
            if i < Parcel_number:
                # loc.extend([(parcel.x)/600])
                # loc.extend([(parcel.y)/250])
                # loc.extend([(parcel.l)/220])
                # loc.extend([(parcel.w)/200])
                # 600/250/60 appear to be the belt length/width and max speed — TODO confirm
                loc.extend([(parcel.x - parcel.l/2)/600])
                loc.extend([(parcel.x + parcel.l/2)/600])
                loc.extend([(parcel.y - parcel.w/2)/250])
                loc.extend([(parcel.y + parcel.w/2)/250])
                loc.extend([(parcel.v_cm[0])/60])
            i = i + 1
        loc = np.array(loc)
        # loc = np.zeros(Parcel_number*state_per_dim)
        return loc #dim=1
    def Sort_parcel(self):
        """Assign each parcel a priority by right edge (x + l/2) and return
        the indices of the parcels that are on the conveyor (right edge > 0),
        sorted ascending by right edge."""
        unsort_prio = []
        num = 0
        for parcel in self.Parcels:
            unsort_prio.append(parcel.x + parcel.l/2)
            if parcel.x + parcel.l/2 > 0:
                num = num + 1
        # Each parcel's priority is the rank of its max-x (right edge) in descending order.
        # NOTE(review): this per-index np.where/np.sort is O(n^2); ties share the first matching rank.
        for index in range(0, len(self.Parcels)):
            self.Parcels[index].prio = np.where(np.sort(-np.array(unsort_prio))==-unsort_prio[index])[0][0]
        sort_index = np.argsort(np.array(unsort_prio)) # indices in ascending order of right edge
        # Take the last num entries so that only parcels on the conveyor are fed to the network.
        # NOTE(review): if num == 0, sort_index[-0:] returns ALL indices, not an empty slice — verify intent.
        return sort_index[-num:]
    def Padding_parcel(self,l): ## resample padding entries from the parcel-generation distribution
        """Generate up to ``l`` synthetic parcel feature rows (5 values each),
        laid out along y at x = 0, using the same normalization as reset()."""
        loc = []
        count = 0
        h = 250  # belt width in the y direction — presumably; TODO confirm
        while count < l:
            y_low = 0
            delta = np.random.randint(1,3)  # random 1 or 2 px gap between parcels
            while y_low < h:
                temp1 = actuator_array.Parcel(2)
                temp1.x = 0.0 ##- (temp1.l - 1)/2 + 0.0
                temp1.y = delta + (temp1.w - 1)/2 + 0.0 + y_low
                y_low = y_low + temp1.w + delta
                temp1.r_cm = np.array([temp1.x, temp1.y])
                if count < l:
                    # Only count the parcel if it still fits within the belt width.
                    if y_low < h:
                        count = count + 1
                        loc.extend([(temp1.x - temp1.l/2)/600])
                        loc.extend([(temp1.x + temp1.l/2)/600])
                        loc.extend([(temp1.y - temp1.w/2)/250])
                        loc.extend([(temp1.y + temp1.w/2)/250])
                        loc.extend([(temp1.v_cm[0])/60])
                else:
                    break
        return loc
    def step(self, action):
        """Apply the actuator-speed action, advance the simulation one step,
        and return (next_state, reward, done)."""
        # take action
        # RL correction: map the action index i to the actuator index j,
        # skipping one column of uncontrollable actuators per row.
        for i in range(0,action_dim):
            j = i
            if i > (col_num - 1):
                j = i+1
            if i > (col_num - 1)*2:
                j = i+2
            if i > (col_num - 1)*3:
                j = i+3
            if i > (col_num - 1)*4:
                j = i+4
            if i > (col_num - 1)*5:
                j = i+5
            if i > (col_num - 1)*6:
                j = i+6
            self.act_array.actuator[j].speed = action[i]
            if self.act_array.actuator[j].speed > self.act_array.actuator[0].speed_max: ## clamp to the allowed speed range
                self.act_array.actuator[j].speed = self.act_array.actuator[0].speed_max
            if self.act_array.actuator[j].speed < self.act_array.actuator[0].speed_min:
                self.act_array.actuator[j].speed = self.act_array.actuator[0].speed_min
        ## The test region is not controllable
        self.act_array.actuator[5].speed = 60 ## lowering the speed after training greatly improves efficiency
        self.act_array.actuator[10].speed = 60
        self.act_array.actuator[15].speed = 60
        self.act_array.actuator[20].speed = 60
        self.act_array.actuator[25].speed = 60
        self.act_array.actuator[30].speed = 60
        # next step
        self.Parcel_sim()
        r_normal = -0.01*len(self.Parcels) ### computed after the simulation step, otherwise it would be a constant
        # NOTE(review): np.random.randint(12,13) always returns 12 (high is exclusive).
        while len(self.Parcels) < np.random.randint(12,13): ## keep the parcel count in the simulation topped up
            self.Add_parcels()
        # s_: build the next state from the foremost parcels, in sorted order
        loc = []
        sort_index = self.Sort_parcel()
        # print("sorting", start2-start1)
        # Feed the first N parcels into the network, sorted by position;
        # unsorted input does not generalize.
        for i,index in enumerate(sort_index[::-1]): ## reversed (descending) ## only parcels with x > 0 should be fed to the network to keep it stable
            if i < Parcel_number: # i starts at 0 and never exceeds the preset Parcel_number
                parcel = self.Parcels[index]
                loc.extend([(parcel.x - parcel.l/2)/600])
                loc.extend([(parcel.x + parcel.l/2)/600])
                loc.extend([(parcel.y - parcel.w/2)/250])
                loc.extend([(parcel.y + parcel.w/2)/250])
                loc.extend([(parcel.v_cm[0])/60])
        l = Parcel_number - len(sort_index) # only parcels on the conveyor are fed to the network
        if l <= 0: ## more parcels on the conveyor than the preset size
            l = 0
        s_ = np.array(loc)
        s_ = np.pad(s_,[0,(l)*state_per_dim]) # zero-pad to the fixed state size
        # r_normal = 0 # possibly unnecessary
        r_finish = 0
        done = 0
        sum_v = 0
        if len(self.Parcels)!=0:
            for parcel in self.Parcels:
                sum_v = sum_v + parcel.v_cm[0]
            if sum_v < 5: ## nothing is moving -> the system is jammed, restart. Cannot test against 0: all speeds may be < 1 and still jammed
                done = 1
                r = -20
                # print("done")
                return s_,r,done
        # if int(self.Parcels[0].v_cm[0]) == 0 and int(self.Parcels[1].v_cm[0]) == 0:
        #     done = 1
        #     r = -20
        #     return s_,r,done
        if self.finish_flag == 1: ## a parcel crossed the finish line
            self.finish_flag = 0
            self.evl_flag = 1
            # x1 = [0,150-150*0.1,150-150*0.05,150];
            # x2 = [150,450];
            # x = [x1,x2];
            # y1 = [-20,-10,0,10];
            # y2 = [10,0];
            # y = [y1,y2];
            if self.cal_dist >= 150:##and delta_t <= 6:
                r_finish = -0.03333*self.cal_dist + 15 ## reward over the whole 150-450 range
            else:
                # r_finish = -20
                r_finish = 20*((self.cal_dist-150)/150) ## maps 150 -> 0 and 0 -> -20
                # r_finish = np.power(1.071,(self.cal_dist - 100))- 20.87
            # if delta_t >6:
            #     r_finish = -10
            # print("time gap between two parcels\t",round(delta_t,1),"\tr_finish\t", round(r_finish,1))
            # print(delta_t)
        r = r_normal + r_finish
        return s_,r,done | [
"actuator_array.Actuator_array",
"numpy.array",
"numpy.random.randint",
"actuator_array.Parcel",
"numpy.pad"
] | [((736, 756), 'actuator_array.Actuator_array', 'act.Actuator_array', ([], {}), '()\n', (754, 756), True, 'import actuator_array as act\n'), ((957, 977), 'actuator_array.Actuator_array', 'act.Actuator_array', ([], {}), '()\n', (975, 977), True, 'import actuator_array as act\n'), ((1694, 1707), 'numpy.array', 'np.array', (['loc'], {}), '(loc)\n', (1702, 1707), True, 'import numpy as np\n'), ((5767, 5780), 'numpy.array', 'np.array', (['loc'], {}), '(loc)\n', (5775, 5780), True, 'import numpy as np\n'), ((5797, 5831), 'numpy.pad', 'np.pad', (['s_', '[0, l * state_per_dim]'], {}), '(s_, [0, l * state_per_dim])\n', (5803, 5831), True, 'import numpy as np\n'), ((2274, 2295), 'numpy.array', 'np.array', (['unsort_prio'], {}), '(unsort_prio)\n', (2282, 2295), True, 'import numpy as np\n'), ((2551, 2574), 'numpy.random.randint', 'np.random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2568, 2574), True, 'import numpy as np\n'), ((4828, 4853), 'numpy.random.randint', 'np.random.randint', (['(12)', '(13)'], {}), '(12, 13)\n', (4845, 4853), True, 'import numpy as np\n'), ((2629, 2653), 'actuator_array.Parcel', 'actuator_array.Parcel', (['(2)'], {}), '(2)\n', (2650, 2653), False, 'import actuator_array\n'), ((2855, 2883), 'numpy.array', 'np.array', (['[temp1.x, temp1.y]'], {}), '([temp1.x, temp1.y])\n', (2863, 2883), True, 'import numpy as np\n'), ((2190, 2211), 'numpy.array', 'np.array', (['unsort_prio'], {}), '(unsort_prio)\n', (2198, 2211), True, 'import numpy as np\n')] |
import pyqtgraph
from pyqtgraph.Qt import QtGui
import numpy as np
from osu_analysis import StdScoreData
from app.data_recording.data import RecData
class DevGraphAngle(QtGui.QWidget):
    """Developer graph plotting aim deviation (or mean offset) against
    note angle, with one scatter/line series per BPM bin."""
    def __init__(self, parent=None):
        """Build the plot widget, the expected-deviation marker, the legend
        label and the layout."""
        QtGui.QWidget.__init__(self, parent)
        # Selectors for which offset axis to analyze (x / y / timing).
        self.DEV_DATA_X = 0
        self.DEV_DATA_Y = 1
        self.DEV_DATA_T = 2
        # Aggregation mode: mean of offsets vs their standard deviation.
        self.DEV_TYPE_AVG = 0
        self.DEV_TYPE_DEV = 1
        # Minimum number of scores a (bpm, angle) bin needs to be plotted.
        self.NEEDED_NUM_DATA_POINTS = 30
        self.__dev_data_select = self.DEV_DATA_X
        self.__dev_type_select = self.DEV_TYPE_DEV
        self.__avg_data_points = True
        # Main graph
        self.__graph = pyqtgraph.PlotWidget(title='Aim dev-x (angle)')
        self.__graph.getPlotItem().getAxis('left').enableAutoSIPrefix(False)
        self.__graph.getPlotItem().getAxis('bottom').enableAutoSIPrefix(False)
        self.__graph.enableAutoRange(axis='x', enable=False)
        self.__graph.enableAutoRange(axis='y', enable=False)
        self.__graph.setLimits(xMin=-10, xMax=190, yMin=-10, yMax=200)
        self.__graph.setRange(xRange=[-10, 190], yRange=[-10, 20])
        self.__graph.setLabel('left', 'deviation (averaged)', units='σ', unitPrefix='')
        self.__graph.setLabel('bottom', 'angle', units='deg', unitPrefix='')
        self.__graph.addLegend()
        # Deviation marker indicating expected deviation according to set CS
        self.__dev_marker_95 = pyqtgraph.InfiniteLine(angle=0, movable=False, pen=pyqtgraph.mkPen(color=(255, 100, 0, 100), style=pyqtgraph.QtCore.Qt.DashLine))
        self.__graph.addItem(self.__dev_marker_95, ignoreBounds=True)
        # Used to set text in legend item
        self.__label_style = pyqtgraph.PlotDataItem(pen=(0,0,0))
        self.__graph.getPlotItem().legend.addItem(self.__label_style, '')
        self.__text = self.__graph.getPlotItem().legend.getLabel(self.__label_style)
        # Put it all together
        self.__layout = QtGui.QHBoxLayout(self)
        self.__layout.setContentsMargins(0, 0, 0, 0)
        self.__layout.setSpacing(2)
        self.__layout.addWidget(self.__graph)
    def __get_deviation_data(self, play_data):
        '''
        x-axis: angles
        y-axis: deviation or mean
        color: bpm
        Meant to be used on single play and not multiple plays

        Returns an (N, 3) float array of rows
        [deviation-or-mean, angle bin, bpm bin]; bins with too few
        data points are left as zero rows.
        '''
        # Filters to get just hitcircles with valid hits
        data_filter = np.ones(play_data.shape[0], dtype=bool)
        # Filter out sliders
        data_filter[:-1] = \
            (play_data[:-1, RecData.ACT_TYPE] == StdScoreData.ACTION_PRESS) & ~(
                (play_data[1:, RecData.ACT_TYPE] == StdScoreData.ACTION_HOLD) | \
                (play_data[1:, RecData.ACT_TYPE] == StdScoreData.ACTION_RELEASE)
            )
        # Select hit presses
        data_filter &= (play_data[:, RecData.HIT_TYPE] == StdScoreData.TYPE_HITP)
        # Apply filter
        play_data = play_data[data_filter]
        # Gather relevant data; 15000/DT converts note interval to BPM — presumably; TODO confirm
        data_c = 15000/play_data[:, RecData.DT]
        data_x = play_data[:, RecData.ANGLE]
        if self.__dev_data_select == self.DEV_DATA_X:
            data_y = play_data[:, RecData.X_OFFSETS]
        elif self.__dev_data_select == self.DEV_DATA_Y:
            data_y = play_data[:, RecData.Y_OFFSETS]
        elif self.__dev_data_select == self.DEV_DATA_T:
            data_y = play_data[:, RecData.T_OFFSETS]
        #               MIN   MAX   MIN DELTA
        chunks_c = [ 0, 400, 20 ]   # BPM,   20 bins max
        chunks_x = [ 0, 180, 3 ]    # Angle, 60 bins max
        # Filter out data outside the range
        range_filter = \
            (chunks_c[0] <= data_c) & (data_c <= chunks_c[1]) & \
            (chunks_x[0] <= data_x) & (data_x <= chunks_x[1])
        data_c = data_c[range_filter]
        data_x = data_x[range_filter]
        data_y = data_y[range_filter]
        # Reduce data to bins
        num_bins_c = (chunks_c[1] - chunks_c[0])//chunks_c[2]
        num_bins_x = (chunks_x[1] - chunks_x[0])//chunks_x[2]
        dev_data_c = np.linspace(chunks_c[0], chunks_c[1], num_bins_c)
        dev_data_x = np.linspace(chunks_x[0], chunks_x[1], num_bins_x)
        idx_data_c = np.digitize(data_c, dev_data_c) - 1
        idx_data_x = np.digitize(data_x, dev_data_x) - 1
        c_unique_idxs = np.unique(idx_data_c)
        x_unique_idxs = np.unique(idx_data_x)
        dev_data = np.zeros((c_unique_idxs.shape[0]*x_unique_idxs.shape[0], 3), dtype=float)
        for c_idx in range(c_unique_idxs.shape[0]):
            for x_idx in range(x_unique_idxs.shape[0]):
                # Scores falling into this particular (bpm, angle) bin.
                data_select = (idx_data_c == c_unique_idxs[c_idx]) & (idx_data_x == x_unique_idxs[x_idx])
                if np.sum(data_select) < self.NEEDED_NUM_DATA_POINTS:
                    continue
                if self.__dev_type_select == self.DEV_TYPE_AVG:
                    dev_data_y = np.mean(data_y[data_select])
                elif self.__dev_type_select == self.DEV_TYPE_DEV:
                    dev_data_y = np.std(data_y[data_select])
                else:
                    # NOTE(review): returns None here; plot_data would then
                    # fail on dev_data.shape — verify this path is unreachable.
                    print('Unknown deviation type')
                    return
                idx_dev_data = c_idx*x_unique_idxs.shape[0] + x_idx
                dev_data[idx_dev_data, 0] = dev_data_y
                dev_data[idx_dev_data, 1] = dev_data_x[x_unique_idxs[x_idx]]
                dev_data[idx_dev_data, 2] = dev_data_c[c_unique_idxs[c_idx]]
        return dev_data
    def plot_data(self, play_data):
        """Recompute the deviation data from ``play_data`` and redraw one
        series per unique BPM bin, colored by a 3-stop BPM color map."""
        dev_data = self.__get_deviation_data(play_data)
        # Clear plots for redraw
        self.__graph.clearPlots()
        if dev_data.shape[0] == 0:
            return
        bpm_data = dev_data[:, 2]
        unique_bpms = np.unique(bpm_data)
        # Blue -> green -> red gradient across the observed BPM range.
        bpm_lut = pyqtgraph.ColorMap(
            np.linspace(min(unique_bpms), max(unique_bpms), 3),
            np.array(
                [
                    [ 0, 100, 255, 200],
                    [100, 255, 100, 200],
                    [255, 100, 100, 200],
                ]
            )
        )
        # Main plot - deviation vs osu!px
        # Adds a plot for every unique BPM recorded
        for bpm in unique_bpms:
            data_select = (bpm_data == bpm)
            if not any(data_select):
                # Selected region has no data. Nothing else to do
                continue
            data_y = dev_data[data_select, 0]
            data_x = dev_data[data_select, 1]
            if self.__avg_data_points:
                # Average overlapping data points (those that fall on same angle)
                data_y = np.asarray([ np.sort(data_y[data_x == x]).mean() for x in np.unique(data_x) ])
                unique_data_x = np.unique(data_x)
                # Get sort mapping to make points on line graph connect in proper order
                idx_sort = np.argsort(unique_data_x)
                data_x = unique_data_x[idx_sort]
                data_y = data_y[idx_sort]
            # Plot color
            color = bpm_lut.map(bpm, 'qcolor')
            self.__graph.plot(x=data_x, y=data_y, symbol='o', symbolPen=None, symbolSize=5, pen=None, symbolBrush=color, name=f'{bpm:.2f} bpm')
        '''
        m, b = MathUtils.linear_regresion(angles, stdevs)
        if type(m) == type(None) or type(b) == type(None):
            self.__graph.plot(x=angles, y=stdevs, symbol='o', symbolPen=None, symbolSize=5, pen=None, symbolBrush=color, name=f'{bpm} bpm')
            continue
        if self.model_compensation:
            y_model = m*angles + b
            self.__graph.plot(x=angles, y=stdevs - y_model, symbol='o', symbolPen=None, symbolSize=5, pen=None, symbolBrush=color, name=f'{bpm} bpm σ = {np.std(stdevs - y_model):.2f} m={m:.5f} b={b:.2f}')
        else:
            self.__graph.plot(x=angles, y=stdevs, symbol='o', symbolPen=None, symbolSize=5, pen=None, symbolBrush=color, name=f'{bpm:.0f} bpm')
        '''
    def set_dev(self, dev):
        """Position the expected-deviation marker at dev/4."""
        self.__dev_marker_95.setPos(dev/4)
| [
"numpy.mean",
"pyqtgraph.PlotDataItem",
"numpy.ones",
"numpy.unique",
"numpy.digitize",
"numpy.std",
"numpy.sort",
"numpy.argsort",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"pyqtgraph.PlotWidget",
"numpy.sum",
"pyqtgraph.Qt.QtGui.QHBoxLayout",
"pyqtgraph.mkPen",
"pyqtgraph.Qt.Qt... | [((233, 269), 'pyqtgraph.Qt.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (255, 269), False, 'from pyqtgraph.Qt import QtGui\n'), ((642, 689), 'pyqtgraph.PlotWidget', 'pyqtgraph.PlotWidget', ([], {'title': '"""Aim dev-x (angle)"""'}), "(title='Aim dev-x (angle)')\n", (662, 689), False, 'import pyqtgraph\n'), ((1685, 1722), 'pyqtgraph.PlotDataItem', 'pyqtgraph.PlotDataItem', ([], {'pen': '(0, 0, 0)'}), '(pen=(0, 0, 0))\n', (1707, 1722), False, 'import pyqtgraph\n'), ((1935, 1958), 'pyqtgraph.Qt.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', (['self'], {}), '(self)\n', (1952, 1958), False, 'from pyqtgraph.Qt import QtGui\n'), ((2387, 2426), 'numpy.ones', 'np.ones', (['play_data.shape[0]'], {'dtype': 'bool'}), '(play_data.shape[0], dtype=bool)\n', (2394, 2426), True, 'import numpy as np\n'), ((4045, 4094), 'numpy.linspace', 'np.linspace', (['chunks_c[0]', 'chunks_c[1]', 'num_bins_c'], {}), '(chunks_c[0], chunks_c[1], num_bins_c)\n', (4056, 4094), True, 'import numpy as np\n'), ((4116, 4165), 'numpy.linspace', 'np.linspace', (['chunks_x[0]', 'chunks_x[1]', 'num_bins_x'], {}), '(chunks_x[0], chunks_x[1], num_bins_x)\n', (4127, 4165), True, 'import numpy as np\n'), ((4306, 4327), 'numpy.unique', 'np.unique', (['idx_data_c'], {}), '(idx_data_c)\n', (4315, 4327), True, 'import numpy as np\n'), ((4352, 4373), 'numpy.unique', 'np.unique', (['idx_data_x'], {}), '(idx_data_x)\n', (4361, 4373), True, 'import numpy as np\n'), ((4394, 4469), 'numpy.zeros', 'np.zeros', (['(c_unique_idxs.shape[0] * x_unique_idxs.shape[0], 3)'], {'dtype': 'float'}), '((c_unique_idxs.shape[0] * x_unique_idxs.shape[0], 3), dtype=float)\n', (4402, 4469), True, 'import numpy as np\n'), ((5714, 5733), 'numpy.unique', 'np.unique', (['bpm_data'], {}), '(bpm_data)\n', (5723, 5733), True, 'import numpy as np\n'), ((4188, 4219), 'numpy.digitize', 'np.digitize', (['data_c', 'dev_data_c'], {}), '(data_c, dev_data_c)\n', (4199, 4219), True, 
'import numpy as np\n'), ((4245, 4276), 'numpy.digitize', 'np.digitize', (['data_x', 'dev_data_x'], {}), '(data_x, dev_data_x)\n', (4256, 4276), True, 'import numpy as np\n'), ((5849, 5923), 'numpy.array', 'np.array', (['[[0, 100, 255, 200], [100, 255, 100, 200], [255, 100, 100, 200]]'], {}), '([[0, 100, 255, 200], [100, 255, 100, 200], [255, 100, 100, 200]])\n', (5857, 5923), True, 'import numpy as np\n'), ((1464, 1541), 'pyqtgraph.mkPen', 'pyqtgraph.mkPen', ([], {'color': '(255, 100, 0, 100)', 'style': 'pyqtgraph.QtCore.Qt.DashLine'}), '(color=(255, 100, 0, 100), style=pyqtgraph.QtCore.Qt.DashLine)\n', (1479, 1541), False, 'import pyqtgraph\n'), ((6695, 6712), 'numpy.unique', 'np.unique', (['data_x'], {}), '(data_x)\n', (6704, 6712), True, 'import numpy as np\n'), ((6829, 6854), 'numpy.argsort', 'np.argsort', (['unique_data_x'], {}), '(unique_data_x)\n', (6839, 6854), True, 'import numpy as np\n'), ((4702, 4721), 'numpy.sum', 'np.sum', (['data_select'], {}), '(data_select)\n', (4708, 4721), True, 'import numpy as np\n'), ((4880, 4908), 'numpy.mean', 'np.mean', (['data_y[data_select]'], {}), '(data_y[data_select])\n', (4887, 4908), True, 'import numpy as np\n'), ((5008, 5035), 'numpy.std', 'np.std', (['data_y[data_select]'], {}), '(data_y[data_select])\n', (5014, 5035), True, 'import numpy as np\n'), ((6642, 6659), 'numpy.unique', 'np.unique', (['data_x'], {}), '(data_x)\n', (6651, 6659), True, 'import numpy as np\n'), ((6597, 6625), 'numpy.sort', 'np.sort', (['data_y[data_x == x]'], {}), '(data_y[data_x == x])\n', (6604, 6625), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import _set_use_system_allocator
from typing import Optional
import paddle.fluid.compiler as compiler
# Global random seed applied to numpy and random in IPUOpTest.setUpClass.
SEED = 2021
# Module-level handle to an IPUCompiledProgram; IPUOpTest.tearDownClass
# calls .clean() on it explicitly (see comment there).
ipu_compiler_ref: Optional[compiler.IPUCompiledProgram] = None
# numpy dtype name -> fluid dtype string. The mapping is currently the
# identity, but keeping it explicit documents the supported dtypes and
# raises KeyError for anything unsupported.
map_np_dtype_to_fluid_dtype = {
    'bool': "bool",
    'int8': "int8",
    'uint8': "uint8",
    "int32": "int32",
    "int64": "int64",
    "float16": "float16",
    "float32": "float32",
    "float64": "float64",
}
def np_dtype_to_fluid_str(dtype: np.dtype) -> str:
    """Translate a numpy dtype into the corresponding fluid dtype string."""
    name = dtype.name
    return map_np_dtype_to_fluid_dtype[name]
class IPUOpTest(unittest.TestCase):
    """Base class for IPU operator tests: seeds the RNGs for the whole
    test class and restores the previous RNG state afterwards."""
    @classmethod
    def setUpClass(cls):
        # Save the current RNG states so tearDownClass can restore them.
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()
        cls.SEED = SEED
        np.random.seed(cls.SEED)
        random.seed(cls.SEED)
        cls._use_system_allocator = _set_use_system_allocator(True)
    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)
        _set_use_system_allocator(cls._use_system_allocator)
        # Clean up explicitly so IPUCompiledProgram.__del__ is triggered
        # (unittest would not otherwise do so deterministically).
        global ipu_compiler_ref
        ipu_compiler_ref is not None and ipu_compiler_ref.clean()
    def set_atol(self):
        # Default absolute tolerance for result comparison.
        self.atol = 1e-5
    def set_training(self):
        # Default: inference mode, single epoch.
        self.is_training = False
        self.epoch = 1
| [
"paddle.fluid.tests.unittests.op_test._set_use_system_allocator",
"numpy.random.get_state",
"numpy.random.set_state",
"random.setstate",
"random.seed",
"random.getstate",
"numpy.random.seed"
] | [((1316, 1337), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (1335, 1337), True, 'import numpy as np\n'), ((1367, 1384), 'random.getstate', 'random.getstate', ([], {}), '()\n', (1382, 1384), False, 'import random\n'), ((1418, 1442), 'numpy.random.seed', 'np.random.seed', (['cls.SEED'], {}), '(cls.SEED)\n', (1432, 1442), True, 'import numpy as np\n'), ((1451, 1472), 'random.seed', 'random.seed', (['cls.SEED'], {}), '(cls.SEED)\n', (1462, 1472), False, 'import random\n'), ((1510, 1541), 'paddle.fluid.tests.unittests.op_test._set_use_system_allocator', '_set_use_system_allocator', (['(True)'], {}), '(True)\n', (1535, 1541), False, 'from paddle.fluid.tests.unittests.op_test import _set_use_system_allocator\n'), ((1631, 1670), 'numpy.random.set_state', 'np.random.set_state', (['cls._np_rand_state'], {}), '(cls._np_rand_state)\n', (1650, 1670), True, 'import numpy as np\n'), ((1679, 1714), 'random.setstate', 'random.setstate', (['cls._py_rand_state'], {}), '(cls._py_rand_state)\n', (1694, 1714), False, 'import random\n'), ((1724, 1776), 'paddle.fluid.tests.unittests.op_test._set_use_system_allocator', '_set_use_system_allocator', (['cls._use_system_allocator'], {}), '(cls._use_system_allocator)\n', (1749, 1776), False, 'from paddle.fluid.tests.unittests.op_test import _set_use_system_allocator\n')] |
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
def call_back(x):
    """Trackbar callback: echo the slider's new position to stdout."""
    print(x)
# L_H = Lower Hue
# L_s = lower saturation
# L_v = lower value
# Window with six trackbars defining the lower/upper HSV threshold bounds.
# NOTE(review): OpenCV hue tops out at 179; a 255 max is harmless but oversized.
cv2.namedWindow("Tracking")
cv2.createTrackbar('l_h','Tracking',0,255,call_back)
cv2.createTrackbar('l_s','Tracking',0,255,call_back)
cv2.createTrackbar('l_v','Tracking',0,255,call_back)
cv2.createTrackbar('u_h','Tracking',255,255,call_back)
cv2.createTrackbar('u_s','Tracking',255,255,call_back)
cv2.createTrackbar('u_v','Tracking',255,255,call_back)
# Main loop: grab a frame, threshold it in HSV using the trackbar values,
# and show the original, the mask, and the masked result. ESC quits.
while True:
    # frame = cv2.imread("images/smarties.png")
    success, frame = cap.read()
    # BUG FIX: the original ignored `success`; a failed grab leaves `frame`
    # as None and cv2.cvtColor would raise. Exit the loop cleanly instead.
    if not success:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Current lower/upper HSV bounds from the trackbars.
    l_h = cv2.getTrackbarPos("l_h", "Tracking")
    l_s = cv2.getTrackbarPos("l_s", "Tracking")
    l_v = cv2.getTrackbarPos("l_v", "Tracking")
    u_h = cv2.getTrackbarPos("u_h", "Tracking")
    u_s = cv2.getTrackbarPos("u_s", "Tracking")
    u_v = cv2.getTrackbarPos("u_v", "Tracking")
    lb = np.array([l_h, l_s, l_v])
    up = np.array([u_h, u_s, u_v])
    # Keep only pixels whose HSV value falls inside [lb, up].
    mask = cv2.inRange(hsv, lb, up)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow("frame1", frame)
    cv2.imshow("frame2", mask)
    cv2.imshow("frame3", result)
    if cv2.waitKey(1) == 27:  # ESC
        break
cap.release()
cv2.destroyAllWindows()
"cv2.inRange",
"cv2.bitwise_and",
"cv2.imshow",
"numpy.array",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.getTrackbarPos",
"cv2.createTrackbar",
"cv2.namedWindow"
] | [((37, 56), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (53, 56), False, 'import cv2\n'), ((154, 181), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Tracking"""'], {}), "('Tracking')\n", (169, 181), False, 'import cv2\n'), ((182, 238), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""l_h"""', '"""Tracking"""', '(0)', '(255)', 'call_back'], {}), "('l_h', 'Tracking', 0, 255, call_back)\n", (200, 238), False, 'import cv2\n'), ((235, 291), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""l_s"""', '"""Tracking"""', '(0)', '(255)', 'call_back'], {}), "('l_s', 'Tracking', 0, 255, call_back)\n", (253, 291), False, 'import cv2\n'), ((288, 344), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""l_v"""', '"""Tracking"""', '(0)', '(255)', 'call_back'], {}), "('l_v', 'Tracking', 0, 255, call_back)\n", (306, 344), False, 'import cv2\n'), ((342, 400), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""u_h"""', '"""Tracking"""', '(255)', '(255)', 'call_back'], {}), "('u_h', 'Tracking', 255, 255, call_back)\n", (360, 400), False, 'import cv2\n'), ((397, 455), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""u_s"""', '"""Tracking"""', '(255)', '(255)', 'call_back'], {}), "('u_s', 'Tracking', 255, 255, call_back)\n", (415, 455), False, 'import cv2\n'), ((452, 510), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""u_v"""', '"""Tracking"""', '(255)', '(255)', 'call_back'], {}), "('u_v', 'Tracking', 255, 255, call_back)\n", (470, 510), False, 'import cv2\n'), ((1237, 1260), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1258, 1260), False, 'import cv2\n'), ((608, 646), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (620, 646), False, 'import cv2\n'), ((657, 694), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""l_h"""', '"""Tracking"""'], {}), "('l_h', 'Tracking')\n", (675, 694), False, 'import cv2\n'), ((704, 741), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""l_s"""', 
'"""Tracking"""'], {}), "('l_s', 'Tracking')\n", (722, 741), False, 'import cv2\n'), ((751, 788), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""l_v"""', '"""Tracking"""'], {}), "('l_v', 'Tracking')\n", (769, 788), False, 'import cv2\n'), ((799, 836), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""u_h"""', '"""Tracking"""'], {}), "('u_h', 'Tracking')\n", (817, 836), False, 'import cv2\n'), ((846, 883), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""u_s"""', '"""Tracking"""'], {}), "('u_s', 'Tracking')\n", (864, 883), False, 'import cv2\n'), ((893, 930), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""u_v"""', '"""Tracking"""'], {}), "('u_v', 'Tracking')\n", (911, 930), False, 'import cv2\n'), ((940, 965), 'numpy.array', 'np.array', (['[l_h, l_s, l_v]'], {}), '([l_h, l_s, l_v])\n', (948, 965), True, 'import numpy as np\n'), ((973, 998), 'numpy.array', 'np.array', (['[u_h, u_s, u_v]'], {}), '([u_h, u_s, u_v])\n', (981, 998), True, 'import numpy as np\n'), ((1009, 1033), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lb', 'up'], {}), '(hsv, lb, up)\n', (1020, 1033), False, 'import cv2\n'), ((1046, 1086), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (1061, 1086), False, 'import cv2\n'), ((1089, 1116), 'cv2.imshow', 'cv2.imshow', (['"""frame1"""', 'frame'], {}), "('frame1', frame)\n", (1099, 1116), False, 'import cv2\n'), ((1120, 1146), 'cv2.imshow', 'cv2.imshow', (['"""frame2"""', 'mask'], {}), "('frame2', mask)\n", (1130, 1146), False, 'import cv2\n'), ((1150, 1178), 'cv2.imshow', 'cv2.imshow', (['"""frame3"""', 'result'], {}), "('frame3', result)\n", (1160, 1178), False, 'import cv2\n'), ((1186, 1200), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1197, 1200), False, 'import cv2\n')] |
import time
import logging
import fire
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
import models
import utils
from dataset import ImageDataset
logging.getLogger().setLevel(logging.INFO)
def run(model_name, output_dir, dataname, data_dir='./data', batch_size=16, test_run=-1):
    """Extract features for every image in a dataset and save them as .npy.

    Args:
        model_name: name passed to models.get_model.
        output_dir: directory the feature file is written to.
        dataname: dataset directory name under data_dir.
        data_dir: root directory containing datasets.
        batch_size: DataLoader batch size.
        test_run: if != -1, stop early after roughly test_run batches
            (smoke-test mode); -1 processes the whole dataset.
    """
    data_path = '%s/%s' % (data_dir, dataname)
    logging.info('Load data from %s' % data_path)
    logging.info('Using model=%s' % model_name)
    image_ds = ImageDataset(data_path)
    net = models.get_model(model_name)
    loader = DataLoader(image_ds, batch_size=batch_size)
    progress = tqdm(loader)
    chunks = []
    for batch_idx, batch in enumerate(progress):
        out = net.forward_pass(batch.to(utils.torch_device()))
        chunks.append(out.cpu().detach().numpy())
        # Early-stop for smoke tests; batch_idx here equals the original
        # post-append `count`, so the number of processed batches matches.
        if test_run != -1 and batch_idx > test_run:
            progress.close()
            break
    features = np.vstack(chunks)
    logging.info(features.shape)
    output_path = '%s/%s-%s--%s' % (output_dir, model_name, dataname, time.strftime('%Y-%m-%d-%H-%M-%S'))
    np.save(output_path, features)
    logging.info('save data at %s' % output_path)
if __name__ == "__main__":
    # Expose `run` as a command-line interface via python-fire.
    fire.Fire(run)
| [
"logging.getLogger",
"fire.Fire",
"models.get_model",
"tqdm.tqdm",
"dataset.ImageDataset",
"time.strftime",
"numpy.vstack",
"utils.torch_device",
"torch.utils.data.DataLoader",
"logging.info",
"numpy.save"
] | [((369, 414), 'logging.info', 'logging.info', (["('Load data from %s' % data_path)"], {}), "('Load data from %s' % data_path)\n", (381, 414), False, 'import logging\n'), ((419, 462), 'logging.info', 'logging.info', (["('Using model=%s' % model_name)"], {}), "('Using model=%s' % model_name)\n", (431, 462), False, 'import logging\n'), ((473, 496), 'dataset.ImageDataset', 'ImageDataset', (['data_path'], {}), '(data_path)\n', (485, 496), False, 'from dataset import ImageDataset\n'), ((509, 537), 'models.get_model', 'models.get_model', (['model_name'], {}), '(model_name)\n', (525, 537), False, 'import models\n'), ((557, 594), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'batch_size': 'batch_size'}), '(ds, batch_size=batch_size)\n', (567, 594), False, 'from torch.utils.data import DataLoader\n'), ((649, 666), 'tqdm.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (653, 666), False, 'from tqdm import tqdm\n'), ((963, 987), 'numpy.vstack', 'np.vstack', (['features_list'], {}), '(features_list)\n', (972, 987), True, 'import numpy as np\n'), ((992, 1020), 'logging.info', 'logging.info', (['features.shape'], {}), '(features.shape)\n', (1004, 1020), False, 'import logging\n'), ((1132, 1162), 'numpy.save', 'np.save', (['output_path', 'features'], {}), '(output_path, features)\n', (1139, 1162), True, 'import numpy as np\n'), ((1167, 1212), 'logging.info', 'logging.info', (["('save data at %s' % output_path)"], {}), "('save data at %s' % output_path)\n", (1179, 1212), False, 'import logging\n'), ((1245, 1259), 'fire.Fire', 'fire.Fire', (['run'], {}), '(run)\n', (1254, 1259), False, 'import fire\n'), ((183, 202), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (200, 202), False, 'import logging\n'), ((1092, 1126), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M-%S"""'], {}), "('%Y-%m-%d-%H-%M-%S')\n", (1105, 1126), False, 'import time\n'), ((740, 760), 'utils.torch_device', 'utils.torch_device', ([], {}), '()\n', (758, 760), False, 'import 
utils\n')] |
import numpy as np
def defaultMotionPlanners():
    """Return the standard mapping from named pipette position to planner class.

    Keys are the position names ('home', 'search', ...); values are the
    PipetteMotionPlanner subclasses that carry out the corresponding move.
    """
    # 'clean' and 'rinse' planners are intentionally not registered here.
    return dict(
        home=HomeMotionPlanner,
        search=SearchMotionPlanner,
        aboveTarget=AboveTargetMotionPlanner,
        approach=ApproachMotionPlanner,
        target=TargetMotionPlanner,
        idle=IdleMotionPlanner,
    )
class PipetteMotionPlanner(object):
    """Base class for planners that move a pipette to one named position.

    Subclasses implement ``_move()`` to build and launch the actual motion,
    returning a Future that tracks its progress.
    """

    def __init__(self, pip, position, speed, **kwds):
        # pip: the Pipette device to move; position: name of the destination;
        # speed: requested move speed; extra keywords are planner-specific.
        self.pip = pip
        self.position = position
        self.speed = speed
        self.kwds = kwds
        self.future = None

    def move(self):
        """Move the pipette to the requested named position and return a Future
        """
        # Cancel any motion already in flight before starting a new one.
        if self.future is not None:
            self.stop()
        self.future = self._move()
        return self.future

    def stop(self):
        """Cancel the in-progress move, if any."""
        fut = self.future
        if fut is not None:
            fut.stop()

    def _move(self):
        # Subclasses must build the motion path and return a Future.
        raise NotImplementedError()

    def shouldUseLinearMotion(self):
        """Whether moves should be constrained to straight-line motion."""
        return self.pip._shouldUseLinearMovement()
_LOCAL_ORIGIN = (0, 0, 0)


def _extractionWaypoint(destLocal, pipAngle):
    """Return the local-frame waypoint for a diagonal pipette extraction.

    The extraction first travels along the pipette axis (diagonally up and
    back), then finishes with a purely vertical or purely horizontal leg to
    the destination.

    Parameters
    ----------
    destLocal
        Destination coordinates in pipette-local frame of reference. Extraction is only needed when +z and -x from the origin.
    pipAngle
        The angle of the pipette in radians, oriented to be between 0 and π/2.

    Returns
    -------
    waypoint
        Local coordinates of the extraction waypoint, or None if none is needed.

    Raises
    ------
    ValueError
        If *pipAngle* falls outside [0, π/2].
    """
    if pipAngle < 0 or pipAngle > np.pi / 2:
        raise ValueError("Invalid pipette pitch; orient your measurement to put it between 0 and π/2")

    destX = destLocal[0]
    destZ = destLocal[2]
    if destX > 0 or destZ < 0 or (destX, destZ) == (0, 0):
        # no clear diagonal extraction to go forward or down
        return None

    destAngle = np.arctan2(destZ, -destX)  # `-x` to match the pipAngle orientation
    if destAngle > pipAngle:
        # destination is steeper than the pipette: retract along the pipette
        # axis until x lines up, then finish straight up
        dz = destX * np.tan(pipAngle)
        waypoint = (destX, 0, -dz)
    else:
        # destination is shallower: retract along the pipette axis until z
        # lines up, then finish straight back
        dx = destZ / np.tan(pipAngle)
        waypoint = (-dx, 0, destZ)

    # Sanity check against floating point error: keep the waypoint inside the
    # axis-aligned box spanned by the origin and the destination.  The bounds
    # must be taken per element because destX is negative here: np.clip with
    # a_min > a_max silently returns a_max, which previously clobbered the
    # waypoint's x coordinate to destX in the shallow-angle branch.
    lower = np.minimum(_LOCAL_ORIGIN, destLocal)
    upper = np.maximum(_LOCAL_ORIGIN, destLocal)
    return np.clip(waypoint, lower, upper)
class HomeMotionPlanner(PipetteMotionPlanner):
    """Extract pipette tip diagonally, then move to home position.
    """
    def _move(self):
        pip = self.pip
        manipulator = pip.parentDevice()
        home = manipulator.homePosition()
        assert home is not None, "No home position defined for %s" % manipulator.name()

        # translation the manipulator must make, in global coordinates
        globalMove = np.asarray(home) - np.asarray(manipulator.globalPosition())

        # where the pipette *tip* starts and ends, in global coordinates
        tipStart = pip.globalPosition()
        tipEnd = np.asarray(tipStart) + globalMove

        # work in local coordinates to make the boundary intersections easier
        waypointLocal = _extractionWaypoint(pip.mapFromGlobal(tipEnd), pip.pitchRadians())

        # sensapex manipulators shouldn't need a waypoint to perform correct extraction
        if waypointLocal is None or not self.shouldUseLinearMotion():
            path = [(tipEnd, self.speed, False)]
        else:
            path = [
                (pip.mapToGlobal(waypointLocal), self.speed, True),
                (tipEnd, self.speed, False),
            ]
        return pip._movePath(path)
class SearchMotionPlanner(PipetteMotionPlanner):
    """Focus the microscope 2mm above the surface, then move the electrode
    tip to 500um below the focal point of the microscope.

    This position is used when searching for new electrodes.

    Set *distance* to adjust the search position along the pipette's x-axis. Positive values
    move the tip farther from the microscope center to reduce the probability of collisions.
    Negative values move the pipette past the center of the microscope to improve the
    probability of seeing the tip immediately.
    """
    def _move(self):
        pip = self.pip
        scope = pip.scopeDevice()

        # determine the depth the camera should be focused at
        surface = scope.getSurfaceDepth()
        if surface is None:
            raise Exception("Cannot determine search position; surface depth is not defined.")
        searchDepth = surface + pip._opts['searchHeight']

        cam = pip.imagingDevice()
        camFocus = cam.getFocusDepth()
        if camFocus < searchDepth:
            # raise the scope so the camera ends up focused at searchDepth
            scope.setFocusDepth(scope.getFocusDepth() + searchDepth - camFocus).wait(updates=True)

        # desired pipette tip location, in global coordinates
        tipTarget = cam.globalCenterPosition('roi')
        tipTarget[2] += pip._opts['searchTipHeight'] - pip._opts['searchHeight']

        # shift along the pipette's local x axis by the requested distance
        localTarget = pip.mapFromGlobal(tipTarget)
        localTarget[0] -= self.kwds.get('distance', 0)
        return pip._moveToGlobal(pip.mapToGlobal(localTarget), self.speed)
class ApproachMotionPlanner(PipetteMotionPlanner):
    """Move the pipette to its approach position: in line with the target
    along the pipette axis, at or above the approach ("standby") depth.
    """
    def _move(self):
        pip = self.pip
        speed = self.speed
        target = pip.targetPosition()
        return pip._movePath(self.approachPath(target, speed))

    def approachPath(self, target, speed):
        """
        Describe a path that puts the pipette in-line to do straight movement along the pipette pitch to the target

        Parameters
        ----------
        target: coordinates
            Global coordinates of the target.
        speed: m/s
            Speed for all path steps except the initial slow retraction.

        Returns
        -------
        path : list
            Steps [globalPos, speed, linear] in global coordinates.
        """
        pip = self.pip
        # Return steps (in global coords) needed to move to approach position
        stbyDepth = pip.approachDepth()
        pos = pip.globalPosition()
        # steps are in global coordinates.
        path = []
        # If tip is below the surface, then first pull out slowly along pipette axis
        if pos[2] < stbyDepth:
            dz = stbyDepth - pos[2]
            dx = -dz / np.tan(pip.pitchRadians())
            last = np.array([dx, 0., dz])
            path.append([pip.mapToGlobal(last), 100e-6, self.shouldUseLinearMotion()])  # slow removal from sample
        else:
            # tip is already at/above standby depth; it stays at the local origin
            last = np.array([0., 0., 0.])
        # target in local coordinates
        ltarget = pip.mapFromGlobal(target)
        # compute approach position (axis aligned to target, at standby depth or higher)
        # NOTE(review): assumes local z is parallel to global z — confirm
        dz2 = max(0, stbyDepth - target[2])
        dx2 = -dz2 / np.tan(pip.pitchRadians())
        stby = ltarget + np.array([dx2, 0., dz2])
        # compute intermediate position (point along approach axis that is closest to the current position)
        targetToTip = last - ltarget
        targetToStby = stby - ltarget
        targetToStby /= np.linalg.norm(targetToStby)
        closest = ltarget + np.dot(targetToTip, targetToStby) * targetToStby
        if np.linalg.norm(stby - last) > 1e-6:
            # only insert the intermediate waypoint when it lies above the
            # standby position and is meaningfully far from it
            if (closest[2] > stby[2]) and (np.linalg.norm(stby - closest) > 1e-6):
                path.append([pip.mapToGlobal(closest), speed, self.shouldUseLinearMotion()])
            path.append([pip.mapToGlobal(stby), speed, self.shouldUseLinearMotion()])
        return path
class TargetMotionPlanner(ApproachMotionPlanner):
    """Move the pipette tip onto its target: follow the approach path, then
    make a slow final advance to the target itself.
    """
    def _move(self):
        pip = self.pip
        target = pip.targetPosition()

        # already at the target (within 0.1 um)? then no motion is needed
        if np.linalg.norm(np.asarray(target) - pip.globalPosition()) < 1e-7:
            return

        steps = self.approachPath(target, self.speed)
        # final slow advance from the approach position onto the target
        steps.append([target, 100e-6, self.shouldUseLinearMotion()])
        return pip._movePath(steps)
class AboveTargetMotionPlanner(PipetteMotionPlanner):
    """Move the pipette tip to be centered over the target in x/y, and 100 um above
    the sample surface in z.

    This position is used to recalibrate the pipette immediately before going to approach.
    """
    def _move(self):
        pip = self.pip
        scope = pip.scopeDevice()
        approachWp, finalWp = self.aboveTargetPath()

        # start pipette and scope moving toward their destinations in parallel
        pipFut = pip._moveToGlobal(approachWp, self.speed)
        scopeFut = scope.setGlobalPosition(finalWp)

        # once the diagonal leg finishes, creep slowly onto the final waypoint
        pipFut.wait(updates=True)
        pip._moveToGlobal(finalWp, 'slow').wait(updates=True)

        scopeFut.wait(updates=True)
        return scopeFut

    def aboveTargetPath(self):
        """Return the path to the "above target" recalibration position.

        The path has 2 waypoints:

        1. 100 um away from the second waypoint, on a diagonal approach. This is meant to normalize the hysteresis
           at the second waypoint.
        2. This position is centered on the target, a small distance above the sample surface.
        """
        pip = self.pip
        scope = pip.scopeDevice()

        # second waypoint: directly over the target, 50 um above the surface
        finalWp = np.array(pip.targetPosition())
        finalWp[2] = scope.getSurfaceDepth() + 50e-6

        # first waypoint: back off 100 um along the pipette approach axis so the
        # final leg always arrives from the same direction (hysteresis control)
        backoff = 100e-6
        local = pip.mapFromGlobal(finalWp)
        local[2] += backoff
        local[0] -= backoff / np.tan(pip.pitchRadians())
        return pip.mapToGlobal(local), finalWp
class IdleMotionPlanner(PipetteMotionPlanner):
    """Move the electrode tip to the outer edge of the recording chamber, 1mm above the sample surface.

    NOTE: this method assumes that (0, 0) in global coordinates represents the center of the recording
    chamber.
    """
    def _move(self):
        pip = self.pip
        speed = self.speed

        surface = pip.scopeDevice().getSurfaceDepth()
        if surface is None:
            raise Exception("Surface depth has not been set.")

        # we want to land 1 mm above sample surface
        idleDepth = surface + pip._opts['idleHeight']

        # If the tip is below idle depth, bring it up along the axis of the electrode.
        if pip.globalPosition()[2] < idleDepth:
            pip.advance(idleDepth, speed)

        # From here, move directly to the idle position: idleDistance from the
        # chamber center, on the side opposite the pipette's yaw direction.
        yaw = pip.yawRadians()
        radius = pip._opts['idleDistance']  # move to 7 mm from center
        idlePos = (-radius * np.cos(yaw), -radius * np.sin(yaw), idleDepth)
        return pip._moveToGlobal(idlePos, speed)
| [
"numpy.clip",
"numpy.tan",
"numpy.asarray",
"numpy.array",
"numpy.dot",
"numpy.arctan2",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin"
] | [((1916, 1941), 'numpy.arctan2', 'np.arctan2', (['destZ', '(-destX)'], {}), '(destZ, -destX)\n', (1926, 1941), True, 'import numpy as np\n'), ((2224, 2267), 'numpy.clip', 'np.clip', (['waypoint', '_LOCAL_ORIGIN', 'destLocal'], {}), '(waypoint, _LOCAL_ORIGIN, destLocal)\n', (2231, 2267), True, 'import numpy as np\n'), ((6743, 6763), 'numpy.linalg.norm', 'np.linalg.norm', (['evec'], {}), '(evec)\n', (6757, 6763), True, 'import numpy as np\n'), ((7287, 7315), 'numpy.linalg.norm', 'np.linalg.norm', (['targetToStby'], {}), '(targetToStby)\n', (7301, 7315), True, 'import numpy as np\n'), ((9440, 9456), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (9448, 9456), True, 'import numpy as np\n'), ((2035, 2051), 'numpy.tan', 'np.tan', (['pipAngle'], {}), '(pipAngle)\n', (2041, 2051), True, 'import numpy as np\n'), ((2118, 2134), 'numpy.tan', 'np.tan', (['pipAngle'], {}), '(pipAngle)\n', (2124, 2134), True, 'import numpy as np\n'), ((2742, 2769), 'numpy.asarray', 'np.asarray', (['manipulatorHome'], {}), '(manipulatorHome)\n', (2752, 2769), True, 'import numpy as np\n'), ((2951, 2977), 'numpy.asarray', 'np.asarray', (['startPosGlobal'], {}), '(startPosGlobal)\n', (2961, 2977), True, 'import numpy as np\n'), ((6407, 6430), 'numpy.array', 'np.array', (['[dx, 0.0, dz]'], {}), '([dx, 0.0, dz])\n', (6415, 6430), True, 'import numpy as np\n'), ((6578, 6603), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6586, 6603), True, 'import numpy as np\n'), ((7054, 7079), 'numpy.array', 'np.array', (['[dx2, 0.0, dz2]'], {}), '([dx2, 0.0, dz2])\n', (7062, 7079), True, 'import numpy as np\n'), ((7405, 7432), 'numpy.linalg.norm', 'np.linalg.norm', (['(stby - last)'], {}), '(stby - last)\n', (7419, 7432), True, 'import numpy as np\n'), ((7344, 7377), 'numpy.dot', 'np.dot', (['targetToTip', 'targetToStby'], {}), '(targetToTip, targetToStby)\n', (7350, 7377), True, 'import numpy as np\n'), ((10801, 10814), 'numpy.cos', 'np.cos', (['angle'], {}), 
'(angle)\n', (10807, 10814), True, 'import numpy as np\n'), ((10822, 10835), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (10828, 10835), True, 'import numpy as np\n'), ((7484, 7514), 'numpy.linalg.norm', 'np.linalg.norm', (['(stby - closest)'], {}), '(stby - closest)\n', (7498, 7514), True, 'import numpy as np\n'), ((7947, 7965), 'numpy.asarray', 'np.asarray', (['target'], {}), '(target)\n', (7957, 7965), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.