code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Metafier V3: writes directly to output.mc
# Avoids memory errors for large programs
# Assumes the pattern width is less than or equal to 1024
# ===REQUIRES metatemplate11.mc===
import golly as g
import numpy as np
from shutil import copyfile
g.show("Retrieving selection...")
#Get the selection
selection = g.getselrect()
if not selection: g.exit("No selection.")
# Align the pattern to 8x8 grids.
# There are 1-pixel headers and footers on top and the bottom of the entire pattern,
# in each of the multiplexers for the ROM and the RAM.
selection[1] -= 7
selection[3] += 7 + 7
#Get the cells in the selection
cells = g.getcells(selection)
if not cells: g.exit("No pattern in selection")
# Multi-state cell lists come as [x, y, state, ...] triples with an optional
# trailing padding element; drop it so the list reshapes cleanly to (-1, 3).
if len(cells) % 3: cells = cells[:-1]
selw = selection[2]
selh = selection[3]
metafier_width = 1024
metafier_height = 1024
# Round sizes up to a power of two.  NOTE(review): `selh | selw` is a bitwise
# OR, not max — it still bounds both dimensions since log2(a|b) >= log2(a),
# log2(b), but confirm this was intended over max(selh, selw).
patternsize = 1 << int(np.ceil(np.log2(selh | selw)))
patternsize_h = max(metafier_height, 1 << int(np.ceil(np.log2(selh))))
patternsize_w = max(metafier_width, 1 << int(np.ceil(np.log2(selw))))
g.show("Retrieved selection. pattern size: {}, h: {}, w: {}".format(patternsize, patternsize_h, patternsize_w))
metapattern = np.zeros((patternsize_h, patternsize_w), dtype=np.int16)
g.show(str(metapattern.shape))
#Pseudo-convolution, to detect diagonal neighbors
# +1 +0 +2
# +0 *16 +0
# +4 +0 +8
for cell in np.reshape(cells, (-1, 3)):
    selx = cell[0] - selection[0]  # x offset within the selection
    sely = cell[1] - selection[1]  # y offset within the selection
    metapattern[sely][selx] += 16 * cell[2]
    # Accumulate diagonal-neighbor contributions, guarding the borders.
    if sely:
        if selx:
            metapattern[sely - 1][selx - 1] += 8
        if selx + 1 < selw:
            metapattern[sely - 1][selx + 1] += 4
    if sely + 1 < selh:
        if selx:
            metapattern[sely + 1][selx - 1] += 2
        if selx + 1 < selw:
            metapattern[sely + 1][selx + 1] += 1
#Remove all B/S cells
# Shift codes into the metatemplate11 leaf-line range, then zero anything
# below the threshold.  NOTE(review): constant is 5630 here while the comment
# says 5632 is the start of the 11s — confirm against metatemplate11.mc.
metapattern += 5630 - 32 #5632 is the starting point of 11s in template
metapattern[metapattern < 5630] = 0
#Using metatemplate11, memoization, and some recursion
def createLine(pattern, outfile, linenum = [5726], memo = {}): #linenum and memo are mutable function arguments, which are only initialized during function definition
    """Recursively write macrocell quadtree lines for `pattern` to `outfile`.

    Returns the line number of the node that represents `pattern` in the
    output file.  `linenum` (next free line number, starting after the
    template's 5726 lines) and `memo` (pattern tuple -> line number) are
    deliberately mutable defaults so state persists across the whole
    recursion without globals.
    """
    # Only patterns small enough to recur fully are worth memoizing.
    memoizable = pattern.shape[0] <= 1024
    if (not memoizable) or (tuple(pattern.ravel().tolist()) not in memo): #If we haven't seen this type of pattern before, let's remember it
        if pattern.shape[0] == 2: #Pattern is a leaf, write leaf line
            outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
                                                    pattern[0, 0],
                                                    pattern[0, 1],
                                                    pattern[1, 0],
                                                    pattern[1, 1]))
        else: #Pattern is a branch, keep going down quadtree
            if pattern.shape[0] <= 1024 and pattern.shape[1] <= 1024:
                # Split into four equal quadrants (NW, NE, SW, SE order).
                subpatterns = pattern.reshape(2, pattern.shape[0] >> 1, 2, pattern.shape[1] >> 1).swapaxes(1,2)
                outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
                                                        createLine(subpatterns[0, 0], outfile),
                                                        createLine(subpatterns[0, 1], outfile),
                                                        createLine(subpatterns[1, 0], outfile),
                                                        createLine(subpatterns[1, 1], outfile)))
            else:
                # Wider than tall: split top/bottom only; right halves are
                # empty (node index 0).
                subpatterns = pattern.reshape(2, pattern.shape[0] >> 1, 1, pattern.shape[1]).swapaxes(1,2)
                outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
                                                        createLine(subpatterns[0, 0], outfile),
                                                        0,
                                                        createLine(subpatterns[1, 0], outfile),
                                                        0))
        if memoizable:
            memo[tuple(pattern.ravel().tolist())] = linenum[0]
        linenum[0] += 1
        return linenum[0] - 1
    return memo[tuple(pattern.ravel().tolist())]
g.show("Initialized pattern array {}. Metafying...".format(metapattern.shape))
# Start from a copy of the template and append the generated quadtree lines.
copyfile('metatemplate11.mc', 'output.mc')
with open('output.mc', 'a') as outputfile:
    createLine(metapattern, outputfile)
g.show("Done.")
#Display output.mc
g.addlayer()
g.open('output.mc')
#TODO: Use metatemplate10?
| [
"golly.getcells",
"numpy.reshape",
"golly.getselrect",
"golly.exit",
"golly.show",
"golly.addlayer",
"golly.open",
"numpy.zeros",
"shutil.copyfile",
"numpy.log2"
] | [((246, 279), 'golly.show', 'g.show', (['"""Retrieving selection..."""'], {}), "('Retrieving selection...')\n", (252, 279), True, 'import golly as g\n'), ((311, 325), 'golly.getselrect', 'g.getselrect', ([], {}), '()\n', (323, 325), True, 'import golly as g\n'), ((625, 646), 'golly.getcells', 'g.getcells', (['selection'], {}), '(selection)\n', (635, 646), True, 'import golly as g\n'), ((1144, 1200), 'numpy.zeros', 'np.zeros', (['(patternsize_h, patternsize_w)'], {'dtype': 'np.int16'}), '((patternsize_h, patternsize_w), dtype=np.int16)\n', (1152, 1200), True, 'import numpy as np\n'), ((1334, 1360), 'numpy.reshape', 'np.reshape', (['cells', '(-1, 3)'], {}), '(cells, (-1, 3))\n', (1344, 1360), True, 'import numpy as np\n'), ((4280, 4322), 'shutil.copyfile', 'copyfile', (['"""metatemplate11.mc"""', '"""output.mc"""'], {}), "('metatemplate11.mc', 'output.mc')\n", (4288, 4322), False, 'from shutil import copyfile\n'), ((4407, 4422), 'golly.show', 'g.show', (['"""Done."""'], {}), "('Done.')\n", (4413, 4422), True, 'import golly as g\n'), ((4443, 4455), 'golly.addlayer', 'g.addlayer', ([], {}), '()\n', (4453, 4455), True, 'import golly as g\n'), ((4456, 4475), 'golly.open', 'g.open', (['"""output.mc"""'], {}), "('output.mc')\n", (4462, 4475), True, 'import golly as g\n'), ((344, 367), 'golly.exit', 'g.exit', (['"""No selection."""'], {}), "('No selection.')\n", (350, 367), True, 'import golly as g\n'), ((661, 694), 'golly.exit', 'g.exit', (['"""No pattern in selection"""'], {}), "('No pattern in selection')\n", (667, 694), True, 'import golly as g\n'), ((852, 872), 'numpy.log2', 'np.log2', (['(selh | selw)'], {}), '(selh | selw)\n', (859, 872), True, 'import numpy as np\n'), ((929, 942), 'numpy.log2', 'np.log2', (['selh'], {}), '(selh)\n', (936, 942), True, 'import numpy as np\n'), ((999, 1012), 'numpy.log2', 'np.log2', (['selw'], {}), '(selw)\n', (1006, 1012), True, 'import numpy as np\n')] |
import numpy as np
def sigmoid(x):
    """Numerically stable elementwise logistic function 1/(1+exp(-x)).

    The input array is split by sign so that exp() is only ever evaluated
    on a non-positive argument, which avoids overflow for large |x|.
    """
    result = np.zeros(x.shape)
    nonneg = x >= 0
    neg = ~nonneg
    # x >= 0: the classic form is safe because e^-x <= 1.
    result[nonneg] = 1. / (1. + np.exp(-x[nonneg]))
    # x < 0: rewrite as e^x / (1 + e^x) so the exponent stays negative.
    ex = np.exp(x[neg])
    result[neg] = ex / (1. + ex)
    return result
def sigmoid_prime(x):
    """Derivative of the logistic function: s(x) * (1 - s(x))."""
    s = sigmoid(x)
    return s * (1 - s)
def KL_divergence(x, y):
    """Elementwise KL divergence between Bernoulli(x) and Bernoulli(y).

    A tiny epsilon keeps every log argument strictly positive so that
    boundary values (0 or 1) do not produce -inf.
    """
    eps = 1E-20
    term_on = x * (np.log(x + eps) - np.log(y + eps))
    term_off = (1 - x) * (np.log(1 - x + eps) - np.log(1 - y + eps))
    return term_on + term_off
def initialize(hidden_size, visible_size):
    """Build a flat initial parameter vector for a one-hidden-layer autoencoder.

    Weights are drawn uniformly from [-r, r) with
    r = sqrt(6 / (hidden_size + visible_size + 1)); biases start at zero.
    Layout of the returned vector: [W1, W2, b1, b2], each flattened.
    """
    r = np.sqrt(6) / np.sqrt(hidden_size + visible_size + 1)

    def uniform(shape):
        # Same draw order and arithmetic as a direct np.random.random call.
        return np.random.random(shape) * 2 * r - r

    W1 = uniform((hidden_size, visible_size))
    W2 = uniform((visible_size, hidden_size))
    b1 = np.zeros(hidden_size, dtype=np.float64)
    b2 = np.zeros(visible_size, dtype=np.float64)
    pieces = (W1.reshape(hidden_size * visible_size),
              W2.reshape(hidden_size * visible_size),
              b1.reshape(hidden_size),
              b2.reshape(visible_size))
    return np.concatenate(pieces)
def sparse_autoencoder_cost(theta, visible_size, hidden_size,
                            lambda_, sparsity_param, beta, data):
    """Squared-error + weight-decay cost and gradient for the autoencoder.

    theta layout matches initialize(): [W1, W2, b1, b2] flattened.
    data is (visible_size, m) with one training example per column.
    Returns (cost, grad) with grad flattened in the same layout as theta.

    NOTE(review): the sparsity penalty is currently disabled
    (sparsity_delta = 0), so `sparsity_param` and `beta` have no effect —
    confirm whether the KL sparsity term was meant to be included.
    """
    # Unpack the flat parameter vector.
    W1 = theta[0:hidden_size * visible_size].reshape(hidden_size, visible_size)
    W2 = theta[hidden_size * visible_size:2 * hidden_size * visible_size].reshape(visible_size, hidden_size)
    b1 = theta[2 * hidden_size * visible_size:2 * hidden_size * visible_size + hidden_size]
    b2 = theta[2 * hidden_size * visible_size + hidden_size:]
    m = data.shape[1]
    # Forward pass: input -> hidden activations a2 -> reconstruction h.
    z2 = W1.dot(data) + np.tile(b1, (m, 1)).transpose()
    a2 = sigmoid(z2)
    z3 = W2.dot(a2) + np.tile(b2, (m, 1)).transpose()
    h = sigmoid(z3)
    # Mean squared reconstruction error plus L2 weight decay.
    cost = np.sum((h - data) ** 2) / (2 * m) + \
           (lambda_ / 2) * (np.sum(W1 ** 2) + np.sum(W2 ** 2))# + \
    sparsity_delta = 0
    # Backpropagation through the output sigmoid and the hidden layer.
    delta3 = -(data - h) * sigmoid_prime(z3)
    delta2 = (W2.transpose().dot(delta3) + beta * sparsity_delta) * sigmoid_prime(z2)
    W1grad = delta2.dot(data.transpose()) / m + lambda_ * W1
    W2grad = delta3.dot(a2.transpose()) / m + lambda_ * W2
    b1grad = np.sum(delta2, axis=1) / m
    b2grad = np.sum(delta3, axis=1) / m
    grad = np.concatenate((W1grad.reshape(hidden_size * visible_size),
                           W2grad.reshape(hidden_size * visible_size),
                           b1grad.reshape(hidden_size),
                           b2grad.reshape(visible_size)))
    return cost, grad
def sparse_autoencoder(theta, hidden_size, visible_size, data):
    """Forward pass to the hidden layer: returns the hidden activations.

    theta layout matches initialize(): [W1, W2, b1, b2] flattened; only
    W1 and b1 are needed here.  data is (visible_size, m) column-examples.
    """
    n_w = hidden_size * visible_size
    W1 = theta[0:n_w].reshape(hidden_size, visible_size)
    b1 = theta[2 * n_w:2 * n_w + hidden_size]
    m = data.shape[1]
    # Broadcast the bias across the m columns via an explicit tile.
    z2 = W1.dot(data) + np.tile(b1, (m, 1)).transpose()
    return sigmoid(z2)
def sparse_autoencoder_linear_cost(theta, visible_size, hidden_size,
                                  lambda_, sparsity_param, beta, data):
    """Cost/gradient for an autoencoder with a *linear* output layer.

    Identical to sparse_autoencoder_cost except the reconstruction is
    h = z3 (no output sigmoid), so delta3 carries no sigmoid_prime factor.
    NOTE(review): the sparsity penalty is disabled (sparsity_delta = 0.),
    so `sparsity_param` and `beta` are effectively unused — confirm intent.
    """
    # Unpack flat parameters: [W1, W2, b1, b2].
    W1 = theta[0:hidden_size * visible_size].reshape(hidden_size, visible_size)
    W2 = theta[hidden_size * visible_size:2 * hidden_size * visible_size].reshape(visible_size, hidden_size)
    b1 = theta[2 * hidden_size * visible_size:2 * hidden_size * visible_size + hidden_size]
    b2 = theta[2 * hidden_size * visible_size + hidden_size:]
    m = data.shape[1]
    # Forward pass; the output layer stays linear.
    z2 = W1.dot(data) + np.tile(b1, (m, 1)).transpose()
    a2 = sigmoid(z2)
    z3 = W2.dot(a2) + np.tile(b2, (m, 1)).transpose()
    h = z3
    cost = np.sum((h - data) ** 2) / (2 * m) + \
           (lambda_ / 2) * (np.sum(W1 ** 2) + np.sum(W2 ** 2))
    sparsity_delta = 0.
    # Backpropagation (linear output: delta3 is just the negative residual).
    delta3 = -(data - h)
    delta2 = (W2.transpose().dot(delta3) + beta * sparsity_delta) * sigmoid_prime(z2)
    W1grad = delta2.dot(data.transpose()) / m + lambda_ * W1
    W2grad = delta3.dot(a2.transpose()) / m + lambda_ * W2
    b1grad = np.sum(delta2, axis=1) / m
    b2grad = np.sum(delta3, axis=1) / m
    grad = np.concatenate((W1grad.reshape(hidden_size * visible_size),
                           W2grad.reshape(hidden_size * visible_size),
                           b1grad.reshape(hidden_size),
                           b2grad.reshape(visible_size)))
    return cost, grad
| [
"numpy.tile",
"numpy.sqrt",
"numpy.where",
"numpy.random.random",
"numpy.log",
"numpy.exp",
"numpy.sum",
"numpy.zeros"
] | [((57, 73), 'numpy.where', 'np.where', (['(x >= 0)'], {}), '(x >= 0)\n', (65, 73), True, 'import numpy as np\n'), ((83, 98), 'numpy.where', 'np.where', (['(x < 0)'], {}), '(x < 0)\n', (91, 98), True, 'import numpy as np\n'), ((106, 123), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (114, 123), True, 'import numpy as np\n'), ((677, 716), 'numpy.zeros', 'np.zeros', (['hidden_size'], {'dtype': 'np.float64'}), '(hidden_size, dtype=np.float64)\n', (685, 716), True, 'import numpy as np\n'), ((726, 766), 'numpy.zeros', 'np.zeros', (['visible_size'], {'dtype': 'np.float64'}), '(visible_size, dtype=np.float64)\n', (734, 766), True, 'import numpy as np\n'), ((179, 194), 'numpy.exp', 'np.exp', (['x[indn]'], {}), '(x[indn])\n', (185, 194), True, 'import numpy as np\n'), ((480, 490), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (487, 490), True, 'import numpy as np\n'), ((493, 532), 'numpy.sqrt', 'np.sqrt', (['(hidden_size + visible_size + 1)'], {}), '(hidden_size + visible_size + 1)\n', (500, 532), True, 'import numpy as np\n'), ((2086, 2108), 'numpy.sum', 'np.sum', (['delta2'], {'axis': '(1)'}), '(delta2, axis=1)\n', (2092, 2108), True, 'import numpy as np\n'), ((2126, 2148), 'numpy.sum', 'np.sum', (['delta3'], {'axis': '(1)'}), '(delta3, axis=1)\n', (2132, 2148), True, 'import numpy as np\n'), ((3826, 3848), 'numpy.sum', 'np.sum', (['delta2'], {'axis': '(1)'}), '(delta2, axis=1)\n', (3832, 3848), True, 'import numpy as np\n'), ((3866, 3888), 'numpy.sum', 'np.sum', (['delta3'], {'axis': '(1)'}), '(delta3, axis=1)\n', (3872, 3888), True, 'import numpy as np\n'), ((146, 162), 'numpy.exp', 'np.exp', (['(-x[indp])'], {}), '(-x[indp])\n', (152, 162), True, 'import numpy as np\n'), ((199, 214), 'numpy.exp', 'np.exp', (['x[indn]'], {}), '(x[indn])\n', (205, 214), True, 'import numpy as np\n'), ((1691, 1714), 'numpy.sum', 'np.sum', (['((h - data) ** 2)'], {}), '((h - data) ** 2)\n', (1697, 1714), True, 'import numpy as np\n'), ((3453, 3476), 'numpy.sum', 
'np.sum', (['((h - data) ** 2)'], {}), '((h - data) ** 2)\n', (3459, 3476), True, 'import numpy as np\n'), ((338, 355), 'numpy.log', 'np.log', (['(x + 1e-20)'], {}), '(x + 1e-20)\n', (344, 355), True, 'import numpy as np\n'), ((354, 371), 'numpy.log', 'np.log', (['(y + 1e-20)'], {}), '(y + 1e-20)\n', (360, 371), True, 'import numpy as np\n'), ((384, 405), 'numpy.log', 'np.log', (['(1 - x + 1e-20)'], {}), '(1 - x + 1e-20)\n', (390, 405), True, 'import numpy as np\n'), ((406, 427), 'numpy.log', 'np.log', (['(1 - y + 1e-20)'], {}), '(1 - y + 1e-20)\n', (412, 427), True, 'import numpy as np\n'), ((542, 587), 'numpy.random.random', 'np.random.random', (['(hidden_size, visible_size)'], {}), '((hidden_size, visible_size))\n', (558, 587), True, 'import numpy as np\n'), ((609, 654), 'numpy.random.random', 'np.random.random', (['(visible_size, hidden_size)'], {}), '((visible_size, hidden_size))\n', (625, 654), True, 'import numpy as np\n'), ((1552, 1571), 'numpy.tile', 'np.tile', (['b1', '(m, 1)'], {}), '(b1, (m, 1))\n', (1559, 1571), True, 'import numpy as np\n'), ((1627, 1646), 'numpy.tile', 'np.tile', (['b2', '(m, 1)'], {}), '(b2, (m, 1))\n', (1634, 1646), True, 'import numpy as np\n'), ((1757, 1772), 'numpy.sum', 'np.sum', (['(W1 ** 2)'], {}), '(W1 ** 2)\n', (1763, 1772), True, 'import numpy as np\n'), ((1775, 1790), 'numpy.sum', 'np.sum', (['(W2 ** 2)'], {}), '(W2 ** 2)\n', (1781, 1790), True, 'import numpy as np\n'), ((2719, 2738), 'numpy.tile', 'np.tile', (['b1', '(m, 1)'], {}), '(b1, (m, 1))\n', (2726, 2738), True, 'import numpy as np\n'), ((3323, 3342), 'numpy.tile', 'np.tile', (['b1', '(m, 1)'], {}), '(b1, (m, 1))\n', (3330, 3342), True, 'import numpy as np\n'), ((3398, 3417), 'numpy.tile', 'np.tile', (['b2', '(m, 1)'], {}), '(b2, (m, 1))\n', (3405, 3417), True, 'import numpy as np\n'), ((3519, 3534), 'numpy.sum', 'np.sum', (['(W1 ** 2)'], {}), '(W1 ** 2)\n', (3525, 3534), True, 'import numpy as np\n'), ((3537, 3552), 'numpy.sum', 'np.sum', (['(W2 ** 2)'], {}), 
'(W2 ** 2)\n', (3543, 3552), True, 'import numpy as np\n')] |
import numpy as np
import torch
class Compose(object):
    """Chain several sample transforms into a single callable.

    Args:
        transforms (list of ``Transform`` objects): transforms applied to
            the sample one after another, in list order.
    Example:
        >>> transforms.Compose([
        >>>     transforms.MriNoise(),
        >>>     transforms.ComplexToTensor(),
        >>> ])
    Returns:
        ob (PyTorch transform object): Can be used with PyTorch dataset
        with transform=ob option.
    """
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, sample):
        # Prepend a singleton axis so downstream code can treat the data
        # uniformly as multi-coil (coil dimension first).
        raw = sample['dat']
        sample['dat'] = np.reshape(raw, (1,) + raw.shape)
        # Signal level := mean magnitude of the target image.
        sample['siglevel'] = np.mean(np.absolute(sample['target']))
        # Run every configured transform in sequence.
        for transform in self.transforms:
            sample = transform(sample)
        return sample

    def __repr__(self):
        pieces = [self.__class__.__name__ + '(']
        for transform in self.transforms:
            pieces.append(' {0}'.format(transform))
        return '\n'.join(pieces) + '\n)'
| [
"numpy.absolute",
"numpy.reshape"
] | [((722, 755), 'numpy.reshape', 'np.reshape', (['dat', '((1,) + dat.shape)'], {}), '(dat, (1,) + dat.shape)\n', (732, 755), True, 'import numpy as np\n'), ((822, 851), 'numpy.absolute', 'np.absolute', (["sample['target']"], {}), "(sample['target'])\n", (833, 851), True, 'import numpy as np\n')] |
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.mixture import GaussianMixture
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
import numpy as np
# Load the iris dataset and hold out a third of it for evaluation.
data = load_iris()
data.feature_names, data.target_names
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.33, random_state=3)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
"""#Gaussian"""
from sklearn.metrics import accuracy_score
# Sweep the number of mixture components and plot the test-set density.
# NOTE(review): GMM component ids are arbitrary, so comparing gmm.predict
# output directly against class labels only yields a meaningful accuracy
# when the components happen to align with the classes — confirm intent.
for i in range(1,10):
  gmm = GaussianMixture(n_components=i)
  gmm.fit(X_train)
  score = gmm.score_samples(X_test)
  y_pred = gmm.predict(X_test)
  print('Accuracy:{0:.3f}'.format(accuracy_score(y_test, y_pred)))
  plt.fill(X_test, np.exp(score), c='blue')
  plt.title("Gaussian {}".format(i))
  plt.show()
#Best Gaussian
gmm = GaussianMixture(n_components=3)
gmm.fit(X_train)
score = gmm.score_samples(X_test)
y_pred = gmm.predict(X_test)
print('Accuracy:{0:.3f}'.format(accuracy_score(y_test, y_pred)))
plt.fill(X_test, np.exp(score), c='blue')
plt.title("Gaussian 2")
plt.show()
"""#Kernel"""
# Grid over kernel type and bandwidth for kernel density estimation;
# score_samples returns log-density, hence the np.exp when plotting.
kernels = ['tophat', 'gaussian']
bandwidths = [0.01, 0.05, 0.1, 0.5, 1, 2, 5]
for kern in kernels:
  for b in bandwidths:
    kde_skl = KernelDensity(kernel= kern, bandwidth=b)
    kde_skl.fit(X_train)
    score = kde_skl.score_samples(X_test)
    plt.fill(X_test, np.exp(score), c='green')
    plt.title("Kernel: {} and bandwidth: {}".format(kern, b))
    plt.show()
#Best KernelDensity
kde_skl = KernelDensity(kernel= 'gaussian', bandwidth=0.05)
kde_skl.fit(X_train)
score = kde_skl.score_samples(X_test)
plt.fill(X_test, np.exp(score), c='green')
plt.title("Kernel: gaussian and bandwidth: 0.05")
plt.show() | [
"sklearn.datasets.load_iris",
"sklearn.mixture.GaussianMixture",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KernelDensity",
"numpy.exp",
"matplotlib.pyplot.title",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.show"
] | [((239, 250), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (248, 250), False, 'from sklearn.datasets import load_iris\n'), ((326, 398), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data.data', 'data.target'], {'test_size': '(0.33)', 'random_state': '(3)'}), '(data.data, data.target, test_size=0.33, random_state=3)\n', (342, 398), False, 'from sklearn.model_selection import train_test_split\n'), ((870, 901), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': '(3)'}), '(n_components=3)\n', (885, 901), False, 'from sklearn.mixture import GaussianMixture\n'), ((1090, 1113), 'matplotlib.pyplot.title', 'plt.title', (['"""Gaussian 2"""'], {}), "('Gaussian 2')\n", (1099, 1113), True, 'import matplotlib.pyplot as plt\n'), ((1114, 1124), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1122, 1124), True, 'import matplotlib.pyplot as plt\n'), ((1571, 1619), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': '(0.05)'}), "(kernel='gaussian', bandwidth=0.05)\n", (1584, 1619), False, 'from sklearn.neighbors import KernelDensity\n'), ((1724, 1773), 'matplotlib.pyplot.title', 'plt.title', (['"""Kernel: gaussian and bandwidth: 0.05"""'], {}), "('Kernel: gaussian and bandwidth: 0.05')\n", (1733, 1773), True, 'import matplotlib.pyplot as plt\n'), ((1774, 1784), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1782, 1784), True, 'import matplotlib.pyplot as plt\n'), ((552, 583), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'i'}), '(n_components=i)\n', (567, 583), False, 'from sklearn.mixture import GaussianMixture\n'), ((837, 847), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (845, 847), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1078), 'numpy.exp', 'np.exp', (['score'], {}), '(score)\n', (1071, 1078), True, 'import numpy as np\n'), ((1698, 1711), 'numpy.exp', 'np.exp', (['score'], {}), '(score)\n', (1704, 
1711), True, 'import numpy as np\n'), ((768, 781), 'numpy.exp', 'np.exp', (['score'], {}), '(score)\n', (774, 781), True, 'import numpy as np\n'), ((1015, 1045), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1029, 1045), False, 'from sklearn.metrics import accuracy_score\n'), ((1285, 1324), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': 'kern', 'bandwidth': 'b'}), '(kernel=kern, bandwidth=b)\n', (1298, 1324), False, 'from sklearn.neighbors import KernelDensity\n'), ((1528, 1538), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1536, 1538), True, 'import matplotlib.pyplot as plt\n'), ((714, 744), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (728, 744), False, 'from sklearn.metrics import accuracy_score\n'), ((1427, 1440), 'numpy.exp', 'np.exp', (['score'], {}), '(score)\n', (1433, 1440), True, 'import numpy as np\n')] |
import numpy as np
import collections
from PIL import Image
from generic.data_provider.batchifier import AbstractBatchifier
from generic.data_provider.image_preprocessors import get_spatial_feat, resize_image
from generic.data_provider.nlp_utils import padder,padder_3d,padder_4d
from generic.data_provider.nlp_utils import Embeddings,get_embeddings
from gensim.models import word2vec,FastText,KeyedVectors
from src.guesswhat.data_provider.generate_categoryQuestion import Generate_Category
from matplotlib import pyplot as plt
from PIL import Image
import numpy
import time
import os
# TODO: fix the error related to random embeddings of words when neither fastText nor GloVe is used
# One-hot encoding of the three possible oracle answers, used as the
# 3-way classification target: index 0 = Yes, 1 = No, 2 = N/A.
answer_dict = \
{ 'Yes': np.array([1, 0, 0], dtype=np.int32),
 'No': np.array([0, 1, 0], dtype=np.int32),
  'N/A': np.array([0, 0, 1], dtype=np.int32)
}
class OracleBatchifier(AbstractBatchifier):
    """Assembles Oracle training batches from GuessWhat?! games.

    The `sources` list selects which inputs are materialised in each batch
    (e.g. 'question', 'embedding_vector_ques', 'description', 'category',
    'spatial', 'crop', 'image', 'mask', ...).
    """

    def __init__(self, tokenizer_question, sources,glove=None,tokenizer_description = None ,embedding=None, status=list(),args=None,config=None,trainset=None):
        self.tokenizer_question = tokenizer_question
        self.tokenizer_description = tokenizer_description
        self.sources = sources
        self.status = status
        self.config = config
        embedding_name = ""
        if config["model"]["fasttext"]:
            embedding_name = "fasttext"
        elif config["model"]["glove"]:
            embedding_name = "glove"
        # ted_en-20160408.zip
        # all_question_game.txt
        ##### Embedding_type #####################
        # 0 : trained only question
        # 1 : pretained with wikipedia data_set
        # 2 : pretained + trained question
        #
        ##########################################
        if config["model"]["question"]["embedding_type"] == 0:
            self.embedding = Embeddings(file_name=["all_question_game.txt" ], embedding_type = config["model"]["question"]["embedding_type"] ,emb_dim=100)
        elif config["model"]["question"]["embedding_type"] == 1:
            self.embedding = Embeddings(file_name=["ted_en-20160408.zip" ], embedding_type = config["model"]["question"]["embedding_type"] ,emb_dim=100)
        elif config["model"]["question"]["embedding_type"] == 2:
            self.embedding = Embeddings(file_name=["ted_en-20160408.zip","all_question_game.txt" ], embedding_type = config["model"]["question"]["embedding_type"], emb_dim=100)
        self.model_embedding = self.embedding.model
        # if self.config["model"]["fasttext"] :
        #     pass
        # # self.generate_category = Generate_Category(self.model_word,"fasttext",self.tokenizer_question,config["model"]["category"]["all_word"])
        # elif self.config["model"]["glove"]:
        #     self.glove = glove
        #     self.generate_category = Generate_Category(self.glove,"glove",self.tokenizer_question,config["model"]["category"]["all_word"])

    def filter(self, games):
        # Keep only games whose status is listed; an empty status list
        # means no filtering at all.
        if len(self.status) > 0:
            return [g for g in games if g.status in self.status]
        else:
            return games

    def apply(self, games):
        """Turn a list of games into a dict of batch inputs keyed by source."""
        sources = self.sources
        batch = collections.defaultdict(list)
        batch_size = len(games)
        assert batch_size > 0
        for i, game in enumerate(games):
            batch['raw'].append(game)
            image = game.image
            # Tokenised question, zero-padded to a fixed length of 14.
            if 'question' in sources :
                question = self.tokenizer_question.apply(game.questions[0],use_dict_ques=False)
                # print("+++++ words_question = {} ".format(question))
                sp_zeros = np.zeros((14))
                sp_zeros [0:len(question)] = question
                batch["question"].append(sp_zeros)
                batch["seq_length_question"].append(len(question))
            # Per-word embedding vectors for the (single) question.
            if 'embedding_vector_ques' in sources:
                assert len(game.questions) == 1
                # Add glove vectors (NB even <unk> may have a specific glove)
                # print("oracle_batchifier | question = {}".format(game.questions[0]))
                words = self.tokenizer_question.apply(game.questions[0],tokent_int=False)
                if "question_pos" in sources:
                    # print("/////////// question_pos")
                    # NOTE(review): self.model_wordd / model_word / model_pos ...
                    # are not set in __init__ — confirm this branch is reachable.
                    embedding_vectors,embedding_pos = get_embeddings(words,pos=self.config["model"]["question"]["pos"],lemme=self.config["model"]["question"]["lemme"],model_wordd=self.model_wordd,model_worddl=self.model_worddl,model_word=self.model_word,model_wordl=self.model_wordl,model_posd=self.model_posd,model_pos=self.model_pos) # slow (copy gloves in process)
                    # print("..... question_pos............. embedding_vectors",len(embedding_vectors[0]))
                    batch['embedding_vector_ques'].append(embedding_vectors)
                    batch['embedding_vector_ques_pos'].append(embedding_pos)
                    batch['question_pos'].append(question)
                else:
                    embedding_vectors = self.embedding.get_embedding(words)
                    # print("embedding = {}".format(np.asarray(embedding_vectors).shape ))
                    # exit()
                    # if "embedding_vector_ques" not in batch:
                    #     batch['embedding_vector_ques'] = np.zeros((batch_size,7,100))
                    batch['embedding_vector_ques'].append(embedding_vectors)
            # Tokenised image description (uses the question tokenizer).
            if 'description' in sources:
                description = self.tokenizer_question.apply(game.image.description,use_dict_ques=False)
                # print("+++++ words_question = {} ".format(question))
                batch["description"].append(description)
            # Dialogue history: 6 padded slots of previous questions.
            if 'ques_hist_H0' in sources:
                assert len(game.questions) == 1
                # description = self.tokenizer_description.apply(game.image.description)
                # batch['description'].append(description)
                for j in range(6):
                    question_answer = game.all_last_question[0]
                    words = []
                    if len(question_answer) > 1:
                        word = self.tokenizer_question.apply(game.all_last_question[0][1][0])
                        words = word
                    else:
                        word = self.tokenizer_question.apply(game.all_last_question[0][0])
                        words = word
                    sp_zeros = np.zeros((14))
                    # print("words = {} ".format(words))
                    sp_zeros [0:len(words)] = words
                    # print("sp_zeros = {} ".format(sp_zeros))
                    batch['ques_hist_H{}'.format(j)].append(sp_zeros)
                    batch['seq_length_question_history_H{}'.format(j)].append(len(words))
            # print('embedding_vector_des'in sources)
            # Per-word embedding vectors for the image description.
            if 'embedding_vector_des'in sources:
                description = self.tokenizer_description.apply(game.image.description,tokent_int=False)
                #print("*************** Description =",description)
                # batch['description'].append(description)
                if "des_pos" in sources:
                    embedding_vectors,embedding_pos = get_embeddings(description,pos=self.config["model"]["question"]["pos"],lemme=self.config["model"]["question"]["lemme"],model_wordd=self.model_wordd,model_worddl=self.model_worddl,model_word=self.model_word,model_wordl=self.model_wordl,model_posd=self.model_posd,model_pos=self.model_pos) # slow (copy gloves in process)
                    batch['embedding_vector_des'].append(embedding_vectors)
                    batch['embedding_vector_des_pos'].append(embedding_pos)
                    # batch['des_pos'].append(question)
                else:
                    if self.config["model"]["fasttext"] :
                        #print("++++++----- ++++++++ Dans fasttext ")
                        embedding_vectors,_ = get_embeddings(description,pos=self.config["model"]["question"]["pos"],lemme=self.config["model"]["question"]["lemme"],model_wordd=self.model_wordd,model_worddl=self.model_worddl,model_word=self.model_word,model_wordl=self.model_wordl,model_posd=self.model_posd,model_pos=self.model_pos) # slow (copy gloves in process)
                    elif self.config["model"]["glove"] :
                        #print("++++++----- ++++++++ Dans glove ")
                        embedding_vectors = self.glove.get_embeddings(description)
                    # print("------ ELSE".format(embedding_vectors))
                    # exit()
                    batch['embedding_vector_des'].append(embedding_vectors)
            # One-hot ground-truth answer (see module-level answer_dict).
            if 'answer' in sources:
                if "answer" not in batch:
                    batch["answer"] = np.zeros((batch_size,3))
                # print("game.amswer = {}".format(game.answers))
                # exit()
                assert len(game.answers) == 1
                batch['answer'][i] = answer_dict[game.answers[0]]
                #print(" Correct Answer = ",game.answers[0])
            # Target-object category: either a 100-d embedding or the raw id.
            if 'category' in sources:
                use_embedding_cat = self.config["model"]["category"]["use_embedding"]
                if "category" not in batch:
                    if use_embedding_cat:
                        batch['category'] = np.zeros((batch_size,100))
                    else:
                        batch['category'] = np.zeros((batch_size))
                if use_embedding_cat:
                    embc = np.asarray(self.embedding.get_embedding([game.object.category]))
                    # embc = self.tokenizer_question.apply(game.object.category,use_dict_ques=False)
                    category_input = embc.reshape((100))
                else:
                    category_input = game.object.category_id
                # print("category = {} ".format(category_input))
                batch['category'][i] = category_input
            # Multi-hot vector of all object categories present in the scene.
            if 'allcategory' in sources:
                allcategory = []
                allcategory_hot = np.zeros(shape=(90),dtype=int)
                # print("Oracle_batchifier | Allcategory -------------------------------")
                for obj in game.objects:
                    allcategory.append(obj.category_id - 1)
                allcategory_hot[allcategory] = 1
                batch['allcategory'].append(allcategory_hot)
            # 8-d spatial feature of the target object's bounding box.
            if 'spatial' in sources:
                if 'spatial' not in batch:
                    batch['spatial'] = np.zeros((batch_size,8),dtype=float)
                spat_feat = get_spatial_feat(game.object.bbox, image.width, image.height)
                batch['spatial'][i] = spat_feat
            if 'crop' in sources:
                batch['crop'].append(game.object.get_crop())
                batch['image_id'].append(image.get_idimage())
                # batch['crop_id'].append(game.object_id)
                # print("crop_id=",game.object.get_crop().shape)
                # exit()
            if 'image' in sources:
                features_image = image.get_image()
                batch['image'].append(features_image)
                batch['image_id'].append(image.get_idimage())
            if 'mask' in sources:
                assert "image" in batch['image'], "mask input require the image source"
                mask = game.object.get_mask()
                ft_width, ft_height = batch['image'][-1].shape[1],\
                                      batch['image'][-1].shape[2] # Use the image feature size (not the original img size)
                mask = resize_image(Image.fromarray(mask), height=ft_height, width=ft_width)
                batch['mask'].append(mask)
        # padding = self.embedding.get_embeddings(["<padding>"])[0]
        # print("padding | = {}".format(padding))
        # pad the questions
        # if "question" in sources:
        #     batch['question'] , batch['seq_length_question'] = padder(batch['question'],max_seq_length=14)
        if "question_pos" in sources:
            batch['question_pos'], batch['seq_length_ques_pos'] = padder(batch['question_pos'],
                                                                         padding_symbol=self.tokenizer_question.padding_token)
        if "description" in sources:
            batch['description'], batch['seq_length_description'] = padder(batch['description'] )
        # batch['embedding_vector_pos'], _ = padder_3d(batch['embedding_vector_pos'])
        if 'embedding_vector_ques' in sources:
            batch['embedding_vector_ques'],s = padder_3d(batch['embedding_vector_ques'],max_seq_length=12)
        if 'embedding_vector_ques_hist' in sources:
            # print("Shape=",np.asarray(batch['embedding_vector_ques_hist'] ).shape)
            batch_hist, size_sentences,max_seq = padder_4d(batch['embedding_vector_ques_hist'],max_seq_length=14)
            batch_hist = np.asarray(batch_hist)
            size_sentences = np.asarray(size_sentences)
            batch['embedding_vector_ques_hist'] = batch_hist
            # Expose each history slot under its own key as well.
            for i in range(6):
                batch['ques_hist_H{}'.format(i)] = batch_hist[:,i,:]
                batch['seq_length_question_history_H{}'.format(i)] = size_sentences[:,i]
            #print("Len=",len(batch['seq_length_question']))
        if 'embedding_vector_ques_pos' in sources:
            batch['embedding_vector_ques_pos'], _ = padder_3d(batch['embedding_vector_ques_pos'])
        if 'embedding_vector_des' in sources:
            batch['embedding_vector_des'], batch['seq_length_description'] = padder_3d(batch['embedding_vector_des'])
        if 'embedding_vector_des_pos' in sources:
            batch['embedding_vector_des_pos'], _ = padder_3d(batch['embedding_vector_des_pos'])
        # if 'description' in sources:
        #     # pad descriptions up to the maximum length
        #     batch['description'], batch['seq_length_description'] = padder_3d(batch['description'])
        # print(" Bath = {} ".format(batch.keys()))
        # exit()
        # print("finish oracle_bachifier .... time=",total)
        # print("TotalBatch=",total)
        #print("TotalBatch=",total)
        return batch
| [
"PIL.Image.fromarray",
"generic.data_provider.nlp_utils.Embeddings",
"generic.data_provider.nlp_utils.get_embeddings",
"numpy.asarray",
"generic.data_provider.nlp_utils.padder_4d",
"numpy.array",
"numpy.zeros",
"collections.defaultdict",
"generic.data_provider.nlp_utils.padder",
"generic.data_prov... | [((698, 733), 'numpy.array', 'np.array', (['[1, 0, 0]'], {'dtype': 'np.int32'}), '([1, 0, 0], dtype=np.int32)\n', (706, 733), True, 'import numpy as np\n'), ((748, 783), 'numpy.array', 'np.array', (['[0, 1, 0]'], {'dtype': 'np.int32'}), '([0, 1, 0], dtype=np.int32)\n', (756, 783), True, 'import numpy as np\n'), ((799, 834), 'numpy.array', 'np.array', (['[0, 0, 1]'], {'dtype': 'np.int32'}), '([0, 0, 1], dtype=np.int32)\n', (807, 834), True, 'import numpy as np\n'), ((3205, 3234), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3228, 3234), False, 'import collections\n'), ((1843, 1970), 'generic.data_provider.nlp_utils.Embeddings', 'Embeddings', ([], {'file_name': "['all_question_game.txt']", 'embedding_type': "config['model']['question']['embedding_type']", 'emb_dim': '(100)'}), "(file_name=['all_question_game.txt'], embedding_type=config[\n 'model']['question']['embedding_type'], emb_dim=100)\n", (1853, 1970), False, 'from generic.data_provider.nlp_utils import Embeddings, get_embeddings\n'), ((12297, 12385), 'generic.data_provider.nlp_utils.padder', 'padder', (["batch['question_pos']"], {'padding_symbol': 'self.tokenizer_question.padding_token'}), "(batch['question_pos'], padding_symbol=self.tokenizer_question.\n padding_token)\n", (12303, 12385), False, 'from generic.data_provider.nlp_utils import padder, padder_3d, padder_4d\n'), ((12549, 12577), 'generic.data_provider.nlp_utils.padder', 'padder', (["batch['description']"], {}), "(batch['description'])\n", (12555, 12577), False, 'from generic.data_provider.nlp_utils import padder, padder_3d, padder_4d\n'), ((12786, 12846), 'generic.data_provider.nlp_utils.padder_3d', 'padder_3d', (["batch['embedding_vector_ques']"], {'max_seq_length': '(12)'}), "(batch['embedding_vector_ques'], max_seq_length=12)\n", (12795, 12846), False, 'from generic.data_provider.nlp_utils import padder, padder_3d, padder_4d\n'), ((13073, 13138), 
'generic.data_provider.nlp_utils.padder_4d', 'padder_4d', (["batch['embedding_vector_ques_hist']"], {'max_seq_length': '(14)'}), "(batch['embedding_vector_ques_hist'], max_seq_length=14)\n", (13082, 13138), False, 'from generic.data_provider.nlp_utils import padder, padder_3d, padder_4d\n'), ((13175, 13197), 'numpy.asarray', 'np.asarray', (['batch_hist'], {}), '(batch_hist)\n', (13185, 13197), True, 'import numpy as np\n'), ((13239, 13265), 'numpy.asarray', 'np.asarray', (['size_sentences'], {}), '(size_sentences)\n', (13249, 13265), True, 'import numpy as np\n'), ((13779, 13824), 'generic.data_provider.nlp_utils.padder_3d', 'padder_3d', (["batch['embedding_vector_ques_pos']"], {}), "(batch['embedding_vector_ques_pos'])\n", (13788, 13824), False, 'from generic.data_provider.nlp_utils import padder, padder_3d, padder_4d\n'), ((13966, 14006), 'generic.data_provider.nlp_utils.padder_3d', 'padder_3d', (["batch['embedding_vector_des']"], {}), "(batch['embedding_vector_des'])\n", (13975, 14006), False, 'from generic.data_provider.nlp_utils import padder, padder_3d, padder_4d\n'), ((14118, 14162), 'generic.data_provider.nlp_utils.padder_3d', 'padder_3d', (["batch['embedding_vector_des_pos']"], {}), "(batch['embedding_vector_des_pos'])\n", (14127, 14162), False, 'from generic.data_provider.nlp_utils import padder, padder_3d, padder_4d\n'), ((2072, 2197), 'generic.data_provider.nlp_utils.Embeddings', 'Embeddings', ([], {'file_name': "['ted_en-20160408.zip']", 'embedding_type': "config['model']['question']['embedding_type']", 'emb_dim': '(100)'}), "(file_name=['ted_en-20160408.zip'], embedding_type=config['model'\n ]['question']['embedding_type'], emb_dim=100)\n", (2082, 2197), False, 'from generic.data_provider.nlp_utils import Embeddings, get_embeddings\n'), ((3660, 3672), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (3668, 3672), True, 'import numpy as np\n'), ((10208, 10237), 'numpy.zeros', 'np.zeros', ([], {'shape': '(90)', 'dtype': 'int'}), '(shape=90, 
dtype=int)\n', (10216, 10237), True, 'import numpy as np\n'), ((10729, 10790), 'generic.data_provider.image_preprocessors.get_spatial_feat', 'get_spatial_feat', (['game.object.bbox', 'image.width', 'image.height'], {}), '(game.object.bbox, image.width, image.height)\n', (10745, 10790), False, 'from generic.data_provider.image_preprocessors import get_spatial_feat, resize_image\n'), ((2299, 2448), 'generic.data_provider.nlp_utils.Embeddings', 'Embeddings', ([], {'file_name': "['ted_en-20160408.zip', 'all_question_game.txt']", 'embedding_type': "config['model']['question']['embedding_type']", 'emb_dim': '(100)'}), "(file_name=['ted_en-20160408.zip', 'all_question_game.txt'],\n embedding_type=config['model']['question']['embedding_type'], emb_dim=100)\n", (2309, 2448), False, 'from generic.data_provider.nlp_utils import Embeddings, get_embeddings\n'), ((4382, 4685), 'generic.data_provider.nlp_utils.get_embeddings', 'get_embeddings', (['words'], {'pos': "self.config['model']['question']['pos']", 'lemme': "self.config['model']['question']['lemme']", 'model_wordd': 'self.model_wordd', 'model_worddl': 'self.model_worddl', 'model_word': 'self.model_word', 'model_wordl': 'self.model_wordl', 'model_posd': 'self.model_posd', 'model_pos': 'self.model_pos'}), "(words, pos=self.config['model']['question']['pos'], lemme=\n self.config['model']['question']['lemme'], model_wordd=self.model_wordd,\n model_worddl=self.model_worddl, model_word=self.model_word, model_wordl\n =self.model_wordl, model_posd=self.model_posd, model_pos=self.model_pos)\n", (4396, 4685), False, 'from generic.data_provider.nlp_utils import Embeddings, get_embeddings\n'), ((6565, 6577), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (6573, 6577), True, 'import numpy as np\n'), ((7349, 7662), 'generic.data_provider.nlp_utils.get_embeddings', 'get_embeddings', (['description'], {'pos': "self.config['model']['question']['pos']", 'lemme': "self.config['model']['question']['lemme']", 'model_wordd': 
'self.model_wordd', 'model_worddl': 'self.model_worddl', 'model_word': 'self.model_word', 'model_wordl': 'self.model_wordl', 'model_posd': 'self.model_posd', 'model_pos': 'self.model_pos'}), "(description, pos=self.config['model']['question']['pos'],\n lemme=self.config['model']['question']['lemme'], model_wordd=self.\n model_wordd, model_worddl=self.model_worddl, model_word=self.model_word,\n model_wordl=self.model_wordl, model_posd=self.model_posd, model_pos=\n self.model_pos)\n", (7363, 7662), False, 'from generic.data_provider.nlp_utils import Embeddings, get_embeddings\n'), ((8897, 8922), 'numpy.zeros', 'np.zeros', (['(batch_size, 3)'], {}), '((batch_size, 3))\n', (8905, 8922), True, 'import numpy as np\n'), ((10664, 10702), 'numpy.zeros', 'np.zeros', (['(batch_size, 8)'], {'dtype': 'float'}), '((batch_size, 8), dtype=float)\n', (10672, 10702), True, 'import numpy as np\n'), ((11771, 11792), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (11786, 11792), False, 'from PIL import Image\n'), ((8075, 8388), 'generic.data_provider.nlp_utils.get_embeddings', 'get_embeddings', (['description'], {'pos': "self.config['model']['question']['pos']", 'lemme': "self.config['model']['question']['lemme']", 'model_wordd': 'self.model_wordd', 'model_worddl': 'self.model_worddl', 'model_word': 'self.model_word', 'model_wordl': 'self.model_wordl', 'model_posd': 'self.model_posd', 'model_pos': 'self.model_pos'}), "(description, pos=self.config['model']['question']['pos'],\n lemme=self.config['model']['question']['lemme'], model_wordd=self.\n model_wordd, model_worddl=self.model_worddl, model_word=self.model_word,\n model_wordl=self.model_wordl, model_posd=self.model_posd, model_pos=\n self.model_pos)\n", (8089, 8388), False, 'from generic.data_provider.nlp_utils import Embeddings, get_embeddings\n'), ((9447, 9474), 'numpy.zeros', 'np.zeros', (['(batch_size, 100)'], {}), '((batch_size, 100))\n', (9455, 9474), True, 'import numpy as np\n'), ((9544, 9564), 
'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (9552, 9564), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 14:10:46 2019
@author: gui
"""
import sys, pygame
import numpy as np
from pygame.locals import *
import pygame.freetype
# Window geometry: the 4x4 board is drawn inside a 600x600 window with
# 100px-per-tile cells (the board itself starts at a 100px offset; see the
# tile-drawing code in the main loop).
w = 600
h = 600
scale = 100
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
# Current score and best score seen this session (updated in the main loop).
score = max_score = 0
pygame.init()
screen = pygame.display.set_mode((w,h))
pygame.display.set_caption('2048 - Genérico')
#pygame.freetype.init()
# Fonts: one large face for splash screens, progressively smaller faces for
# tile values with more digits, and the smallest for the score readout.
GAME_FONT_BIG = pygame.freetype.SysFont('arial', 80)
GAME_FONT = pygame.freetype.SysFont('arial', 48)
GAME_FONT_SMALL = pygame.freetype.SysFont('arial', 36)
GAME_FONT_SMALLEST = pygame.freetype.SysFont('arial', 24)
# 4x4 board; 0 means an empty cell, otherwise the tile value (2, 4, 8, ...).
grid = np.zeros((4, 4))
def end_game(grid):
    """Show the game-over screen and wait for the player's decision.

    Pressing 'y' resets the global score and returns a fresh empty board;
    pressing 'n' quits the program; any other event re-displays the prompt.
    """
    global score
    while True:
        GAME_FONT_BIG.render_to(screen, (110, 150), 'Game over', BLUE)
        GAME_FONT_BIG.render_to(screen, (18, 380), 'Continue? (y / n)', BLUE)
        pygame.display.update()
        pygame.event.clear()
        event = pygame.event.wait()
        if event.type == KEYDOWN:
            if event.key == K_y:
                score = 0
                return np.zeros((4, 4))
            if event.key == K_n:
                sys.exit()
def win_game(grid):
    """Show the win screen (a 2048 tile exists) and wait for a decision.

    Pressing 'y' resumes play with the current board; pressing 'n' quits.

    NOTE(review): on 'y' the 2048 tile stays on the board, so the win check
    in make_new_rect will re-trigger this screen after the next move —
    confirm whether a "keep playing" flag is wanted.
    """
    GAME_FONT_BIG.render_to(screen, (110, 150), 'You win!!!', BLUE)
    GAME_FONT_BIG.render_to(screen, (18, 380), 'Continue? (y / n)', BLUE)
    pygame.display.update()
    pygame.event.clear()
    event = pygame.event.wait()
    if event.type == KEYDOWN and event.key == K_y:
        return grid
    elif event.type == KEYDOWN and event.key == K_n:
        sys.exit()
    else:
        # Bug fix: on any other event, re-show the *win* prompt. The original
        # recursed into end_game here (copy-paste), rendering the game-over
        # screen instead of the win screen.
        return win_game(grid)
def make_new_rect(grid):
    """Spawn a new tile (2 with p=0.8, 4 with p=0.2) on a random empty cell.

    Before spawning, the two terminal conditions are checked: a full board
    hands control to end_game, a single 2048 tile hands control to win_game;
    in those cases that function's return value is propagated instead.

    Args:
        grid (np.ndarray): the board; mutated in place.

    Returns:
        np.ndarray: the (mutated) grid, or whatever end_game/win_game return.
    """
    # grid.size generalizes the original hard-coded 16 (4x4 board).
    if np.count_nonzero(grid) == grid.size:
        return end_game(grid)
    if sum(grid[grid == 2048]) == 2048:
        return win_game(grid)
    # Pick directly among the empty cells instead of rejection-sampling
    # random positions recursively: the original retried (and recursed)
    # an unbounded number of times on a nearly full board.
    empties = np.argwhere(grid == 0)
    row, col = empties[np.random.randint(len(empties))]
    grid[row, col] = np.random.choice([2, 4], p=[.8, .2])
    return grid
def move_up(grid):
    """Slide and merge all tiles upward, then spawn a new tile.

    For each column, tiles are bubbled toward row 0: a tile moves up while
    the cell above is empty, and merges (values add) when the cell above
    holds an equal value. update_score is called on every shift but only
    credits points when the two values are equal (a real merge).

    NOTE(review): because merging is re-applied while bubbling, a single
    move can chain-merge (e.g. a column [2, 2, 4] collapses to [8]) —
    confirm whether classic one-merge-per-move 2048 semantics are intended.

    Returns:
        The result of make_new_rect(grid): the mutated grid with a new tile,
        or the end/win screen outcome.
    """
    cols = grid.shape[1]
    for col in range(cols):
        rows = grid.shape[0]
        for row in range(1, rows):
            for inc in range(row):
                # use inc to roll the lines
                if grid[row - inc, col] == grid[row - inc - 1, col] or grid[row - inc - 1, col] == 0:
                    update_score(grid[row - inc, col], grid[row - inc - 1, col])
                    grid[row - inc - 1, col] += grid[row - inc, col]
                    grid[row - inc, col] = 0
    return make_new_rect(grid)
def move_down(grid):
    """Slide and merge all tiles downward, then spawn a new tile."""
    n_rows, n_cols = grid.shape
    for c in range(n_cols):
        # Walk rows bottom-up (excluding the last) and bubble each tile
        # toward the bottom edge, merging equal neighbors along the way.
        for r in range(n_rows - 2, -1, -1):
            for step in range(n_rows - r - 1):
                here = grid[r + step, c]
                below = grid[r + step + 1, c]
                if below == 0 or here == below:
                    update_score(here, below)
                    grid[r + step + 1, c] += here
                    grid[r + step, c] = 0
    return make_new_rect(grid)
def move_right(grid):
    """Slide and merge all tiles to the right, then spawn a new tile.

    Mirrors move_down: each row is scanned right-to-left and tiles bubble
    toward the last column, merging when the neighbor holds an equal value.

    Returns:
        The result of make_new_rect(grid).
    """
    rows = grid.shape[0]
    for row in range(rows):
        cols = grid.shape[1]
        for col in range(cols - 2, -1, -1):
            # range(cols - col - 1) is never empty here (col <= cols - 2), so
            # the original `if len(incs) == 0: incs = [0]` fallback was dead
            # code; a plain range also matches the sibling move functions.
            for inc in range(cols - col - 1):
                # use inc to roll the lines
                if grid[row, col + inc] == grid[row, col + inc + 1] or grid[row, col + inc + 1] == 0:
                    update_score(grid[row, col + inc], grid[row, col + inc + 1])
                    grid[row, col + inc + 1] += grid[row, col + inc]
                    grid[row, col + inc] = 0
    return make_new_rect(grid)
def move_left(grid):
    """Slide and merge all tiles to the left, then spawn a new tile."""
    n_rows, n_cols = grid.shape
    for r in range(n_rows):
        # Bubble every tile toward column 0, merging equal neighbors.
        for c in range(1, n_cols):
            for shift in range(c):
                cur_val = grid[r, c - shift]
                left_val = grid[r, c - shift - 1]
                if left_val == 0 or cur_val == left_val:
                    update_score(cur_val, left_val)
                    grid[r, c - shift - 1] += cur_val
                    grid[r, c - shift] = 0
    return make_new_rect(grid)
def update_score(next, previous):
    """Credit the global score when two equal tiles merge.

    Called for every tile shift; only a genuine merge (both values equal)
    changes the score. The parameter name `next` shadows the builtin but is
    kept for interface compatibility with existing callers.
    """
    global score
    if next == previous:
        score += int(next + previous)
clock = pygame.time.Clock()
# Surface((width, height), flags=0, depth=0, masks=None) -> Surface
rect_skin = pygame.Surface ((scale, scale))
rect_skin.fill(WHITE)
# Splash screen; the .format(score) call is a no-op since the string has no
# placeholder.
GAME_FONT_BIG.render_to(screen, (55, 250), 'Press any key'.format(score), BLUE)
grid = make_new_rect(grid)
pygame.display.update()
pygame.event.clear()
event = pygame.event.wait()
# Main event/render loop: redraw the board, then block until the next event
# and dispatch moves on arrow-key release.
while True:
    max_score = max(max_score, score)
    clock.tick(50)
    screen.fill((0,0,0))
    GAME_FONT_SMALLEST.render_to(screen, (380, 20), 'Score: {}'.format(score), WHITE)
    GAME_FONT_SMALLEST.render_to(screen, (380, 50), 'Max Score: {}'.format(max_score), WHITE)
    # Draw every non-empty tile; a smaller font is chosen as the number of
    # digits grows so the value fits inside the 100px cell.
    for n_row, row in enumerate(grid):
        for n_col, value in enumerate(row):
            if grid[n_row, n_col] != 0:
                x = n_col * scale + scale
                y = n_row * scale + scale
                screen.blit(rect_skin, (x, y))
                if value < 99:
                    GAME_FONT.render_to(screen, (x + scale // 3, y + scale // 3),
                                        str(int(value)),
                                        (0, 0, 0))
                elif value < 1999:
                    GAME_FONT_SMALL.render_to(screen, (x + scale // 4, y + scale // 2.5),
                                              str(int(value)),
                                              (0, 0, 0))
                else:
                    GAME_FONT_SMALL.render_to(screen, (x + scale // 6, y + scale // 2.5),
                                              str(int(value)),
                                              (0, 0, 0))
    #line(Surface, color, start_pos, end_pos, width=1) -> Rect
    # Board grid lines every 100px across the 400x400 play area.
    for line in range(100, 501, 100):
        pygame.draw.line(screen, (0, 200, 0), (100, line), (500, line), (2))
        pygame.draw.line(screen, (0, 200, 0), (line, 100), (line, 500), (2))
    pygame.display.update()
    pygame.event.clear()
    # Block until the next event (no busy-waiting).
    event = pygame.event.wait()
    if event.type == pygame.QUIT:
        sys.exit()
    elif event.type == KEYDOWN and event.key == K_ESCAPE:
        sys.exit()
    elif event.type == KEYUP and event.key == K_DOWN:
        grid = move_down(grid)
    elif event.type == KEYUP and event.key == K_UP:
        grid = move_up(grid)
    elif event.type == KEYUP and event.key == K_RIGHT:
        grid = move_right(grid)
    elif event.type == KEYUP and event.key == K_LEFT:
        grid = move_left(grid)
| [
"pygame.event.clear",
"pygame.init",
"pygame.draw.line",
"pygame.Surface",
"sys.exit",
"pygame.display.set_mode",
"numpy.random.choice",
"pygame.time.Clock",
"numpy.count_nonzero",
"pygame.event.wait",
"numpy.zeros",
"numpy.random.randint",
"pygame.display.set_caption",
"pygame.freetype.Sy... | [((290, 303), 'pygame.init', 'pygame.init', ([], {}), '()\n', (301, 303), False, 'import sys, pygame\n'), ((313, 344), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(w, h)'], {}), '((w, h))\n', (336, 344), False, 'import sys, pygame\n'), ((344, 389), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""2048 - Genérico"""'], {}), "('2048 - Genérico')\n", (370, 389), False, 'import sys, pygame\n'), ((431, 467), 'pygame.freetype.SysFont', 'pygame.freetype.SysFont', (['"""arial"""', '(80)'], {}), "('arial', 80)\n", (454, 467), False, 'import sys, pygame\n'), ((480, 516), 'pygame.freetype.SysFont', 'pygame.freetype.SysFont', (['"""arial"""', '(48)'], {}), "('arial', 48)\n", (503, 516), False, 'import sys, pygame\n'), ((535, 571), 'pygame.freetype.SysFont', 'pygame.freetype.SysFont', (['"""arial"""', '(36)'], {}), "('arial', 36)\n", (558, 571), False, 'import sys, pygame\n'), ((593, 629), 'pygame.freetype.SysFont', 'pygame.freetype.SysFont', (['"""arial"""', '(24)'], {}), "('arial', 24)\n", (616, 629), False, 'import sys, pygame\n'), ((638, 654), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (646, 654), True, 'import numpy as np\n'), ((4461, 4480), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (4478, 4480), False, 'import sys, pygame\n'), ((4562, 4592), 'pygame.Surface', 'pygame.Surface', (['(scale, scale)'], {}), '((scale, scale))\n', (4576, 4592), False, 'import sys, pygame\n'), ((4724, 4747), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4745, 4747), False, 'import sys, pygame\n'), ((4749, 4769), 'pygame.event.clear', 'pygame.event.clear', ([], {}), '()\n', (4767, 4769), False, 'import sys, pygame\n'), ((4778, 4797), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (4795, 4797), False, 'import sys, pygame\n'), ((826, 849), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (847, 849), False, 'import sys, pygame\n'), ((859, 879), 
'pygame.event.clear', 'pygame.event.clear', ([], {}), '()\n', (877, 879), False, 'import sys, pygame\n'), ((892, 911), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (909, 911), False, 'import sys, pygame\n'), ((1338, 1361), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1359, 1361), False, 'import sys, pygame\n'), ((1371, 1391), 'pygame.event.clear', 'pygame.event.clear', ([], {}), '()\n', (1389, 1391), False, 'import sys, pygame\n'), ((1404, 1423), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (1421, 1423), False, 'import sys, pygame\n'), ((6331, 6354), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6352, 6354), False, 'import sys, pygame\n'), ((6359, 6379), 'pygame.event.clear', 'pygame.event.clear', ([], {}), '()\n', (6377, 6379), False, 'import sys, pygame\n'), ((6392, 6411), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (6409, 6411), False, 'import sys, pygame\n'), ((1017, 1033), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (1025, 1033), True, 'import numpy as np\n'), ((1644, 1667), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (1661, 1667), True, 'import numpy as np\n'), ((1680, 1703), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (1697, 1703), True, 'import numpy as np\n'), ((1712, 1734), 'numpy.count_nonzero', 'np.count_nonzero', (['grid'], {}), '(grid)\n', (1728, 1734), True, 'import numpy as np\n'), ((6175, 6241), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 200, 0)', '(100, line)', '(500, line)', '(2)'], {}), '(screen, (0, 200, 0), (100, line), (500, line), 2)\n', (6191, 6241), False, 'import sys, pygame\n'), ((6252, 6318), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 200, 0)', '(line, 100)', '(line, 500)', '(2)'], {}), '(screen, (0, 200, 0), (line, 100), (line, 500), 2)\n', (6268, 6318), False, 'import sys, pygame\n'), ((6454, 6464), 'sys.exit', 'sys.exit', ([], {}), 
'()\n', (6462, 6464), False, 'import sys, pygame\n'), ((1115, 1125), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1123, 1125), False, 'import sys, pygame\n'), ((1556, 1566), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1564, 1566), False, 'import sys, pygame\n'), ((1887, 1925), 'numpy.random.choice', 'np.random.choice', (['[2, 4]'], {'p': '[0.8, 0.2]'}), '([2, 4], p=[0.8, 0.2])\n', (1903, 1925), True, 'import numpy as np\n'), ((3269, 3294), 'numpy.arange', 'np.arange', (['(cols - col - 1)'], {}), '(cols - col - 1)\n', (3278, 3294), True, 'import numpy as np\n'), ((6531, 6541), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6539, 6541), False, 'import sys, pygame\n')] |
"""
DMRG for XXZ model.
"""
from typing import Type, Text
import tensornetwork as tn
import numpy as np
# Route all TensorNetwork operations in this module through the PyTorch backend.
tn.set_default_backend('pytorch')
def initialize_spin_mps(N: int, D: int, dtype: Type[np.number]):
  """Build a random finite MPS of N spin-1/2 sites with bond dimension D."""
  phys_dims = [2] * N
  bond_dims = [D] * (N - 1)
  return tn.FiniteMPS.random(phys_dims, bond_dims, dtype=dtype)
def initialize_XXZ_mpo(Jz: np.ndarray, Jxy: np.ndarray, Bz: np.ndarray, dtype: Type[np.number]):
  """Build the finite XXZ Hamiltonian MPO for the given couplings and fields."""
  mpo = tn.FiniteXXZ(Jz, Jxy, Bz, dtype=dtype)
  return mpo
def run_twosite_dmrg(N: int, D: int, dtype: Type[np.number], Jz: np.ndarray, Jxy: np.ndarray, Bz: np.ndarray, num_sweeps: int, init_bond_dim: int = 32):
  """Run two-site DMRG for the XXZ chain.

  Args:
    N: number of lattice sites.
    D: maximum bond dimension allowed during the sweeps.
    dtype: numpy dtype of the tensors.
    Jz, Jxy: coupling arrays of length N - 1.
    Bz: on-site field array of length N.
    num_sweeps: number of optimization sweeps.
    init_bond_dim: bond dimension of the random initial MPS. Defaults to 32,
      which was previously hard-coded; now parameterized for flexibility.

  Returns:
    Tuple of (value returned by the two-site optimizer, optimized MPS).
  """
  mps = initialize_spin_mps(N, init_bond_dim, dtype)
  mpo = initialize_XXZ_mpo(Jz, Jxy, Bz, dtype)
  dmrg = tn.FiniteDMRG(mps, mpo)
  result = dmrg.run_two_site(
      max_bond_dim=D, num_sweeps=num_sweeps, num_krylov_vecs=10, verbose=1)
  return result, mps
if __name__ == '__main__':
  num_sites, bond_dim, datatype = 20, 16, np.float64
  jz = np.ones(num_sites - 1)
  jxy = np.ones(num_sites - 1)
  bz = np.zeros(num_sites)
  n_sweeps = 5
  print('\nrunning DMRG for XXZ model...')
  # Pass couplings by keyword: the original positional call swapped Jz and
  # Jxy relative to the signature (harmless only because both arrays happen
  # to be identical here).
  energy, mps = run_twosite_dmrg(
      num_sites, bond_dim, datatype, Jz=jz, Jxy=jxy, Bz=bz,
      num_sweeps=n_sweeps)
  print(f'\nFinished. Energy: {energy}')
  # print(mps.tensors)
| [
"numpy.ones",
"tensornetwork.set_default_backend",
"numpy.zeros",
"tensornetwork.FiniteDMRG",
"tensornetwork.FiniteXXZ",
"tensornetwork.FiniteMPS.random"
] | [((106, 139), 'tensornetwork.set_default_backend', 'tn.set_default_backend', (['"""pytorch"""'], {}), "('pytorch')\n", (128, 139), True, 'import tensornetwork as tn\n'), ((214, 270), 'tensornetwork.FiniteMPS.random', 'tn.FiniteMPS.random', (['([2] * N)', '([D] * (N - 1))'], {'dtype': 'dtype'}), '([2] * N, [D] * (N - 1), dtype=dtype)\n', (233, 270), True, 'import tensornetwork as tn\n'), ((378, 416), 'tensornetwork.FiniteXXZ', 'tn.FiniteXXZ', (['Jz', 'Jxy', 'Bz'], {'dtype': 'dtype'}), '(Jz, Jxy, Bz, dtype=dtype)\n', (390, 416), True, 'import tensornetwork as tn\n'), ((641, 664), 'tensornetwork.FiniteDMRG', 'tn.FiniteDMRG', (['mps', 'mpo'], {}), '(mps, mpo)\n', (654, 664), True, 'import tensornetwork as tn\n'), ((877, 899), 'numpy.ones', 'np.ones', (['(num_sites - 1)'], {}), '(num_sites - 1)\n', (884, 899), True, 'import numpy as np\n'), ((907, 929), 'numpy.ones', 'np.ones', (['(num_sites - 1)'], {}), '(num_sites - 1)\n', (914, 929), True, 'import numpy as np\n'), ((936, 955), 'numpy.zeros', 'np.zeros', (['num_sites'], {}), '(num_sites)\n', (944, 955), True, 'import numpy as np\n')] |
import os
import sys
from datetime import datetime
from shutil import copyfile
import glob
import copy
import yaml
import torch
import networkx as nx
import numpy as np
from models.fourier_nn import FourierNet
from problems.dist_online_dense_problem import DistOnlineDensityProblem
from optimizers.dinno import DiNNO
from optimizers.dsgt import DSGT
from optimizers.dsgd import DSGD
from utils import graph_generation
from floorplans.lidar.lidar import (
Lidar2D,
OnlineTrajectoryLidarDataset,
RandomPoseLidarDataset,
)
# Make new tensors default to double precision for the whole program.
# NOTE(review): torch.set_default_tensor_type is deprecated in recent PyTorch;
# torch.set_default_dtype(torch.float64) is the modern equivalent — confirm
# the installed torch version before switching.
torch.set_default_tensor_type(torch.DoubleTensor)
def train_solo(model, loss, train_set, val_set, device, conf):
    """Performs normal training without any communication.

    Args:
        model (torch model): the model to train (moved onto ``device``).
        loss (torch loss func): the loss function to train with.
        train_set (torch dataset): training dataset yielding (pose, density).
        val_set (torch dataset): validation dataset; must expose
            ``val_set.lidar.xs`` / ``val_set.lidar.ys`` for mesh evaluation.
        device (torch device): device to compute on (cpu or gpu)
        conf (dict): configuration dictionary with keys ``train_batch_size``,
            ``val_batch_size``, ``optimizer`` ("adam" | "sgd" | "adamw"),
            ``lr`` and ``epochs`` (see yaml descriptions).

    Returns:
        dict: validation loss (summed over validation batches) after the
        final epoch, the trained model's density outputs evaluated on a
        subsampled mesh grid over the lidar map, and that mesh grid.

    Raises:
        NameError: if ``conf["optimizer"]`` is not a supported name.
    """
    trainloader = torch.utils.data.DataLoader(
        train_set, conf["train_batch_size"], shuffle=True
    )
    valloader = torch.utils.data.DataLoader(
        val_set, conf["val_batch_size"], shuffle=True
    )
    model = model.to(device)
    if conf["optimizer"] == "adam":
        opt = torch.optim.Adam(model.parameters(), lr=conf["lr"])
    elif conf["optimizer"] == "sgd":
        opt = torch.optim.SGD(model.parameters(), lr=conf["lr"])
    elif conf["optimizer"] == "adamw":
        opt = torch.optim.AdamW(model.parameters(), lr=conf["lr"])
    else:
        raise NameError("Unknown individual optimizer.")
    for _ in range(conf["epochs"]):
        for batch in trainloader:
            opt.zero_grad()
            # Call the module itself (not model.forward) so hooks are honored.
            pred = model(batch[0].to(device))
            batch_loss = loss(pred.squeeze(), batch[1].to(device))
            batch_loss.backward()
            opt.step()
    with torch.no_grad():
        vloss = 0.0
        for batch in valloader:
            pred = model(batch[0].to(device))
            vloss += loss(pred.squeeze(), batch[1].to(device)).detach()
        # Evaluate the trained density field on a coarse mesh (every 8th
        # lidar grid point) for later visualization.
        X, Y = np.meshgrid(val_set.lidar.xs, val_set.lidar.ys)
        xlocs = X[::8, ::8].reshape(-1, 1)
        ylocs = Y[::8, ::8].reshape(-1, 1)
        mesh_poses = np.hstack((xlocs, ylocs))
        mesh_inputs = torch.Tensor(mesh_poses)
        mesh_inputs = mesh_inputs.to(device)
        mesh_dense = model(mesh_inputs)
    return {
        "validation_loss": vloss,
        "mesh_grid_density": mesh_dense,
        "mesh_grid": mesh_inputs,
    }
def experiment(yaml_pth):
    """Run the full distributed density-learning experiment from a YAML config.

    Pipeline: load the config, seed torch, create a timestamped output
    directory, build the lidar simulator and one trajectory dataset per
    waypoint file, optionally train each node's model individually (the
    no-communication baseline), then run every configured distributed
    optimization problem (DiNNO / DSGT / DSGD) and write out metrics.

    Args:
        yaml_pth (str): path to the experiment configuration YAML file.
    """
    # load the config yaml
    with open(yaml_pth) as f:
        conf_dict = yaml.safe_load(f)
    # Separate configuration groups
    exp_conf = conf_dict["experiment"]
    # Set seed for reproducibility
    torch.manual_seed(exp_conf["seed"])
    # Create the output directory (one timestamped subdir per run)
    output_metadir = exp_conf["output_metadir"]
    if not os.path.exists(output_metadir):
        os.mkdir(output_metadir)
    time_now = datetime.now().strftime("%Y-%m-%d_%H-%M")
    output_dir = os.path.join(
        output_metadir, time_now + "_" + exp_conf["name"]
    )
    if exp_conf["writeout"]:
        os.mkdir(output_dir)
        # Save a copy of the conf to the output directory
        copyfile(yaml_pth, os.path.join(output_dir, time_now + ".yaml"))
    exp_conf["output_dir"] = output_dir  # probably bad practice
    # Load the datasets, and lidar object
    data_conf = exp_conf["data"]
    print("Loading the data ...")
    img_path = os.path.join(data_conf["data_dir"], "floor_img.png")
    lidar = Lidar2D(
        img_path,
        data_conf["num_beams"],
        data_conf["beam_length"],
        data_conf["beam_samps"],
        data_conf["samp_distribution_factor"],
        data_conf["collision_samps"],
        data_conf["fine_samps"],
        border_width=data_conf["border_width"],
    )
    data_dir = data_conf["data_dir"]
    # One node (robot) per waypoint file found on disk.
    waypoint_pths = glob.glob(
        os.path.join(data_dir, data_conf["waypoint_subdir"], "*.npy")
    )
    N = len(waypoint_pths)
    # Check that N is consistent with the number of
    # trajectories that are avaliable.
    # NOTE(review): since N is set to len(waypoint_pths) just above, this
    # condition can never be true (dead code) — presumably N was meant to
    # come from the config; confirm the intended node count source.
    if N > len(waypoint_pths):
        error_str = "Requested more nodes than there are waypoint files."
        error_str += "Requested {} nodes, and found {} waypoint files.".format(
            N, len(waypoint_pths)
        )
        raise NameError(error_str)
    train_subsets = []
    for i in range(N):
        waypoints = np.load(waypoint_pths[i])
        node_set = OnlineTrajectoryLidarDataset(
            lidar,
            waypoints,
            data_conf["spline_res"],
            data_conf["num_scans_in_window"],
            round_density=data_conf["round_density"],
        )
        train_subsets.append(node_set)
    # Print the dataset sizes
    for i in range(N):
        print()
        print("Node ", i, "train set size: ", len(train_subsets[i]))
        print(
            "Node",
            i,
            "hd ratio: {:.4f}".format(
                (
                    torch.sum(train_subsets[i].scans[:, 2] == 1.0)
                    / train_subsets[i].scans.shape[0]
                ).data.item()
            ),
        )
    # Generate the validation set (shared by all nodes)
    val_set = RandomPoseLidarDataset(
        lidar,
        data_conf["num_validation_scans"],
        round_density=data_conf["round_density"],
    )
    # Generate base model (deep-copied per node for individual training)
    model_conf = exp_conf["model"]
    base_model = FourierNet(model_conf["shape"], scale=model_conf["scale"])
    # Define base loss
    if exp_conf["loss"] == "BCE":
        base_loss = torch.nn.BCELoss()
    elif exp_conf["loss"] == "MSE":
        base_loss = torch.nn.MSELoss()
    elif exp_conf["loss"] == "L1":
        base_loss = torch.nn.L1Loss()
    else:
        raise NameError("Unknown loss function.")
    # Check for gpu and assign device
    if torch.cuda.is_available() and exp_conf["use_cuda"]:
        device = torch.device("cuda")
        print("Device is set to GPU")
    else:
        device = torch.device("cpu")
        print("Device is set to CPU")
    # Optional no-communication baseline: each node trains its own copy.
    solo_confs = exp_conf["individual_training"]
    solo_results = {}
    if solo_confs["train_solo"]:
        print("Performing individual training ...")
        for i in range(N):
            solo_results[i] = train_solo(
                copy.deepcopy(base_model),
                base_loss,
                train_subsets[i],
                val_set,
                device,
                solo_confs,
            )
            if solo_confs["verbose"]:
                print(
                    "Node {} - Validation loss = {:.4f}".format(
                        i, solo_results[i]["validation_loss"]
                    )
                )
        if exp_conf["writeout"]:
            torch.save(
                solo_results, os.path.join(output_dir, "solo_results.pt")
            )
    # Run each problem (one distributed optimization per config entry)
    prob_confs = conf_dict["problem_configs"]
    for prob_key in prob_confs:
        prob_conf = prob_confs[prob_key]
        opt_conf = prob_conf["optimizer_config"]
        prob = DistOnlineDensityProblem(
            base_model,
            base_loss,
            train_subsets,
            val_set,
            device,
            prob_conf,
        )
        # Select the distributed optimization algorithm.
        if opt_conf["alg_name"] == "dinno":
            dopt = DiNNO(prob, device, opt_conf)
        elif opt_conf["alg_name"] == "dsgt":
            dopt = DSGT(prob, device, opt_conf)
        elif opt_conf["alg_name"] == "dsgd":
            dopt = DSGD(prob, device, opt_conf)
        else:
            raise NameError("Unknown distributed opt algorithm.")
        print("-------------------------------------------------------")
        print("-------------------------------------------------------")
        print("Running problem: " + prob_conf["problem_name"])
        if opt_conf["profile"]:
            # Optionally wrap training in the torch profiler and dump a
            # TensorBoard trace next to the run's other outputs.
            with torch.profiler.profile(
                schedule=torch.profiler.schedule(
                    wait=1, warmup=1, active=3, repeat=3
                ),
                on_trace_ready=torch.profiler.tensorboard_trace_handler(
                    os.path.join(
                        output_dir, prob_conf["problem_name"] + "opt_profile"
                    )
                ),
                record_shapes=True,
                with_stack=True,
            ) as prof:
                dopt.train(profiler=prof)
        else:
            dopt.train()
        if exp_conf["writeout"]:
            prob.save_metrics(output_dir)
if __name__ == "__main__":
yaml_pth = sys.argv[1]
# Load the configuration file, and run the experiment
if os.path.exists(yaml_pth):
experiment(yaml_pth)
else:
raise NameError("YAML configuration file does not exist, exiting!")
| [
"optimizers.dinno.DiNNO",
"numpy.hstack",
"floorplans.lidar.lidar.RandomPoseLidarDataset",
"torch.nn.L1Loss",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.sum",
"copy.deepcopy",
"torch.profiler.schedule",
"os.path.exists",
"floorplans.lidar.lidar.OnlineTrajectoryLidarDataset",
"proble... | [((535, 584), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (564, 584), False, 'import torch\n'), ((1264, 1342), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set', "conf['train_batch_size']"], {'shuffle': '(True)'}), "(train_set, conf['train_batch_size'], shuffle=True)\n", (1291, 1342), False, 'import torch\n'), ((1373, 1447), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_set', "conf['val_batch_size']"], {'shuffle': '(True)'}), "(val_set, conf['val_batch_size'], shuffle=True)\n", (1400, 1447), False, 'import torch\n'), ((3037, 3072), 'torch.manual_seed', 'torch.manual_seed', (["exp_conf['seed']"], {}), "(exp_conf['seed'])\n", (3054, 3072), False, 'import torch\n'), ((3307, 3370), 'os.path.join', 'os.path.join', (['output_metadir', "(time_now + '_' + exp_conf['name'])"], {}), "(output_metadir, time_now + '_' + exp_conf['name'])\n", (3319, 3370), False, 'import os\n'), ((3765, 3817), 'os.path.join', 'os.path.join', (["data_conf['data_dir']", '"""floor_img.png"""'], {}), "(data_conf['data_dir'], 'floor_img.png')\n", (3777, 3817), False, 'import os\n'), ((3831, 4070), 'floorplans.lidar.lidar.Lidar2D', 'Lidar2D', (['img_path', "data_conf['num_beams']", "data_conf['beam_length']", "data_conf['beam_samps']", "data_conf['samp_distribution_factor']", "data_conf['collision_samps']", "data_conf['fine_samps']"], {'border_width': "data_conf['border_width']"}), "(img_path, data_conf['num_beams'], data_conf['beam_length'],\n data_conf['beam_samps'], data_conf['samp_distribution_factor'],\n data_conf['collision_samps'], data_conf['fine_samps'], border_width=\n data_conf['border_width'])\n", (3838, 4070), False, 'from floorplans.lidar.lidar import Lidar2D, OnlineTrajectoryLidarDataset, RandomPoseLidarDataset\n'), ((5498, 5608), 'floorplans.lidar.lidar.RandomPoseLidarDataset', 'RandomPoseLidarDataset', (['lidar', 
"data_conf['num_validation_scans']"], {'round_density': "data_conf['round_density']"}), "(lidar, data_conf['num_validation_scans'],\n round_density=data_conf['round_density'])\n", (5520, 5608), False, 'from floorplans.lidar.lidar import Lidar2D, OnlineTrajectoryLidarDataset, RandomPoseLidarDataset\n'), ((5715, 5773), 'models.fourier_nn.FourierNet', 'FourierNet', (["model_conf['shape']"], {'scale': "model_conf['scale']"}), "(model_conf['shape'], scale=model_conf['scale'])\n", (5725, 5773), False, 'from models.fourier_nn import FourierNet\n'), ((8883, 8907), 'os.path.exists', 'os.path.exists', (['yaml_pth'], {}), '(yaml_pth)\n', (8897, 8907), False, 'import os\n'), ((2135, 2150), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2148, 2150), False, 'import torch\n'), ((2347, 2394), 'numpy.meshgrid', 'np.meshgrid', (['val_set.lidar.xs', 'val_set.lidar.ys'], {}), '(val_set.lidar.xs, val_set.lidar.ys)\n', (2358, 2394), True, 'import numpy as np\n'), ((2502, 2527), 'numpy.hstack', 'np.hstack', (['(xlocs, ylocs)'], {}), '((xlocs, ylocs))\n', (2511, 2527), True, 'import numpy as np\n'), ((2550, 2574), 'torch.Tensor', 'torch.Tensor', (['mesh_poses'], {}), '(mesh_poses)\n', (2562, 2574), False, 'import torch\n'), ((2903, 2920), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2917, 2920), False, 'import yaml\n'), ((3167, 3197), 'os.path.exists', 'os.path.exists', (['output_metadir'], {}), '(output_metadir)\n', (3181, 3197), False, 'import os\n'), ((3207, 3231), 'os.mkdir', 'os.mkdir', (['output_metadir'], {}), '(output_metadir)\n', (3215, 3231), False, 'import os\n'), ((3423, 3443), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (3431, 3443), False, 'import os\n'), ((4206, 4267), 'os.path.join', 'os.path.join', (['data_dir', "data_conf['waypoint_subdir']", '"""*.npy"""'], {}), "(data_dir, data_conf['waypoint_subdir'], '*.npy')\n", (4218, 4267), False, 'import os\n'), ((4724, 4749), 'numpy.load', 'np.load', (['waypoint_pths[i]'], {}), 
'(waypoint_pths[i])\n', (4731, 4749), True, 'import numpy as np\n'), ((4769, 4920), 'floorplans.lidar.lidar.OnlineTrajectoryLidarDataset', 'OnlineTrajectoryLidarDataset', (['lidar', 'waypoints', "data_conf['spline_res']", "data_conf['num_scans_in_window']"], {'round_density': "data_conf['round_density']"}), "(lidar, waypoints, data_conf['spline_res'],\n data_conf['num_scans_in_window'], round_density=data_conf['round_density'])\n", (4797, 4920), False, 'from floorplans.lidar.lidar import Lidar2D, OnlineTrajectoryLidarDataset, RandomPoseLidarDataset\n'), ((5852, 5870), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (5868, 5870), False, 'import torch\n'), ((6125, 6150), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6148, 6150), False, 'import torch\n'), ((6194, 6214), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6206, 6214), False, 'import torch\n'), ((6280, 6299), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6292, 6299), False, 'import torch\n'), ((7343, 7437), 'problems.dist_online_dense_problem.DistOnlineDensityProblem', 'DistOnlineDensityProblem', (['base_model', 'base_loss', 'train_subsets', 'val_set', 'device', 'prob_conf'], {}), '(base_model, base_loss, train_subsets, val_set,\n device, prob_conf)\n', (7367, 7437), False, 'from problems.dist_online_dense_problem import DistOnlineDensityProblem\n'), ((3248, 3262), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3260, 3262), False, 'from datetime import datetime\n'), ((3529, 3573), 'os.path.join', 'os.path.join', (['output_dir', "(time_now + '.yaml')"], {}), "(output_dir, time_now + '.yaml')\n", (3541, 3573), False, 'import os\n'), ((5927, 5945), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (5943, 5945), False, 'import torch\n'), ((7581, 7610), 'optimizers.dinno.DiNNO', 'DiNNO', (['prob', 'device', 'opt_conf'], {}), '(prob, device, opt_conf)\n', (7586, 7610), False, 'from optimizers.dinno import 
DiNNO\n'), ((6001, 6018), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (6016, 6018), False, 'import torch\n'), ((6580, 6605), 'copy.deepcopy', 'copy.deepcopy', (['base_model'], {}), '(base_model)\n', (6593, 6605), False, 'import copy\n'), ((7076, 7119), 'os.path.join', 'os.path.join', (['output_dir', '"""solo_results.pt"""'], {}), "(output_dir, 'solo_results.pt')\n", (7088, 7119), False, 'import os\n'), ((7675, 7703), 'optimizers.dsgt.DSGT', 'DSGT', (['prob', 'device', 'opt_conf'], {}), '(prob, device, opt_conf)\n', (7679, 7703), False, 'from optimizers.dsgt import DSGT\n'), ((7768, 7796), 'optimizers.dsgd.DSGD', 'DSGD', (['prob', 'device', 'opt_conf'], {}), '(prob, device, opt_conf)\n', (7772, 7796), False, 'from optimizers.dsgd import DSGD\n'), ((8185, 8246), 'torch.profiler.schedule', 'torch.profiler.schedule', ([], {'wait': '(1)', 'warmup': '(1)', 'active': '(3)', 'repeat': '(3)'}), '(wait=1, warmup=1, active=3, repeat=3)\n', (8208, 8246), False, 'import torch\n'), ((8379, 8446), 'os.path.join', 'os.path.join', (['output_dir', "(prob_conf['problem_name'] + 'opt_profile')"], {}), "(output_dir, prob_conf['problem_name'] + 'opt_profile')\n", (8391, 8446), False, 'import os\n'), ((5293, 5339), 'torch.sum', 'torch.sum', (['(train_subsets[i].scans[:, 2] == 1.0)'], {}), '(train_subsets[i].scans[:, 2] == 1.0)\n', (5302, 5339), False, 'import torch\n')] |
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import numpy as np
import argparse
import datetime
import pickle
import csv
X, y = [], []


def load_dataset(infile):
    """Load the sensor CSV at *infile* into the module-level X / y.

    Each row contributes a 4-feature vector (light luminosity, IR status,
    ultrasonic status, time-of-day collapsed to HHMM digits) to X and the
    target luminosity to y.

    Fixes: targets are cast to float (they arrive as CSV strings, which
    regression metrics choke on), the file is opened with newline='' as
    the csv module documents, and (X, y) are also returned so callers
    need not rely on the module globals.
    """
    global X, y
    with open(infile, newline="") as fp:
        reader = csv.DictReader(fp)
        for each_line in reader:
            X.append(np.array([
                float(each_line["light luminosity"]),
                float(each_line["IR Status"]),
                float(each_line["Ultrasonic sensor status"]),
                # "HH:MM" -> numeric HHMM, e.g. "13:05" -> 1305.0
                float(each_line["time of the day"].replace(":", ""))
            ]))
            y.append(float(each_line["predicted light luminosity"]))
    X = np.array(X)
    return X, y
def train(outfile):
    """Fit a linear regression on the module-level X / y, report metrics,
    and pickle the fitted model to *outfile*.

    Fixes: the model file is written inside a context manager (the
    original leaked the handle from ``open(outfile, 'wb')``), and the
    RMSE that was computed but never shown is now printed.
    """
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    reg = LinearRegression().fit(x_train, y_train)
    preds = reg.predict(x_test)
    # R-Squared is a statistical measure of fit that indicates how much
    # variation of a dependent variable is explained by the independent
    # variable(s) in a regression model.
    r2score = r2_score(y_test, preds)
    root_mean_sqerror = np.sqrt(mean_squared_error(y_test, preds))
    print("R squared score: ", r2score)
    print("Root mean squared error: ", root_mean_sqerror)
    with open(outfile, 'wb') as model_fp:
        pickle.dump(reg, model_fp)
    print("Training complete and the model is stored at %s" % outfile)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str)
parser.add_argument("--output", type=str)
args = parser.parse_args()
print(args.input, args.output)
load_dataset(args.input)
train(args.output)
| [
"csv.DictReader",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"numpy.array",
"sklearn.metrics.r2_score",
"sklearn.linear_model.LinearRegression"
] | [((773, 784), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (781, 784), True, 'import numpy as np\n'), ((844, 881), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (860, 881), False, 'from sklearn.model_selection import train_test_split\n'), ((1164, 1187), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (1172, 1187), False, 'from sklearn.metrics import r2_score\n'), ((1450, 1475), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1473, 1475), False, 'import argparse\n'), ((367, 385), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (381, 385), False, 'import csv\n'), ((1220, 1253), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'preds'], {}), '(y_test, preds)\n', (1238, 1253), False, 'from sklearn.metrics import mean_squared_error\n'), ((890, 908), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (906, 908), False, 'from sklearn.linear_model import LinearRegression\n')] |
import pandas as pd
import numpy as np
from os.path import join, exists, split
from os import mkdir, makedirs, listdir
import gc
import matplotlib.pyplot as plt
import seaborn
from copy import deepcopy
from time import time
import pickle
import argparse
# Command-line interface: positional split name and batch file name, plus
# an optional endpoint reset horizon (default 25).
_cli = argparse.ArgumentParser()
_cli.add_argument('split_name')
_cli.add_argument('f')
_cli.add_argument('--t_reset', type=int, default=25)
_cli_args = _cli.parse_args()
split_name, f, t_reset = _cli_args.split_name, _cli_args.f, _cli_args.t_reset
# split_name = 'temporal_4'
# f = 'batch_38.h5'
# t_reset = 25

# --- analysis configuration -------------------------------------------------
fixed_rec = 0.9
model_name = 'shap_top500_features'
delta_t = 0                     # alarm lead-time offset (minutes)
window_size = 480               # pre-event critical window width (minutes)
data_version = 'v6b'
result_version = '181108'
t_postevent = np.timedelta64(2, 'h')   # maintenance window length after an event
wsize_upper_h = (window_size + delta_t) * np.timedelta64(1, 'm')
wsize_lower_h = delta_t * np.timedelta64(1, 'm')

# --- input locations --------------------------------------------------------
bern_path = '/cluster/work/grlab/clinical/Inselspital/DataReleases/01-19-2017/InselSpital/'
alarm_path = join(bern_path, 'circews_analysis', 'alarm_score_for_calibration_new',
                  data_version, 'merged_0_reset_%d'%t_reset, model_name, split_name)
ep_path = join(bern_path, '3a_endpoints', data_version, 'reduced')


def res_dir(s):
    """Result directory name for model *s* (was a lambda; PEP 8 prefers def)."""
    return 'WorseStateFromZero_0.0_8.0_%s_lightgbm_full' % s


pred_path = join(bern_path, '8_predictions', result_version, 'reduced',
                 split_name, res_dir(model_name))

# Fix: the original opened the same HDF store twice (once via a context
# manager, once manually) and built the identical pid list both times; a
# single read suffices.
with pd.HDFStore(join(pred_path, f), mode='r') as tmp:
    pids = [int(key[2:]) for key in tmp.keys()]
gc.collect()

# Per-window-type accumulators plus global catch/miss event counters.
stats = dict()
lst_period_type = ['critical_window', 'maintenance_window', 'uncritical_window', 'patients_wo_events']
for period_type in lst_period_type:
    stats.update({period_type: dict(valid_los=[], cnt_alarm=[], los=[])})
stats.update(cnt_catched_event=0, cnt_missed_event=0)


def is_critical_win(t, ts):
    """Mask of timestamps in [t - upper, t - lower): the pre-event window."""
    return np.logical_and(ts < t - wsize_lower_h, ts >= t - wsize_upper_h)


def is_maintenance_win(t, ts):
    """Mask of timestamps in (t, t + t_postevent]: the post-event window."""
    return np.logical_and(ts > t, ts <= t + t_postevent)


def is_uncritical_win(t, ts, mode):
    """Mask of timestamps strictly outside the windows around t."""
    return ts < t - wsize_upper_h if mode == 'before' else ts > t + t_postevent
# Walk every patient in the batch and bucket stability time and alarm counts
# into critical (pre-event), maintenance (post-event), and uncritical windows.
# NOTE(review): the repeated "/ 12" appears to convert grid steps to hours,
# implying a 5-minute sampling grid (12 samples/hour) — confirm upstream.
t_start = time()
for n, pid in enumerate(pids):
    # One DataFrame per patient, keyed 'p<pid>' in the HDF file.
    df = pd.read_hdf(join(alarm_path, f), 'p%d'%pid).reset_index()
    # df = pd.read_hdf(join(alarm_path, 'rec_%g'%fixed_rec+f), 'p%d'%pid).reset_index()
    df.set_index('AbsDatetime', inplace=True)
    for col in ['InEvent', 'Stable']:
        df.loc[:,col] = df[col].astype(int)
    if df.InEvent.sum()==0:
        # Patient has no event: the whole stay counts as one uncritical span.
        # assert('Yes' not in df.IsAlarmTrue.unique())
        stats['patients_wo_events']['valid_los'].append( df.Stable.sum() / 12)
        stats['patients_wo_events']['cnt_alarm'].append( df.Alarm.sum() )
        stats['patients_wo_events']['los'].append( len(df)/12 )
    else:
        stable_sum = 0
        # Event onsets/offsets from the 0->1 / 1->0 transitions of InEvent;
        # pad with the first/last timestamp when the stay starts/ends in-event.
        beg_onset = df.index[np.where(np.array([0]+np.diff(df.InEvent.values).tolist())==1)[0]]
        end_onset = df.index[np.where(np.diff(df.InEvent.values)==-1)[0]]
        if df.iloc[0].InEvent==1:
            beg_onset = np.concatenate([[df.index[0]], beg_onset])
        if df.iloc[-1].InEvent==1:
            end_onset = np.concatenate([end_onset, [df.index[-1]]])
        assert(len(beg_onset)==len(end_onset))
        ### Critical window
        for i, dt in enumerate(beg_onset):
            dt = np.datetime64(dt)
            win_pre_event = df[is_critical_win(dt, df.index.values)]
            if len(win_pre_event)==0:
                continue
            if len(win_pre_event)==0 or win_pre_event.Stable.sum()==0:
                continue
            # Cross-check that alarm presence agrees with the precomputed
            # CatchedOnset flag; any disagreement aborts the run.
            # NOTE(review): `~` assumes a numpy bool here; on a plain Python
            # bool `~True` is -2 (truthy) — verify the column dtype.
            if ~ df.loc[dt,'Onset']:
                pass
            elif win_pre_event.Alarm.sum()>0 and df.loc[dt,'Onset'] and df.loc[dt,'CatchedOnset']:
                stats['cnt_catched_event'] += 1
            elif win_pre_event.Alarm.sum()==0 and df.loc[dt,'Onset'] and ~df.loc[dt,'CatchedOnset']:
                stats['cnt_missed_event'] += 1
            else:
                print('Alarm number', win_pre_event.Alarm.sum(),'; Onset status', df.loc[dt, 'CatchedOnset'])
                print(dt)
                raise Exception('Problem!!!!')
            # Avoid double-counting time that overlaps the previous event.
            if i > 0:
                win_pre_event = win_pre_event[win_pre_event.index>end_onset[i-1]]
            stable_sum += win_pre_event.Stable.sum() / 12
            stats['critical_window']['valid_los'].append( win_pre_event.Stable.sum() / 12 )
            stats['critical_window']['los'].append( len(df)/12 )
            stats['critical_window']['cnt_alarm'].append( win_pre_event.Alarm.sum() )
        ### Uncritical window
        for i, dt in enumerate(beg_onset):
            dt = np.datetime64(dt)
            win_pre_event = df[is_uncritical_win(dt, df.index.values, 'before')]
            if len(win_pre_event)==0:
                continue
            # Trim away the previous event's maintenance window.
            if i > 0:
                win_pre_event = win_pre_event[win_pre_event.index>end_onset[i-1]+t_postevent]
            if len(win_pre_event)==0 or win_pre_event.Stable.sum()==0:
                continue
            stable_sum += win_pre_event.Stable.sum() / 12
            stats['uncritical_window']['valid_los'].append(win_pre_event.Stable.sum() / 12)
            stats['uncritical_window']['los'].append( len(df)/12 )
            stats['uncritical_window']['cnt_alarm'].append(win_pre_event.Alarm.sum())
        # Everything after the last event's maintenance window is uncritical.
        win_post_last_event = df[is_uncritical_win(np.datetime64(end_onset[-1]),df.index.values,'after')]
        stable_sum += win_post_last_event.Stable.sum() / 12
        stats['uncritical_window']['valid_los'].append(win_post_last_event.Stable.sum() / 12)
        stats['uncritical_window']['los'].append( len(df)/12 )
        stats['uncritical_window']['cnt_alarm'].append(win_post_last_event.Alarm.sum())
        ### Maintenance window
        for i, dt in enumerate(end_onset):
            dt = np.datetime64(dt)
            win_post_event = df[is_maintenance_win(dt, df.index.values)]
            if len(win_post_event)==0:
                continue
            # Trim away the next event's critical window.
            if i < len(beg_onset) - 1:
                win_post_event = win_post_event[win_post_event.index<beg_onset[i+1]-wsize_upper_h]
            if len(win_post_event)==0 or win_post_event.Stable.sum()==0:
                continue
            stable_sum += win_post_event.Stable.sum() / 12
            stats['maintenance_window']['valid_los'].append(win_post_event.Stable.sum() / 12)
            stats['maintenance_window']['los'].append( len(df)/12 )
            stats['maintenance_window']['cnt_alarm'].append(win_post_event.Alarm.sum())
        # Sanity check: the per-window stable time must tile the whole stay.
        assert(np.abs(df.Stable.sum()/12-stable_sum)<1e-10)
    if (n+1)%10==0:
        print('Process %d patients, time: %4.4g sec'%(n+1, time()-t_start))
        gc.collect()
# Persist the accumulated statistics next to the alarm scores.
# with open(join(alarm_path, 'rec_%g'%fixed_rec+f.replace('.h5', '.pkl')), 'wb') as tmp:
#     pickle.dump(stats, tmp)
with open(join(alarm_path, f.replace('.h5', '.pkl')), 'wb') as tmp:
    pickle.dump(stats, tmp)
| [
"pickle.dump",
"argparse.ArgumentParser",
"numpy.logical_and",
"os.path.join",
"numpy.diff",
"numpy.datetime64",
"gc.collect",
"numpy.concatenate",
"numpy.timedelta64",
"time.time"
] | [((265, 290), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (288, 290), False, 'import argparse\n'), ((938, 960), 'numpy.timedelta64', 'np.timedelta64', (['(2)', '"""h"""'], {}), "(2, 'h')\n", (952, 960), True, 'import numpy as np\n'), ((1179, 1322), 'os.path.join', 'join', (['bern_path', '"""circews_analysis"""', '"""alarm_score_for_calibration_new"""', 'data_version', "('merged_0_reset_%d' % t_reset)", 'model_name', 'split_name'], {}), "(bern_path, 'circews_analysis', 'alarm_score_for_calibration_new',\n data_version, 'merged_0_reset_%d' % t_reset, model_name, split_name)\n", (1183, 1322), False, 'from os.path import join, exists, split\n'), ((1346, 1402), 'os.path.join', 'join', (['bern_path', '"""3a_endpoints"""', 'data_version', '"""reduced"""'], {}), "(bern_path, '3a_endpoints', data_version, 'reduced')\n", (1350, 1402), False, 'from os.path import join, exists, split\n'), ((1838, 1850), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1848, 1850), False, 'import gc\n'), ((2515, 2521), 'time.time', 'time', ([], {}), '()\n', (2519, 2521), False, 'from time import time\n'), ((1001, 1023), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (1015, 1023), True, 'import numpy as np\n'), ((1050, 1072), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (1064, 1072), True, 'import numpy as np\n'), ((1704, 1716), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1714, 1716), False, 'import gc\n'), ((1742, 1760), 'os.path.join', 'join', (['pred_path', 'f'], {}), '(pred_path, f)\n', (1746, 1760), False, 'from os.path import join, exists, split\n'), ((2171, 2234), 'numpy.logical_and', 'np.logical_and', (['(ts < t - wsize_lower_h)', '(ts >= t - wsize_upper_h)'], {}), '(ts < t - wsize_lower_h, ts >= t - wsize_upper_h)\n', (2185, 2234), True, 'import numpy as np\n'), ((2312, 2357), 'numpy.logical_and', 'np.logical_and', (['(ts > t)', '(ts <= t + t_postevent)'], {}), '(ts > t, ts <= t + 
t_postevent)\n', (2326, 2357), True, 'import numpy as np\n'), ((7313, 7336), 'pickle.dump', 'pickle.dump', (['stats', 'tmp'], {}), '(stats, tmp)\n', (7324, 7336), False, 'import pickle\n'), ((1614, 1632), 'os.path.join', 'join', (['pred_path', 'f'], {}), '(pred_path, f)\n', (1618, 1632), False, 'from os.path import join, exists, split\n'), ((7099, 7111), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7109, 7111), False, 'import gc\n'), ((3408, 3450), 'numpy.concatenate', 'np.concatenate', (['[[df.index[0]], beg_onset]'], {}), '([[df.index[0]], beg_onset])\n', (3422, 3450), True, 'import numpy as np\n'), ((3510, 3553), 'numpy.concatenate', 'np.concatenate', (['[end_onset, [df.index[-1]]]'], {}), '([end_onset, [df.index[-1]]])\n', (3524, 3553), True, 'import numpy as np\n'), ((3690, 3707), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (3703, 3707), True, 'import numpy as np\n'), ((5022, 5039), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (5035, 5039), True, 'import numpy as np\n'), ((6234, 6251), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (6247, 6251), True, 'import numpy as np\n'), ((2575, 2594), 'os.path.join', 'join', (['alarm_path', 'f'], {}), '(alarm_path, f)\n', (2579, 2594), False, 'from os.path import join, exists, split\n'), ((5782, 5810), 'numpy.datetime64', 'np.datetime64', (['end_onset[-1]'], {}), '(end_onset[-1])\n', (5795, 5810), True, 'import numpy as np\n'), ((3314, 3340), 'numpy.diff', 'np.diff', (['df.InEvent.values'], {}), '(df.InEvent.values)\n', (3321, 3340), True, 'import numpy as np\n'), ((7074, 7080), 'time.time', 'time', ([], {}), '()\n', (7078, 7080), False, 'from time import time\n'), ((3231, 3257), 'numpy.diff', 'np.diff', (['df.InEvent.values'], {}), '(df.InEvent.values)\n', (3238, 3257), True, 'import numpy as np\n')] |
#-----------------------------------------------------------------------------#
# #
# I M P O R T L I B R A R I E S #
# #
#-----------------------------------------------------------------------------#
import torch, torchvision
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision.models.resnet import ResNet, BasicBlock
import numpy as np
#*****************************************************************************#
# #
# description: #
# create VGG model using the specifications provided. #
# #
#*****************************************************************************#
class VGG(nn.Module):
    """Configurable VGG-style ConvNet.

    *cfg* lists output channel counts, with 'M' marking 2x2 max-pool
    stages; *size* is the flattened feature width fed into the MLP head
    and *out* the number of classes.
    """

    def __init__(self, cfg, size=512, out=10):
        super(VGG, self).__init__()
        self.features = self.make_layers(cfg)
        self.classifier = nn.Sequential(
            nn.Linear(size, size), nn.ReLU(True),
            nn.Linear(size, size), nn.ReLU(True),
            nn.Linear(size, out),
        )
        # He-style initialization for every convolution.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, np.sqrt(2. / fan_out))
                mod.bias.data.zero_()

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))

    def make_layers(self, cfg, batch_norm=True):
        """Build the convolutional trunk described by *cfg*."""
        layers, in_channels = [], 3
        for v in cfg:
            if v == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
                continue
            layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
            if batch_norm:
                layers.append(nn.BatchNorm2d(v))
            layers.append(nn.ReLU(inplace=True))
            in_channels = v
        return nn.Sequential(*layers)
def vgg11s():
    """Small VGG-11 (128-wide head) for 32x32 inputs."""
    return VGG([32, 'M', 64, 'M', 128, 128, 'M', 128, 128, 'M', 128, 128, 'M'],
               size=128)


def vgg11():
    """Standard VGG-11 configuration."""
    return VGG([64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'])


def vgg16():
    """Standard VGG-16 configuration."""
    return VGG([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
                512, 512, 512, 'M', 512, 512, 512, 'M'])
#*****************************************************************************#
# #
# description: #
# create VGG model using the specifications provided. #
# #
#*****************************************************************************#
class Block(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    A residual shortcut is applied only when stride == 1; if the channel
    counts differ, the shortcut is a 1x1 projection with normalization.
    """

    def __init__(self, in_planes, out_planes, expansion, stride, norm_layer):
        super(Block, self).__init__()
        self.stride = stride
        hidden = expansion * in_planes
        # 1x1 expansion
        self.conv1 = nn.Conv2d(in_planes, hidden, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = norm_layer(hidden)
        # 3x3 depthwise (groups == channels)
        self.conv2 = nn.Conv2d(hidden, hidden, kernel_size=3, stride=stride, padding=1, groups=hidden, bias=False)
        self.bn2 = norm_layer(hidden)
        # 1x1 linear projection (no activation afterwards)
        self.conv3 = nn.Conv2d(hidden, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = norm_layer(out_planes)
        if stride == 1 and in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
                norm_layer(out_planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        if self.stride == 1:
            h = h + self.shortcut(x)
        return h
#*****************************************************************************#
# #
# description: #
# create MobileNetV2 model using the specifications provided. #
# #
#*****************************************************************************#
class MobileNetV2(nn.Module):
    """MobileNetV2 adapted for CIFAR-sized (32x32) inputs.

    cfg rows are (expansion, out_planes, num_blocks, stride); `shrink`
    divides every channel count to produce narrower variants, and
    `norm_layer` lets callers swap BatchNorm for e.g. GroupNorm.
    """
    # (expansion, out_planes, num_blocks, stride)
    def __init__(self, num_classes=10, norm_layer=nn.BatchNorm2d,shrink=1):
        super(MobileNetV2, self).__init__()
        # NOTE: change conv1 stride 2 -> 1 for CIFAR10
        self.norm_layer = norm_layer
        self.cfg = [(1, 16//shrink, 1, 1),
           (6, 24//shrink, 2, 1),  # NOTE: change stride 2 -> 1 for CIFAR10
           (6, 32//shrink, 3, 2),
           (6, 64//shrink, 4, 2),
           (6, 96//shrink, 3, 1),
           (6, 160//shrink, 3, 2),
           (6, 320//shrink, 1, 1)]
        # Stem: stride-1 3x3 conv (stride 2 in the original ImageNet net).
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = self.norm_layer(32)
        self.layers = self._make_layers(in_planes=32)
        # Final 1x1 expansion to 1280//shrink channels before pooling.
        self.conv2 = nn.Conv2d(self.cfg[-1][1], 1280//shrink, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = self.norm_layer(1280//shrink)
        self.linear = nn.Linear(1280//shrink, num_classes)
    def _make_layers(self, in_planes):
        """Stack inverted-residual Blocks per cfg; only the first block of
        each stage uses the configured stride."""
        layers = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            strides = [stride] + [1]*(num_blocks-1)
            for stride in strides:
                layers.append(Block(in_planes, out_planes, expansion, stride, self.norm_layer))
                in_planes = out_planes
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.relu(self.bn2(self.conv2(out)))
        # NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def mobilenetv2():
    """Full-width MobileNetV2 with BatchNorm."""
    return MobileNetV2(norm_layer=nn.BatchNorm2d)


def mobilenetv2s():
    """Half-width MobileNetV2 (shrink=2)."""
    return MobileNetV2(norm_layer=nn.BatchNorm2d, shrink=2)


def mobilenetv2xs():
    """Quarter-width MobileNetV2 (shrink=4)."""
    return MobileNetV2(norm_layer=nn.BatchNorm2d, shrink=4)


def mobilenetv2_gn():
    """Full-width MobileNetV2 using 2-group GroupNorm instead of BatchNorm."""
    return MobileNetV2(norm_layer=lambda ch: nn.GroupNorm(num_groups=2, num_channels=ch))
#*****************************************************************************#
# #
# description: #
# create LENET cifar model using the specifications provided. #
# #
#*****************************************************************************#
class lenet_cifar(nn.Module):
    """LeNet-5 variant for 3x32x32 CIFAR images with a 10-way output.

    The auxiliary 2-way `binary` head is not used by `forward`; it is kept
    so existing checkpoints still load.
    """

    def __init__(self):
        super(lenet_cifar, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.binary = nn.Linear(84, 2)

    def forward(self, x):
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        x = x.view(-1, 16 * 5 * 5)
        for fc in (self.fc1, self.fc2):
            x = F.relu(fc(x))
        return self.fc3(x)
#*****************************************************************************#
# #
# description: #
# create LENET large model using the specifications provided. #
# #
#*****************************************************************************#
class lenet_large(nn.Module):
    """Wider LeNet for 3x32x32 inputs; `f` exposes the 512-d features."""

    def __init__(self):
        super(lenet_large, self).__init__()
        self.conv1 = nn.Conv2d(3, 20, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(20, 50, 5)
        self.fc1 = nn.Linear(50 * 5 * 5, 512)
        self.fc2 = nn.Linear(512, 10)

    def f(self, x):
        """Return the 512-dimensional penultimate representation."""
        h = self.pool(F.relu(self.conv1(x)))
        h = self.pool(F.relu(self.conv2(h)))
        h = h.view(-1, 50 * 5 * 5)
        return F.relu(self.fc1(h))

    def forward(self, x):
        return self.fc2(self.f(x))
#*****************************************************************************#
# #
# description: #
# create LENET mnist model using the specifications provided. #
# #
#*****************************************************************************#
class lenet_mnist(torch.nn.Module):
    """LeNet-5 for 1x28x28 MNIST digits with a 10-way output.

    The auxiliary 2-way `binary` head is not used by `forward`; it is kept
    so existing checkpoints still load.
    """

    def __init__(self):
        super(lenet_mnist, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 6, 5)
        self.pool = torch.nn.MaxPool2d(2, 2)
        self.conv2 = torch.nn.Conv2d(6, 16, 5)
        self.fc1 = torch.nn.Linear(16 * 4 * 4, 120)
        self.fc2 = torch.nn.Linear(120, 84)
        self.fc3 = torch.nn.Linear(84, 10)
        self.binary = torch.nn.Linear(84, 2)

    def forward(self, x):
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        x = x.view(-1, 16 * 4 * 4)
        for fc in (self.fc1, self.fc2):
            x = F.relu(fc(x))
        return self.fc3(x)
#*****************************************************************************#
# #
# description: #
# function to apply group normalization. #
# #
#*****************************************************************************#
def apply_gn(model):
    """Recursively replace every BatchNorm2d in *model* with 4-group GroupNorm.

    Recursion descends only into ``nn.Sequential`` containers and
    torchvision ``BasicBlock``s, exactly as before.

    Fix: the original tested ``nn.Sequential`` and
    ``torch.nn.modules.container.Sequential`` separately, but they are the
    same class object, so one check suffices.
    """
    for n, c in model.named_children():
        if isinstance(c, (nn.Sequential, torchvision.models.resnet.BasicBlock)):
            apply_gn(c)
        if isinstance(c, nn.BatchNorm2d):
            setattr(model, n, torch.nn.GroupNorm(num_groups=4, num_channels=c.num_features))
#*****************************************************************************#
# #
# description: #
# create ResNet models using the specifications provided. #
# #
#*****************************************************************************#
def resnet8():
    """ResNet with one BasicBlock per stage, 10-way classifier."""
    stages = [1, 1, 1, 1]
    return ResNet(BasicBlock, stages, num_classes=10)


def resnet18():
    """Standard ResNet-18 (two BasicBlocks per stage), 10-way classifier."""
    stages = [2, 2, 2, 2]
    return ResNet(BasicBlock, stages, num_classes=10)
class Model(nn.Module):
    """SimCLR-style encoder plus projection head built from resnet8.

    The ResNet stem is adapted for small (CIFAR-size) inputs: conv1 is
    replaced by a stride-1 3x3 convolution, and the stem max-pool and the
    final fc layer are dropped.  `forward` returns L2-normalised
    (feature, projection) pairs.
    """
    def __init__(self, feature_dim=128, group_norm=False):
        # feature_dim: output width of the projection head g.
        # group_norm: if True, swap every BatchNorm2d for GroupNorm via apply_gn.
        super(Model, self).__init__()
        self.f = []
        for name, module in resnet8().named_children():
            if name == 'conv1':
                # Stride-1 3x3 stem instead of the ImageNet 7x7/stride-2 conv.
                module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
            if not isinstance(module, nn.Linear) and not isinstance(module, nn.MaxPool2d):
                self.f.append(module)
        # encoder
        self.f = nn.Sequential(*self.f)
        # projection head
        self.g = nn.Sequential(nn.Linear(512, 512, bias=False), nn.BatchNorm1d(512),
                               nn.ReLU(inplace=True), nn.Linear(512, feature_dim, bias=True))
        if group_norm:
            apply_gn(self)
    def forward(self, x):
        x = self.f(x)
        feature = torch.flatten(x, start_dim=1)
        out = self.g(feature)
        # Both outputs are unit-normalised along the last dimension.
        return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
class resnet8_bn(nn.Module):
    """Linear classifier on the resnet8 SimCLR encoder (BatchNorm variant).

    Reuses the `Model` encoder and adds a single linear head on the 512-d
    features; can warm-start from a pretrained checkpoint.
    """
    def __init__(self, num_class=10, pretrained_path=None, group_norm=False):
        super(resnet8_bn, self).__init__()
        # encoder
        self.f = Model(group_norm=group_norm).f
        # classifier
        self.fc = nn.Linear(512, num_class, bias=True)
        if pretrained_path:
            # strict=False: checkpoint may lack the freshly added fc weights.
            # NOTE(review): torch.load unpickles arbitrary code — only load
            # trusted checkpoint files.
            self.load_state_dict(torch.load(pretrained_path, map_location='cpu'), strict=False)
    def forward(self, x):
        x = self.f(x)
        feature = torch.flatten(x, start_dim=1)
        out = self.fc(feature)
        return out
class resnet8_gn(nn.Module):
    """Linear classifier on the resnet8 SimCLR encoder (GroupNorm variant).

    Identical to resnet8_bn except group_norm defaults to True, so the
    encoder's BatchNorm layers are replaced by GroupNorm.
    """
    def __init__(self, num_class=10, pretrained_path=None, group_norm=True):
        super(resnet8_gn, self).__init__()
        # encoder
        self.f = Model(group_norm=group_norm).f
        # classifier
        self.fc = nn.Linear(512, num_class, bias=True)
        if pretrained_path:
            # strict=False: checkpoint may lack the freshly added fc weights.
            # NOTE(review): torch.load unpickles arbitrary code — only load
            # trusted checkpoint files.
            self.load_state_dict(torch.load(pretrained_path, map_location='cpu'), strict=False)
    def forward(self, x):
        x = self.f(x)
        feature = torch.flatten(x, start_dim=1)
        out = self.fc(feature)
        return out
#*****************************************************************************#
# #
# description: #
# create AlexNet model using the specifications provided. #
# #
#*****************************************************************************#
class AlexNet(nn.Module):
    """CIFAR-sized AlexNet: five conv stages downsampling 32x32 -> 2x2,
    followed by a three-layer MLP classifier."""

    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        conv_stack = [
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(64, 192, kernel_size=3, padding=1), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
        ]
        self.features = nn.Sequential(*conv_stack)
        self.classifier = nn.Sequential(
            nn.Linear(256 * 2 * 2, 4096), nn.ReLU(inplace=True),
            nn.Linear(4096, 4096), nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        h = self.features(x)
        h = h.view(h.size(0), 256 * 2 * 2)
        return self.classifier(h)
class simclrVGG11(nn.Module):
    """VGG-11 trunk (no batch norm by default) with a linear classifier head."""

    def __init__(self, n_classes=10, group_norm=False):
        super(simclrVGG11, self).__init__()
        self.f = self.make_layers([64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'])
        # projection head
        self.fc = nn.Linear(512, n_classes, bias=True)
        # He-style initialization for every convolution.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, np.sqrt(2. / fan_out))
                mod.bias.data.zero_()
        if group_norm:
            apply_gn(self)

    def make_layers(self, cfg, batch_norm=False):
        """Build the conv trunk; 'M' entries insert 2x2 max-pools."""
        modules, in_ch = [], 3
        for spec in cfg:
            if spec == 'M':
                modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
                continue
            modules.append(nn.Conv2d(in_ch, spec, kernel_size=3, padding=1))
            if batch_norm:
                modules.append(nn.BatchNorm2d(spec))
            modules.append(nn.ReLU(inplace=True))
            in_ch = spec
        return nn.Sequential(*modules)

    def forward(self, x):
        h = self.f(x)
        flat = torch.flatten(h, start_dim=1)
        return self.fc(flat)
#*****************************************************************************#
# #
# description: #
# create and return the desired model along with optimizer & hyperparams. #
# #
#*****************************************************************************#
def get_model(model):
    """Look up a model by name.

    Returns a (model constructor, optimizer class, optimizer kwargs)
    triple for the given name; raises KeyError for unknown names.
    """
    return { "vgg16" : (vgg16, optim.Adam, {"lr":1e-3}),
             "vgg11s" : (vgg11s, optim.Adam, {"lr":1e-3}),
             "vgg11" : (vgg11, optim.Adam, {"lr":1e-3}),
             "resnet18" : (resnet18, optim.Adam, {"lr":1e-3}),
             "alexnet" : (AlexNet, optim.Adam, {"lr":1e-3}),
             "lenet_cifar" : (lenet_cifar, optim.Adam, {"lr":0.001,
                                                        "weight_decay":0.0
                                                        }),
             "lenet_large" : (lenet_large, optim.SGD, {"lr":0.01,
                                                       "momentum":0.9,
                                                       "weight_decay":0.0
                                                       }),
             "lenet_mnist" : (lenet_mnist, optim.Adam, {"lr":0.001,
                                                        "weight_decay":0.0
                                                        }),
             "mobilenetv2" : (mobilenetv2, optim.SGD, {"lr":0.01,
                                                       "momentum":0.9,
                                                       "weight_decay":5e-4
                                                       }),
             "mobilenetv2s" : (mobilenetv2s, optim.SGD, {"lr":0.01,
                                                         "momentum":0.9,
                                                         "weight_decay":5e-4
                                                         }),
             "mobilenetv2xs" : (mobilenetv2xs, optim.SGD, {"lr":0.01,
                                                           "momentum":0.9,
                                                           "weight_decay":5e-4
                                                           }),
             "mobilenetv2_gn" : (mobilenetv2_gn, optim.SGD, {"lr":0.01,
                                                             "momentum":0.9,
                                                             "weight_decay":5e-4
                                                             }),
             "resnet8_gn" : (resnet8_gn, optim.SGD, {"lr":0.1,
                                                     "momentum":0.9,
                                                     "weight_decay":5e-4
                                                     }),
             "resnet8_bn" : (resnet8_bn, optim.Adam, {"lr" : 0.001}),
             "simclr_vgg11" : (simclrVGG11, optim.Adam, {"lr" : 0.001,
                                                         "weight_decay" :5e-4
                                                         })
             }[model]
def print_model(model):
    """Print every named parameter (name, shape, requires_grad) followed by
    the total parameter count."""
    total = 0
    print("Model:")
    for pname, param in model.named_parameters():
        print(' -', '{:30}'.format(pname), list(param.shape), "Requires Grad:", param.requires_grad)
        total += param.numel()
    print("Total number of Parameters: ", total)
    print()
"torch.nn.GroupNorm",
"torch.nn.ReLU",
"torch.nn.BatchNorm2d",
"numpy.sqrt",
"torch.nn.Sequential",
"torch.load",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.functional.normalize",
"torch.nn.BatchNorm1d",
"torchvision.models.resnet.ResNet",
"torch.nn.MaxPool2d",
"torch.nn.... | [((12003, 12051), 'torchvision.models.resnet.ResNet', 'ResNet', (['BasicBlock', '[1, 1, 1, 1]'], {'num_classes': '(10)'}), '(BasicBlock, [1, 1, 1, 1], num_classes=10)\n', (12009, 12051), False, 'from torchvision.models.resnet import ResNet, BasicBlock\n'), ((12078, 12126), 'torchvision.models.resnet.ResNet', 'ResNet', (['BasicBlock', '[2, 2, 2, 2]'], {'num_classes': '(10)'}), '(BasicBlock, [2, 2, 2, 2], num_classes=10)\n', (12084, 12126), False, 'from torchvision.models.resnet import ResNet, BasicBlock\n'), ((2439, 2461), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2452, 2461), True, 'import torch.nn as nn\n'), ((3536, 3612), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'planes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)\n', (3545, 3612), True, 'import torch.nn as nn\n'), ((3672, 3770), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'groups': 'planes', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=stride, padding=1, groups=\n planes, bias=False)\n', (3681, 3770), True, 'import torch.nn as nn\n'), ((3825, 3902), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'out_planes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)\n', (3834, 3902), True, 'import torch.nn as nn\n'), ((3970, 3985), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (3983, 3985), True, 'import torch.nn as nn\n'), ((5619, 5683), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(3, 32, kernel_size=3, stride=1, padding=1, bias=False)\n', (5628, 5683), True, 'import torch.nn as nn\n'), ((5798, 5893), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.cfg[-1][1]', '(1280 // shrink)'], 
{'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(self.cfg[-1][1], 1280 // shrink, kernel_size=1, stride=1, padding\n =0, bias=False)\n', (5807, 5893), True, 'import torch.nn as nn\n'), ((5958, 5996), 'torch.nn.Linear', 'nn.Linear', (['(1280 // shrink)', 'num_classes'], {}), '(1280 // shrink, num_classes)\n', (5967, 5996), True, 'import torch.nn as nn\n'), ((6360, 6382), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (6373, 6382), True, 'import torch.nn as nn\n'), ((6611, 6631), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out', '(4)'], {}), '(out, 4)\n', (6623, 6631), True, 'import torch.nn.functional as F\n'), ((7672, 7690), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(6)', '(5)'], {}), '(3, 6, 5)\n', (7681, 7690), True, 'import torch.nn as nn\n'), ((7711, 7729), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (7723, 7729), True, 'import torch.nn as nn\n'), ((7751, 7770), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (7760, 7770), True, 'import torch.nn as nn\n'), ((7790, 7816), 'torch.nn.Linear', 'nn.Linear', (['(16 * 5 * 5)', '(120)'], {}), '(16 * 5 * 5, 120)\n', (7799, 7816), True, 'import torch.nn as nn\n'), ((7836, 7854), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (7845, 7854), True, 'import torch.nn as nn\n'), ((7874, 7891), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(10)'], {}), '(84, 10)\n', (7883, 7891), True, 'import torch.nn as nn\n'), ((7914, 7930), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(2)'], {}), '(84, 2)\n', (7923, 7930), True, 'import torch.nn as nn\n'), ((8789, 8808), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(20)', '(5)'], {}), '(3, 20, 5)\n', (8798, 8808), True, 'import torch.nn as nn\n'), ((8829, 8847), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (8841, 8847), True, 'import torch.nn as nn\n'), ((8869, 8889), 'torch.nn.Conv2d', 'nn.Conv2d', (['(20)', '(50)', '(5)'], 
{}), '(20, 50, 5)\n', (8878, 8889), True, 'import torch.nn as nn\n'), ((8909, 8935), 'torch.nn.Linear', 'nn.Linear', (['(50 * 5 * 5)', '(512)'], {}), '(50 * 5 * 5, 512)\n', (8918, 8935), True, 'import torch.nn as nn\n'), ((8955, 8973), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(10)'], {}), '(512, 10)\n', (8964, 8973), True, 'import torch.nn as nn\n'), ((9866, 9890), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(6)', '(5)'], {}), '(1, 6, 5)\n', (9881, 9890), False, 'import torch, torchvision\n'), ((9911, 9935), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (9929, 9935), False, 'import torch, torchvision\n'), ((9957, 9982), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (9972, 9982), False, 'import torch, torchvision\n'), ((10091, 10123), 'torch.nn.Linear', 'torch.nn.Linear', (['(16 * 4 * 4)', '(120)'], {}), '(16 * 4 * 4, 120)\n', (10106, 10123), False, 'import torch, torchvision\n'), ((10143, 10167), 'torch.nn.Linear', 'torch.nn.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (10158, 10167), False, 'import torch, torchvision\n'), ((10187, 10210), 'torch.nn.Linear', 'torch.nn.Linear', (['(84)', '(10)'], {}), '(84, 10)\n', (10202, 10210), False, 'import torch, torchvision\n'), ((10233, 10255), 'torch.nn.Linear', 'torch.nn.Linear', (['(84)', '(2)'], {}), '(84, 2)\n', (10248, 10255), False, 'import torch, torchvision\n'), ((12613, 12635), 'torch.nn.Sequential', 'nn.Sequential', (['*self.f'], {}), '(*self.f)\n', (12626, 12635), True, 'import torch.nn as nn\n'), ((12959, 12988), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', (12972, 12988), False, 'import torch, torchvision\n'), ((13347, 13383), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'num_class'], {'bias': '(True)'}), '(512, num_class, bias=True)\n', (13356, 13383), True, 'import torch.nn as nn\n'), ((13577, 13606), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', 
(13590, 13606), False, 'import torch, torchvision\n'), ((13914, 13950), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'num_class'], {'bias': '(True)'}), '(512, num_class, bias=True)\n', (13923, 13950), True, 'import torch.nn as nn\n'), ((14142, 14171), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', (14155, 14171), False, 'import torch, torchvision\n'), ((16177, 16213), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'n_classes'], {'bias': '(True)'}), '(512, n_classes, bias=True)\n', (16186, 16213), True, 'import torch.nn as nn\n'), ((17192, 17214), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (17205, 17214), True, 'import torch.nn as nn\n'), ((17283, 17312), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', (17296, 17312), False, 'import torch, torchvision\n'), ((1299, 1320), 'torch.nn.Linear', 'nn.Linear', (['size', 'size'], {}), '(size, size)\n', (1308, 1320), True, 'import torch.nn as nn\n'), ((1334, 1347), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1341, 1347), True, 'import torch.nn as nn\n'), ((1388, 1409), 'torch.nn.Linear', 'nn.Linear', (['size', 'size'], {}), '(size, size)\n', (1397, 1409), True, 'import torch.nn as nn\n'), ((1423, 1436), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1430, 1436), True, 'import torch.nn as nn\n'), ((1450, 1470), 'torch.nn.Linear', 'nn.Linear', (['size', 'out'], {}), '(size, out)\n', (1459, 1470), True, 'import torch.nn as nn\n'), ((12693, 12724), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {'bias': '(False)'}), '(512, 512, bias=False)\n', (12702, 12724), True, 'import torch.nn as nn\n'), ((12726, 12745), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (12740, 12745), True, 'import torch.nn as nn\n'), ((12778, 12799), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (12785, 12799), True, 'import torch.nn as nn\n'), ((12801, 12839), 
'torch.nn.Linear', 'nn.Linear', (['(512)', 'feature_dim'], {'bias': '(True)'}), '(512, feature_dim, bias=True)\n', (12810, 12839), True, 'import torch.nn as nn\n'), ((13034, 13062), 'torch.nn.functional.normalize', 'F.normalize', (['feature'], {'dim': '(-1)'}), '(feature, dim=-1)\n', (13045, 13062), True, 'import torch.nn.functional as F\n'), ((13064, 13088), 'torch.nn.functional.normalize', 'F.normalize', (['out'], {'dim': '(-1)'}), '(out, dim=-1)\n', (13075, 13088), True, 'import torch.nn.functional as F\n'), ((14861, 14913), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(3, 64, kernel_size=3, stride=2, padding=1)\n', (14870, 14913), True, 'import torch.nn as nn\n'), ((14927, 14948), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (14934, 14948), True, 'import torch.nn as nn\n'), ((14962, 14989), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (14974, 14989), True, 'import torch.nn as nn\n'), ((15003, 15047), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(192)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(64, 192, kernel_size=3, padding=1)\n', (15012, 15047), True, 'import torch.nn as nn\n'), ((15061, 15082), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (15068, 15082), True, 'import torch.nn as nn\n'), ((15096, 15123), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (15108, 15123), True, 'import torch.nn as nn\n'), ((15137, 15182), 'torch.nn.Conv2d', 'nn.Conv2d', (['(192)', '(384)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(192, 384, kernel_size=3, padding=1)\n', (15146, 15182), True, 'import torch.nn as nn\n'), ((15196, 15217), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (15203, 15217), True, 'import torch.nn as nn\n'), ((15231, 15276), 'torch.nn.Conv2d', 'nn.Conv2d', (['(384)', '(256)'], {'kernel_size': '(3)', 'padding': 
'(1)'}), '(384, 256, kernel_size=3, padding=1)\n', (15240, 15276), True, 'import torch.nn as nn\n'), ((15290, 15311), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (15297, 15311), True, 'import torch.nn as nn\n'), ((15325, 15370), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(256, 256, kernel_size=3, padding=1)\n', (15334, 15370), True, 'import torch.nn as nn\n'), ((15384, 15405), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (15391, 15405), True, 'import torch.nn as nn\n'), ((15419, 15446), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (15431, 15446), True, 'import torch.nn as nn\n'), ((15538, 15566), 'torch.nn.Linear', 'nn.Linear', (['(256 * 2 * 2)', '(4096)'], {}), '(256 * 2 * 2, 4096)\n', (15547, 15566), True, 'import torch.nn as nn\n'), ((15580, 15601), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (15587, 15601), True, 'import torch.nn as nn\n'), ((15642, 15663), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (15651, 15663), True, 'import torch.nn as nn\n'), ((15677, 15698), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (15684, 15698), True, 'import torch.nn as nn\n'), ((15712, 15740), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'num_classes'], {}), '(4096, num_classes)\n', (15721, 15740), True, 'import torch.nn as nn\n'), ((2144, 2195), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'v'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, v, kernel_size=3, padding=1)\n', (2153, 2195), True, 'import torch.nn as nn\n'), ((4097, 4182), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False\n )\n', (4106, 4182), True, 'import torch.nn as nn\n'), ((7023, 
7065), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(2)', 'num_channels': 'x'}), '(num_groups=2, num_channels=x)\n', (7035, 7065), True, 'import torch.nn as nn\n'), ((11430, 11491), 'torch.nn.GroupNorm', 'torch.nn.GroupNorm', ([], {'num_groups': '(4)', 'num_channels': 'c.num_features'}), '(num_groups=4, num_channels=c.num_features)\n', (11448, 11491), False, 'import torch, torchvision\n'), ((12384, 12448), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n', (12393, 12448), True, 'import torch.nn as nn\n'), ((13447, 13494), 'torch.load', 'torch.load', (['pretrained_path'], {'map_location': '"""cpu"""'}), "(pretrained_path, map_location='cpu')\n", (13457, 13494), False, 'import torch, torchvision\n'), ((14012, 14059), 'torch.load', 'torch.load', (['pretrained_path'], {'map_location': '"""cpu"""'}), "(pretrained_path, map_location='cpu')\n", (14022, 14059), False, 'import torch, torchvision\n'), ((16897, 16948), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'v'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, v, kernel_size=3, padding=1)\n', (16906, 16948), True, 'import torch.nn as nn\n'), ((1701, 1717), 'numpy.sqrt', 'np.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (1708, 1717), True, 'import numpy as np\n'), ((2062, 2099), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (2074, 2099), True, 'import torch.nn as nn\n'), ((16541, 16557), 'numpy.sqrt', 'np.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (16548, 16557), True, 'import numpy as np\n'), ((16815, 16852), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (16827, 16852), True, 'import torch.nn as nn\n'), ((2266, 2283), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['v'], {}), '(v)\n', (2280, 2283), True, 'import torch.nn as 
nn\n'), ((2285, 2306), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2292, 2306), True, 'import torch.nn as nn\n'), ((2369, 2390), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2376, 2390), True, 'import torch.nn as nn\n'), ((17019, 17036), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['v'], {}), '(v)\n', (17033, 17036), True, 'import torch.nn as nn\n'), ((17038, 17059), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (17045, 17059), True, 'import torch.nn as nn\n'), ((17122, 17143), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (17129, 17143), True, 'import torch.nn as nn\n')] |
import numpy as np
import pandas as pd
from vimms.old_unused_experimental.PythonMzmine import get_base_scoring_df
from vimms.Roi import make_roi
# Default ROI-extraction parameters for QCB mzML files, consumed by get_rois()
# (keys mirror the keyword arguments of vimms.Roi.make_roi).
# NOTE(review): 'min_ms1_intensity' and 'min_length' appear unused by get_rois,
# which passes its own min_roi_length instead — confirm against other callers.
QCB_MZML2CHEMS_DICT = {'min_ms1_intensity': 1.75E5,
                       'mz_tol': 2,  # m/z tolerance, in the units given below
                       'mz_units': 'ppm',
                       'min_length': 1,
                       'min_intensity': 0,
                       'start_rt': 0,  # RT window (seconds) to scan
                       'stop_rt': 1560}
def get_rois(mzml, min_roi_length, mzml2chems_dict=QCB_MZML2CHEMS_DICT):
    """Extract regions of interest (ROIs) from an mzML file.

    Arguments:
        mzml: path to the mzML file to process
        min_roi_length: minimum number of scans for an ROI to be kept as 'good'
        mzml2chems_dict: ROI-extraction parameters (see QCB_MZML2CHEMS_DICT)

    Returns:
        (good_roi, junk_roi): ROIs meeting the length requirement, and the rest
    """
    roi_params = dict(mz_tol=mzml2chems_dict['mz_tol'],
                      mz_units=mzml2chems_dict['mz_units'],
                      min_length=min_roi_length,
                      min_intensity=mzml2chems_dict['min_intensity'],
                      start_rt=mzml2chems_dict['start_rt'],
                      stop_rt=mzml2chems_dict['stop_rt'])
    return make_roi(mzml, **roi_params)
def mzml2classificationdata(mzmls, mzml_picked_peaks_files, min_roi_length=5, mzml2chems_dict=QCB_MZML2CHEMS_DICT,
                            mz_slack=0.01, drift_window_lengths=None, rt_peak_tol=2, include_status=True):
    """Build a per-scan classification data set from mzML / picked-peaks file pairs.

    For each mzML file, ROIs are extracted and matched against the corresponding
    picked-peaks file, producing drift features and (optionally) RT-status labels.

    Arguments:
        mzmls: list of mzML file paths
        mzml_picked_peaks_files: list of picked-peaks files, parallel to ``mzmls``
        min_roi_length: minimum number of scans for an ROI to be kept
        mzml2chems_dict: ROI-extraction parameters (see ``QCB_MZML2CHEMS_DICT``)
        mz_slack: m/z tolerance when matching ROIs to picked peaks
        drift_window_lengths: window lengths for the drift features (default ``[5]``)
        rt_peak_tol: RT tolerance used when labelling scans near a peak apex
        include_status: whether to compute the 'rt_status' label column

    Returns:
        (df, rois): combined observation DataFrame and the list of good ROIs
    """
    # None-sentinel instead of a mutable default argument; effective default is [5].
    if drift_window_lengths is None:
        drift_window_lengths = [5]
    rois, frames = [], []
    for mzml, peaks_file in zip(mzmls, mzml_picked_peaks_files):
        good_roi, junk_roi = get_rois(mzml, min_roi_length, mzml2chems_dict)
        rois.extend(good_roi)
        picked_peaks = get_base_scoring_df(peaks_file)
        frames.append(rois2classificationdata2(good_roi, picked_peaks, mz_slack=mz_slack,
                                               drift_window_lengths=drift_window_lengths,
                                               rt_peak_tol=rt_peak_tol,
                                               include_status=include_status))
    # Concatenate once (linear) instead of pd.concat inside the loop (quadratic);
    # also return an empty frame instead of raising NameError when mzmls is empty.
    df = pd.concat(frames) if frames else pd.DataFrame()
    return df, rois
class get_prob_classifier(object):
    """Empirical classifier mapping a drift-window change count to class probabilities.

    Builds labelled training data from the given mzML / picked-peaks file pairs,
    then, for each observed value of the drift feature (number of intensity
    increases within a window of ``roi_change_n`` scans), records the empirical
    frequency of each RT-status class.

    Arguments:
        mzmls: list of mzML file paths
        mzml_picked_peaks_files: list of picked-peaks files, parallel to ``mzmls``
        min_roi_length: minimum number of scans for an ROI to be kept
        mzml2chems_dict: ROI-extraction parameters (see ``QCB_MZML2CHEMS_DICT``)
        mz_slack: m/z tolerance when matching ROIs to picked peaks
        roi_change_n: drift-window length used for the change feature
        rt_peak_tol: RT tolerance used when labelling scans
    """

    def __init__(self, mzmls, mzml_picked_peaks_files, min_roi_length=5, mzml2chems_dict=QCB_MZML2CHEMS_DICT,
                 mz_slack=0.01, roi_change_n=5, rt_peak_tol=2):
        self.roi_change_n = roi_change_n
        df, rois = mzml2classificationdata(mzmls, mzml_picked_peaks_files, min_roi_length, mzml2chems_dict,
                                           mz_slack, [roi_change_n], rt_peak_tol, include_status=True)
        # Drop rows lacking the drift feature (the first roi_change_n - 1 scans of each ROI are None)
        df = df.dropna(thresh=2)
        base_classes = ['Decrease', 'Increase', 'Noise', 'Top']
        self.probabilities = []
        # For each observed drift value i, record the empirical class frequencies.
        # NOTE(review): a drift value with zero observations would divide by zero — confirm inputs cover all values.
        for i in range(int(max(df.iloc[:, 0]) + 1)):
            i_classes = df['rt_status'].iloc[np.where(df.iloc[:, 0] == i)[0]]
            probs = np.array([sum(i_classes == base) for base in base_classes]) / len(i_classes)
            self.probabilities.append(probs)

    def predict(self, value):
        """Return the class-probability vector for drift value ``value``.

        Probabilities are ordered as ['Decrease', 'Increase', 'Noise', 'Top'].
        """
        return self.probabilities[value]
def calculate_window_change(intensities, drift_window_len):
    """Count intensity increases within the trailing window of a signal.

    Inspects the last ``drift_window_len`` readings and counts positions where
    the value rose relative to the previous reading.

    Arguments:
        intensities: sequence of intensity values, most recent last
        drift_window_len: number of trailing readings to inspect

    Returns:
        int: number of positive first differences in the trailing window
            (at most ``drift_window_len - 1``)
    """
    window = np.asarray(intensities[-drift_window_len:], dtype=float)
    # np.diff of the trailing window equals the original paired-slice
    # subtraction, but also handles inputs shorter than the window (the old
    # form raised a broadcast error there) and empty input.
    return int(np.sum(np.diff(window) > 0))
def find_possible_peaks(roi, picked_peaks, mz_slack):
    """Find indices of picked peaks that plausibly match an ROI.

    A peak is a candidate when its RT range overlaps the ROI's RT span and a
    rough m/z check passes; it is confirmed when the mean m/z of the ROI scans
    falling inside the peak's RT range lies within the peak's m/z bounds
    (widened by ``mz_slack``).

    Arguments:
        roi: ROI object exposing ``rt_list``, ``mz_list`` and ``get_mean_mz()``
        picked_peaks: DataFrame with 'rt min'/'rt max'/'m/z min'/'m/z max' columns
        mz_slack: extra m/z tolerance applied to the confirmation check

    Returns:
        list of row indices into ``picked_peaks`` for confirmed matches
    """
    roi_start, roi_end = roi.rt_list[0], roi.rt_list[-1]
    # RT overlap: peak starts inside the ROI, ends inside it, or spans it entirely
    starts_inside = (picked_peaks['rt min'] >= roi_start) & (roi_end >= picked_peaks['rt min'])
    ends_inside = (picked_peaks['rt max'] >= roi_start) & (roi_end >= picked_peaks['rt max'])
    spans_roi = (picked_peaks['rt min'] <= roi_start) & (picked_peaks['rt max'] >= roi_end)
    rt_overlap = starts_inside | ends_inside | spans_roi
    # Loose m/z prefilter: +/- 1 is deliberate slack for the initial check
    roi_mean_mz = roi.get_mean_mz()
    rough_mz = (picked_peaks['m/z max'] + 1 >= roi_mean_mz) & (roi_mean_mz >= picked_peaks['m/z min'] - 1)
    candidates = np.where(np.logical_and(rt_overlap, rough_mz))[0]
    confirmed = []
    for idx in candidates:
        peak = picked_peaks.iloc[idx]
        # Scans of the ROI strictly inside the peak's RT range
        inside = np.nonzero((peak['rt min'] < roi.rt_list) & (roi.rt_list < peak['rt max']))[0]
        local_mean_mz = np.mean(np.array(roi.mz_list)[inside])
        if peak['m/z min'] - mz_slack < local_mean_mz < peak['m/z max'] + mz_slack:
            confirmed.append(idx)
    return confirmed
def rois2classificationdata2(rois, picked_peaks, mz_slack=0.01, drift_window_lengths=[5], rt_peak_tol=2,
                             include_status=True):
    """Turn ROIs and picked peaks into a per-scan observation DataFrame.

    For each scan of each ROI this produces one drift feature per window length
    (count of intensity increases in the trailing window; the first
    ``window - 1`` scans of each ROI get None) and, optionally, an 'rt_status'
    class label ('Noise' / 'Decrease' / 'Increase' / 'Top') derived from the
    scan's RT position relative to any matching picked peak.

    NOTE(review): mutable default argument ``drift_window_lengths=[5]`` — it is
    never mutated here, but a None-sentinel would be safer.

    Arguments:
        rois: iterable of ROI objects (``rt_list``, ``intensity_list``, ``n``, ...)
        picked_peaks: DataFrame with 'rt min'/'rt centre'/'rt max'/'m/z min'/'m/z max'
        mz_slack: m/z tolerance when matching ROIs to picked peaks
        drift_window_lengths: trailing-window lengths for the drift features
        rt_peak_tol: RT tolerance (same units as rt_list) for the 'Top' label
        include_status: whether to compute the 'rt_status' column

    Returns:
        DataFrame with one 'roi_change_<w>' column per window length and,
        if ``include_status``, an 'rt_status' string column
    """
    roi_change_list = [[] for i in range(len(drift_window_lengths))]
    rt_status_list = []
    for roi in rois:
        # get drift data
        for window in range(len(drift_window_lengths)):
            # Pad with None where the window is not yet full
            roi_change_list[window].extend([None for i in range(drift_window_lengths[window] - 1)])
            roi_change = [calculate_window_change(roi.intensity_list[:i], drift_window_lengths[window])
                          for i in range(drift_window_lengths[window], roi.n + 1)]
            roi_change_list[window].extend(roi_change)
        # get possible peaks
        if include_status:
            possible_peaks = find_possible_peaks(roi, picked_peaks, mz_slack)
            possible_peaks_list = picked_peaks.iloc[possible_peaks]
            # get data
            if not possible_peaks:
                # No matching peak: every scan of this ROI is noise (status 0)
                rt_status_list.extend([0 for rt in roi.rt_list])
            else:
                # Label each scan with the strongest status over all matching peaks:
                # 3 = near apex, 2 = rising edge, 1 = falling edge, 0 = outside
                for rt in roi.rt_list:
                    rt_status = 0
                    for j in range(len(possible_peaks_list.index)):
                        if possible_peaks_list['rt centre'].iloc[j] - rt_peak_tol <= rt <= \
                                possible_peaks_list['rt centre'].iloc[j] + rt_peak_tol:
                            rt_status = max(3, rt_status)
                        elif possible_peaks_list['rt min'].iloc[j] <= rt <= possible_peaks_list['rt centre'].iloc[j]:
                            rt_status = max(2, rt_status)
                        elif possible_peaks_list['rt centre'].iloc[j] <= rt <= possible_peaks_list['rt max'].iloc[j]:
                            rt_status = max(1, rt_status)
                        else:
                            rt_status = max(0, rt_status)
                    rt_status_list.append(rt_status)
    # convert rt status to classes
    if include_status:
        rt_status_list = np.array(rt_status_list)
        rt_status_list_str = np.array(['Unknown' for i in range(len(rt_status_list))], dtype="<U10")
        rt_status_list_str[np.where(rt_status_list == 0)[0]] = 'Noise'
        rt_status_list_str[np.where(rt_status_list == 1)[0]] = 'Decrease'
        rt_status_list_str[np.where(rt_status_list == 2)[0]] = 'Increase'
        rt_status_list_str[np.where(rt_status_list == 3)[0]] = 'Top'
    # save as data frame
    df = pd.DataFrame()
    for window in range(len(drift_window_lengths)):
        df['roi_change_' + str(drift_window_lengths[window])] = roi_change_list[window]
    if include_status:
        df['rt_status'] = rt_status_list_str
    return df
# def get_intensity_difference(roi_intensities, n, positive=True):
# # add exception for short roi
# difference = []
# for i in range(len(roi_intensities) - n):
# difference.append(np.log(roi_intensities[i + n]) - np.log(roi_intensities[i]))
# if positive:
# return max(difference)
# else:
# return min(difference)
#
#
# def get_max_increasing(roi_intensities, n_skip=0, increasing_TF=True):
# # add exception for short roi
# max_increasing = 0
# for i in range(len(roi_intensities)):
# current_increasing = 0
# current_skip = 0
# if len(roi_intensities[i:]) <= max_increasing:
# break
# for j in range(1, len(roi_intensities[i:])):
# if (roi_intensities[i:][j] > roi_intensities[i:][j - 1 - current_skip]) == increasing_TF:
# current_increasing += 1 + current_skip
# current_skip = 0
# else:
# current_skip += 1
# if current_skip > n_skip:
# max_increasing = max(max_increasing, current_increasing)
# break
# return max_increasing
#
#
# def get_intensity_list(roi, max_length):
# if max_length is None:
# return roi.intensity_list
# else:
# return roi.intensity_list[0:max_length]
# def rois2classificationdata(rois, picked_peaks, mz_slack=0.01):
# base_roi = []
# base_status = []
# split_roi = []
# split_status = []
# for roi in rois:
# rt_check1 = (picked_peaks['rt min'] >= roi.rt_list[0]) & (roi.rt_list[-1] >= picked_peaks['rt min'])
# rt_check2 = (picked_peaks['rt max'] >= roi.rt_list[0]) & (roi.rt_list[-1] >= picked_peaks['rt max'])
# rt_check3 = (picked_peaks['rt min'] <= roi.rt_list[0]) & (picked_peaks['rt max'] >= roi.rt_list[-1])
# rt_check = rt_check1 | rt_check2 | rt_check3
# # plus and minus one is just slack for the initial check
# initial_mz_check = (picked_peaks['m/z max'] + 1 >= roi.get_mean_mz()) & (
# roi.get_mean_mz() >= picked_peaks['m/z min'] - 1)
# possible_peaks = np.nonzero(rt_check & initial_mz_check)[0]
# if len(possible_peaks) == 0:
# base_roi.append(roi)
# split_roi.append(roi)
# base_status.append(0)
# split_status.append(0)
# else:
# updated_possible_peaks = []
# for j in possible_peaks:
# peak = picked_peaks.iloc[j]
# check_peak = np.nonzero((peak['rt min'] < roi.rt_list) & (roi.rt_list < peak['rt max']))[0]
# mean_mz = np.mean(np.array(roi.mz_list)[check_peak])
# if peak['m/z min'] - mz_slack < mean_mz < peak['m/z max'] + mz_slack:
# updated_possible_peaks.append(j)
# if len(updated_possible_peaks) == 0:
# base_roi.append(roi)
# split_roi.append(roi)
# base_status.append(0)
# split_status.append(0)
# else:
# if len(updated_possible_peaks) == 1:
# base_roi.append(roi)
# split_roi.append(roi)
# base_status.append(1)
# split_status.append(1)
# if len(updated_possible_peaks) > 1:
# base_roi.append(roi)
# base_status.append(1)
# df = picked_peaks.iloc[updated_possible_peaks]
# df = df.sort_values(by=['rt min'])
# splits = (np.array(df['rt min'][1:]) + np.array(df['rt max'][0:-1])) / 2
# splits = np.insert(np.insert(splits, 0, 0), len(splits) + 1, 2000)
# for j in range(len(splits) - 1):
# check_range1 = roi.rt_list > splits[j]
# check_range2 = roi.rt_list < splits[j + 1]
# mz = np.array(roi.mz_list)[np.nonzero(check_range1 & check_range2)[0]].tolist()
# rt = np.array(roi.rt_list)[np.nonzero(check_range1 & check_range2)[0]].tolist()
# intensity = np.array(roi.intensity_list)[np.nonzero(check_range1 & check_range2)].tolist()
# split_roi.append(Roi(mz, rt, intensity))
# split_status.append(1)
# return base_roi, base_status, split_roi, split_status
#
#
# def get_roi_classification_params(rois, roi_param_dict):
# df = pd.DataFrame()
# if roi_param_dict['include_log_max_intensity']:
# df['log_max_intensity'] = np.log([roi.get_max_intensity() for roi in rois])
# if roi_param_dict['include_log_intensity_difference']:
# df['log_intensity_difference'] = np.log(df['log_max_intensity']) - np.log([roi.get_min_intensity() for roi in rois])
# if roi_param_dict['consecutively_change_max'] > 0:
# for i in range(roi_param_dict['consecutively_change_max']):
# df['n_increase_' + str(i)] = [get_max_increasing(roi.intensity_list, i, True) for roi in rois]
# df['n_decrease_' + str(i)] = [get_max_increasing(roi.intensity_list, i, False) for roi in rois]
# df['n_interaction_' + str(i)] = df['n_increase_' + str(i)] * df['n_decrease_' + str(i)]
# if roi_param_dict['intensity_change_max'] > 0:
# for i in range(roi_param_dict['intensity_change_max']):
# df['intensity_increase_' + str(i)] = [get_intensity_difference(roi.intensity_list, i+1, True) for roi in rois]
# df['intensity_decrease_' + str(i)] = [get_intensity_difference(roi.intensity_list, i+1, False) for roi in rois]
# df['intensity_interaction_' + str(i)] = df['intensity_increase_' + str(i)] * df['intensity_decrease_' + str(i)]
# if roi_param_dict['lag_max'] > 0:
# for i in range(roi_param_dict['lag_max']):
# df['autocorrelation_' + str(i+1)] = [roi.get_autocorrelation(i+1) for roi in rois]
# return df
| [
"numpy.logical_and",
"vimms.old_unused_experimental.PythonMzmine.get_base_scoring_df",
"numpy.where",
"numpy.array",
"vimms.Roi.make_roi",
"numpy.nonzero",
"pandas.DataFrame",
"pandas.concat"
] | [((538, 788), 'vimms.Roi.make_roi', 'make_roi', (['mzml'], {'mz_tol': "mzml2chems_dict['mz_tol']", 'mz_units': "mzml2chems_dict['mz_units']", 'min_length': 'min_roi_length', 'min_intensity': "mzml2chems_dict['min_intensity']", 'start_rt': "mzml2chems_dict['start_rt']", 'stop_rt': "mzml2chems_dict['stop_rt']"}), "(mzml, mz_tol=mzml2chems_dict['mz_tol'], mz_units=mzml2chems_dict[\n 'mz_units'], min_length=min_roi_length, min_intensity=mzml2chems_dict[\n 'min_intensity'], start_rt=mzml2chems_dict['start_rt'], stop_rt=\n mzml2chems_dict['stop_rt'])\n", (546, 788), False, 'from vimms.Roi import make_roi\n'), ((6573, 6587), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6585, 6587), True, 'import pandas as pd\n'), ((1275, 1322), 'vimms.old_unused_experimental.PythonMzmine.get_base_scoring_df', 'get_base_scoring_df', (['mzml_picked_peaks_files[i]'], {}), '(mzml_picked_peaks_files[i])\n', (1294, 1322), False, 'from vimms.old_unused_experimental.PythonMzmine import get_base_scoring_df\n'), ((6125, 6149), 'numpy.array', 'np.array', (['rt_status_list'], {}), '(rt_status_list)\n', (6133, 6149), True, 'import numpy as np\n'), ((1665, 1688), 'pandas.concat', 'pd.concat', (['[df, df_new]'], {}), '([df, df_new])\n', (1674, 1688), True, 'import pandas as pd\n'), ((3669, 3711), 'numpy.logical_and', 'np.logical_and', (['rt_check', 'initial_mz_check'], {}), '(rt_check, initial_mz_check)\n', (3683, 3711), True, 'import numpy as np\n'), ((3834, 3909), 'numpy.nonzero', 'np.nonzero', (["((peak['rt min'] < roi.rt_list) & (roi.rt_list < peak['rt max']))"], {}), "((peak['rt min'] < roi.rt_list) & (roi.rt_list < peak['rt max']))\n", (3844, 3909), True, 'import numpy as np\n'), ((3939, 3960), 'numpy.array', 'np.array', (['roi.mz_list'], {}), '(roi.mz_list)\n', (3947, 3960), True, 'import numpy as np\n'), ((6278, 6307), 'numpy.where', 'np.where', (['(rt_status_list == 0)'], {}), '(rt_status_list == 0)\n', (6286, 6307), True, 'import numpy as np\n'), ((6349, 6378), 'numpy.where', 
'np.where', (['(rt_status_list == 1)'], {}), '(rt_status_list == 1)\n', (6357, 6378), True, 'import numpy as np\n'), ((6423, 6452), 'numpy.where', 'np.where', (['(rt_status_list == 2)'], {}), '(rt_status_list == 2)\n', (6431, 6452), True, 'import numpy as np\n'), ((6497, 6526), 'numpy.where', 'np.where', (['(rt_status_list == 3)'], {}), '(rt_status_list == 3)\n', (6505, 6526), True, 'import numpy as np\n'), ((2399, 2427), 'numpy.where', 'np.where', (['(df.iloc[:, 0] == i)'], {}), '(df.iloc[:, 0] == i)\n', (2407, 2427), True, 'import numpy as np\n'), ((2724, 2745), 'numpy.array', 'np.array', (['intensities'], {}), '(intensities)\n', (2732, 2745), True, 'import numpy as np\n'), ((2774, 2795), 'numpy.array', 'np.array', (['intensities'], {}), '(intensities)\n', (2782, 2795), True, 'import numpy as np\n')] |
from typing import Optional, Callable, Any, List, Dict
import numpy as np
from functools import partial
import torch.nn as nn
import torch
from torch import Tensor
from ..layers.activations import lookup_act
from ..initialisations import lookup_normal_init
from .abs_block import AbsBlock
__all__ = ['FullyConnected', 'MultiBlock']
class AbsBody(AbsBlock):
    r'''
    Abstract base class for body blocks: records the input width, the mapping
    of input features to head-block outputs, the activation lookup, and the
    BatchNorm class, on top of the :class:`AbsBlock` init/freeze behaviour.
    '''

    def __init__(self, n_in:int, feat_map:Dict[str,List[int]],
                 lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init,
                 lookup_act:Callable[[str],Any]=lookup_act, freeze:bool=False, bn_class:Callable[[int],nn.Module]=nn.BatchNorm1d):
        super().__init__(lookup_init=lookup_init, freeze=freeze)
        self.n_in = n_in
        self.feat_map = feat_map
        self.lookup_act = lookup_act
        self.bn_class = bn_class
class FullyConnected(AbsBody):
    r'''
    Fully connected set of hidden layers. Designed to be passed as a 'body' to :class:`~lumin.nn.models.model_builder.ModelBuilder`.
    Supports batch normalisation and dropout.
    Order is dense->activation->BN->DO, except when res is true in which case the BN is applied after the addition.
    Can optionaly have skip connections between each layer (res=true).
    Alternatively can concatinate layers (dense=true)
    growth_rate parameter can be used to adjust the width of layers according to width+(width*(depth-1)*growth_rate)
    Arguments:
        n_in: number of inputs to the block
        feat_map: dictionary mapping input features to the model to outputs of head block
        depth: number of hidden layers. If res==True and depth is even, depth will be increased by one.
        width: base width of each hidden layer
        do: if not None will add dropout layers with dropout rates do
        bn: whether to use batch normalisation
        act: string representation of argument to pass to lookup_act
        res: whether to add an additative skip connection every two dense layers. Mutually exclusive with dense.
        dense: whether to perform layer-wise concatinations after every layer. Mutually exclusion with res.
        growth_rate: rate at which width of dense layers should increase with depth beyond the initial layer. Ignored if res=True. Can be negative.
        lookup_init: function taking choice of activation function, number of inputs, and number of outputs an returning a function to initialise layer weights.
        lookup_act: function taking choice of activation function and returning an activation function layer
        freeze: whether to start with module parameters set to untrainable
        bn_class: class to use for BatchNorm, default is `nn.BatchNorm1d`
    Examples::
        >>> body = FullyConnected(n_in=32, feat_map=head.feat_map, depth=4,
        ...                       width=100, act='relu')
        >>>
        >>> body = FullyConnected(n_in=32, feat_map=head.feat_map, depth=4,
        ...                       width=200, act='relu', growth_rate=-0.3)
        >>>
        >>> body = FullyConnected(n_in=32, feat_map=head.feat_map, depth=4,
        ...                       width=100, act='swish', do=0.1, res=True)
        >>>
        >>> body = FullyConnected(n_in=32, feat_map=head.feat_map, depth=6,
        ...                       width=32, act='selu', dense=True,
        ...                       growth_rate=0.5)
        >>>
        >>> body = FullyConnected(n_in=32, feat_map=head.feat_map, depth=6,
        ...                       width=50, act='prelu', bn=True,
        ...                       lookup_init=lookup_uniform_init)
    '''

    # growth_rate annotated as float: docstring and examples use fractional rates (e.g. -0.3, 0.5)
    def __init__(self, n_in:int, feat_map:Dict[str,List[int]], depth:int, width:int, do:float=0, bn:bool=False, act:str='relu', res:bool=False,
                 dense:bool=False, growth_rate:float=0, lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init,
                 lookup_act:Callable[[str],Any]=lookup_act, freeze:bool=False, bn_class:Callable[[int],nn.Module]=nn.BatchNorm1d):
        super().__init__(n_in=n_in, feat_map=feat_map, lookup_init=lookup_init, lookup_act=lookup_act, freeze=freeze, bn_class=bn_class)
        self.depth,self.width,self.do,self.bn,self.act,self.res,self.dense,self.growth_rate = depth,width,do,bn,act,res,dense,growth_rate
        if self.res:
            self.depth = 1+int(np.floor(self.depth/2))  # One upscale layer + each subsequent block will contain 2 layers
            # One BatchNorm per residual pair, applied after the addition in forward()
            self.res_bns = nn.ModuleList([self.bn_class(self.width) for d in range(self.depth-1)])
            # First layer upscales n_in -> width; later entries are width -> width pairs
            self.layers = nn.ModuleList([self._get_layer(idx=d, fan_in=self.width, fan_out=self.width)
                                           if d > 0 else self._get_layer(idx=d, fan_in=self.n_in, fan_out=self.width)
                                           for d in range(self.depth)])
        elif self.dense:
            self.layers = []
            for d in range(self.depth):
                # Each layer sees the original input plus all previous layers' outputs (DenseNet-style concatenation)
                self.layers.append(self._get_layer(idx=d, fan_in=self.n_in if d == 0 else self.n_in+np.sum([l[0].out_features for l in self.layers]),
                                                   fan_out=max(1,self.width+int(self.width*d*self.growth_rate))))
            self.layers = nn.ModuleList(self.layers)
        else:
            # Plain feed-forward stack; layer widths scale with growth_rate beyond the first layer
            self.layers = nn.Sequential(*[self._get_layer(idx=d, fan_in=self.width+int(self.width*(d-1)*self.growth_rate),
                                                            fan_out=self.width+int(self.width*d*self.growth_rate))
                                            if d > 0 else self._get_layer(idx=d, fan_in=self.n_in, fan_out=self.width)
                                            for d in range(self.depth)])
        if self.freeze: self.freeze_layers()

    def _get_layer(self, idx:int, fan_in:Optional[int]=None, fan_out:Optional[int]=None) -> nn.Module:
        r'''
        Build one hidden "layer": Linear -> activation -> BN -> dropout.
        In residual mode, layers after the first contain two Linear sub-layers
        (one residual pair), with BN only after the first of the two.
        '''
        fan_in = self.width if fan_in is None else fan_in
        fan_out = self.width if fan_out is None else fan_out
        # Guard against non-positive widths from a negative growth_rate
        if fan_in < 1: fan_in = 1
        if fan_out < 1: fan_out = 1
        layers = []
        for i in range(2 if self.res and idx > 0 else 1):
            layers.append(nn.Linear(fan_in, fan_out))
            self.lookup_init(self.act, fan_in, fan_out)(layers[-1].weight)
            nn.init.zeros_(layers[-1].bias)
            if self.act != 'linear': layers.append(self.lookup_act(self.act))
            if self.bn and i == 0: layers.append(self.bn_class(fan_out))  # In case of residual, BN will be added after addition
            if self.do:
                # SELU networks need AlphaDropout to preserve self-normalisation
                if self.act == 'selu': layers.append(nn.AlphaDropout(self.do))
                else: layers.append(nn.Dropout(self.do))
        return nn.Sequential(*layers)

    def forward(self, x:Tensor) -> Tensor:
        r'''
        Pass the input through the hidden layers.
        Arguments:
            x: incoming tensor
        Returns:
            Output tensor
        '''
        if self.dense:
            # Concatenate each layer's output with its input; final layer output is not concatenated
            for l in self.layers[:-1]: x = torch.cat((l(x), x), -1)
            x = self.layers[-1](x)
        elif self.res:
            for i, l in enumerate(self.layers):
                if i > 0:
                    x = l(x)+x
                    x = self.res_bns[i-1](x)  # Renormalise after addition
                else:
                    x = l(x)
        else:
            x = self.layers(x)
        return x

    def get_out_size(self) -> int:
        r'''
        Get size width of output layer
        Returns:
            Width of output layer
        '''
        # layers[-1] is an nn.Sequential whose first element is the final nn.Linear
        return self.layers[-1][0].out_features
class MultiBlock(AbsBody):
r'''
Body block allowing outputs of head block to be split amongst a series of body blocks.
Output is the concatination of all sub-body blocks.
Optionally, single-neuron 'bottleneck' layers can be used to pass an input to each sub-block based on a learned function of the input features that block
would otherwise not receive, i.e. a highly compressed representation of the rest of teh feature space.
Arguments:
n_in: number of inputs to the block
feat_map: dictionary mapping input features to the model to outputs of head block
blocks: list of uninstantciated :class:`~lumin.nn.models.blocks.body.AbsBody` blocks to which to pass a subsection of the total inputs. Note that
partials should be used to set any relevant parameters at initialisation time
feats_per_block: list of lists of names of features to pass to each :class:`~lumin.nn.models.blocks.body.AbsBody`, not that the feat_map provided by
:class:`~lumin.nn.models.blocks.head.AbsHead` will map features to their relavant head outputs
bottleneck: if true, each block will receive the output of a single neuron which takes as input all the features which each given block does not
directly take as inputs
bottleneck_act: if set to a string representation of an activation function, the output of each bottleneck neuron will be passed throguh the defined
activation function before being passed to their associated blocks
lookup_init: function taking choice of activation function, number of inputs, and number of outputs an returning a function to initialise layer weights.
lookup_act: function taking choice of activation function and returning an activation function layer
freeze: whether to start with module parameters set to untrainable
Examples::
>>> body = MultiBlock(
... blocks=[partial(FullyConnected, depth=1, width=50, act='swish'),
... partial(FullyConnected, depth=6, width=55, act='swish',
... dense=True, growth_rate=-0.1)],
... feats_per_block=[[f for f in train_feats if 'DER_' in f],
... [f for f in train_feats if 'PRI_' in f]])
>>>
>>> body = MultiBlock(
... blocks=[partial(FullyConnected, depth=1, width=50, act='swish'),
... partial(FullyConnected, depth=6, width=55, act='swish',
... dense=True, growth_rate=-0.1)],
... feats_per_block=[[f for f in train_feats if 'DER_' in f],
... [f for f in train_feats if 'PRI_' in f]],
... bottleneck=True)
>>>
>>> body = MultiBlock(
... blocks=[partial(FullyConnected, depth=1, width=50, act='swish'),
... partial(FullyConnected, depth=6, width=55, act='swish',
... dense=True, growth_rate=-0.1)],
... feats_per_block=[[f for f in train_feats if 'DER_' in f],
... [f for f in train_feats if 'PRI_' in f]],
... bottleneck=True, bottleneck_act='swish')
'''
def __init__(self, n_in:int, feat_map:Dict[str,List[int]], blocks:List[partial], feats_per_block:List[List[str]],
bottleneck_sz:int=0, bottleneck_act:Optional[str]=None,
lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init,
lookup_act:Callable[[str],Any]=lookup_act, freeze:bool=False):
super().__init__(n_in=n_in, feat_map=feat_map, lookup_init=lookup_init, lookup_act=lookup_act, freeze=freeze)
self.feats_per_block,self.bottleneck_sz,self.bottleneck_act = feats_per_block,bottleneck_sz,bottleneck_act
self.blocks,self.n_out,self.masks,self.bottleneck_blocks = [],0,[],None
if self.bottleneck_sz > 0:
self.bottleneck_blocks,self.bottleneck_masks = [],[]
for fpb in self.feats_per_block:
tmp_map = {f: self.feat_map[f] for f in self.feat_map if f not in fpb}
self.bottleneck_masks.append([i for f in tmp_map for i in tmp_map[f]])
self.bottleneck_blocks.append(self._get_bottleneck(self.bottleneck_masks[-1]))
self.bottleneck_blocks = nn.ModuleList(self.bottleneck_blocks)
for i, b in enumerate(blocks):
tmp_map = {f: self.feat_map[f] for f in self.feat_map if f in self.feats_per_block[i]}
self.masks.append([i for f in tmp_map for i in tmp_map[f]])
self.blocks.append(b(n_in=len(self.masks[-1])+self.bottleneck_sz, feat_map=tmp_map, lookup_init=self.lookup_init,
lookup_act=self.lookup_act, freeze=self.freeze))
self.n_out += self.blocks[-1].get_out_size()
self.blocks = nn.ModuleList(self.blocks)
def _get_bottleneck(self, mask:List[int]) -> nn.Module:
layers = [nn.Linear(len(mask), self.bottleneck_sz)]
if self.bottleneck_act is None:
init = self.lookup_init('linear', len(mask), self.bottleneck_sz)
else:
init = self.lookup_init(self.bottleneck_act, len(mask), self.bottleneck_sz)
layers.append(self.lookup_act(self.bottleneck_act))
init(layers[0].weight)
nn.init.zeros_(layers[0].bias)
return nn.Sequential(*layers)
    def get_out_size(self) -> int:
        r'''
        Get size width of output layer, i.e. the width of the concatenation
        produced by :meth:`forward`.

        Returns:
            Total number of outputs across all blocks
        '''
        return self.n_out
def forward(self, x:Tensor) -> Tensor:
y = None
for i, b in enumerate(self.blocks):
if self.bottleneck_sz:
a = self.bottleneck_blocks[i](x[:,self.bottleneck_masks[i]])
tmp_x = torch.cat((x[:,self.masks[i]], a), -1)
else:
tmp_x = x[:,self.masks[i]]
out = b(tmp_x)
if y is None: y = out
else: y = torch.cat((y, out), -1)
return y
| [
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.ModuleList",
"numpy.floor",
"torch.nn.init.zeros_",
"numpy.sum",
"torch.nn.Linear",
"torch.nn.AlphaDropout",
"torch.cat"
] | [((6802, 6824), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (6815, 6824), True, 'import torch.nn as nn\n'), ((12441, 12467), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.blocks'], {}), '(self.blocks)\n', (12454, 12467), True, 'import torch.nn as nn\n'), ((12911, 12941), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['layers[0].bias'], {}), '(layers[0].bias)\n', (12925, 12941), True, 'import torch.nn as nn\n'), ((12957, 12979), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (12970, 12979), True, 'import torch.nn as nn\n'), ((6369, 6400), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['layers[-1].bias'], {}), '(layers[-1].bias)\n', (6383, 6400), True, 'import torch.nn as nn\n'), ((11905, 11942), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.bottleneck_blocks'], {}), '(self.bottleneck_blocks)\n', (11918, 11942), True, 'import torch.nn as nn\n'), ((5313, 5339), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.layers'], {}), '(self.layers)\n', (5326, 5339), True, 'import torch.nn as nn\n'), ((6254, 6280), 'torch.nn.Linear', 'nn.Linear', (['fan_in', 'fan_out'], {}), '(fan_in, fan_out)\n', (6263, 6280), True, 'import torch.nn as nn\n'), ((13432, 13471), 'torch.cat', 'torch.cat', (['(x[:, self.masks[i]], a)', '(-1)'], {}), '((x[:, self.masks[i]], a), -1)\n', (13441, 13471), False, 'import torch\n'), ((13623, 13646), 'torch.cat', 'torch.cat', (['(y, out)', '(-1)'], {}), '((y, out), -1)\n', (13632, 13646), False, 'import torch\n'), ((4466, 4490), 'numpy.floor', 'np.floor', (['(self.depth / 2)'], {}), '(self.depth / 2)\n', (4474, 4490), True, 'import numpy as np\n'), ((6687, 6711), 'torch.nn.AlphaDropout', 'nn.AlphaDropout', (['self.do'], {}), '(self.do)\n', (6702, 6711), True, 'import torch.nn as nn\n'), ((6766, 6785), 'torch.nn.Dropout', 'nn.Dropout', (['self.do'], {}), '(self.do)\n', (6776, 6785), True, 'import torch.nn as nn\n'), ((5139, 5187), 'numpy.sum', 'np.sum', (['[l[0].out_features for l in self.layers]'], 
{}), '([l[0].out_features for l in self.layers])\n', (5145, 5187), True, 'import numpy as np\n')] |
#!/usr/bin/python
import argparse
import os
import numpy as np
from dolfyn.adv.rotate import orient2euler
import dolfyn.adv.api as avm
from dolfyn.adv.motion import correct_motion
# TODO: add option to rotate into earth or principal frame (include
# principal_angle_True in output).
script_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser(
description="""
Perform motion correction of a Nortek Vector(.vec)
file and save the output in earth(u: East, v: North, w: up)
coordinates in units of m / s as a Matlab(TM)(.mat) file.
""")
parser.add_argument(
'-f',
default=0.03333,
help="""
Specify the high-pass filter frequency that is applied to the
acceleration prior to integrating acceleration (Accel) to get
velocity. Default '0.03333' (Hz) = 30sec.
"""
)
## parser.add_argument(
## '-F',
## default=0.01,
## help="""
## Specify the high-pass filter frequency that is applied to the integrated
## acceleration (uacc) to remove drift that remains after integration. Default
## '0.01' (Hz) = 100sec
## """
## )
parser.add_argument(
'-O',
default=None,
help="""
Specify the 'orientation' configuration file (default:
'<filename>.orient', or 'vector.orient', in that
order). Cable-Head Vector probes the orientation of, and distance
between, the head to the body is arbitrary. This option specifies
the file which defines these variables. For more information on
how to measure these variables, consult the
'dolfyn-src-dir/examples/motion_correct_example.orient'
"""
)
parser.add_argument(
'--fixed-head',
action='store_true',
help="""
This specifies that the 'fixed-head' orientation/geometry should be used to
compute head motion.
"""
)
parser.add_argument(
'--mat',
action='store_true',
help="""
Save the earth-frame motion-corrected data in Matlab format (default).
"""
)
## parser.add_argument(
## '--csv',
## action='store_true',
## help="""
## Save the earth-frame motion-corrected data in csv (comma-separated value) format.
## """
## )
parser.add_argument(
'--hdf5',
action='store_true',
help="""
Save the earth-frame motion-corrected data in the dolfyn-structured hdf5 format.
"""
)
## parser.add_argument(
## '--out-earth',
## action='store_true',
## help="""
## This specifies that the output data should be return in an earth
## (u:East, v:North, w:up) coordinate system (default: True).
## """
## )
###########
# I removed this option because the data in a raw file is often
# noisey, which will lead to inaccurate estimates of the principal
# angle. Data should be cleaned prior to rotating into the principal
# frame.
## parser.add_argument(
## '--out-principal',
## action='store_false',
## help="""
## This specifies that the output data should be returned in a
## 'principal axes' frame (u:streamwise, v:cross-stream, w:up)
## coordinate system.
## """
## )
parser.add_argument(
'filename',
help="""The filename(s) of the the Nortek Vector file(s) to be
processed(they probably end with '.vec').""",
action='append'
)
# Parse CLI arguments and resolve the head geometry: rotation matrix (rmat),
# body-to-head offset vector (vec), and magnetic declination (declin) --
# either from an orientation config file (-O) or the fixed-head defaults.
args = parser.parse_args()
declin = 0
if args.fixed_head != bool(args.O):
    # Exactly one of --fixed-head / -O was given.
    if bool(args.O):
        # SECURITY NOTE: the orientation file is executed as Python code;
        # only use orientation files from trusted sources.
        # (Use a context manager so the file handle is closed promptly.)
        with open(args.O) as cfg_file:
            exec(cfg_file.read())  # ROTMAT and VEC should be in this file.
        rmat = np.array(ROTMAT)
        vec = np.array(VEC)
        if 'DECLINATION' in vars():
            declin = DECLINATION
        del VEC, ROTMAT
    else:
        # Fixed-head defaults: identity rotation, head 0.21 m below the body.
        rmat = np.array([[1, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1]], dtype=np.float32)
        vec = np.array([0, 0, -0.21])  # in meters
else:
    raise Exception("""You must either specify --fixed-head, or specify
    an 'orientation' config file.""")
# Default to Matlab output when no output format was requested.
if not (args.mat or args.hdf5):
    args.mat = True
# Process each specified Vector file: read, attach geometry, motion-correct,
# and save in the requested format(s).
for fnm in args.filename:
    dat = avm.read_nortek(fnm)
    # Attach head geometry and declination for the motion correction.
    dat.props['body2head_rotmat'] = rmat
    dat.props['body2head_vec'] = vec
    dat.props['declination'] = declin
    # Set matlab 'datenum' time.
    dat.add_data('datenum', dat.mpltime.matlab_datenum, 'main')
    # Motion correction requires the IMU orientation matrix.
    if hasattr(dat, 'orientmat'):
        print('Performing motion correction...')
        correct_motion(dat, accel_filtfreq=args.f)  # Perform the motion correction.
        # Compute pitch,roll,heading from orientmat.
        dat.pitch[:], dat.roll[:], dat.heading[:] = orient2euler(dat.orientmat)
    else:
        print("""
        !!!--Warning--!!!: Orientation matrix('orientmat')
        not found. Motion correction cannot be performed on this file
        """)
    # BUGFIX: the original used fnm.rstrip('.vec').rstrip('.VEC'), but
    # str.rstrip strips a *character set*, not a suffix (e.g. 'wave.vec'
    # -> 'wa'). Strip the extension explicitly instead.
    basename = fnm[:-4] if fnm.lower().endswith('.vec') else fnm
    if args.mat:
        outnm = basename + '.mat'
        print('Saving to %s.' % outnm)
        # Save the data.
        dat.save_mat(outnm, groups=['main', 'orient'])
    if args.hdf5:
        outnm = basename + '.hdf5'
        print('Saving to %s.' % outnm)
        # Save the data.
        dat.save(outnm,)
    del dat
| [
"dolfyn.adv.motion.correct_motion",
"dolfyn.adv.rotate.orient2euler",
"argparse.ArgumentParser",
"os.path.dirname",
"numpy.array",
"dolfyn.adv.api.read_nortek"
] | [((299, 324), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (314, 324), False, 'import os\n'), ((335, 574), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n Perform motion correction of a Nortek Vector(.vec)\n file and save the output in earth(u: East, v: North, w: up)\n coordinates in units of m / s as a Matlab(TM)(.mat) file.\n """'}), '(description=\n """\n Perform motion correction of a Nortek Vector(.vec)\n file and save the output in earth(u: East, v: North, w: up)\n coordinates in units of m / s as a Matlab(TM)(.mat) file.\n """\n )\n', (358, 574), False, 'import argparse\n'), ((4250, 4270), 'dolfyn.adv.api.read_nortek', 'avm.read_nortek', (['fnm'], {}), '(fnm)\n', (4265, 4270), True, 'import dolfyn.adv.api as avm\n'), ((3659, 3675), 'numpy.array', 'np.array', (['ROTMAT'], {}), '(ROTMAT)\n', (3667, 3675), True, 'import numpy as np\n'), ((3690, 3703), 'numpy.array', 'np.array', (['VEC'], {}), '(VEC)\n', (3698, 3703), True, 'import numpy as np\n'), ((3822, 3883), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)\n', (3830, 3883), True, 'import numpy as np\n'), ((3948, 3971), 'numpy.array', 'np.array', (['[0, 0, -0.21]'], {}), '([0, 0, -0.21])\n', (3956, 3971), True, 'import numpy as np\n'), ((4633, 4675), 'dolfyn.adv.motion.correct_motion', 'correct_motion', (['dat'], {'accel_filtfreq': 'args.f'}), '(dat, accel_filtfreq=args.f)\n', (4647, 4675), False, 'from dolfyn.adv.motion import correct_motion\n'), ((4815, 4842), 'dolfyn.adv.rotate.orient2euler', 'orient2euler', (['dat.orientmat'], {}), '(dat.orientmat)\n', (4827, 4842), False, 'from dolfyn.adv.rotate import orient2euler\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import itertools
import tvm
import tvm.relay.testing
from tvm import relay
from tvm.relay.op.contrib import dnnl
import tvm.testing
# Skip marker: these tests require TVM built with the DNNL (oneDNN) codegen.
has_dnnl_codegen = pytest.mark.skipif(
    not tvm.get_global_func("relay.ext.dnnl", True), reason="DNNL codegen not available"
)
# Parametrized fixture: each test runs twice -- compile-only (False) and
# actually executing the compiled module (True). Both variants also need
# the DNNL codegen and an LLVM-enabled build.
run_module = tvm.testing.parameter(
    pytest.param(False, marks=[has_dnnl_codegen, *tvm.testing.requires_llvm()]),
    pytest.param(True, marks=[has_dnnl_codegen, *tvm.testing.requires_llvm()]),
    ids=["compile", "run"],
)
def vmobj_to_list(o):
    """Recursively convert a TVM runtime result (NDArray, ADT, or list) into
    (nested) lists of numpy arrays; raise on any other type."""
    if isinstance(o, tvm.nd.NDArray):
        return [o.numpy()]
    if isinstance(o, (tvm.runtime.container.ADT, list)):
        return [vmobj_to_list(item) for item in o]
    raise RuntimeError("Unknown object type: %s" % type(o))
def assert_result_dict_holds(result_dict):
    """Assert every pair of entries in result_dict agrees numerically
    (rtol/atol 1e-3)."""
    for key_a, key_b in itertools.combinations(list(result_dict), 2):
        flat_a = vmobj_to_list(result_dict[key_a])
        flat_b = vmobj_to_list(result_dict[key_b])
        for lhs, rhs in zip(flat_a, flat_b):
            tvm.testing.assert_allclose(lhs, rhs, rtol=1e-3, atol=1e-3)
def run_and_verify(mod, input, params, target, run_module):
    """Compile `mod` with and without DNNL partitioning, under both the
    'graph' and 'vm' executors, and -- when `run_module` is set -- check that
    all collected results agree numerically."""
    # Helper asserting at least one subgraph was offloaded to DNNL.
    # NOTE(review): defined but never called -- dead code or missing check?
    def check_dnnl_used(mod):
        num_dnnl_subgraphs = sum(
            [1 if "dnnl" in gv.name_hint else 0 for gv in mod.get_global_vars()]
        )
        assert num_dnnl_subgraphs >= 1
    dev = tvm.cpu()
    result_dict = dict()
    for mode in ["graph", "vm"]:
        for use_dnnl in [False, True]:
            result_key = mode + ("_dnnl" if use_dnnl else "")
            if use_dnnl:
                # NOTE(review): `mod` is rebound to the partitioned module,
                # so after the first use_dnnl=True pass, subsequent
                # iterations (including use_dnnl=False under 'vm') run the
                # partitioned module -- confirm this is intended.
                mod = dnnl.partition_for_dnnl(mod, params)
            with tvm.transform.PassContext(opt_level=3):
                func = relay.create_executor(mode, mod=mod, device=dev, target=target).evaluate()
            if run_module:
                if isinstance(input, dict):
                    result_dict[result_key] = func(**input, **params)
                else:
                    result_dict[result_key] = func(input, **params)
    if run_module:
        assert_result_dict_holds(result_dict)
def run_and_verify_func(config, run_module, target="llvm", dtype="float32"):
    """Test a Relay func by compiling, running, and comparing TVM and DNNL outputs.

    Parameters
    ----------
    config : Tuple[relay.Function, Dict[str, NDArray], List[str]]
        A tuple containing 1) the function to test, 2) a dict of var names to
        input shapes and 3) a list of which vars should be treated as params.
    run_module: bool
        If True, the built module will be run after being compiled.
    """
    f, input_shapes, is_param = config
    params = {}
    for name in is_param:
        params[name] = np.random.uniform(-1, 1, input_shapes[name]).astype(dtype)
    input_dict = {}
    for name, shape in input_shapes.items():
        if name in is_param:
            continue
        input_dict[name] = np.random.uniform(-1, 1, shape).astype(dtype)
    run_and_verify(f, input_dict, params, target, run_module)
def get_conv2d(
    x_shape=(1, 32, 8, 8),
    k_shape=(16, 32, 3, 3),
    groups=1,
    padding=(0, 0),
    strides=(1, 1),
    dilation=(1, 1),
    activation=None,
    dtype="float32",
):
    """Build a conv2d (+ optional activation) Relay expression.

    Returns (expression, {var name: shape}, [param var names])."""
    data = relay.var("x", shape=x_shape, dtype=dtype)
    weight = relay.var("kernel", shape=k_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        data,
        weight,
        kernel_size=k_shape[2:4],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
        channels=k_shape[0],
    )
    shapes = {"x": x_shape, "kernel": k_shape}
    param_names = ["kernel"]
    act_fns = {"relu": relay.nn.relu, "tanh": relay.tanh, "sigmoid": relay.sigmoid}
    if activation in act_fns:
        return act_fns[activation](conv), shapes, param_names
    return conv, shapes, param_names
def get_conv2d_weights_const(
    x_shape=(1, 32, 8, 8),
    k_shape=(16, 32, 3, 3),
    groups=1,
    padding=(0, 0),
    strides=(1, 1),
    dilation=(1, 1),
    dtype="float32",
):
    """conv2d whose kernel is a constant (all ones) instead of a free variable,
    so only the data input remains. Returns (expression, shape dict, [])."""
    data = relay.var("x", shape=x_shape, dtype=dtype)
    const_kernel = relay.const(np.ones(k_shape).astype(dtype))
    conv = relay.nn.conv2d(
        data,
        const_kernel,
        channels=k_shape[0],
        kernel_size=k_shape[2:4],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
    )
    return conv, {"x": x_shape}, []
def get_conv2d_bias(
    x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), activation=None, dtype="float32"
):
    """conv2d + bias_add (+ optional activation).

    Returns (expression, shape dict, param name list)."""
    conv, shapes, param_names = get_conv2d(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
    biased = relay.nn.bias_add(conv, bias)
    shapes["bias"] = (k_shape[0],)
    param_names += ["bias"]
    act_fns = {"relu": relay.nn.relu, "tanh": relay.tanh, "sigmoid": relay.sigmoid}
    if activation in act_fns:
        return act_fns[activation](biased), shapes, param_names
    return biased, shapes, param_names
def get_conv2d_bias_bn_relu(x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), dtype="float32"):
    """conv2d + bias + batch_norm (identity stats) + ReLU pattern."""
    conv2d_bias, shapes, param_names = get_conv2d_bias(x_shape, k_shape, dtype=dtype)
    n_ch = k_shape[0]
    def zeros_const():
        return relay.const(np.zeros(n_ch).astype(dtype))
    def ones_const():
        return relay.const(np.ones(n_ch).astype(dtype))
    bn_out, _, _ = relay.nn.batch_norm(
        conv2d_bias,
        gamma=ones_const(),
        beta=zeros_const(),
        moving_mean=zeros_const(),
        moving_var=ones_const(),
        axis=1,
        center=True,
        scale=True,
        epsilon=1e-5,
    )
    return relay.nn.relu(bn_out), shapes, param_names
def get_dense(x_shape=(1, 16), k_shape=(32, 16), activation=None, dtype="float32"):
    """Dense (fully connected) Relay expression.

    NOTE: `activation` is accepted but unused here; it is kept only for
    signature parity with get_conv2d."""
    data = relay.var("x", shape=x_shape, dtype=dtype)
    weight = relay.var("kernel", shape=k_shape, dtype=dtype)
    dense = relay.nn.dense(data, weight, units=k_shape[0])
    return dense, {"x": x_shape, "kernel": k_shape}, ["kernel"]
def get_dense_bias(x_shape=(1, 16), k_shape=(32, 16), activation=None, dtype="float32"):
    """Dense + bias_add pattern.

    NOTE: `activation` is accepted but unused; kept for signature parity."""
    dense, shapes, param_names = get_dense(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
    biased = relay.nn.bias_add(dense, bias)
    shapes["bias"] = (k_shape[0],)
    param_names += ["bias"]
    return biased, shapes, param_names
def test_dnnl_not_compatible(run_module, target="llvm", dtype="float32"):
    """A graph containing casts DNNL cannot offload must still compile
    (and run) on plain TVM after partitioning."""
    in_shape = (1, 32, 14, 14)
    sample = np.random.uniform(-1, 1, in_shape).astype(dtype)
    inp = relay.var("x", shape=in_shape, dtype=dtype)
    doubled = relay.add(inp, inp)
    round_trip = relay.cast(relay.cast(doubled, "int32"), "float32")
    body = relay.nn.relu(round_trip)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], body)
    mod = dnnl.partition_for_dnnl(mod)
    for mode in ["graph", "vm"]:
        with tvm.transform.PassContext(opt_level=3):
            func = relay.create_executor(mode, mod=mod, device=tvm.cpu(0), target=target).evaluate()
            if run_module:
                func(sample)
def test_multiple_outputs(run_module, dtype="float32"):
    """A graph whose output is a tuple of two adds sharing an input."""
    def build_tuple_graph():
        shape = (1, 3)
        lhs = relay.var("x", shape=shape, dtype=dtype)
        rhs = relay.var("y", shape=shape, dtype=dtype)
        first = relay.add(lhs, rhs)
        second = relay.add(first, rhs)
        mod = tvm.IRModule.from_expr(relay.Tuple((first, second)))
        return mod, {"x": shape, "y": shape}, []
    run_and_verify_func(build_tuple_graph(), run_module=run_module, dtype=dtype)
def test_unary(run_module):
    """Single-op graphs for each supported unary activation."""
    def build(op, x_shape=(1, 8, 3, 3)):
        inp = relay.var("x", shape=x_shape, dtype="float32")
        return tvm.IRModule.from_expr(op(inp)), {"x": x_shape}, []
    for unary_op in (relay.nn.relu, relay.tanh, relay.sigmoid):
        run_and_verify_func(build(unary_op), run_module=run_module)
def test_conv2d(run_module, dtype="float32"):
    """Exercise conv2d across kernel/group, padding, stride and dilation
    combinations (normal and depthwise)."""
    x_shape = (1, 32, 8, 8)
    kernel_group_pairs = [((16, 32, 3, 3), 1), ((32, 1, 3, 3), 32)]
    two_opts = [(1, 1), (2, 2)]
    for (k_shape, groups), padding, strides, dilation in itertools.product(
        kernel_group_pairs, [(0, 0), (1, 1)], two_opts, two_opts
    ):
        expr, dic, param_lst = get_conv2d(
            x_shape=x_shape,
            k_shape=k_shape,
            groups=groups,
            padding=padding,
            strides=strides,
            dilation=dilation,
            dtype=dtype,
        )
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv2d_weights_const(run_module, dtype="float32"):
    """conv2d with a constant kernel (weights folded into the graph)."""
    expr, dic, param_lst = get_conv2d_weights_const((1, 32, 8, 8), (16, 32, 3, 3), dtype=dtype)
    mod = tvm.IRModule.from_expr(expr)
    run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv2d_pattern(run_module, dtype="float32"):
    """Fused conv2d patterns: each activation with and without bias, plus the
    conv + bias + batch_norm + ReLU combination."""
    x_shape = (1, 32, 8, 8)
    k_shape = (16, 32, 3, 3)
    for act in (None, "relu", "tanh", "sigmoid"):
        for builder in (get_conv2d, get_conv2d_bias):
            expr, dic, param_lst = builder(x_shape, k_shape, activation=act, dtype=dtype)
            mod = tvm.IRModule.from_expr(expr)
            run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
    expr, dic, param_lst = get_conv2d_bias_bn_relu(x_shape, k_shape, dtype=dtype)
    mod = tvm.IRModule.from_expr(expr)
    run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_dense(run_module, dtype="float32"):
    """Dense layer with a multi-row and a single-row weight matrix."""
    x_shape = (1, 16)
    for k_shape in ((32, 16), (1, 16)):
        expr, dic, param_lst = get_dense(x_shape, k_shape, dtype=dtype)
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_dense_pattern(run_module, dtype="float32"):
    """Plain dense and dense+bias patterns."""
    x_shape, k_shape = (1, 16), (32, 16)
    for builder in (get_dense, get_dense_bias):
        expr, dic, param_lst = builder(x_shape, k_shape, dtype=dtype)
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
if __name__ == "__main__":
    import sys
    # Forward pytest's exit status (and any extra CLI args) to the shell.
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
| [
"tvm.relay.nn.dense",
"tvm.relay.Tuple",
"tvm.relay.op.contrib.dnnl.partition_for_dnnl",
"tvm.relay.Function",
"tvm.relay.create_executor",
"tvm.relay.cast",
"tvm.relay.nn.conv2d",
"tvm.relay.add",
"pytest.main",
"tvm.IRModule",
"tvm.transform.PassContext",
"tvm.relay.sigmoid",
"tvm.IRModule... | [((1700, 1738), 'itertools.combinations', 'itertools.combinations', (['result_dict', '(2)'], {}), '(result_dict, 2)\n', (1722, 1738), False, 'import itertools\n'), ((2222, 2231), 'tvm.cpu', 'tvm.cpu', ([], {}), '()\n', (2229, 2231), False, 'import tvm\n'), ((4040, 4082), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'x_shape', 'dtype': 'dtype'}), "('x', shape=x_shape, dtype=dtype)\n", (4049, 4082), False, 'from tvm import relay\n'), ((4099, 4146), 'tvm.relay.var', 'relay.var', (['"""kernel"""'], {'shape': 'k_shape', 'dtype': 'dtype'}), "('kernel', shape=k_shape, dtype=dtype)\n", (4108, 4146), False, 'from tvm import relay\n'), ((4160, 4306), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'kernel'], {'kernel_size': 'k_shape[2:4]', 'groups': 'groups', 'padding': 'padding', 'strides': 'strides', 'dilation': 'dilation', 'channels': 'k_shape[0]'}), '(x, kernel, kernel_size=k_shape[2:4], groups=groups, padding\n =padding, strides=strides, dilation=dilation, channels=k_shape[0])\n', (4175, 4306), False, 'from tvm import relay\n'), ((4957, 4999), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'x_shape', 'dtype': 'dtype'}), "('x', shape=x_shape, dtype=dtype)\n", (4966, 4999), False, 'from tvm import relay\n'), ((5071, 5216), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'kernel'], {'channels': 'k_shape[0]', 'kernel_size': 'k_shape[2:4]', 'groups': 'groups', 'padding': 'padding', 'strides': 'strides', 'dilation': 'dilation'}), '(x, kernel, channels=k_shape[0], kernel_size=k_shape[2:4],\n groups=groups, padding=padding, strides=strides, dilation=dilation)\n', (5086, 5216), False, 'from tvm import relay\n'), ((5584, 5635), 'tvm.relay.var', 'relay.var', (['"""bias"""'], {'shape': '(k_shape[0],)', 'dtype': 'dtype'}), "('bias', shape=(k_shape[0],), dtype=dtype)\n", (5593, 5635), False, 'from tvm import relay\n'), ((5647, 5676), 'tvm.relay.nn.bias_add', 'relay.nn.bias_add', (['conv', 'bias'], {}), '(conv, bias)\n', (5664, 5676), False, 'from 
tvm import relay\n'), ((6493, 6654), 'tvm.relay.nn.batch_norm', 'relay.nn.batch_norm', (['conv2d_bias'], {'gamma': 'gamma', 'beta': 'beta', 'moving_mean': 'moving_mean', 'moving_var': 'moving_var', 'axis': '(1)', 'center': '(True)', 'scale': '(True)', 'epsilon': '(1e-05)'}), '(conv2d_bias, gamma=gamma, beta=beta, moving_mean=\n moving_mean, moving_var=moving_var, axis=1, center=True, scale=True,\n epsilon=1e-05)\n', (6512, 6654), False, 'from tvm import relay\n'), ((6890, 6932), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'x_shape', 'dtype': 'dtype'}), "('x', shape=x_shape, dtype=dtype)\n", (6899, 6932), False, 'from tvm import relay\n'), ((6949, 6996), 'tvm.relay.var', 'relay.var', (['"""kernel"""'], {'shape': 'k_shape', 'dtype': 'dtype'}), "('kernel', shape=k_shape, dtype=dtype)\n", (6958, 6996), False, 'from tvm import relay\n'), ((7010, 7053), 'tvm.relay.nn.dense', 'relay.nn.dense', (['x', 'kernel'], {'units': 'k_shape[0]'}), '(x, kernel, units=k_shape[0])\n', (7024, 7053), False, 'from tvm import relay\n'), ((7351, 7402), 'tvm.relay.var', 'relay.var', (['"""bias"""'], {'shape': '(k_shape[0],)', 'dtype': 'dtype'}), "('bias', shape=(k_shape[0],), dtype=dtype)\n", (7360, 7402), False, 'from tvm import relay\n'), ((7414, 7444), 'tvm.relay.nn.bias_add', 'relay.nn.bias_add', (['dense', 'bias'], {}), '(dense, bias)\n', (7431, 7444), False, 'from tvm import relay\n'), ((7718, 7759), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'xshape', 'dtype': 'dtype'}), "('x', shape=xshape, dtype=dtype)\n", (7727, 7759), False, 'from tvm import relay\n'), ((7771, 7786), 'tvm.relay.add', 'relay.add', (['x', 'x'], {}), '(x, x)\n', (7780, 7786), False, 'from tvm import relay\n'), ((7853, 7869), 'tvm.relay.nn.relu', 'relay.nn.relu', (['z'], {}), '(z)\n', (7866, 7869), False, 'from tvm import relay\n'), ((7879, 7903), 'tvm.relay.Function', 'relay.Function', (['[x]', 'out'], {}), '([x], out)\n', (7893, 7903), False, 'from tvm import relay\n'), ((7915, 7929), 
'tvm.IRModule', 'tvm.IRModule', ([], {}), '()\n', (7927, 7929), False, 'import tvm\n'), ((7962, 7990), 'tvm.relay.op.contrib.dnnl.partition_for_dnnl', 'dnnl.partition_for_dnnl', (['mod'], {}), '(mod)\n', (7985, 7990), False, 'from tvm.relay.op.contrib import dnnl\n'), ((10191, 10221), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['conv2d'], {}), '(conv2d)\n', (10213, 10221), False, 'import tvm\n'), ((11192, 11235), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['conv2d_bias_bn_relu'], {}), '(conv2d_bias_bn_relu)\n', (11214, 11235), False, 'import tvm\n'), ((11537, 11566), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['dense'], {}), '(dense)\n', (11559, 11566), False, 'import tvm\n'), ((11765, 11794), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['dense'], {}), '(dense)\n', (11787, 11794), False, 'import tvm\n'), ((12090, 12119), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['dense'], {}), '(dense)\n', (12112, 12119), False, 'import tvm\n'), ((12325, 12359), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['dense_bias'], {}), '(dense_bias)\n', (12347, 12359), False, 'import tvm\n'), ((1024, 1067), 'tvm.get_global_func', 'tvm.get_global_func', (['"""relay.ext.dnnl"""', '(True)'], {}), "('relay.ext.dnnl', True)\n", (1043, 1067), False, 'import tvm\n'), ((6746, 6775), 'tvm.relay.nn.relu', 'relay.nn.relu', (['conv2d_bias_bn'], {}), '(conv2d_bias_bn)\n', (6759, 6775), False, 'from tvm import relay\n'), ((7807, 7829), 'tvm.relay.cast', 'relay.cast', (['y', '"""int32"""'], {}), "(y, 'int32')\n", (7817, 7829), False, 'from tvm import relay\n'), ((8345, 8386), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(1, 3)', 'dtype': 'dtype'}), "('x', shape=(1, 3), dtype=dtype)\n", (8354, 8386), False, 'from tvm import relay\n'), ((8400, 8441), 'tvm.relay.var', 'relay.var', (['"""y"""'], {'shape': '(1, 3)', 'dtype': 'dtype'}), "('y', shape=(1, 3), dtype=dtype)\n", (8409, 8441), False, 'from tvm import relay\n'), ((8455, 8470), 
'tvm.relay.add', 'relay.add', (['x', 'y'], {}), '(x, y)\n', (8464, 8470), False, 'from tvm import relay\n'), ((8484, 8499), 'tvm.relay.add', 'relay.add', (['z', 'y'], {}), '(z, y)\n', (8493, 8499), False, 'from tvm import relay\n'), ((8515, 8534), 'tvm.relay.Tuple', 'relay.Tuple', (['(z, w)'], {}), '((z, w))\n', (8526, 8534), False, 'from tvm import relay\n'), ((8548, 8575), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['out'], {}), '(out)\n', (8570, 8575), False, 'import tvm\n'), ((8794, 8840), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'x_shape', 'dtype': '"""float32"""'}), "('x', shape=x_shape, dtype='float32')\n", (8803, 8840), False, 'from tvm import relay\n'), ((8877, 8904), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['out'], {}), '(out)\n', (8899, 8904), False, 'import tvm\n'), ((10640, 10670), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['conv2d'], {}), '(conv2d)\n', (10662, 10670), False, 'import tvm\n'), ((10910, 10945), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['conv2d_bias'], {}), '(conv2d_bias)\n', (10932, 10945), False, 'import tvm\n'), ((12534, 12572), 'pytest.main', 'pytest.main', (['([__file__] + sys.argv[1:])'], {}), '([__file__] + sys.argv[1:])\n', (12545, 12572), False, 'import pytest\n'), ((1887, 1946), 'tvm.testing.assert_allclose', 'tvm.testing.assert_allclose', (['r1', 'r2'], {'rtol': '(0.001)', 'atol': '(0.001)'}), '(r1, r2, rtol=0.001, atol=0.001)\n', (1914, 1946), False, 'import tvm\n'), ((4503, 4521), 'tvm.relay.nn.relu', 'relay.nn.relu', (['out'], {}), '(out)\n', (4516, 4521), False, 'from tvm import relay\n'), ((5785, 5803), 'tvm.relay.nn.relu', 'relay.nn.relu', (['out'], {}), '(out)\n', (5798, 5803), False, 'from tvm import relay\n'), ((7660, 7692), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'xshape'], {}), '(-1, 1, xshape)\n', (7677, 7692), True, 'import numpy as np\n'), ((8039, 8077), 'tvm.transform.PassContext', 'tvm.transform.PassContext', ([], {'opt_level': 
'(3)'}), '(opt_level=3)\n', (8064, 8077), False, 'import tvm\n'), ((2444, 2480), 'tvm.relay.op.contrib.dnnl.partition_for_dnnl', 'dnnl.partition_for_dnnl', (['mod', 'params'], {}), '(mod, params)\n', (2467, 2480), False, 'from tvm.relay.op.contrib import dnnl\n'), ((2499, 2537), 'tvm.transform.PassContext', 'tvm.transform.PassContext', ([], {'opt_level': '(3)'}), '(opt_level=3)\n', (2524, 2537), False, 'import tvm\n'), ((3535, 3576), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'input_shapes[x]'], {}), '(-1, 1, input_shapes[x])\n', (3552, 3576), True, 'import numpy as np\n'), ((3642, 3669), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'v'], {}), '(-1, 1, v)\n', (3659, 3669), True, 'import numpy as np\n'), ((4586, 4601), 'tvm.relay.tanh', 'relay.tanh', (['out'], {}), '(out)\n', (4596, 4601), False, 'from tvm import relay\n'), ((5028, 5044), 'numpy.ones', 'np.ones', (['k_shape'], {}), '(k_shape)\n', (5035, 5044), True, 'import numpy as np\n'), ((5868, 5883), 'tvm.relay.tanh', 'relay.tanh', (['out'], {}), '(out)\n', (5878, 5883), False, 'from tvm import relay\n'), ((6237, 6257), 'numpy.zeros', 'np.zeros', (['k_shape[0]'], {}), '(k_shape[0])\n', (6245, 6257), True, 'import numpy as np\n'), ((6298, 6317), 'numpy.ones', 'np.ones', (['k_shape[0]'], {}), '(k_shape[0])\n', (6305, 6317), True, 'import numpy as np\n'), ((6364, 6384), 'numpy.zeros', 'np.zeros', (['k_shape[0]'], {}), '(k_shape[0])\n', (6372, 6384), True, 'import numpy as np\n'), ((6430, 6449), 'numpy.ones', 'np.ones', (['k_shape[0]'], {}), '(k_shape[0])\n', (6437, 6449), True, 'import numpy as np\n'), ((1198, 1225), 'tvm.testing.requires_llvm', 'tvm.testing.requires_llvm', ([], {}), '()\n', (1223, 1225), False, 'import tvm\n'), ((1279, 1306), 'tvm.testing.requires_llvm', 'tvm.testing.requires_llvm', ([], {}), '()\n', (1304, 1306), False, 'import tvm\n'), ((4669, 4687), 'tvm.relay.sigmoid', 'relay.sigmoid', (['out'], {}), '(out)\n', (4682, 4687), False, 'from tvm import 
relay\n'), ((5951, 5969), 'tvm.relay.sigmoid', 'relay.sigmoid', (['out'], {}), '(out)\n', (5964, 5969), False, 'from tvm import relay\n'), ((9798, 9828), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['conv2d'], {}), '(conv2d)\n', (9820, 9828), False, 'import tvm\n'), ((2563, 2626), 'tvm.relay.create_executor', 'relay.create_executor', (['mode'], {'mod': 'mod', 'device': 'dev', 'target': 'target'}), '(mode, mod=mod, device=dev, target=target)\n', (2584, 2626), False, 'from tvm import relay\n'), ((8143, 8153), 'tvm.cpu', 'tvm.cpu', (['(0)'], {}), '(0)\n', (8150, 8153), False, 'import tvm\n')] |
import numpy as np
import scipy.stats as stats
from tbainfo import tbarequests
from sim_team import SimTeam
from match_score import Match, TeamScore, AllianceScore
import globals
# Match scoring constants (point values; presumably the 2019 FRC game,
# given the Cargo/Panel/HAB-level terms used below -- confirm).
CARGO_PT = 3  # points per cargo ball scored
PANEL_PT = 2  # points per hatch panel placed
AUTO1 = 3  # sandstorm bonus for crossing from level 1
AUTO2 = 6  # sandstorm bonus for crossing from level 2
CLIMB1 = 3  # endgame climb to HAB level 1
CLIMB2 = 6  # endgame climb to HAB level 2
CLIMB3 = 12  # endgame climb to HAB level 3
# Truncated normal distribution with bounds expressed in data units.
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
    """Return a frozen scipy truncnorm; `low`/`upp` are converted to the
    standard-normal a/b parameters truncnorm expects."""
    a = (low - mean) / sd
    b = (upp - mean) / sd
    return stats.truncnorm(a, b, loc=mean, scale=sd)
# Sample representative data points from a truncated normal fitted to `data`.
def get_dist(data, num_points):
    """Fit a truncated normal to `data` and draw `num_points` samples from it.

    Returns:
        0 when `data` is empty or its mean is 0; the scalar mean when all
        observations are identical (zero spread); otherwise a list of
        `num_points` samples bounded by the observed min/max.
    """
    if len(data) == 0:
        return 0  # no observations recorded -- avoid np.mean/np.max on empty input
    mu, sigma = np.mean(data), np.std(data)
    # hi/lo instead of the original max/min, which shadowed the built-ins.
    hi, lo = float(np.max(data)), float(np.min(data))
    if mu == 0:
        return 0
    if hi == lo:
        return mu
    samples = get_truncated_normal(mean=mu, sd=sigma, low=lo, upp=hi).rvs(num_points)
    return list(samples)
# Build a SimTeam per alliance member, filled with sampled metric distributions.
def create_team_objs(alliance, db):
    """For each team number in `alliance`, look up its matches at the current
    competition and populate a SimTeam with sampled cargo/panel distributions
    (teleop and auto) plus endgame/auto status rates."""
    team_objs = []
    for team in alliance:
        team_id = db.get_team_id(team)
        match_ids = db.get_matches_team_id(team_id, db.get_competition_id(globals.competition), globals.match_cutoff)
        obj = SimTeam(team_id, team, match_ids)
        obj.cargo = get_dist(db.get_metric(match_ids, "'Cargo'", 'false'), 100000)
        obj.panel = get_dist(db.get_metric(match_ids, "'Panel'", 'false'), 100000)
        obj.cargo_auto = get_dist(db.get_metric(match_ids, "'Cargo'", 'true'), 100000)
        obj.panel_auto = get_dist(db.get_metric(match_ids, "'Panel'", 'true'), 100000)
        obj.populate_endgame_auto(db.get_status(match_ids, 'endgame'), db.get_status(match_ids, 'auto'))
        team_objs.append(obj)
    return team_objs
def sort_dict(dict, cutoff):
    """Return the keys of *dict* whose values exceed *cutoff*,
    ordered by ascending value.

    NOTE(review): the parameter name shadows the ``dict`` builtin; kept
    as-is so keyword callers are not broken.
    """
    ranked = sorted(dict.items(), key=lambda item: item[1])
    return [key for key, value in ranked if value > cutoff]
def get_endgame(team_objs):
    """Predict each team's endgame climb level.

    Returns a dict mapping ``tba_id`` to '', 'L1', 'L2' or 'L3'.  A team
    gets 'L1' when its L1 probability clears 0.1; at most two teams get
    'L2' (threshold 0.15) and at most one gets 'L3' (threshold 0.2), with
    higher levels overwriting lower ones.

    NOTE(review): ``sort_dict`` sorts ascending, so the L2/L3 slots go to
    the *lowest* qualifying probabilities — confirm this is intended.
    """
    level1, level2, level3 = {}, {}, {}
    statuses = {}
    for team in team_objs:
        statuses[team.tba_id] = ''
        level1[team.tba_id] = team.endgame_status.get('L1')
        level2[team.tba_id] = team.endgame_status.get('L2')
        level3[team.tba_id] = team.endgame_status.get('L3')
    for tba_id in sort_dict(level1, 0.1):
        statuses[tba_id] = 'L1'
    # Only the first two qualifying teams are assigned an L2 climb.
    for tba_id in sort_dict(level2, 0.15)[:2]:
        statuses[tba_id] = 'L2'
    # Only one team can occupy L3.
    for tba_id in sort_dict(level3, 0.2)[:1]:
        statuses[tba_id] = 'L3'
    return statuses
def get_auto(team_objs):
    """Predict each team's autonomous hab-line crossing level.

    Returns a dict mapping ``tba_id`` to '', 'L1' or 'L2'.  'L1' goes to
    every team whose L1 probability clears 0.1; at most two teams get
    'L2' (threshold 0.15), overwriting any 'L1' assignment.
    """
    level1 = {}
    level2 = {}
    statuses = {}
    for team in team_objs:
        statuses[team.tba_id] = ''
        level1[team.tba_id] = team.auto_status.get('L1')
        level2[team.tba_id] = team.auto_status.get('L2')
    for tba_id in sort_dict(level1, 0.1):
        statuses[tba_id] = 'L1'
    # Only the first two qualifying teams are credited with an L2 crossing.
    for tba_id in sort_dict(level2, 0.15)[:2]:
        statuses[tba_id] = 'L2'
    return statuses
def run_sim(db, match_id=-1, alliances=-1):
    """Simulate a match and return a Match with both alliances' predicted scores.

    When ``match_id`` is given, the alliances are pulled from The Blue
    Alliance; otherwise the caller-supplied ``alliances`` are used.

    NOTE(review): the TBA auth key is hard-coded below — consider moving
    it to configuration.
    """
    globals.init()
    tba = tbarequests('jQusM2aYtJLHX<KEY>CWE')
    if match_id != -1:
        alliances = tba.get_match_teams(str(match_id))
    else:
        pass
    alliance_scores = []
    for alliance in alliances:
        team_objs = create_team_objs(alliance, db)
        endgame = get_endgame(team_objs)
        auto = get_auto(team_objs)
        # Each team's expected contribution: mean of its sampled distributions
        # plus its predicted auto and endgame statuses.
        scores = [
            TeamScore(team.tba_id,
                      np.mean(team.cargo),
                      np.mean(team.panel),
                      np.mean(team.cargo_auto),
                      np.mean(team.panel_auto),
                      auto.get(team.tba_id),
                      endgame.get(team.tba_id))
            for team in team_objs
        ]
        alliance_scores.append(AllianceScore(scores[0], scores[1], scores[2]))
    return Match(alliance_scores[0], alliance_scores[1], match_id)
| [
"numpy.mean",
"sim_team.SimTeam",
"globals.init",
"numpy.std",
"numpy.min",
"numpy.max",
"match_score.Match",
"scipy.stats.truncnorm",
"tbainfo.tbarequests",
"match_score.AllianceScore"
] | [((399, 472), 'scipy.stats.truncnorm', 'stats.truncnorm', (['((low - mean) / sd)', '((upp - mean) / sd)'], {'loc': 'mean', 'scale': 'sd'}), '((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)\n', (414, 472), True, 'import scipy.stats as stats\n'), ((3230, 3244), 'globals.init', 'globals.init', ([], {}), '()\n', (3242, 3244), False, 'import globals\n'), ((3255, 3291), 'tbainfo.tbarequests', 'tbarequests', (['"""jQusM2aYtJLHX<KEY>CWE"""'], {}), "('jQusM2aYtJLHX<KEY>CWE')\n", (3266, 3291), False, 'from tbainfo import tbarequests\n'), ((4229, 4284), 'match_score.Match', 'Match', (['predicted_score[0]', 'predicted_score[1]', 'match_id'], {}), '(predicted_score[0], predicted_score[1], match_id)\n', (4234, 4284), False, 'from match_score import Match, TeamScore, AllianceScore\n'), ((604, 617), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (611, 617), True, 'import numpy as np\n'), ((619, 631), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (625, 631), True, 'import numpy as np\n'), ((1189, 1229), 'sim_team.SimTeam', 'SimTeam', (['team_id', 'team', 'matches_team_ids'], {}), '(team_id, team, matches_team_ids)\n', (1196, 1229), False, 'from sim_team import SimTeam\n'), ((4107, 4168), 'match_score.AllianceScore', 'AllianceScore', (['team_scores[0]', 'team_scores[1]', 'team_scores[2]'], {}), '(team_scores[0], team_scores[1], team_scores[2])\n', (4120, 4168), False, 'from match_score import Match, TeamScore, AllianceScore\n'), ((639, 651), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (645, 651), True, 'import numpy as np\n'), ((660, 672), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (666, 672), True, 'import numpy as np\n'), ((3743, 3762), 'numpy.mean', 'np.mean', (['team.cargo'], {}), '(team.cargo)\n', (3750, 3762), True, 'import numpy as np\n'), ((3783, 3802), 'numpy.mean', 'np.mean', (['team.panel'], {}), '(team.panel)\n', (3790, 3802), True, 'import numpy as np\n'), ((3829, 3853), 'numpy.mean', 'np.mean', (['team.cargo_auto'], {}), 
'(team.cargo_auto)\n', (3836, 3853), True, 'import numpy as np\n'), ((3879, 3903), 'numpy.mean', 'np.mean', (['team.panel_auto'], {}), '(team.panel_auto)\n', (3886, 3903), True, 'import numpy as np\n')] |
# pylint: disable=no-self-use,invalid-name
import random
from os.path import join
import numpy
from deep_qa.data.dataset_readers.squad_sentence_selection_reader import SquadSentenceSelectionReader
from deep_qa.testing.test_case import DeepQaTestCase
from overrides import overrides
class TestSquadSentenceSelectionReader(DeepQaTestCase):
    """Tests for ``SquadSentenceSelectionReader``.

    ``setUp`` writes a two-paragraph SQuAD-style JSON fixture and pins both
    RNG seeds; every expected output below depends on those exact seeds and
    on the number of shuffles performed inside the reader.
    """
    @overrides
    def setUp(self):
        """Write the SQuAD JSON fixture to TEST_DIR and seed the RNGs."""
        super(TestSquadSentenceSelectionReader, self).setUp()
        # write a SQuAD json file.
        # pylint: disable=bad-continuation
        self.sentences = [
                "Architecturally, the school has a Catholic character.",
                "Atop the Main Building's gold dome is a golden statue of the Virgin Mary.",
                "Immediately in front of the Main Building and facing it, is a copper statue of "
                "Christ with arms upraised with the legend \\\"Venite Ad Me Omnes\\\".",
                "Next to the Main Building is the Basilica of the Sacred Heart.",
                "Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection.",
                "It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly "
                "appeared to Saint Bernadette Soubirous in 1858.",
                "At the end of the main drive (and in a direct line that connects through 3 "
                "statues and the Gold Dome), is a simple, modern stone statue of Mary.",
                "This is another sentence.",
                "And another one.",
                "Yet another sentence 1.",
                "Yet another sentence 2.",
                "Yet another sentence 3.",
                "Yet another sentence 4.",
                "Yet another sentence 5.",
                ]
        # pylint: enable=bad-continuation
        self.passage1 = " ".join(self.sentences[:7])
        self.passage2 = " ".join(self.sentences[7:])
        self.question0 = "To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?"
        self.question1 = "What is in front of the Notre Dame Main Building?"
        self.questions = [self.question0, self.question1]
        # %s placeholders: paragraph-1 context, its two questions, paragraph-2 context.
        json_string = """
        {
            "data":[
                {
                    "title":"University_of_Notre_Dame",
                    "paragraphs":[
                        {
                            "context":"%s",
                            "qas":[
                                {
                                    "answers":[
                                        {
                                            "answer_start":515,
                                            "text":"Saint Bernadette Soubirous"
                                        }
                                    ],
                                    "question":"%s",
                                    "id":"5733be284776f41900661182"
                                },
                                {
                                    "answers":[
                                        {
                                            "answer_start":188,
                                            "text":"a copper statue of Christ"
                                        }
                                    ],
                                    "question":"%s",
                                    "id":"5733be284776f4190066117f"
                                }
                            ]
                        },
                        {
                            "context":"%s",
                            "qas":[ ]
                        }
                    ]
                }
            ]
        }
        """ % (self.passage1, self.question0, self.question1, self.passage2)
        with open(self.TEST_DIR + "squad_data.json", "w") as f:
            f.write(json_string)
        random.seed(1337)
        numpy.random.seed(1337)

    def test_reader_should_shuffle_consistently_with_the_same_seed(self):
        """Two reads under the same seed must produce identical output files."""
        random.seed(1337)
        numpy.random.seed(1337)
        reader = SquadSentenceSelectionReader()
        output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
        with open(output_filepath, "r") as generated_file:
            lines = []
            for line in generated_file:
                lines.append(line.strip())
        random.seed(1337)
        numpy.random.seed(1337)
        reader = SquadSentenceSelectionReader()
        output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
        with open(output_filepath, "r") as generated_file:
            lines2 = []
            for line in generated_file:
                lines2.append(line.strip())
        assert lines == lines2

    def test_default_squad_sentence_selection_reader(self):
        """Default negative sampling: shuffled passage sentences, tab-separated output."""
        # Note that the ordering of these sentences depends on a a particular shuffling of the
        # data (and thus the random seed set above), and could change if you change the number of
        # shuffles done in the code.  Sorry.
        context0 = "###".join(self.sentences[i] for i in [2, 4, 1, 3, 0, 5, 6]).replace("\\\"", "\"")
        index0 = "5"
        expected_line0 = self.question0 + "\t" + context0 + "\t" + index0
        context1 = "###".join(self.sentences[i] for i in [0, 3, 4, 6, 2, 1, 5]).replace("\\\"", "\"")
        index1 = "4"
        expected_line1 = self.question1 + "\t" + context1 + "\t" + index1
        reader = SquadSentenceSelectionReader()
        output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
        with open(output_filepath, "r") as generated_file:
            lines = []
            for line in generated_file:
                lines.append(line.strip())
        assert expected_line0 == lines[0]
        assert expected_line1 == lines[1]

    def test_negative_sentence_choices_all_work(self):
        """Smoke test for the 'random-N' and 'pad-to-N' negative selection modes."""
        # We're going to make sure that the other negative sentence selection methods don't crash
        # here; that's about it.
        # Note that the ordering of these sentences depends on a a particular shuffling of the
        # data (and thus the random seed set above), and could change if you change the number of
        # shuffles done in the code.
        context0 = "###".join(self.sentences[i] for i in [3, 4, 0, 13, 5, 9]).replace("\\\"", "\"")
        index0 = "4"
        expected_line0 = self.question0 + "\t" + context0 + "\t" + index0
        context1 = "###".join(self.sentences[i] for i in [4, 1, 9, 2, 7, 12]).replace("\\\"", "\"")
        index1 = "3"
        expected_line1 = self.question1 + "\t" + context1 + "\t" + index1
        reader = SquadSentenceSelectionReader(negative_sentence_selection="random-2,pad-to-5")
        output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
        with open(output_filepath, "r") as generated_file:
            lines = []
            for line in generated_file:
                lines.append(line.strip())
        assert expected_line0 == lines[0]
        assert expected_line1 == lines[1]

    def test_negative_question_choice_works(self):
        """Smoke test for the 'question' negative selection mode."""
        # We're going to make sure that the other negative sentence selection methods don't crash
        # here; that's about it.
        context0 = "###".join([self.question0, self.sentences[5]]).replace("\\\"", "\"")
        index0 = "1"
        expected_line0 = self.question0 + "\t" + context0 + "\t" + index0
        context1 = "###".join([self.sentences[2], self.question1]).replace("\\\"", "\"")
        index1 = "0"
        expected_line1 = self.question1 + "\t" + context1 + "\t" + index1
        reader = SquadSentenceSelectionReader(negative_sentence_selection="question")
        output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
        with open(output_filepath, "r") as generated_file:
            lines = []
            for line in generated_file:
                lines.append(line.strip())
        assert expected_line0 == lines[0]
        assert expected_line1 == lines[1]

    def test_negative_random_question_choice_works(self):
        """Smoke test for the 'questions-random-N' negative selection mode."""
        # We're going to make sure that the other negative sentence selection methods don't crash
        # here; that's about it.
        context0 = "###".join([self.question0, self.question1, self.sentences[5]]).replace("\\\"", "\"")
        index0 = "2"
        expected_line0 = self.question0 + "\t" + context0 + "\t" + index0
        context1 = "###".join([self.question1, self.question0, self.sentences[2]]).replace("\\\"", "\"")
        index1 = "2"
        expected_line1 = self.question1 + "\t" + context1 + "\t" + index1
        reader = SquadSentenceSelectionReader(negative_sentence_selection="questions-random-2")
        output_filepath = reader.read_file(join(self.TEST_DIR, "squad_data.json"))
        with open(output_filepath, "r") as generated_file:
            lines = []
            for line in generated_file:
                lines.append(line.strip())
        assert expected_line0 == lines[0]
        assert expected_line1 == lines[1]
| [
"deep_qa.data.dataset_readers.squad_sentence_selection_reader.SquadSentenceSelectionReader",
"os.path.join",
"numpy.random.seed",
"random.seed"
] | [((3453, 3470), 'random.seed', 'random.seed', (['(1337)'], {}), '(1337)\n', (3464, 3470), False, 'import random\n'), ((3479, 3502), 'numpy.random.seed', 'numpy.random.seed', (['(1337)'], {}), '(1337)\n', (3496, 3502), False, 'import numpy\n'), ((3586, 3603), 'random.seed', 'random.seed', (['(1337)'], {}), '(1337)\n', (3597, 3603), False, 'import random\n'), ((3612, 3635), 'numpy.random.seed', 'numpy.random.seed', (['(1337)'], {}), '(1337)\n', (3629, 3635), False, 'import numpy\n'), ((3653, 3683), 'deep_qa.data.dataset_readers.squad_sentence_selection_reader.SquadSentenceSelectionReader', 'SquadSentenceSelectionReader', ([], {}), '()\n', (3681, 3683), False, 'from deep_qa.data.dataset_readers.squad_sentence_selection_reader import SquadSentenceSelectionReader\n'), ((3940, 3957), 'random.seed', 'random.seed', (['(1337)'], {}), '(1337)\n', (3951, 3957), False, 'import random\n'), ((3966, 3989), 'numpy.random.seed', 'numpy.random.seed', (['(1337)'], {}), '(1337)\n', (3983, 3989), False, 'import numpy\n'), ((4007, 4037), 'deep_qa.data.dataset_readers.squad_sentence_selection_reader.SquadSentenceSelectionReader', 'SquadSentenceSelectionReader', ([], {}), '()\n', (4035, 4037), False, 'from deep_qa.data.dataset_readers.squad_sentence_selection_reader import SquadSentenceSelectionReader\n'), ((5031, 5061), 'deep_qa.data.dataset_readers.squad_sentence_selection_reader.SquadSentenceSelectionReader', 'SquadSentenceSelectionReader', ([], {}), '()\n', (5059, 5061), False, 'from deep_qa.data.dataset_readers.squad_sentence_selection_reader import SquadSentenceSelectionReader\n'), ((6220, 6297), 'deep_qa.data.dataset_readers.squad_sentence_selection_reader.SquadSentenceSelectionReader', 'SquadSentenceSelectionReader', ([], {'negative_sentence_selection': '"""random-2,pad-to-5"""'}), "(negative_sentence_selection='random-2,pad-to-5')\n", (6248, 6297), False, 'from deep_qa.data.dataset_readers.squad_sentence_selection_reader import SquadSentenceSelectionReader\n'), ((7200, 7268), 
'deep_qa.data.dataset_readers.squad_sentence_selection_reader.SquadSentenceSelectionReader', 'SquadSentenceSelectionReader', ([], {'negative_sentence_selection': '"""question"""'}), "(negative_sentence_selection='question')\n", (7228, 7268), False, 'from deep_qa.data.dataset_readers.squad_sentence_selection_reader import SquadSentenceSelectionReader\n'), ((8210, 8288), 'deep_qa.data.dataset_readers.squad_sentence_selection_reader.SquadSentenceSelectionReader', 'SquadSentenceSelectionReader', ([], {'negative_sentence_selection': '"""questions-random-2"""'}), "(negative_sentence_selection='questions-random-2')\n", (8238, 8288), False, 'from deep_qa.data.dataset_readers.squad_sentence_selection_reader import SquadSentenceSelectionReader\n'), ((3727, 3765), 'os.path.join', 'join', (['self.TEST_DIR', '"""squad_data.json"""'], {}), "(self.TEST_DIR, 'squad_data.json')\n", (3731, 3765), False, 'from os.path import join\n'), ((4081, 4119), 'os.path.join', 'join', (['self.TEST_DIR', '"""squad_data.json"""'], {}), "(self.TEST_DIR, 'squad_data.json')\n", (4085, 4119), False, 'from os.path import join\n'), ((5105, 5143), 'os.path.join', 'join', (['self.TEST_DIR', '"""squad_data.json"""'], {}), "(self.TEST_DIR, 'squad_data.json')\n", (5109, 5143), False, 'from os.path import join\n'), ((6341, 6379), 'os.path.join', 'join', (['self.TEST_DIR', '"""squad_data.json"""'], {}), "(self.TEST_DIR, 'squad_data.json')\n", (6345, 6379), False, 'from os.path import join\n'), ((7312, 7350), 'os.path.join', 'join', (['self.TEST_DIR', '"""squad_data.json"""'], {}), "(self.TEST_DIR, 'squad_data.json')\n", (7316, 7350), False, 'from os.path import join\n'), ((8332, 8370), 'os.path.join', 'join', (['self.TEST_DIR', '"""squad_data.json"""'], {}), "(self.TEST_DIR, 'squad_data.json')\n", (8336, 8370), False, 'from os.path import join\n')] |
import gym
# Create the simulation environment
env = gym.make('Taxi-v3')
# Reset the simulation environment
obs = env.reset()
# Render the current state of the environment
#env.render()
m = env.observation_space.n # size of the state space
n = env.action_space.n # size of action space
print(m,n)
print("出租车问题状态数量为{:d},动作数量为{:d}。".format(m, n))
import numpy as np
# Intialize the Q-table and hyperparameters
# Q-table of size m*n
Q = np.zeros([m,n])
Q2=np.zeros([m,n])  # NOTE(review): Q2 is never used below
# discount factor for returns
gamma = 0.97
# maximum number of episodes for episodic training
max_episode = 1000
# maximum number of steps per episode
max_steps = 1000
# learning-rate parameter
alpha = 0.7
# random-exploration probability
# NOTE(review): with epsilon = 0, p > epsilon holds almost surely, so the
# agent always explores during training.  Q-learning is off-policy, so it
# still learns, but the usual epsilon-greedy semantics are inverted here.
epsilon = 0
for i in range(max_episode):
    # Start with new environment
    s = env.reset()
    done = False
    counter = 0  # NOTE(review): never incremented or read
    for _ in range(max_steps):
        # Choose an action using epsilon greedy policy
        p = np.random.rand()
        if p>epsilon or np.any(Q[s,:])==False:
            a = env.action_space.sample()
        else:
            a = np.argmax(Q[s, :])
        # Choose action a with the epsilon-greedy algorithm:
        # when p > epsilon, or no value has been learned for this state yet, explore randomly;
        # otherwise exploit the learned value function greedily (np.argmax)
        # ======= insert code here
        # ======= end of inserted code
        #env.step(a) // take one step with the chosen action
        # returns the new state, the reward, and whether the episode is done
        s_new, r, done, _ = env.step(a)
        # r is the reward obtained after taking a; s is the state before the step
        # Update the Q-table according to the Bellman equation (np.max)
        # ======= insert code here
        Q[s,a] =(1-alpha)*Q[s,a]+alpha*(r+gamma*np.max(Q[s_new,:]))
        # ======= end of inserted code
        # print(Q[s,a],r)
        s = s_new
        if done:
            break
print(Q)
s = env.reset()
done = False
env.render()
#Test the learned Agent
for i in range(max_steps):
    a = np.argmax(Q[s,:])
    s, _, done, _ = env.step(a)
    #env.render()
    if done:
        break
# Evaluate the greedy policy over 100 episodes and collect total rewards.
rewards = []# ======= insert code here
rewards2=[]  # NOTE(review): never used
for _ in range(100):
    s = env.reset()
    done = False
    #env.render()
    rprestep = []
    #Test the learned Agent
    for i in range(max_steps):
        a = np.argmax(Q[s, :])
        s, reward0, done, _ = env.step(a)
        rprestep.append(reward0)
        env.render()
        if done:
            break
    print('----------- ')
    rewards.append(np.sum(rprestep))
r_mean = np.mean(rewards)
r_var = np.var(rewards)
print(rewards)
# # ======= end of inserted code
print("平均回报为{},回报的方差为{}。".format(r_mean, r_var))
env.close()
| [
"numpy.mean",
"numpy.random.rand",
"numpy.argmax",
"numpy.any",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"gym.make",
"numpy.var"
] | [((30, 49), 'gym.make', 'gym.make', (['"""Taxi-v3"""'], {}), "('Taxi-v3')\n", (38, 49), False, 'import gym\n'), ((357, 373), 'numpy.zeros', 'np.zeros', (['[m, n]'], {}), '([m, n])\n', (365, 373), True, 'import numpy as np\n'), ((377, 393), 'numpy.zeros', 'np.zeros', (['[m, n]'], {}), '([m, n])\n', (385, 393), True, 'import numpy as np\n'), ((2149, 2165), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (2156, 2165), True, 'import numpy as np\n'), ((2175, 2190), 'numpy.var', 'np.var', (['rewards'], {}), '(rewards)\n', (2181, 2190), True, 'import numpy as np\n'), ((1598, 1616), 'numpy.argmax', 'np.argmax', (['Q[s, :]'], {}), '(Q[s, :])\n', (1607, 1616), True, 'import numpy as np\n'), ((748, 764), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (762, 764), True, 'import numpy as np\n'), ((1919, 1937), 'numpy.argmax', 'np.argmax', (['Q[s, :]'], {}), '(Q[s, :])\n', (1928, 1937), True, 'import numpy as np\n'), ((2121, 2137), 'numpy.sum', 'np.sum', (['rprestep'], {}), '(rprestep)\n', (2127, 2137), True, 'import numpy as np\n'), ((888, 906), 'numpy.argmax', 'np.argmax', (['Q[s, :]'], {}), '(Q[s, :])\n', (897, 906), True, 'import numpy as np\n'), ((790, 805), 'numpy.any', 'np.any', (['Q[s, :]'], {}), '(Q[s, :])\n', (796, 805), True, 'import numpy as np\n'), ((1346, 1365), 'numpy.max', 'np.max', (['Q[s_new, :]'], {}), '(Q[s_new, :])\n', (1352, 1365), True, 'import numpy as np\n')] |
"""
The `methods` script contains functions for estimating the period of a star.
"""
import lightkurve as lk
import astropy.units as u
import numpy as np
from scipy.signal import find_peaks
from scipy import interpolate
from scipy.optimize import curve_fit
from scipy.ndimage import gaussian_filter1d
import warnings
import jazzhands
from .utils import _gaussian_fn, _safety
def simple_astropy_lombscargle(j, sector, period_range):
    r"""
    Estimate a rotation period from a Lomb-Scargle periodogram.

    Following some criteria from Feinstein+2020 [1-3] and Nielsen+2013 [4]
    1) Period must be less than 12 days
        - Set maximum LombScargle period to 12 days
    2) The FWHM of the Gaussian fit to the peak power must be < 40% peak period
    3) Secondary peak must be 10% weaker than the primary peak
    4) Peak must be at least 4x above the time-series RMS noise, where
        $\sigma_{\textrm PS} = 4\sigma^2_{\textrm RMS} / N$
        where $N$ is the number of data points in the light curve.

    We calculate a periodogram using Lightkurve.  We set the normalization to
    'psd' to reduce the dynamic range, making the data easier to fit.  We set
    an oversample factor of 100 to improve the smoothness of the fit.

    We fit using Scipy.Optimize.curve_fit. This is a bit of a hack-- really a
    PSD like this should be fit using a Gamma function. However for a clear
    signal such as this that would be overkill.  Curve_fit will do the job
    perfectly well.

    Results are written into ``j.results`` (columns 'SLS', 'e_SLS', 'h_SLS'
    and the quality flag 'f_SLS') and ``j.void``.

    Parameters
    ----------
    j: class
        The `janet` class containing the metadata on our star.
    sector: int
        The sector for which to calculate the simple astropy lombscargle period.
        If 'all', calculates for all sectors stitched together.
    period_range: tuple
        Lower and upper period limits (days) of the search.
    """
    if j.verbose:
        print(f'### Running Simple Astropy Lomb-Scargle on Sector {sector} on star {j.gaiaid} ###')

    # Call the relevant light curve
    clc = j.void[f'clc_{sector}']

    pg = clc.to_periodogram(minimum_period = period_range[0], maximum_period = period_range[1], normalization='psd', oversample_factor=100,
                freq_unit = 1/u.day)

    # Select the region around the highest peak
    max_period = pg.period_at_max_power.value
    max_power = pg.max_power.value
    s = (pg.period.value > 0.6*max_period) & (pg.period.value < 1.4*max_period)
    p = pg[s].period.value
    P = pg[s].power.value

    # Store the periodogram for plotting
    j.void[f'pg_{sector}'] = pg
    j.void[f'p_{sector}'] = p
    j.void[f'P_{sector}'] = P

    # Fit a Gaussian
    ## Params are mu, sigma, Amplitude
    # Clip the fit bounds so they never leave the requested period range.
    lolim = 0.8*max_period
    if lolim < period_range[0]:
        lolim = period_range[0]
    uplim = 1.2*max_period
    if uplim > period_range[1]:
        uplim = period_range[1]
    popt, pcov = curve_fit(_gaussian_fn, p, P, p0 = [max_period, 0.1*max_period, max_power],
                        bounds = ([lolim, 0., 0.9*max_power],[uplim, 0.25*max_period, 1.1*max_power]))

    j.results.loc[sector, 'SLS'] = popt[0]
    j.results.loc[sector, 'e_SLS'] = popt[1]
    j.results.loc[sector, 'h_SLS'] = popt[2]
    j.results.loc[sector, 'f_SLS'] = 0

    # Perform quality checks
    ## Condition (2): FWHM (2.355 sigma) must be under 40% of the period.
    if popt[1]*2.355 > 0.4*popt[0]:
        j.results.loc[sector, 'f_SLS'] += 2

    ## Condition (3): flag a secondary peak within 10% of the primary's power.
    peaks, _ = find_peaks(pg.power.value, height = 0.9*max_power)
    if len(peaks) > 1:
        j.results.loc[sector, 'f_SLS'] += 3

    # Double check if the presence of a second peak has upset the fits
    # If so, repeat the fit in a smaller range
    peaks, _ = find_peaks(P, height=0.9*max_power)
    if len(peaks) > 1:
        s = (pg.period.value > 0.8*max_period) & (pg.period.value < 1.2*max_period)
        popt, pcov = curve_fit(_gaussian_fn, pg[s].period.value, pg[s].power.value,
                            p0 = [max_period, 0.2*max_period, max_power],
                            bounds = ([lolim, 0., 0.9*max_power],[uplim, 0.25*max_period, 1.1*max_power]))

        j.results.loc[sector, 'SLS'] = popt[0]
        j.results.loc[sector, 'e_SLS'] = popt[1]
        j.results.loc[sector, 'h_SLS'] = popt[2]

    ## Condition (4): the peak must clear 4x the PSD-level RMS noise.
    sig_rms = np.sqrt(np.mean((clc.flux.value - 1)**2))
    sig_ps = 4 * sig_rms**2 / len(clc)
    if popt[2] < 4 * sig_ps:
        j.results.loc[sector, 'f_SLS'] += 4

    # Save the gaussian fit
    j.void[f'popt_{sector}'] = popt

    if j.verbose:
        print(f'### Completed Simple Astropy Lomb-Scargle for Sector {sector} on star {j.gaiaid} ###')

    _safety(j)
def _calculate_wavelet(clc, period_range, sector, j):
    """Run a wavelet transform on *clc* and Gaussian-fit the collapsed spectrum.

    Stores the transformer and its wwz/wwa products in ``j.void`` under the
    sector key, collapses the wwz power along the time axis, and fits a
    Gaussian around the tallest peak.  Returns the ``curve_fit`` output
    ``(popt, pcov)`` where popt is (mean period, width, height).
    """
    wt = jazzhands.WaveletTransformer(clc.time.value, clc.flux.value)
    _, _, wwz, wwa = wt.auto_compute(nu_min=1. / period_range[1],
                                     nu_max=1. / period_range[0])

    j.void[f'{sector}_wt'] = wt
    j.void[f'{sector}_wwz'] = wwz
    j.void[f'{sector}_wwa'] = wwa

    # Collapse the wavelet power along the time axis and normalise it.
    collapsed = np.sum(wwz, axis=1)
    collapsed /= collapsed.max()
    periods = 1 / wt.nus

    peak_power = np.max(collapsed)
    peak_period = periods[np.argmax(collapsed)]

    # Restrict the fit to a window around the tallest peak.
    window = (periods > 0.6 * peak_period) & (periods < 1.4 * peak_period)
    win_power = collapsed[window]
    win_periods = periods[window]

    # Fit a Gaussian; params are mu, sigma, amplitude.
    lolim = 0.8 * peak_period
    uplim = 1.2 * peak_period
    # Only clip the bounds to the search range when the peak itself lies
    # inside it; otherwise leave the limits untouched.
    if (peak_period > period_range[0]) & (peak_period < period_range[1]):
        if lolim < period_range[0]:
            lolim = period_range[0]
        if uplim > period_range[1]:
            uplim = period_range[1]

    popt, pcov = curve_fit(_gaussian_fn, win_periods, win_power,
                           p0=[peak_period, 0.1 * peak_period, peak_power],
                           bounds=([lolim, 0., 0.9 * peak_power],
                                   [uplim, 0.25 * peak_period, 1.1 * peak_power]))
    return popt, pcov
def simple_wavelet(j, sector, period_range):
    """Estimate a rotation period from the collapsed wavelet spectrum.

    Uses the 'jazzhands' package (wavelet analysis following Foster 1996 and
    Torrence & Compo 1998) via ``_calculate_wavelet``, then records the fitted
    Gaussian's mean, width and height in ``j.results`` as the 'SW' period, its
    uncertainty 'e_SW' and height 'h_SW'.

    Parameters
    ----------
    j: class
        The `janet` class containing the metadata on our star.
    sector: int
        The sector for which to calculate the wavelet period.
        If 'all', calculates for all sectors stitched together.
    period_range: tuple
        Lower and upper period limits (days) of the search.
    """
    if j.verbose:
        print(f'### Running Wavelet Estimation for Sector {sector} on star {j.gaiaid} ###')

    lc = j.void[f'clc_{sector}']
    popt, pcov = _calculate_wavelet(lc, period_range, sector, j)

    mu, sigma, height = popt
    j.results.loc[sector, 'SW'] = mu
    j.results.loc[sector, 'e_SW'] = sigma
    j.results.loc[sector, 'h_SW'] = height

    # Save the gaussian fit parameters for later plotting.
    j.void[f'{sector}_wavelet_popt'] = popt

    if j.verbose:
        print(f'### Completed Wavelet Estimation for Sector {sector} on star {j.gaiaid} ###')
    _safety(j)
def composite_ACF(j, sector, period_range):
    """
    For the composite ACF (CACF) estimator, we follow the guidelines presented in
    Ceiller et al. (2016, 2017) and Santos et al. (2020, 2021), amongst others.

    The CACF is the product between a normalised collapsed wavelet spectrum and
    the normalised simple ACF.

    We fit the tallest peak in the resulting spectrum using Scipy.Optimize.curve_fit.
    The resulting mean and width of the Gaussian function approximating the peak is
    reported as the period and associated uncertainty, stored in ``j.results``
    as 'CACF', 'e_CACF' and 'h_CACF'.

    If no peaks are found, NaN is reported and a flag is raised when
    validating the rotation periods.

    Parameters
    ----------
    j: class
        The `janet` class containing the metadata on our star.

    sector: int
        The sector for which to calculate the simple astropy lombscargle period.
        If 'all', calculates for all sectors stitched together.

    period_range: tuple
        The lower and upper limit on period range to search for a rotational
        signal. Default is (0.2, 13.7) based on the McQuillan et al. (2014)
        search range and the limitations of TESS earthshine.
    """
    if j.verbose:
        print(f'### Running Composite ACF estimation for Sector {sector} on star {j.gaiaid} ###')

    # Extract the collapsed wavelet power, flipped so period is ascending,
    # and build an interpolator over period.
    w = np.flip(np.sum(j.void[f'{sector}_wwz'], axis=1))
    x = np.flip(1./j.void[f'{sector}_wt'].nus)
    f = interpolate.interp1d(x, w)

    # Calculate the ACF for the relevant sector
    lc = j.void[f'clc_{sector}']
    acf = np.correlate(lc.flux.value-1, lc.flux.value-1, mode='full')[len(lc)-1:]
    lag = lc.time.value - lc.time.value.min()
    norm_acf = acf/np.nanmax(acf)
    acflc = lk.LightCurve(time=lag, flux=norm_acf)
    # Restrict the ACF to the requested period range.
    vizacf = acflc[(acflc.time.value <= period_range[1])]
    vizacf = vizacf[(vizacf.time.value >= period_range[0])]

    # Calculate the composite ACF by interpolating the wavelet onto a new x axis
    xnew = vizacf.time.value
    wnew = f(xnew)
    cacf = vizacf * (wnew/np.nanmax(wnew))

    # Smooth the CACF with a Gaussian kernel.
    sd = 2.
    cacfsmoo = gaussian_filter1d(cacf.flux.value, sigma = sd, mode='nearest')

    # Identify maxima above a threshold of 0.01, at least 10 samples apart.
    cpeaks = find_peaks(cacfsmoo, height = 0.01, distance=10)

    # No peaks found: record NaNs and let validation flag the sector.
    if len(cpeaks[0]) == 0:
        j.results.loc[sector, 'CACF'] = np.nan
        j.results.loc[sector, 'e_CACF'] = np.nan
        j.results.loc[sector, 'h_CACF'] = np.nan
        j.void[f'{sector}_cacf_popt'] = np.nan

    else:
        # Fit a Gaussian around the tallest peak; params are mu, sigma, amplitude.
        peak = cpeaks[0][np.argmax(cpeaks[1]['peak_heights'])]
        Px = cacf[peak]['time'].value
        Py = cacfsmoo[peak]
        # Clip the fit bounds so they never leave the requested period range.
        lolim = 0.8*Px
        if lolim < period_range[0]:
            lolim = period_range[0]
        uplim = 1.2*Px
        if uplim > period_range[1]:
            uplim = period_range[1]

        popt, pcov = curve_fit(_gaussian_fn, cacf.time.value, cacfsmoo,
                            p0 = [Px, 0.1*Px, Py],
                            bounds = ([lolim, 0., 0.9*Py],
                                    [uplim, 0.25*Px, 1.1*Py]))

        j.results.loc[sector, 'CACF'] = popt[0]
        j.results.loc[sector, 'e_CACF'] = popt[1]
        j.results.loc[sector, 'h_CACF'] = popt[2]
        j.void[f'{sector}_cacf_popt'] = popt

    # Save the metadata
    j.void[f'{sector}_vizacf'] = vizacf
    j.void[f'{sector}_cacf'] = cacf
    j.void[f'{sector}_cacfsmoo'] = cacfsmoo
    j.void[f'{sector}_cpeaks'] = cpeaks[0]

    if j.verbose:
        print(f'### Completed Composite ACF estimation for Sector {sector} on star {j.gaiaid} ###')

    _safety(j)
def simple_ACF(j, period_range):
    """
    For the ACF estimator, we follow the guidelines presented in Garcia et al.
    (2014), which builds upon the work by McQuillan et al. (2013a, b). There is
    no easy way to reliably estimate an uncertainty for the ACF, so instead we
    will use it as a check on the SLS and WS period estimates.

    First, we take the autocorrelation of the time series, by shifting the
    time series over itself. We then take a periodogram of the ACF, and use the
    period of the peak of highest power as the first-guess ACF period.

    The ACF is then smoothed by convolving with a Gaussian Kernel. We use a
    peak-finding algorithm to identify any peaks in the smoothed spectrum above
    an arbitrary threshold of 0.01.

    If peaks are found, the first (lowest period) peak is used as the ACF period
    and stored in ``j.results`` under ('all', 'ACF').
    If no peaks are found, NaN is reported and a flag is raised when
    validating the rotation periods.

    Note: always operates on the stitched 'all' light curve.

    Parameters
    ----------
    j: class
        The `janet` class containing the metadata on our star.

    period_range: tuple
        The lower and upper limit on period range to search for a rotational
        signal. Default is (0.2, 13.7) based on the McQuillan et al. (2014)
        search range and the limitations of TESS earthshine.
    """
    if j.verbose:
        print(f'### Running ACF Estimation on star {j.gaiaid} ###')

    clc = j.void['clc_all']

    # Calculate the full autocorrelation function of the stitched light curve.
    acf = np.correlate(clc.flux.value-1, clc.flux.value-1, mode='full')[len(clc)-1:]
    lag = clc.time.value - np.nanmin(clc.time.value)

    # Cut up and normalize the ACF
    secmin = j.sectors[0]
    norm_acf = acf/np.nanmax(acf)
    acflc = lk.LightCurve(time=lag, flux=norm_acf)
    # Truncate the ACF to the baseline of the first sector's light curve.
    acflc = acflc[acflc.time.value < (j.void[f'clc_{secmin}'].time.value - j.void[f'clc_{secmin}'].time.value.min()).max()]

    # Estimate a first-guess period
    acfpg = acflc.to_periodogram()
    first_guess = acfpg.period_at_max_power

    # Warn when the first guess falls outside the search range.
    if not period_range[0] < first_guess.value < period_range[1]:
        warnings.warn("The highest peak in the ACF lies outside the period range of your search.")
    vizacf = acflc[(acflc.time.value <= period_range[1])]
    vizacf = vizacf[(vizacf.time.value >= period_range[0])]

    # Smooth the ACF with a Gaussian kernel.
    sd = 2.
    acfsmoo = gaussian_filter1d(vizacf.flux.value, sigma = sd, mode='nearest')

    # Identify maxima above a threshold of 0.01.
    peaks, _ = find_peaks(acfsmoo, height = 0.01)

    # Save the metadata
    j.void['acflc'] = acflc
    j.void['vizacf'] = vizacf
    j.void['acfsmoo'] = acfsmoo
    j.void['peaks'] = peaks

    # The first of these maxima (with the shortest period) corresponds to Prot
    if len(peaks) >= 1:
        acf_period = vizacf.time.value[peaks[0]]
        j.results.loc['all', 'ACF'] = acf_period

    # No peaks found
    else:
        j.results.loc['all', 'ACF'] = np.nan

    if j.verbose:
        print(f'### Completed ACF Estimation on star {j.gaiaid} ###')
    _safety(j)
| [
"scipy.optimize.curve_fit",
"jazzhands.WaveletTransformer",
"numpy.flip",
"numpy.mean",
"numpy.argmax",
"numpy.max",
"scipy.interpolate.interp1d",
"numpy.sum",
"lightkurve.LightCurve",
"numpy.correlate",
"numpy.nanmax",
"scipy.signal.find_peaks",
"numpy.nanmin",
"scipy.ndimage.gaussian_fil... | [((2827, 2995), 'scipy.optimize.curve_fit', 'curve_fit', (['_gaussian_fn', 'p', 'P'], {'p0': '[max_period, 0.1 * max_period, max_power]', 'bounds': '([lolim, 0.0, 0.9 * max_power], [uplim, 0.25 * max_period, 1.1 * max_power])'}), '(_gaussian_fn, p, P, p0=[max_period, 0.1 * max_period, max_power],\n bounds=([lolim, 0.0, 0.9 * max_power], [uplim, 0.25 * max_period, 1.1 *\n max_power]))\n', (2836, 2995), False, 'from scipy.optimize import curve_fit\n'), ((3351, 3401), 'scipy.signal.find_peaks', 'find_peaks', (['pg.power.value'], {'height': '(0.9 * max_power)'}), '(pg.power.value, height=0.9 * max_power)\n', (3361, 3401), False, 'from scipy.signal import find_peaks\n'), ((4714, 4748), 'jazzhands.WaveletTransformer', 'jazzhands.WaveletTransformer', (['t', 'f'], {}), '(t, f)\n', (4742, 4748), False, 'import jazzhands\n'), ((4979, 4998), 'numpy.sum', 'np.sum', (['wwz'], {'axis': '(1)'}), '(wwz, axis=1)\n', (4985, 4998), True, 'import numpy as np\n'), ((5057, 5066), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (5063, 5066), True, 'import numpy as np\n'), ((5595, 5733), 'scipy.optimize.curve_fit', 'curve_fit', (['_gaussian_fn', 'p', 'w'], {'p0': '[max_p, 0.1 * max_p, max_w]', 'bounds': '([lolim, 0.0, 0.9 * max_w], [uplim, 0.25 * max_p, 1.1 * max_w])'}), '(_gaussian_fn, p, w, p0=[max_p, 0.1 * max_p, max_w], bounds=([\n lolim, 0.0, 0.9 * max_w], [uplim, 0.25 * max_p, 1.1 * max_w]))\n', (5604, 5733), False, 'from scipy.optimize import curve_fit\n'), ((8898, 8939), 'numpy.flip', 'np.flip', (["(1.0 / j.void[f'{sector}_wt'].nus)"], {}), "(1.0 / j.void[f'{sector}_wt'].nus)\n", (8905, 8939), True, 'import numpy as np\n'), ((8945, 8971), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'w'], {}), '(x, w)\n', (8965, 8971), False, 'from scipy import interpolate\n'), ((9228, 9266), 'lightkurve.LightCurve', 'lk.LightCurve', ([], {'time': 'lag', 'flux': 'norm_acf'}), '(time=lag, flux=norm_acf)\n', (9241, 9266), True, 'import lightkurve as 
lk\n'), ((9610, 9670), 'scipy.ndimage.gaussian_filter1d', 'gaussian_filter1d', (['cacf.flux.value'], {'sigma': 'sd', 'mode': '"""nearest"""'}), "(cacf.flux.value, sigma=sd, mode='nearest')\n", (9627, 9670), False, 'from scipy.ndimage import gaussian_filter1d\n'), ((9749, 9795), 'scipy.signal.find_peaks', 'find_peaks', (['cacfsmoo'], {'height': '(0.01)', 'distance': '(10)'}), '(cacfsmoo, height=0.01, distance=10)\n', (9759, 9795), False, 'from scipy.signal import find_peaks\n'), ((13097, 13135), 'lightkurve.LightCurve', 'lk.LightCurve', ([], {'time': 'lag', 'flux': 'norm_acf'}), '(time=lag, flux=norm_acf)\n', (13110, 13135), True, 'import lightkurve as lk\n'), ((13738, 13800), 'scipy.ndimage.gaussian_filter1d', 'gaussian_filter1d', (['vizacf.flux.value'], {'sigma': 'sd', 'mode': '"""nearest"""'}), "(vizacf.flux.value, sigma=sd, mode='nearest')\n", (13755, 13800), False, 'from scipy.ndimage import gaussian_filter1d\n'), ((13880, 13912), 'scipy.signal.find_peaks', 'find_peaks', (['acfsmoo'], {'height': '(0.01)'}), '(acfsmoo, height=0.01)\n', (13890, 13912), False, 'from scipy.signal import find_peaks\n'), ((3615, 3652), 'scipy.signal.find_peaks', 'find_peaks', (['P'], {'height': '(0.9 * max_power)'}), '(P, height=0.9 * max_power)\n', (3625, 3652), False, 'from scipy.signal import find_peaks\n'), ((4255, 4289), 'numpy.mean', 'np.mean', (['((clc.flux.value - 1) ** 2)'], {}), '((clc.flux.value - 1) ** 2)\n', (4262, 4289), True, 'import numpy as np\n'), ((5081, 5093), 'numpy.argmax', 'np.argmax', (['w'], {}), '(w)\n', (5090, 5093), True, 'import numpy as np\n'), ((8849, 8888), 'numpy.sum', 'np.sum', (["j.void[f'{sector}_wwz']"], {'axis': '(1)'}), "(j.void[f'{sector}_wwz'], axis=1)\n", (8855, 8888), True, 'import numpy as np\n'), ((9064, 9127), 'numpy.correlate', 'np.correlate', (['(lc.flux.value - 1)', '(lc.flux.value - 1)'], {'mode': '"""full"""'}), "(lc.flux.value - 1, lc.flux.value - 1, mode='full')\n", (9076, 9127), True, 'import numpy as np\n'), ((9201, 9215), 
'numpy.nanmax', 'np.nanmax', (['acf'], {}), '(acf)\n', (9210, 9215), True, 'import numpy as np\n'), ((10394, 10534), 'scipy.optimize.curve_fit', 'curve_fit', (['_gaussian_fn', 'cacf.time.value', 'cacfsmoo'], {'p0': '[Px, 0.1 * Px, Py]', 'bounds': '([lolim, 0.0, 0.9 * Py], [uplim, 0.25 * Px, 1.1 * Py])'}), '(_gaussian_fn, cacf.time.value, cacfsmoo, p0=[Px, 0.1 * Px, Py],\n bounds=([lolim, 0.0, 0.9 * Py], [uplim, 0.25 * Px, 1.1 * Py]))\n', (10403, 10534), False, 'from scipy.optimize import curve_fit\n'), ((12861, 12926), 'numpy.correlate', 'np.correlate', (['(clc.flux.value - 1)', '(clc.flux.value - 1)'], {'mode': '"""full"""'}), "(clc.flux.value - 1, clc.flux.value - 1, mode='full')\n", (12873, 12926), True, 'import numpy as np\n'), ((12963, 12988), 'numpy.nanmin', 'np.nanmin', (['clc.time.value'], {}), '(clc.time.value)\n', (12972, 12988), True, 'import numpy as np\n'), ((13070, 13084), 'numpy.nanmax', 'np.nanmax', (['acf'], {}), '(acf)\n', (13079, 13084), True, 'import numpy as np\n'), ((13480, 13580), 'warnings.warn', 'warnings.warn', (['"""The highest peak in the ACF lies outside the period range of your search."""'], {}), "(\n 'The highest peak in the ACF lies outside the period range of your search.'\n )\n", (13493, 13580), False, 'import warnings\n'), ((3792, 3994), 'scipy.optimize.curve_fit', 'curve_fit', (['_gaussian_fn', 'pg[s].period.value', 'pg[s].power.value'], {'p0': '[max_period, 0.2 * max_period, max_power]', 'bounds': '([lolim, 0.0, 0.9 * max_power], [uplim, 0.25 * max_period, 1.1 * max_power])'}), '(_gaussian_fn, pg[s].period.value, pg[s].power.value, p0=[\n max_period, 0.2 * max_period, max_power], bounds=([lolim, 0.0, 0.9 *\n max_power], [uplim, 0.25 * max_period, 1.1 * max_power]))\n', (3801, 3994), False, 'from scipy.optimize import curve_fit\n'), ((9542, 9557), 'numpy.nanmax', 'np.nanmax', (['wnew'], {}), '(wnew)\n', (9551, 9557), True, 'import numpy as np\n'), ((10076, 10112), 'numpy.argmax', 'np.argmax', (["cpeaks[1]['peak_heights']"], {}), 
"(cpeaks[1]['peak_heights'])\n", (10085, 10112), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from collections import OrderedDict
from PIL import Image
import numpy as np
def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1):
    """Build an activation layer by name.

    Args:
        act_type: one of 'relu', 'leakyrelu', 'prelu' (case-insensitive).
        inplace: in-place flag forwarded to ReLU / LeakyReLU.
        neg_slope: negative slope for leakyrelu, init value for prelu.
        n_prelu: num_parameters for PReLU.

    Raises:
        NotImplementedError: for any other activation name.
    """
    key = act_type.lower()
    if key == 'relu':
        return nn.ReLU(inplace)
    if key == 'leakyrelu':
        return nn.LeakyReLU(neg_slope, inplace)
    if key == 'prelu':
        return nn.PReLU(num_parameters=n_prelu, init=neg_slope)
    raise NotImplementedError('activation layer [{:s}] is not found'.format(key))
def norm(norm_type, nc):
    """Build a 2d normalization layer ('batch' or 'instance') for nc channels.

    BatchNorm is created with learnable affine parameters, InstanceNorm without.
    """
    key = norm_type.lower()
    if key == 'batch':
        return nn.BatchNorm2d(nc, affine=True)
    if key == 'instance':
        return nn.InstanceNorm2d(nc, affine=False)
    raise NotImplementedError('normalization layer [{:s}] is not found'.format(key))
def pad(pad_type, padding):
    """Build an explicit padding layer, or None when no padding is needed.

    'zero' padding is intentionally not handled here: the conv layer does it
    itself, so callers must not pass pad_type='zero' with padding > 0.
    """
    key = pad_type.lower()
    if padding == 0:
        return None
    if key == 'reflect':
        return nn.ReflectionPad2d(padding)
    if key == 'replicate':
        return nn.ReplicationPad2d(padding)
    raise NotImplementedError('padding layer [{:s}] is not implemented'.format(key))
def get_valid_padding(kernel_size, dilation):
    """Return the padding that keeps spatial size for a dilated conv.

    The effective receptive size of a dilated kernel is
    dilation * (kernel_size - 1) + 1; 'same' padding is half of that minus one.
    """
    effective = dilation * (kernel_size - 1) + 1
    return (effective - 1) // 2
def sequential(*args):
    """Compose modules into one nn.Sequential, flattening nested Sequentials.

    A single non-OrderedDict argument is returned unchanged (no wrapping);
    non-Module entries (e.g. None placeholders) are silently dropped.
    """
    if len(args) == 1:
        if isinstance(args[0], OrderedDict):
            raise NotImplementedError('sequential does not support OrderedDict input.')
        return args[0]  # nothing to compose
    flattened = []
    for item in args:
        if isinstance(item, nn.Sequential):
            flattened.extend(item.children())
        elif isinstance(item, nn.Module):
            flattened.append(item)
    return nn.Sequential(*flattened)
def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True, \
            pad_type='zero', norm_type=None, act_type='relu', mode='CNA'):
    '''
    Conv layer with padding, normalization, activation.

    mode: CNA --> Conv -> Norm -> Act
          NAC --> Norm -> Act --> Conv (Identity Mappings in Deep Residual Networks, ECCV16)
          CNAC is handled by the CNA branch ('CNA' in 'CNAC'); callers such as
          ResNetBlock use it to build a residual path without trailing Norm/Act.
    '''
    # Fixed typo in the assertion message ("Wong" -> "Wrong").
    assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode)
    padding = get_valid_padding(kernel_size, dilation)
    # Explicit padding layer only for non-zero pad types; 'zero' padding is
    # delegated to nn.Conv2d via its padding argument.
    p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
    padding = padding if pad_type == 'zero' else 0
    c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, \
            dilation=dilation, bias=bias, groups=groups)
    a = act(act_type) if act_type else None
    if 'CNA' in mode:  # covers both 'CNA' and 'CNAC'
        n = norm(norm_type, out_nc) if norm_type else None
        return sequential(p, c, n, a)
    elif mode == 'NAC':
        if norm_type is None and act_type is not None:
            a = act(act_type, inplace=False)
            # Important!
            # input----ReLU(inplace)----Conv--+----output
            #        |________________________|
            # an inplace ReLU would modify the input, therefore wrong output
        n = norm(norm_type, in_nc) if norm_type else None
        return sequential(n, a, p, c)
def conv_block_downsample(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True, \
            pad_type='zero', norm_type=None, act_type='relu', mode='CNA'):
    '''
    Conv block used on the downsampling path (callers pass stride=2).

    The original body was a byte-for-byte copy of conv_block, so this now
    delegates to it: behavior is identical for every argument combination,
    and future fixes to conv_block automatically apply here too.
    '''
    return conv_block(in_nc, out_nc, kernel_size, stride=stride, dilation=dilation,
                      groups=groups, bias=bias, pad_type=pad_type,
                      norm_type=norm_type, act_type=act_type, mode=mode)
class ShortcutBlock(nn.Module):
    """Residual wrapper: forward(x) = x + submodule(x)."""

    def __init__(self, submodule):
        super(ShortcutBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return x + self.sub(x)

    def __repr__(self):
        # Prefix every line of the submodule's repr with '|' to show nesting.
        return 'Identity + \n|' + self.sub.__repr__().replace('\n', '\n|')
class ResNetBlock(nn.Module):
    '''
    ResNet Block, 3-3 style,
    with the extra residual scaling used in EDSR
    (Enhanced Deep Residual Networks for Single Image Super-Resolution, CVPRW 17).
    '''

    def __init__(self, in_nc, mid_nc, out_nc, kernel_size=3, stride=1, dilation=1, groups=1, \
            bias=True, pad_type='zero', norm_type=None, act_type='relu', mode='CNA', res_scale=1):
        super(ResNetBlock, self).__init__()
        first = conv_block(in_nc, mid_nc, kernel_size, stride, dilation, groups, bias, pad_type, \
            norm_type, act_type, mode)
        # The second conv of the residual path carries no trailing activation
        # (and for 'CNAC' no normalization either).
        if mode == 'CNA':
            act_type = None
        if mode == 'CNAC':  # residual path: |-CNAC-|
            act_type = None
            norm_type = None
        second = conv_block(mid_nc, out_nc, kernel_size, stride, dilation, groups, bias, pad_type, \
            norm_type, act_type, mode)
        self.res = sequential(first, second)
        self.res_scale = res_scale

    def forward(self, x):
        # Scale the residual branch before adding the identity shortcut.
        return x + self.res(x).mul(self.res_scale)
class De_Resnet(nn.Module):
    """Degradation network: residual trunk followed by two stride-2 downsampling convs.

    NOTE(review): the `downscale` argument is currently unused — the fixed pair
    of stride-2 convs always downsamples by 4x; confirm against callers.
    """

    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=22, downscale=4, norm_type=None, act_type=None, \
            mode='CNA', res_scale=1):
        super(De_Resnet, self).__init__()
        # Construction order is kept identical to preserve weight-init RNG order.
        head = conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
        trunk = [ResNetBlock(nf, nf, nf, norm_type=norm_type, act_type='relu', \
            mode=mode, res_scale=res_scale) for _ in range(nb)]
        trunk_tail = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
        tail0 = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
        tail1 = conv_block(nf, out_nc, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
        down0 = conv_block_downsample(nf, nf, kernel_size=3, stride=2, norm_type=None, act_type='relu')
        down1 = conv_block_downsample(nf, nf, kernel_size=3, stride=2, norm_type=None, act_type='relu')
        self.model = sequential(head, ShortcutBlock(sequential(*trunk, trunk_tail)), \
            down0, down1, tail0, tail1)

    def forward(self, x):
        return self.model(x)
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array.

    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order.
    A 4D input must have batch size 1, since squeeze() is used to drop it.
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default).
    '''
    lo, hi = min_max
    tensor = tensor.squeeze().float().cpu().clamp_(lo, hi)
    tensor = (tensor - lo) / (hi - lo)  # rescale to [0, 1]
    n_dim = tensor.dim()
    if n_dim == 3:
        # CHW -> HWC; the [[0, 1, 2]] index keeps channel order unchanged.
        img_np = np.transpose(tensor.numpy()[[0, 1, 2], :, :], (1, 2, 0))
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        # Round explicitly: unlike matlab, a plain uint8 cast truncates.
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)
if __name__ == '__main__':
    # Demo entry point: load a pre-trained degradation network onto the GPU.
    # NOTE(review): the hard-coded absolute path below only exists on the
    # original author's machine -- adjust before running.
    model_path = '/media/4T/Dizzy/BasicSR-master/Final_models/DSDG/4x/PSNR-oriented/Degradation_Resnet_psnr62.1_G.pth'
    # location of pre-trained model
    model = De_Resnet().cuda()  # requires a CUDA-capable device
    model.load_state_dict(torch.load(model_path))
    # The commented block below shows the intended usage: load an HR image,
    # run it through the model, and display the degraded LR result.
    # img = np.array(Image.open('/media/4T/Dizzy/AIM/AIM_datasets/DIV2K_val100/DIV2K_valid_HR/0801.png'))/255
    # img = np.transpose(img, (2, 0, 1))
    # img = np.reshape(img, [1]+list(img.shape))
    # img = torch.FloatTensor(img).cuda()
    #
    # print(img.shape)
    # with torch.no_grad():
    #     lr = model(img)
    # print(lr.shape)
    # # lr *= 255
    # lr = tensor2img(lr)
    #
    # lr_pil = Image.fromarray(lr)
    # lr_pil.show()
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.LeakyReLU",
"torch.nn.Sequential",
"torch.nn.ReflectionPad2d",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.PReLU",
"numpy.transpose",
"torch.nn.ReplicationPad2d"
] | [((2255, 2278), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (2268, 2278), True, 'import torch.nn as nn\n'), ((2919, 3050), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_nc', 'out_nc'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'bias': 'bias', 'groups': 'groups'}), '(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=\n padding, dilation=dilation, bias=bias, groups=groups)\n', (2928, 3050), True, 'import torch.nn as nn\n'), ((4297, 4428), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_nc', 'out_nc'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'bias': 'bias', 'groups': 'groups'}), '(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=\n padding, dilation=dilation, bias=bias, groups=groups)\n', (4306, 4428), True, 'import torch.nn as nn\n'), ((372, 388), 'torch.nn.ReLU', 'nn.ReLU', (['inplace'], {}), '(inplace)\n', (379, 388), True, 'import torch.nn as nn\n'), ((834, 865), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nc'], {'affine': '(True)'}), '(nc, affine=True)\n', (848, 865), True, 'import torch.nn as nn\n'), ((1306, 1333), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['padding'], {}), '(padding)\n', (1324, 1333), True, 'import torch.nn as nn\n'), ((8670, 8718), 'numpy.transpose', 'np.transpose', (['img_np[[0, 1, 2], :, :]', '(1, 2, 0)'], {}), '(img_np[[0, 1, 2], :, :], (1, 2, 0))\n', (8682, 8718), True, 'import numpy as np\n'), ((9342, 9364), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (9352, 9364), False, 'import torch\n'), ((439, 471), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['neg_slope', 'inplace'], {}), '(neg_slope, inplace)\n', (451, 471), True, 'import torch.nn as nn\n'), ((916, 951), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['nc'], {'affine': '(False)'}), '(nc, affine=False)\n', (933, 951), True, 'import torch.nn as nn\n'), ((1384, 1412), 'torch.nn.ReplicationPad2d', 
'nn.ReplicationPad2d', (['padding'], {}), '(padding)\n', (1403, 1412), True, 'import torch.nn as nn\n'), ((518, 566), 'torch.nn.PReLU', 'nn.PReLU', ([], {'num_parameters': 'n_prelu', 'init': 'neg_slope'}), '(num_parameters=n_prelu, init=neg_slope)\n', (526, 566), True, 'import torch.nn as nn\n')] |
import pytest
pytest.importorskip('numpy')
import numpy as np
import pytest
import dask.array as da
from dask.array.utils import assert_eq
def test_linspace():
    """da.linspace matches np.linspace and builds deterministic graph keys."""
    cases = [
        ((6, 49), {}),
        ((1.4, 4.9), {'num': 13}),
        ((6, 49), {'dtype': float}),
        ((1.4, 4.9), {'num': 13, 'dtype': int}),
    ]
    for (start, stop), kwargs in cases:
        assert_eq(da.linspace(start, stop, chunks=5, **kwargs),
                  np.linspace(start, stop, **kwargs))

    # Identical calls must produce identical (deterministic) graph keys.
    for (start, stop), kwargs in [((1.4, 4.9), {'num': 13}),
                                  ((6, 49), {'dtype': float})]:
        first = da.linspace(start, stop, chunks=5, **kwargs)
        second = da.linspace(start, stop, chunks=5, **kwargs)
        assert sorted(first.dask) == sorted(second.dask)
def test_arange():
    """da.arange agrees with np.arange, including negative and empty ranges."""
    cases = [
        ((77,), {}, 13),
        ((2, 13), {}, 5),
        ((4, 21, 9), {}, 13),
        ((53, 5, -3), {}, 5),           # negative step
        ((77,), {'dtype': float}, 13),
        ((2, 13), {'dtype': int}, 5),
        ((0, 1, -0.5), {}, 20),         # 0 size output
        ((0, -1, 0.5), {}, 20),         # 0 size output
    ]
    for args, kwargs, chunks in cases:
        assert_eq(da.arange(*args, chunks=chunks, **kwargs),
                  np.arange(*args, **kwargs))

    # Identical calls must produce identical (deterministic) graph keys.
    for args, kwargs, chunks in [((2, 13), {}, 5),
                                 ((77,), {'dtype': float}, 13)]:
        first = da.arange(*args, chunks=chunks, **kwargs)
        second = da.arange(*args, chunks=chunks, **kwargs)
        assert sorted(first.dask) == sorted(second.dask)
def test_arange_has_dtype():
    """The dask dtype matches numpy's default inferred dtype."""
    darr = da.arange(5, chunks=2)
    nparr = np.arange(5)
    assert darr.dtype == nparr.dtype
# Fixed the implicitly-concatenated reason string, which was missing a space
# ("...since edgebehavior is not...").
@pytest.mark.xfail(reason="Casting floats to ints is not supported since edge "
                          "behavior is not specified or guaranteed by NumPy.")
def test_arange_cast_float_int_step():
    """arange with float bounds/step but an int dtype (unspecified by NumPy)."""
    darr = da.arange(3.3, -9.1, -.25, chunks=3, dtype='i8')
    nparr = np.arange(3.3, -9.1, -.25, dtype='i8')
    assert_eq(darr, nparr)
def test_arange_float_step():
    """da.arange with non-integer steps matches np.arange."""
    cases = [
        (2., 13., .3, 4),
        (7.7, 1.5, -.8, 3),
        (0, 1, 0.01, 20),
        (0, 1, 0.03, 20),
    ]
    for start, stop, step, chunks in cases:
        assert_eq(da.arange(start, stop, step, chunks=chunks),
                  np.arange(start, stop, step))
def test_indices_no_chunks():
    """Omitting the chunks argument must raise ValueError."""
    with pytest.raises(ValueError):
        da.indices((1,))
def test_indices_wrong_chunks():
    """An empty chunks tuple for a 1-d shape must raise ValueError."""
    with pytest.raises(ValueError):
        da.indices((1,), chunks=())
def test_empty_indicies():
    """Zero-size shapes match np.indices in shape, dtype, and contents."""
    cases = [
        ((), None, ()),
        ((), float, ()),
        ((0,), float, (1,)),
        ((0, 1, 2), float, (1, 1, 2)),
    ]
    for shape, dtype, chunks in cases:
        if dtype is None:
            darr = da.indices(shape, chunks=chunks)
            nparr = np.indices(shape)
        else:
            darr = da.indices(shape, dtype, chunks=chunks)
            nparr = np.indices(shape, dtype)
        assert darr.shape == nparr.shape
        assert darr.dtype == nparr.dtype
        assert_eq(darr, nparr)
def test_indicies():
    """da.indices values match np.indices for small non-empty shapes."""
    assert_eq(da.indices((1,), chunks=(1,)), np.indices((1,)))
    assert_eq(da.indices((1,), float, chunks=(1,)), np.indices((1,), float))
    assert_eq(da.indices((2, 1), chunks=(2, 1)), np.indices((2, 1)))
    assert_eq(da.indices((2, 3), chunks=(1, 2)), np.indices((2, 3)))
| [
"dask.array.linspace",
"dask.array.indices",
"pytest.mark.xfail",
"dask.array.utils.assert_eq",
"numpy.indices",
"dask.array.arange",
"numpy.linspace",
"pytest.importorskip",
"pytest.raises",
"numpy.arange"
] | [((14, 42), 'pytest.importorskip', 'pytest.importorskip', (['"""numpy"""'], {}), "('numpy')\n", (33, 42), False, 'import pytest\n'), ((2153, 2290), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Casting floats to ints is not supported since edgebehavior is not specified or guaranteed by NumPy."""'}), "(reason=\n 'Casting floats to ints is not supported since edgebehavior is not specified or guaranteed by NumPy.'\n )\n", (2170, 2290), False, 'import pytest\n'), ((175, 203), 'dask.array.linspace', 'da.linspace', (['(6)', '(49)'], {'chunks': '(5)'}), '(6, 49, chunks=5)\n', (186, 203), True, 'import dask.array as da\n'), ((216, 234), 'numpy.linspace', 'np.linspace', (['(6)', '(49)'], {}), '(6, 49)\n', (227, 234), True, 'import numpy as np\n'), ((239, 261), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (248, 261), False, 'from dask.array.utils import assert_eq\n'), ((274, 313), 'dask.array.linspace', 'da.linspace', (['(1.4)', '(4.9)'], {'chunks': '(5)', 'num': '(13)'}), '(1.4, 4.9, chunks=5, num=13)\n', (285, 313), True, 'import dask.array as da\n'), ((326, 355), 'numpy.linspace', 'np.linspace', (['(1.4)', '(4.9)'], {'num': '(13)'}), '(1.4, 4.9, num=13)\n', (337, 355), True, 'import numpy as np\n'), ((360, 382), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (369, 382), False, 'from dask.array.utils import assert_eq\n'), ((395, 436), 'dask.array.linspace', 'da.linspace', (['(6)', '(49)'], {'chunks': '(5)', 'dtype': 'float'}), '(6, 49, chunks=5, dtype=float)\n', (406, 436), True, 'import dask.array as da\n'), ((449, 480), 'numpy.linspace', 'np.linspace', (['(6)', '(49)'], {'dtype': 'float'}), '(6, 49, dtype=float)\n', (460, 480), True, 'import numpy as np\n'), ((485, 507), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (494, 507), False, 'from dask.array.utils import assert_eq\n'), ((520, 570), 'dask.array.linspace', 'da.linspace', 
(['(1.4)', '(4.9)'], {'chunks': '(5)', 'num': '(13)', 'dtype': 'int'}), '(1.4, 4.9, chunks=5, num=13, dtype=int)\n', (531, 570), True, 'import dask.array as da\n'), ((583, 623), 'numpy.linspace', 'np.linspace', (['(1.4)', '(4.9)'], {'num': '(13)', 'dtype': 'int'}), '(1.4, 4.9, num=13, dtype=int)\n', (594, 623), True, 'import numpy as np\n'), ((628, 650), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (637, 650), False, 'from dask.array.utils import assert_eq\n'), ((955, 979), 'dask.array.arange', 'da.arange', (['(77)'], {'chunks': '(13)'}), '(77, chunks=13)\n', (964, 979), True, 'import dask.array as da\n'), ((992, 1005), 'numpy.arange', 'np.arange', (['(77)'], {}), '(77)\n', (1001, 1005), True, 'import numpy as np\n'), ((1010, 1032), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (1019, 1032), False, 'from dask.array.utils import assert_eq\n'), ((1045, 1071), 'dask.array.arange', 'da.arange', (['(2)', '(13)'], {'chunks': '(5)'}), '(2, 13, chunks=5)\n', (1054, 1071), True, 'import dask.array as da\n'), ((1084, 1100), 'numpy.arange', 'np.arange', (['(2)', '(13)'], {}), '(2, 13)\n', (1093, 1100), True, 'import numpy as np\n'), ((1105, 1127), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (1114, 1127), False, 'from dask.array.utils import assert_eq\n'), ((1140, 1170), 'dask.array.arange', 'da.arange', (['(4)', '(21)', '(9)'], {'chunks': '(13)'}), '(4, 21, 9, chunks=13)\n', (1149, 1170), True, 'import dask.array as da\n'), ((1183, 1202), 'numpy.arange', 'np.arange', (['(4)', '(21)', '(9)'], {}), '(4, 21, 9)\n', (1192, 1202), True, 'import numpy as np\n'), ((1207, 1229), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (1216, 1229), False, 'from dask.array.utils import assert_eq\n'), ((1263, 1293), 'dask.array.arange', 'da.arange', (['(53)', '(5)', '(-3)'], {'chunks': '(5)'}), '(53, 5, -3, chunks=5)\n', (1272, 
1293), True, 'import dask.array as da\n'), ((1306, 1326), 'numpy.arange', 'np.arange', (['(53)', '(5)', '(-3)'], {}), '(53, 5, -3)\n', (1315, 1326), True, 'import numpy as np\n'), ((1331, 1353), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (1340, 1353), False, 'from dask.array.utils import assert_eq\n'), ((1366, 1403), 'dask.array.arange', 'da.arange', (['(77)'], {'chunks': '(13)', 'dtype': 'float'}), '(77, chunks=13, dtype=float)\n', (1375, 1403), True, 'import dask.array as da\n'), ((1416, 1442), 'numpy.arange', 'np.arange', (['(77)'], {'dtype': 'float'}), '(77, dtype=float)\n', (1425, 1442), True, 'import numpy as np\n'), ((1447, 1469), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (1456, 1469), False, 'from dask.array.utils import assert_eq\n'), ((1482, 1519), 'dask.array.arange', 'da.arange', (['(2)', '(13)'], {'chunks': '(5)', 'dtype': 'int'}), '(2, 13, chunks=5, dtype=int)\n', (1491, 1519), True, 'import dask.array as da\n'), ((1532, 1559), 'numpy.arange', 'np.arange', (['(2)', '(13)'], {'dtype': 'int'}), '(2, 13, dtype=int)\n', (1541, 1559), True, 'import numpy as np\n'), ((1564, 1586), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (1573, 1586), False, 'from dask.array.utils import assert_eq\n'), ((1857, 1889), 'dask.array.arange', 'da.arange', (['(0)', '(1)', '(-0.5)'], {'chunks': '(20)'}), '(0, 1, -0.5, chunks=20)\n', (1866, 1889), True, 'import dask.array as da\n'), ((1902, 1923), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(-0.5)'], {}), '(0, 1, -0.5)\n', (1911, 1923), True, 'import numpy as np\n'), ((1928, 1950), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (1937, 1950), False, 'from dask.array.utils import assert_eq\n'), ((1963, 1995), 'dask.array.arange', 'da.arange', (['(0)', '(-1)', '(0.5)'], {'chunks': '(20)'}), '(0, -1, 0.5, chunks=20)\n', (1972, 1995), True, 'import dask.array as 
da\n'), ((2008, 2029), 'numpy.arange', 'np.arange', (['(0)', '(-1)', '(0.5)'], {}), '(0, -1, 0.5)\n', (2017, 2029), True, 'import numpy as np\n'), ((2034, 2056), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (2043, 2056), False, 'from dask.array.utils import assert_eq\n'), ((2360, 2409), 'dask.array.arange', 'da.arange', (['(3.3)', '(-9.1)', '(-0.25)'], {'chunks': '(3)', 'dtype': '"""i8"""'}), "(3.3, -9.1, -0.25, chunks=3, dtype='i8')\n", (2369, 2409), True, 'import dask.array as da\n'), ((2421, 2460), 'numpy.arange', 'np.arange', (['(3.3)', '(-9.1)', '(-0.25)'], {'dtype': '"""i8"""'}), "(3.3, -9.1, -0.25, dtype='i8')\n", (2430, 2460), True, 'import numpy as np\n'), ((2464, 2486), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (2473, 2486), False, 'from dask.array.utils import assert_eq\n'), ((2530, 2565), 'dask.array.arange', 'da.arange', (['(2.0)', '(13.0)', '(0.3)'], {'chunks': '(4)'}), '(2.0, 13.0, 0.3, chunks=4)\n', (2539, 2565), True, 'import dask.array as da\n'), ((2575, 2600), 'numpy.arange', 'np.arange', (['(2.0)', '(13.0)', '(0.3)'], {}), '(2.0, 13.0, 0.3)\n', (2584, 2600), True, 'import numpy as np\n'), ((2602, 2624), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (2611, 2624), False, 'from dask.array.utils import assert_eq\n'), ((2637, 2672), 'dask.array.arange', 'da.arange', (['(7.7)', '(1.5)', '(-0.8)'], {'chunks': '(3)'}), '(7.7, 1.5, -0.8, chunks=3)\n', (2646, 2672), True, 'import dask.array as da\n'), ((2684, 2709), 'numpy.arange', 'np.arange', (['(7.7)', '(1.5)', '(-0.8)'], {}), '(7.7, 1.5, -0.8)\n', (2693, 2709), True, 'import numpy as np\n'), ((2713, 2735), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (2722, 2735), False, 'from dask.array.utils import assert_eq\n'), ((2748, 2780), 'dask.array.arange', 'da.arange', (['(0)', '(1)', '(0.01)'], {'chunks': '(20)'}), '(0, 1, 0.01, 
chunks=20)\n', (2757, 2780), True, 'import dask.array as da\n'), ((2793, 2814), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (2802, 2814), True, 'import numpy as np\n'), ((2819, 2841), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (2828, 2841), False, 'from dask.array.utils import assert_eq\n'), ((2854, 2886), 'dask.array.arange', 'da.arange', (['(0)', '(1)', '(0.03)'], {'chunks': '(20)'}), '(0, 1, 0.03, chunks=20)\n', (2863, 2886), True, 'import dask.array as da\n'), ((2899, 2920), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.03)'], {}), '(0, 1, 0.03)\n', (2908, 2920), True, 'import numpy as np\n'), ((2925, 2947), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (2934, 2947), False, 'from dask.array.utils import assert_eq\n'), ((3339, 3361), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (3348, 3361), False, 'from dask.array.utils import assert_eq\n'), ((3534, 3556), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (3543, 3556), False, 'from dask.array.utils import assert_eq\n'), ((3569, 3605), 'dask.array.indices', 'da.indices', (['(0,)', 'float'], {'chunks': '(1,)'}), '((0,), float, chunks=(1,))\n', (3579, 3605), True, 'import dask.array as da\n'), ((3618, 3641), 'numpy.indices', 'np.indices', (['(0,)', 'float'], {}), '((0,), float)\n', (3628, 3641), True, 'import numpy as np\n'), ((3720, 3742), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (3729, 3742), False, 'from dask.array.utils import assert_eq\n'), ((3755, 3801), 'dask.array.indices', 'da.indices', (['(0, 1, 2)', 'float'], {'chunks': '(1, 1, 2)'}), '((0, 1, 2), float, chunks=(1, 1, 2))\n', (3765, 3801), True, 'import dask.array as da\n'), ((3814, 3842), 'numpy.indices', 'np.indices', (['(0, 1, 2)', 'float'], {}), '((0, 1, 2), float)\n', (3824, 3842), True, 'import numpy as 
np\n'), ((3921, 3943), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (3930, 3943), False, 'from dask.array.utils import assert_eq\n'), ((3978, 4007), 'dask.array.indices', 'da.indices', (['(1,)'], {'chunks': '(1,)'}), '((1,), chunks=(1,))\n', (3988, 4007), True, 'import dask.array as da\n'), ((4020, 4036), 'numpy.indices', 'np.indices', (['(1,)'], {}), '((1,))\n', (4030, 4036), True, 'import numpy as np\n'), ((4041, 4063), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (4050, 4063), False, 'from dask.array.utils import assert_eq\n'), ((4076, 4112), 'dask.array.indices', 'da.indices', (['(1,)', 'float'], {'chunks': '(1,)'}), '((1,), float, chunks=(1,))\n', (4086, 4112), True, 'import dask.array as da\n'), ((4125, 4148), 'numpy.indices', 'np.indices', (['(1,)', 'float'], {}), '((1,), float)\n', (4135, 4148), True, 'import numpy as np\n'), ((4153, 4175), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (4162, 4175), False, 'from dask.array.utils import assert_eq\n'), ((4188, 4221), 'dask.array.indices', 'da.indices', (['(2, 1)'], {'chunks': '(2, 1)'}), '((2, 1), chunks=(2, 1))\n', (4198, 4221), True, 'import dask.array as da\n'), ((4234, 4252), 'numpy.indices', 'np.indices', (['(2, 1)'], {}), '((2, 1))\n', (4244, 4252), True, 'import numpy as np\n'), ((4257, 4279), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (4266, 4279), False, 'from dask.array.utils import assert_eq\n'), ((4292, 4325), 'dask.array.indices', 'da.indices', (['(2, 3)'], {'chunks': '(1, 2)'}), '((2, 3), chunks=(1, 2))\n', (4302, 4325), True, 'import dask.array as da\n'), ((4338, 4356), 'numpy.indices', 'np.indices', (['(2, 3)'], {}), '((2, 3))\n', (4348, 4356), True, 'import numpy as np\n'), ((4361, 4383), 'dask.array.utils.assert_eq', 'assert_eq', (['darr', 'nparr'], {}), '(darr, nparr)\n', (4370, 4383), False, 'from dask.array.utils import 
assert_eq\n'), ((2989, 3014), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3002, 3014), False, 'import pytest\n'), ((3024, 3040), 'dask.array.indices', 'da.indices', (['(1,)'], {}), '((1,))\n', (3034, 3040), True, 'import dask.array as da\n'), ((3085, 3110), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3098, 3110), False, 'import pytest\n'), ((2099, 2121), 'dask.array.arange', 'da.arange', (['(5)'], {'chunks': '(2)'}), '(5, chunks=2)\n', (2108, 2121), True, 'import dask.array as da\n'), ((2131, 2143), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2140, 2143), True, 'import numpy as np\n'), ((670, 709), 'dask.array.linspace', 'da.linspace', (['(1.4)', '(4.9)'], {'chunks': '(5)', 'num': '(13)'}), '(1.4, 4.9, chunks=5, num=13)\n', (681, 709), True, 'import dask.array as da\n'), ((738, 777), 'dask.array.linspace', 'da.linspace', (['(1.4)', '(4.9)'], {'chunks': '(5)', 'num': '(13)'}), '(1.4, 4.9, chunks=5, num=13)\n', (749, 777), True, 'import dask.array as da\n'), ((804, 845), 'dask.array.linspace', 'da.linspace', (['(6)', '(49)'], {'chunks': '(5)', 'dtype': 'float'}), '(6, 49, chunks=5, dtype=float)\n', (815, 845), True, 'import dask.array as da\n'), ((874, 915), 'dask.array.linspace', 'da.linspace', (['(6)', '(49)'], {'chunks': '(5)', 'dtype': 'float'}), '(6, 49, chunks=5, dtype=float)\n', (885, 915), True, 'import dask.array as da\n'), ((1606, 1632), 'dask.array.arange', 'da.arange', (['(2)', '(13)'], {'chunks': '(5)'}), '(2, 13, chunks=5)\n', (1615, 1632), True, 'import dask.array as da\n'), ((1661, 1687), 'dask.array.arange', 'da.arange', (['(2)', '(13)'], {'chunks': '(5)'}), '(2, 13, chunks=5)\n', (1670, 1687), True, 'import dask.array as da\n'), ((1714, 1751), 'dask.array.arange', 'da.arange', (['(77)'], {'chunks': '(13)', 'dtype': 'float'}), '(77, chunks=13, dtype=float)\n', (1723, 1751), True, 'import dask.array as da\n'), ((1780, 1817), 'dask.array.arange', 'da.arange', (['(77)'], {'chunks': 
'(13)', 'dtype': 'float'}), '(77, chunks=13, dtype=float)\n', (1789, 1817), True, 'import dask.array as da\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
import pandas as pd
import os
from scipy.stats import chi2_contingency
def chi_squared_yates(
    no_Gold, no_Resections, no_No_Surgery,
    no_Gold_absent_term, no_Resections_absent_term, no_No_Surgery_absent_term,
    two_outcomes=True, print_numbers=False):
    """Run a chi-squared test of independence on term-presence counts.

    Builds a 2x2 (``two_outcomes=True``) or 2x3 contingency table whose first
    row holds the outcome counts for records where the term is present and
    whose second row holds the counts where it is absent, then runs
    ``scipy.stats.chi2_contingency`` (Yates' correction is applied
    automatically by scipy for the 2x2 case).

    Returns a pair: the significance-star string to place in the table
    ("****" down to "*", or "-" when p >= 0.05), and a multi-line string
    with the chi2 statistic, p-value, DOF and expected counts.
    """
    if two_outcomes:
        present_row = [no_Gold, no_No_Surgery + no_Resections]
        absent_row = [no_Gold_absent_term,
                      no_No_Surgery_absent_term + no_Resections_absent_term]
    else:
        # three outcomes: keep the resection / no-surgery columns separate
        present_row = [no_Gold, no_Resections, no_No_Surgery]
        absent_row = [no_Gold_absent_term, no_Resections_absent_term,
                      no_No_Surgery_absent_term]
    obs = np.array([present_row, absent_row])
    chi_sq, p_value, dof, exp_arr = chi2_contingency(obs)

    # Translate the p-value into the usual significance stars.
    table_chi_sq_text = "-"
    for threshold, stars in ((0.001, "****"), (0.01, "***"),
                             (0.025, "**"), (0.05, "*")):
        if p_value < threshold:
            table_chi_sq_text = stars
            break

    if print_numbers:
        print("Chi-Squared with Yates correction:")
        print("chi2-stat =\t{}".format(chi_sq))
        print("p-value =\t{}".format(p_value))
        print("DOF =\t{}".format(dof))
        print("expected ndarray same shape as contingency table = \n{}".format(exp_arr))

    stats_string = (
        "chi2-stat = " + str(round(chi_sq, 3)) +
        "\np-value = " + str(round(p_value, 9)) +
        "\nDOF = " + str(dof) +
        "\nexpected ndarray = \n" + str(np.around(exp_arr))
    )
    return table_chi_sq_text, stats_string
def contingency_table_two_outcomes(term,
    no_Gold, no_Resections, no_No_Surgery,
    no_Gold_absent_term, no_Resections_absent_term, no_No_Surgery_absent_term,
    save_to_folder='L:\\word_docs\\NLP\\contingency_tables\\',
    print_numberss=False,
    eps=False,
    term_regex_str=""):
    """Plot and save a 2x2 contingency-table heatmap for *term*.

    Left subplot: seaborn heatmap of the 2x2 table ("Entirely Seizure-Free"
    vs everything else, term present vs absent) annotated with significance
    stars from :func:`chi_squared_yates`.  Right subplot: text dump of the
    chi-squared statistics.  The figure is saved into ``save_to_folder`` as
    ``confusion_table_2_<term>.eps`` (dpi=1200) when ``eps`` is True,
    otherwise as ``.png``.

    NOTE(review): ``print_numberss`` (double "s") is kept as-is for caller
    compatibility; it is forwarded as ``print_numbers``.
    """
    # Fall back to a generic row label when no regex string was supplied.
    if not term_regex_str:
        term_regex_str = "term"
    conf_arr = np.array([
        [no_Gold, no_No_Surgery + no_Resections],
        [no_Gold_absent_term, no_No_Surgery_absent_term + no_Resections_absent_term]
        ])
    df_cm = pd.DataFrame(conf_arr,
        index = ['present', 'absent'],
        columns = ['Entirely Seizure-Free', 'Other'])
    fig = plt.figure()
    plt.clf()
    ax = fig.add_subplot(121)
    fig.tight_layout()
    ax.set_aspect(1)
    # NOTE(review): heatmap colour scale is clamped to 0-100; counts above
    # 100 saturate the colour (annotations still show true values).
    res = sn.heatmap(df_cm, annot=True, vmin=0.0, vmax=100.0, fmt='.0f')
    plt.yticks([0.5,1.5], [term_regex_str + ' present', 'absent'], va='center')
    plt.title('''Contingency Table \n Term: {}
    '''.format(term))
    # add chi-squared test *'s to the top left cell in 2 by 2 table
    table_chi_sq_text, stats_string = chi_squared_yates(
        no_Gold, no_Resections, no_No_Surgery,
        no_Gold_absent_term, no_Resections_absent_term, no_No_Surgery_absent_term,
        print_numbers=print_numberss)
    # Star annotation is positioned in axes-relative coordinates.
    left, width = .25, .5
    bottom, height = .25, .5
    right = left + width
    top = bottom + height
    ax.text(
        0.25*(left+right), 0.65*(bottom+top), table_chi_sq_text,
        horizontalalignment='center',
        verticalalignment='center',
        fontsize=15, color='black',
        transform=ax.transAxes)
    # add subplot with only text of stats read out
    ax2 = fig.add_subplot(122)
    plt.title('''Chi-Squared with Yates correction''')
    ax2.text(0.4*(left+right), 0.8*(bottom+top), stats_string,
        horizontalalignment='center',
        verticalalignment='center',
        fontsize=9, color='black')
    # #remove axes
    sn.despine(left=True, top=True, right=True, bottom=True)
    #ax.set_frame_on(False)
    plt.axis('off')
    # save
    if eps:
        filename = 'confusion_table_2_' + str(term) + '.eps'
        filename_and_path = os.path.join(save_to_folder, filename)
        plt.savefig(filename_and_path, format='eps', bbox_inches='tight', dpi=1200)
    else:
        filename = 'confusion_table_2_' + str(term) + '.png'
        filename_and_path = os.path.join(save_to_folder, filename)
        plt.savefig(filename_and_path, format='png', bbox_inches='tight')
def contingency_table_three_outcomes(term,
    no_Gold, no_Resections, no_No_Surgery,
    no_Gold_absent_term, no_Resections_absent_term, no_No_Surgery_absent_term,
    save_to_folder='L:\\word_docs\\NLP\\contingency_tables\\',
    print_numberss=False,
    eps=False,
    term_regex_str=""):
    """Plot and save a 2x3 contingency-table heatmap for *term*.

    Like :func:`contingency_table_two_outcomes` but keeps the three outcome
    columns ("Entirely Seizure-Free", "Resections", "No Surgery") separate,
    so the chi-squared test runs on a 2x3 table (DOF = 2; scipy applies no
    Yates correction when DOF > 1).  The figure is saved into
    ``save_to_folder`` as ``confusion_table_3_<term>.eps``/``.png``.

    NOTE(review): unlike the 2x2 variant, ``term_regex_str`` is used without
    a fallback here — an empty string yields a bare ' present' y-tick label.
    """
    conf_arr = np.array([
        [no_Gold, no_Resections, no_No_Surgery],
        [no_Gold_absent_term, no_Resections_absent_term, no_No_Surgery_absent_term]
        ])
    df_cm = pd.DataFrame(conf_arr,
        index = ['present', 'absent'],
        columns = ['Entirely Seizure-Free', 'Resections', 'No Surgery'])
    fig = plt.figure()
    plt.clf()
    ax = fig.add_subplot(121)
    fig.tight_layout()
    ax.set_aspect(1)
    # Colour scale clamped to 0-100, as in the 2x2 variant.
    res = sn.heatmap(df_cm, annot=True, vmin=0.0, vmax=100.0, fmt='.0f')
    plt.yticks([0.5,1.5], [term_regex_str + ' present', 'absent'],va='center')
    plt.title('''Contingency Table \n Term: {}
    '''.format(term))
    # add chi-squared *'s to the top left cell in 2 by 2 table
    table_chi_sq_text, stats_string = chi_squared_yates(
        no_Gold, no_Resections, no_No_Surgery,
        no_Gold_absent_term, no_Resections_absent_term, no_No_Surgery_absent_term,
        two_outcomes=False, print_numbers=print_numberss)
    # Star annotation positioned in axes-relative coordinates (narrower
    # x-offset than the 2x2 version because there are three columns).
    left, width = .25, .5
    bottom, height = .25, .5
    right = left + width
    top = bottom + height
    ax.text(
        0.5*0.33*(left+right), 0.65*(bottom+top), table_chi_sq_text,
        horizontalalignment='center',
        verticalalignment='center',
        fontsize=15, color='black',
        transform=ax.transAxes)
    # add subplot with only text of stats read out
    ax2 = fig.add_subplot(122)
    plt.title('''Chi-Squared''')
    ax2.text(0.4*(left+right), 0.8*(bottom+top), stats_string,
        horizontalalignment='center',
        verticalalignment='center',
        fontsize=9, color='black')
    # #remove axes
    sn.despine(left=True, top=True, right=True, bottom=True)
    #ax.set_frame_on(False)
    plt.axis('off')
    # save
    if eps:
        filename = 'confusion_table_3_' + str(term) + '.eps'
        filename_and_path = os.path.join(save_to_folder, filename)
        plt.savefig(filename_and_path, format='eps', bbox_inches='tight', dpi=1200)
    else:
        filename = 'confusion_table_3_' + str(term) + '.png'
        filename_and_path = os.path.join(save_to_folder, filename)
        plt.savefig(filename_and_path, format='png', bbox_inches='tight')
"matplotlib.pyplot.savefig",
"scipy.stats.chi2_contingency",
"seaborn.despine",
"matplotlib.pyplot.clf",
"os.path.join",
"seaborn.heatmap",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"numpy.around",
"pandas.DataFrame",
"matplotlib.pyplot.... | [((1095, 1116), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['obs'], {}), '(obs)\n', (1111, 1116), False, 'from scipy.stats import chi2_contingency\n'), ((2516, 2651), 'numpy.array', 'np.array', (['[[no_Gold, no_No_Surgery + no_Resections], [no_Gold_absent_term, \n no_No_Surgery_absent_term + no_Resections_absent_term]]'], {}), '([[no_Gold, no_No_Surgery + no_Resections], [no_Gold_absent_term, \n no_No_Surgery_absent_term + no_Resections_absent_term]])\n', (2524, 2651), True, 'import numpy as np\n'), ((2735, 2835), 'pandas.DataFrame', 'pd.DataFrame', (['conf_arr'], {'index': "['present', 'absent']", 'columns': "['Entirely Seizure-Free', 'Other']"}), "(conf_arr, index=['present', 'absent'], columns=[\n 'Entirely Seizure-Free', 'Other'])\n", (2747, 2835), True, 'import pandas as pd\n'), ((2887, 2899), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2897, 2899), True, 'import matplotlib.pyplot as plt\n'), ((2905, 2914), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2912, 2914), True, 'import matplotlib.pyplot as plt\n'), ((3002, 3064), 'seaborn.heatmap', 'sn.heatmap', (['df_cm'], {'annot': '(True)', 'vmin': '(0.0)', 'vmax': '(100.0)', 'fmt': '""".0f"""'}), "(df_cm, annot=True, vmin=0.0, vmax=100.0, fmt='.0f')\n", (3012, 3064), True, 'import seaborn as sn\n'), ((3070, 3146), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.5, 1.5]', "[term_regex_str + ' present', 'absent']"], {'va': '"""center"""'}), "([0.5, 1.5], [term_regex_str + ' present', 'absent'], va='center')\n", (3080, 3146), True, 'import matplotlib.pyplot as plt\n'), ((4080, 4126), 'matplotlib.pyplot.title', 'plt.title', (['"""Chi-Squared with Yates correction"""'], {}), "('Chi-Squared with Yates correction')\n", (4089, 4126), True, 'import matplotlib.pyplot as plt\n'), ((4341, 4397), 'seaborn.despine', 'sn.despine', ([], {'left': '(True)', 'top': '(True)', 'right': '(True)', 'bottom': '(True)'}), '(left=True, top=True, right=True, bottom=True)\n', 
(4351, 4397), True, 'import seaborn as sn\n'), ((4430, 4445), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4438, 4445), True, 'import matplotlib.pyplot as plt\n'), ((5323, 5455), 'numpy.array', 'np.array', (['[[no_Gold, no_Resections, no_No_Surgery], [no_Gold_absent_term,\n no_Resections_absent_term, no_No_Surgery_absent_term]]'], {}), '([[no_Gold, no_Resections, no_No_Surgery], [no_Gold_absent_term,\n no_Resections_absent_term, no_No_Surgery_absent_term]])\n', (5331, 5455), True, 'import numpy as np\n'), ((5540, 5659), 'pandas.DataFrame', 'pd.DataFrame', (['conf_arr'], {'index': "['present', 'absent']", 'columns': "['Entirely Seizure-Free', 'Resections', 'No Surgery']"}), "(conf_arr, index=['present', 'absent'], columns=[\n 'Entirely Seizure-Free', 'Resections', 'No Surgery'])\n", (5552, 5659), True, 'import pandas as pd\n'), ((5711, 5723), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5721, 5723), True, 'import matplotlib.pyplot as plt\n'), ((5729, 5738), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5736, 5738), True, 'import matplotlib.pyplot as plt\n'), ((5826, 5888), 'seaborn.heatmap', 'sn.heatmap', (['df_cm'], {'annot': '(True)', 'vmin': '(0.0)', 'vmax': '(100.0)', 'fmt': '""".0f"""'}), "(df_cm, annot=True, vmin=0.0, vmax=100.0, fmt='.0f')\n", (5836, 5888), True, 'import seaborn as sn\n'), ((5894, 5970), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.5, 1.5]', "[term_regex_str + ' present', 'absent']"], {'va': '"""center"""'}), "([0.5, 1.5], [term_regex_str + ' present', 'absent'], va='center')\n", (5904, 5970), True, 'import matplotlib.pyplot as plt\n'), ((6922, 6946), 'matplotlib.pyplot.title', 'plt.title', (['"""Chi-Squared"""'], {}), "('Chi-Squared')\n", (6931, 6946), True, 'import matplotlib.pyplot as plt\n'), ((7161, 7217), 'seaborn.despine', 'sn.despine', ([], {'left': '(True)', 'top': '(True)', 'right': '(True)', 'bottom': '(True)'}), '(left=True, top=True, right=True, bottom=True)\n', (7171, 
7217), True, 'import seaborn as sn\n'), ((7250, 7265), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7258, 7265), True, 'import matplotlib.pyplot as plt\n'), ((596, 731), 'numpy.array', 'np.array', (['[[no_Gold, no_No_Surgery + no_Resections], [no_Gold_absent_term, \n no_No_Surgery_absent_term + no_Resections_absent_term]]'], {}), '([[no_Gold, no_No_Surgery + no_Resections], [no_Gold_absent_term, \n no_No_Surgery_absent_term + no_Resections_absent_term]])\n', (604, 731), True, 'import numpy as np\n'), ((853, 985), 'numpy.array', 'np.array', (['[[no_Gold, no_Resections, no_No_Surgery], [no_Gold_absent_term,\n no_Resections_absent_term, no_No_Surgery_absent_term]]'], {}), '([[no_Gold, no_Resections, no_No_Surgery], [no_Gold_absent_term,\n no_Resections_absent_term, no_No_Surgery_absent_term]])\n', (861, 985), True, 'import numpy as np\n'), ((4560, 4598), 'os.path.join', 'os.path.join', (['save_to_folder', 'filename'], {}), '(save_to_folder, filename)\n', (4572, 4598), False, 'import os\n'), ((4607, 4682), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename_and_path'], {'format': '"""eps"""', 'bbox_inches': '"""tight"""', 'dpi': '(1200)'}), "(filename_and_path, format='eps', bbox_inches='tight', dpi=1200)\n", (4618, 4682), True, 'import matplotlib.pyplot as plt\n'), ((4783, 4821), 'os.path.join', 'os.path.join', (['save_to_folder', 'filename'], {}), '(save_to_folder, filename)\n', (4795, 4821), False, 'import os\n'), ((4830, 4895), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename_and_path'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(filename_and_path, format='png', bbox_inches='tight')\n", (4841, 4895), True, 'import matplotlib.pyplot as plt\n'), ((7380, 7418), 'os.path.join', 'os.path.join', (['save_to_folder', 'filename'], {}), '(save_to_folder, filename)\n', (7392, 7418), False, 'import os\n'), ((7427, 7502), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename_and_path'], {'format': '"""eps"""', 
'bbox_inches': '"""tight"""', 'dpi': '(1200)'}), "(filename_and_path, format='eps', bbox_inches='tight', dpi=1200)\n", (7438, 7502), True, 'import matplotlib.pyplot as plt\n'), ((7603, 7641), 'os.path.join', 'os.path.join', (['save_to_folder', 'filename'], {}), '(save_to_folder, filename)\n', (7615, 7641), False, 'import os\n'), ((7650, 7715), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename_and_path'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(filename_and_path, format='png', bbox_inches='tight')\n", (7661, 7715), True, 'import matplotlib.pyplot as plt\n'), ((1961, 1979), 'numpy.around', 'np.around', (['exp_arr'], {}), '(exp_arr)\n', (1970, 1979), True, 'import numpy as np\n')] |
# Code to split the dataset into train/validation/test.
import argparse
import os
import time
from collections import defaultdict
import math
import numpy as np
import csv
import hickle as hkl
import glob
from sklearn.model_selection import train_test_split
import pdb
import random
from generate_data import getmap
from tqdm import tqdm
np.random.seed(123)


def write_rows_csv(path, rows):
    """Write one value per line to *path* as a single-column CSV."""
    with open(path, 'w') as f:
        wr = csv.writer(f, delimiter=',')
        for row in rows:
            wr.writerow([row])


def expand_ref_files(ego_files):
    """For each ego file, glob its matching '*_ref_*' siblings and flatten.

    The ref pattern is built by dropping the last three '_'-separated
    fields of the ego filename and appending '_ref_*'.
    """
    ref_lists = [glob.glob("_".join(filename.split('_')[:-3]) + '_ref_*')
                 for filename in ego_files]
    return sum(ref_lists, [])


if __name__ == "__main__":
    ego_file = glob.glob('/data/INTERACTION-Dataset-DR-v1_1/processed_data/pkl/*ego*')
    y = np.arange(len(ego_file))
    # Test set: 10% of total data.
    ego_temp, ego_test_files, _, _ = train_test_split(ego_file, y, test_size=0.1, random_state=42)
    y = np.arange(len(ego_temp))
    # Validation set: 5% of total data (5/90 of the remaining 90%).
    ego_train_files, ego_val_files, _, _ = train_test_split(ego_temp, y, test_size=5. / 90., random_state=42)

    split_dir = "/data/INTERACTION-Dataset-DR-v1_1/processed_data/train_test_split/"
    write_rows_csv(split_dir + "ego_train_set.csv", ego_train_files)
    write_rows_csv(split_dir + "ego_val_set.csv", ego_val_files)
    write_rows_csv(split_dir + "ego_test_set.csv", ego_test_files)

    # Reference (non-ego) files matched to each ego split.
    ref_train_file = expand_ref_files(ego_train_files)
    write_rows_csv(split_dir + "ref_train_set.csv", ref_train_file)
    ref_val_file = expand_ref_files(ego_val_files)
    write_rows_csv(split_dir + "ref_val_set.csv", ref_val_file)
    ref_test_file = expand_ref_files(ego_test_files)
    write_rows_csv(split_dir + "ref_test_set.csv", ref_test_file)

    # Report sizes (second number flags duplicates if it differs).
    print('ego_train_files', len(ego_train_files), len(set(ego_train_files)))
    print('ego_val_files', len(ego_val_files), len(set(ego_val_files)))
    print('ego_test_files', len(ego_test_files), len(set(ego_test_files)))
    print('ref_train_file', len(ref_train_file), len(set(ref_train_file)))
    print('ref_val_file', len(ref_val_file), len(set(ref_val_file)))
    print('ref_test_file', len(ref_test_file), len(set(ref_test_file)))
    print('train/val/test split done')
| [
"sklearn.model_selection.train_test_split",
"csv.writer",
"numpy.random.seed",
"glob.glob"
] | [((360, 379), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (374, 379), True, 'import numpy as np\n'), ((426, 497), 'glob.glob', 'glob.glob', (['"""/data/INTERACTION-Dataset-DR-v1_1/processed_data/pkl/*ego*"""'], {}), "('/data/INTERACTION-Dataset-DR-v1_1/processed_data/pkl/*ego*')\n", (435, 497), False, 'import glob\n'), ((570, 631), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ego_file', 'y'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(ego_file, y, test_size=0.1, random_state=42)\n', (586, 631), False, 'from sklearn.model_selection import train_test_split\n'), ((741, 809), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ego_temp', 'y'], {'test_size': '(5.0 / 90.0)', 'random_state': '(42)'}), '(ego_temp, y, test_size=5.0 / 90.0, random_state=42)\n', (757, 809), False, 'from sklearn.model_selection import train_test_split\n'), ((1562, 1590), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1572, 1590), False, 'import csv\n'), ((1720, 1748), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1730, 1748), False, 'import csv\n'), ((1881, 1909), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1891, 1909), False, 'import csv\n'), ((2211, 2239), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (2221, 2239), False, 'import csv\n'), ((2537, 2550), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2547, 2550), False, 'import csv\n'), ((2845, 2858), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2855, 2858), False, 'import csv\n')] |
from estimator_adaptative import EstimatorAdaptative
from mpl_toolkits.mplot3d import Axes3D
from grid_search import GridSearch
import matplotlib.pyplot as plt
import matplotlib as mpl
from utils import *
import numpy as np
import os
import sys
data_path = '../../databases'
PlotsDirectory = '../plots/Week2/task2/'
if not os.path.exists(PlotsDirectory):
os.makedirs(PlotsDirectory)
names = ['highway', 'fall', 'traffic']
estimation_range = [np.array([1050, 1200]), np.array([1460, 1510]), np.array([950, 1000])]
prediction_range = [np.array([1201, 1350]), np.array([1511, 1560]), np.array([1001, 1050])]
a = [{'min':2, 'max':3, 'step':0.01}, {'min':3, 'max':6, 'step':0.1},{'min':0, 'max':6, 'step':0.05}]
r = [{'min':0.13, 'max':0.21, 'step':0.001}, {'min':0, 'max':0.12, 'step':0.01},{'min':0, 'max':0.4, 'step':0.005}]
for i in range(len(names)):
if len(sys.argv) > 1:
i = names.index(str(sys.argv[1]))
print('computing ' + names[i] +' ...')
[X_est, y_est] = load_data(data_path, names[i], estimation_range[i], grayscale=True)
[X_pred, y_pred] = load_data(data_path, names[i], prediction_range[i], grayscale=True)
alpha_range = np.arange(a[i].get('min'),a[i].get('max'),a[i].get('step'))
rho_range = np.arange(r[i].get('min'),r[i].get('max'),r[i].get('step'))
parameters = {'alpha': alpha_range, 'rho': rho_range}
gs = GridSearch(EstimatorAdaptative(metric="f1"), parameters)
gs.fitAndPredict(X_est, X_pred, None, y_pred)
print('best_metric: '+str(gs.best_score))
print('best_params: '+str(gs.best_params))
scores = np.array(gs.results).reshape(len(parameters['alpha']), len(parameters['rho']))
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(rho_range, alpha_range)
Z = np.array(gs.results).reshape(len(alpha_range), len(rho_range))
# Plot the surface.
ax.set_zlim(0, 1)
ax.set_title(names[i])
ax.set_xlabel('rho')
ax.set_ylabel('alpha')
ax.set_zlabel('F1-score')
ax.ticklabel_format(style='sci')
colormap = plt.cm.viridis
normalize = mpl.colors.Normalize(vmin=0, vmax=max(gs.results))
ax.plot_surface(X, Y, Z, cmap=colormap, norm=normalize)
plt.show()
if len(sys.argv) > 1:
break
| [
"os.path.exists",
"estimator_adaptative.EstimatorAdaptative",
"os.makedirs",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"matplotlib.pyplot.show"
] | [((325, 355), 'os.path.exists', 'os.path.exists', (['PlotsDirectory'], {}), '(PlotsDirectory)\n', (339, 355), False, 'import os\n'), ((361, 388), 'os.makedirs', 'os.makedirs', (['PlotsDirectory'], {}), '(PlotsDirectory)\n', (372, 388), False, 'import os\n'), ((449, 471), 'numpy.array', 'np.array', (['[1050, 1200]'], {}), '([1050, 1200])\n', (457, 471), True, 'import numpy as np\n'), ((473, 495), 'numpy.array', 'np.array', (['[1460, 1510]'], {}), '([1460, 1510])\n', (481, 495), True, 'import numpy as np\n'), ((497, 518), 'numpy.array', 'np.array', (['[950, 1000]'], {}), '([950, 1000])\n', (505, 518), True, 'import numpy as np\n'), ((540, 562), 'numpy.array', 'np.array', (['[1201, 1350]'], {}), '([1201, 1350])\n', (548, 562), True, 'import numpy as np\n'), ((564, 586), 'numpy.array', 'np.array', (['[1511, 1560]'], {}), '([1511, 1560])\n', (572, 586), True, 'import numpy as np\n'), ((588, 610), 'numpy.array', 'np.array', (['[1001, 1050]'], {}), '([1001, 1050])\n', (596, 610), True, 'import numpy as np\n'), ((1681, 1693), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1691, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1740, 1775), 'numpy.meshgrid', 'np.meshgrid', (['rho_range', 'alpha_range'], {}), '(rho_range, alpha_range)\n', (1751, 1775), True, 'import numpy as np\n'), ((2202, 2212), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2210, 2212), True, 'import matplotlib.pyplot as plt\n'), ((1387, 1419), 'estimator_adaptative.EstimatorAdaptative', 'EstimatorAdaptative', ([], {'metric': '"""f1"""'}), "(metric='f1')\n", (1406, 1419), False, 'from estimator_adaptative import EstimatorAdaptative\n'), ((1590, 1610), 'numpy.array', 'np.array', (['gs.results'], {}), '(gs.results)\n', (1598, 1610), True, 'import numpy as np\n'), ((1784, 1804), 'numpy.array', 'np.array', (['gs.results'], {}), '(gs.results)\n', (1792, 1804), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2012, <NAME>
# All rights reserved.
# This file is part of PyDSM.
# PyDSM is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyDSM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyDSM. If not, see <http://www.gnu.org/licenses/>.
"""
Computation of the impulse response of a DT filter (:mod:`pydsm.ir`)
====================================================================
Compute (approximating by truncation) the impulse response
of a discrete time filter, trying to (roughly) guess the appropriate
truncation.
.. currentmodule:: pydsm.ir
Functions
---------
.. autosummary::
:toctree: generated/
guess_ir_length -- Guess appropriate truncation length
impulse_response -- Compute impulse response of DT filter
"""
from __future__ import division, print_function
import numpy as np
import scipy as sp
__import__("scipy.signal")
__all__ = ["impulse_response", "guess_ir_length"]
def impulse_response(h, m=None, db=80):
    """
    Computes the filter impulse response

    Parameters
    ----------
    h : tuple_like
        the filter definition either in zpk or in nd form.

    Returns
    -------
    ir : ndarray
        the truncated impulse response

    Other Parameters
    ----------------
    m : int, optional
        number of samples to keep; when None, a length is guessed with
        :func:`guess_ir_length` using the `db` hint.
    db : real, optional
        attenuation hint (in dB) used only when `m` is None (default 80).

    Notes
    -----
    The automatic length guess is extremely rough; see
    :func:`guess_ir_length` in this module for details.
    """
    # A 3-tuple is interpreted as (z, p, k); anything else as (b, a).
    if len(h) == 3:
        num, den = sp.signal.zpk2tf(*h)
        # zpk2tf can return complex coefficients; the filter is real.
        num, den = num.real, den.real
    else:
        num, den = h
    length = m if m is not None else guess_ir_length(h, db)
    # Feed a unit impulse through the filter.
    impulse = np.zeros(length)
    impulse[0] = 1
    return sp.signal.lfilter(num, den, impulse)
def guess_ir_length(h, db=80):
    """
    Tries to estimate an appropriate length for the filter response

    Parameters
    ----------
    h : tuple_like
        the filter definition either in zpk or in nd form.
    db : real, optional
        attenuation hint (log scale) for the truncation point; larger
        values give longer estimates. Defaults to 80.

    Returns
    -------
    m : int
        a guess about the number of samples needed to represent the
        impulse response with the required accuracy

    Notes
    -----
    The guess looks only at the slowest-decaying pole and the point where
    its response falls below -db. It can be far too optimistic with
    overlapping or nearly coincident poles. Do not use it for filters
    with poles in 1.
    """
    # Work in zpk form throughout.
    if len(h) == 2:
        h = sp.signal.tf2zpk(*h)
    poles = h[1]
    # One sample per zero, plus one for the initial sample.
    n_zero_samples = len(h[0]) + 1
    if len(poles) == 0:
        n_decay_samples = 0
    else:
        # Decay rate per sample of each pole is |log|p||; a pole at the
        # origin gives log(0) = -inf, which we deliberately allow.
        old_err = np.seterr(divide='ignore')
        rates = np.log(np.abs(poles))
        np.seterr(**old_err)
        # 1/slowest is the time constant (in samples) of the slowest pole.
        slowest = np.min(np.abs(rates))
        # Scale the time constant so the transient is attenuated by db dB.
        n_decay_samples = int(np.ceil(db / 20 * np.log(10) / slowest))
    return n_decay_samples + n_zero_samples
| [
"numpy.abs",
"numpy.log",
"numpy.zeros",
"scipy.signal.lfilter",
"scipy.signal.zpk2tf",
"scipy.signal.tf2zpk",
"numpy.seterr"
] | [((2394, 2405), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (2402, 2405), True, 'import numpy as np\n'), ((2432, 2460), 'scipy.signal.lfilter', 'sp.signal.lfilter', (['b', 'a', 'ins'], {}), '(b, a, ins)\n', (2449, 2460), True, 'import scipy as sp\n'), ((2243, 2263), 'scipy.signal.zpk2tf', 'sp.signal.zpk2tf', (['*h'], {}), '(*h)\n', (2259, 2263), True, 'import scipy as sp\n'), ((3473, 3493), 'scipy.signal.tf2zpk', 'sp.signal.tf2zpk', (['*h'], {}), '(*h)\n', (3489, 3493), True, 'import scipy as sp\n'), ((3963, 3989), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (3972, 3989), True, 'import numpy as np\n'), ((4030, 4045), 'numpy.seterr', 'np.seterr', ([], {}), '(**os)\n', (4039, 4045), True, 'import numpy as np\n'), ((4010, 4020), 'numpy.abs', 'np.abs', (['pp'], {}), '(pp)\n', (4016, 4020), True, 'import numpy as np\n'), ((4097, 4107), 'numpy.abs', 'np.abs', (['sr'], {}), '(sr)\n', (4103, 4107), True, 'import numpy as np\n'), ((4309, 4319), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (4315, 4319), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import torch
import numpy as np
import torch.nn.functional as F
from nnlib.load_time_series import load_data
from nnlib.utils.general_utils import reshape_3d_rest
dtype = torch.float
device = torch.device("cpu")
# device = torch.device("conv1D_cuda:2") # Uncomment this to run on GPU
np.random.seed(231)
# dataset = "Adiac"
dataset = "50words"
# dataset = "Herring"
# dataset = "InlineSkate"
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# x = train_set_x[0]
x = np.array([1.0, 2, 3, 4, 5, 6, 7, 8])
filter_size = 4
# full_filter = train_set_x[1]
# filters = full_filter[:filter_size].copy()
# filters = np.random.randn(filter_size)
filters = np.array([1.0, 2, 0, 1])
repetitions = 20
exec_number = 1
b = np.array([0])
stride = 1
mode = "full"
if mode == "valid":
padding = 0
elif mode == "full":
padding = len(filters) - 1
conv_param = {'stride': stride, 'pad': padding}
timings = []
errors = []
fraction = 0.99
x = reshape_3d_rest(x)
filters = reshape_3d_rest(filters)
xtorch = torch.from_numpy(x)
filtertorch = torch.from_numpy(filters)
print(xtorch)
# conv1d = torch.nn.Conv1d(in_channels=1, out_channels=1, kernel_size=filter_size, stride=stride, padding=padding,
# bias=False)
# conv1d.forward(input=xtorch)
result = F.conv1d(input=xtorch, weight=filtertorch, bias=None, stride=stride, padding=padding, dilation=1, groups=1)
result_pytorch = result.numpy()
print("result pytorch: ", result_pytorch)
| [
"nnlib.load_time_series.load_data",
"torch.nn.functional.conv1d",
"torch.from_numpy",
"nnlib.utils.general_utils.reshape_3d_rest",
"numpy.array",
"numpy.random.seed",
"torch.device"
] | [((217, 236), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (229, 236), False, 'import torch\n'), ((310, 329), 'numpy.random.seed', 'np.random.seed', (['(231)'], {}), '(231)\n', (324, 329), True, 'import numpy as np\n'), ((430, 448), 'nnlib.load_time_series.load_data', 'load_data', (['dataset'], {}), '(dataset)\n', (439, 448), False, 'from nnlib.load_time_series import load_data\n'), ((591, 627), 'numpy.array', 'np.array', (['[1.0, 2, 3, 4, 5, 6, 7, 8]'], {}), '([1.0, 2, 3, 4, 5, 6, 7, 8])\n', (599, 627), True, 'import numpy as np\n'), ((771, 795), 'numpy.array', 'np.array', (['[1.0, 2, 0, 1]'], {}), '([1.0, 2, 0, 1])\n', (779, 795), True, 'import numpy as np\n'), ((835, 848), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (843, 848), True, 'import numpy as np\n'), ((1060, 1078), 'nnlib.utils.general_utils.reshape_3d_rest', 'reshape_3d_rest', (['x'], {}), '(x)\n', (1075, 1078), False, 'from nnlib.utils.general_utils import reshape_3d_rest\n'), ((1089, 1113), 'nnlib.utils.general_utils.reshape_3d_rest', 'reshape_3d_rest', (['filters'], {}), '(filters)\n', (1104, 1113), False, 'from nnlib.utils.general_utils import reshape_3d_rest\n'), ((1124, 1143), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1140, 1143), False, 'import torch\n'), ((1158, 1183), 'torch.from_numpy', 'torch.from_numpy', (['filters'], {}), '(filters)\n', (1174, 1183), False, 'import torch\n'), ((1394, 1505), 'torch.nn.functional.conv1d', 'F.conv1d', ([], {'input': 'xtorch', 'weight': 'filtertorch', 'bias': 'None', 'stride': 'stride', 'padding': 'padding', 'dilation': '(1)', 'groups': '(1)'}), '(input=xtorch, weight=filtertorch, bias=None, stride=stride,\n padding=padding, dilation=1, groups=1)\n', (1402, 1505), True, 'import torch.nn.functional as F\n')] |
import os
import cv2
import numpy as np
from PIL import Image
# Local Binary Patterns Histograms face recognizer (requires opencv-contrib).
recognizer = cv2.face.LBPHFaceRecognizer_create()
# Directory containing the training images, named like <prefix>.<ID>.<ext>.
path='dataSet'
def getImagesWithID(path):
    """Load grayscale face images from *path* and parse their numeric IDs.

    Filenames are expected to look like ``<prefix>.<ID>.<ext>``; the ID is
    taken from the second dot-separated field.  Each image is briefly shown
    in a "training" window while it is loaded.

    :return: (numpy array of int IDs, list of uint8 grayscale image arrays)
    """
    faces = []
    IDs = []
    for entry in os.listdir(path):
        image_path = os.path.join(path, entry)
        grey = Image.open(image_path).convert('L')
        pixels = np.array(grey, 'uint8')
        face_id = int(os.path.split(image_path)[-1].split('.')[1])
        faces.append(pixels)
        print(face_id)
        IDs.append(face_id)
        cv2.imshow("training", pixels)
        cv2.waitKey(10)
    return np.array(IDs), faces
# Collect the training images and their labels, then fit the LBPH model.
IDs,faces=getImagesWithID(path)
recognizer.train(faces,np.array(IDs))
# Persist the trained model so a detector script can reload it later.
recognizer.save('recognizer/TrainingData.yml')
cv2.destroyAllWindows()
| [
"os.listdir",
"PIL.Image.open",
"os.path.join",
"cv2.face.LBPHFaceRecognizer_create",
"cv2.imshow",
"os.path.split",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.waitKey"
] | [((77, 113), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (111, 113), False, 'import cv2\n'), ((713, 736), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (734, 736), False, 'import cv2\n'), ((651, 664), 'numpy.array', 'np.array', (['IDs'], {}), '(IDs)\n', (659, 664), True, 'import numpy as np\n'), ((173, 194), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (185, 194), False, 'import os\n'), ((345, 371), 'numpy.array', 'np.array', (['faceImg', '"""uint8"""'], {}), "(faceImg, 'uint8')\n", (353, 371), True, 'import numpy as np\n'), ((509, 539), 'cv2.imshow', 'cv2.imshow', (['"""training"""', 'faceNp'], {}), "('training', faceNp)\n", (519, 539), False, 'import cv2\n'), ((547, 562), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (558, 562), False, 'import cv2\n'), ((574, 587), 'numpy.array', 'np.array', (['IDs'], {}), '(IDs)\n', (582, 587), True, 'import numpy as np\n'), ((203, 219), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (213, 219), False, 'import os\n'), ((294, 315), 'PIL.Image.open', 'Image.open', (['imagepath'], {}), '(imagepath)\n', (304, 315), False, 'from PIL import Image\n'), ((386, 410), 'os.path.split', 'os.path.split', (['imagepath'], {}), '(imagepath)\n', (399, 410), False, 'import os\n')] |
import torch
import torchvision.transforms as T
import numpy as np
import cv2
from PIL import Image
class DictBatch(object):
    """Batch container that merges a list of per-sample dicts of tensors.

    For every key in the samples, the corresponding tensors are concatenated
    along dim 0 and exposed as an attribute of the batch, e.g.
    ``batch.image``.  The list of key names is stored on the instance as
    ``self.keys`` (a plain list attribute).

    Fix: the original class also defined a ``keys()`` *method*, which was
    unreachable — the ``self.keys`` list attribute set in ``__init__``
    shadows it on every instance, so ``batch.keys()`` raised
    ``TypeError: 'list' object is not callable``.  The dead method has been
    removed; ``batch.keys`` (the attribute) behaves exactly as before.
    """

    def __init__(self, data):
        """
        :param data: list of dicts mapping the same keys to Tensors.
            NOTE(review): assumes every dict lists its values in the same
            key order as ``data[0]`` — TODO confirm upstream.
        """
        self.keys = list(data[0].keys())
        # Transpose the list-of-dicts into one tuple of tensors per key.
        values = list(zip(*[list(d.values()) for d in data]))
        for idx, key in enumerate(self.keys):
            setattr(self, key, torch.cat(values[idx], dim=0))

    def pin_memory(self):
        """Pin every tensor attribute (faster async host-to-GPU copies)."""
        for key in self.keys:
            attr = getattr(self, key).pin_memory()
            setattr(self, key, attr)
        return self

    def cuda(self):
        """Move every tensor attribute to the GPU."""
        for key in self.keys:
            attr = getattr(self, key).cuda()
            setattr(self, key, attr)
        return self

    def __getitem__(self, key):
        """Return the batched tensor stored under *key*, or None if absent.

        :param key: str
        """
        return getattr(self, key, None)
def collate_dict_wrapper(batch):
    """DataLoader ``collate_fn``: merge a list of sample dicts into a DictBatch."""
    return DictBatch(batch)
class ResizeNormalize(object):
    """Resize raw pixel observations and convert them to float tensors,
    optionally normalizing 0-255 values to [0, 1], scaling them, and
    moving the result to the GPU."""

    def __init__(self, size, use_cuda=False, normalize_rgb_values=False, toPIL=False, rgb_scaler=1.0):
        '''
        Used to resize, normalize and convert raw pixel observations.
        :param size: int or (height, width) tuple giving the target size
        :param use_cuda: Boolean, move the resulting tensor to the GPU
        :param normalize_rgb_values: map 0-255 rgb values to the (0-1) interval
        :param toPIL: convert the input to a PIL image before resizing
        :param rgb_scaler: extra multiplicative factor applied to the pixels
        '''
        target = (size, size) if isinstance(size, int) else size
        steps = [T.ToPILImage()] if toPIL else []
        steps.append(T.Resize(size=target))
        steps.append(T.ToTensor())
        self.scaling_operation = T.Compose(steps)
        self.normalize_rgb_values = normalize_rgb_values
        self.rgb_scaler = rgb_scaler
        self.use_cuda = use_cuda

    def __call__(self, x):
        out = self.scaling_operation(x)
        # Cast to float *before* dividing: dividing a uint8 tensor would
        # silently truncate everything to zeros.
        out = out.type(torch.FloatTensor)
        if self.normalize_rgb_values:
            out = out / 255.
        out *= self.rgb_scaler
        return out.cuda() if self.use_cuda else out

    def __repr__(self):
        return self.__class__.__name__ + '()'
class AddEgocentricInvariance(object):
    """Draw a centred cross marker on an image, giving downstream models a
    fixed egocentric anchor point."""

    def __init__(self, marker_demisize=2):
        '''
        Add a central marker to enable egocentric invariance.
        :param marker_demisize: Int, half the size of the marker.
        '''
        self.marker_demisize = marker_demisize

    def __call__(self, x):
        pixels = np.array(x)
        side = pixels.shape[-2]
        # Choose a marker colour that contrasts with the overall image:
        # black on bright images, the brightest value on dark ones.
        peak = pixels.max()
        colour = 0 if pixels.mean() > 127 else peak
        lo = int(side // 2 - self.marker_demisize)
        hi = int(side // 2 + self.marker_demisize)
        pixels[lo:hi, :, ...] = colour
        pixels[:, lo:hi, ...] = colour
        return Image.fromarray(pixels.astype('uint8'))

    def __repr__(self):
        return self.__class__.__name__ + '()'
class Rescale(object):
    """Resize an (H, W[, C]) numpy image to a fixed output size via cv2.

    Fixes two defects in the original:
    * ``cv2.resize`` expects its ``dsize`` argument as ``(width, height)``;
      the original passed ``(new_h, new_w)``, silently transposing the
      output dimensions for non-square targets.
    * an ``int`` output_size passed the assert but crashed on unpacking;
      it is now normalized to a ``(size, size)`` tuple.
    """

    def __init__(self, output_size):
        """
        :param output_size: int (square output) or (height, width) tuple.
        """
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        self.output_size = output_size

    def __call__(self, sample):
        image = sample
        new_h, new_w = self.output_size
        # cv2.resize takes dsize as (width, height).
        return cv2.resize(image, (new_w, new_h))
class RescaleNormalize(object):
    """Rescale raw pixel observations (via the cv2-based Rescale transform)
    and convert them to float tensors, optionally normalized to [0, 1] and
    moved to the GPU."""

    def __init__(self, size, use_cuda=False, normalize_rgb_values=False):
        '''
        Used to resize, normalize and convert raw pixel observations.
        :param size: int or (height, width) tuple giving the target size
        :param use_cuda: Boolean, move the resulting tensor to the GPU
        :param normalize_rgb_values: map 0-255 rgb values to the (0-1) interval
        '''
        target = (size, size) if isinstance(size, int) else size
        self.scaling_operation = T.Compose([Rescale(output_size=target), T.ToTensor()])
        self.normalize_rgb_values = normalize_rgb_values
        self.use_cuda = use_cuda

    def __call__(self, x):
        out = self.scaling_operation(x)
        # Cast to float *before* dividing: dividing a uint8 tensor would
        # silently truncate everything to zeros.
        out = out.type(torch.FloatTensor)
        if self.normalize_rgb_values:
            out = out / 255.
        return out.cuda() if self.use_cuda else out

    def __repr__(self):
        return self.__class__.__name__ + '()'
| [
"torchvision.transforms.ToPILImage",
"numpy.array",
"torchvision.transforms.Resize",
"cv2.resize",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Compose",
"torch.cat"
] | [((1766, 1779), 'torchvision.transforms.Compose', 'T.Compose', (['ts'], {}), '(ts)\n', (1775, 1779), True, 'import torchvision.transforms as T\n'), ((2752, 2763), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2760, 2763), True, 'import numpy as np\n'), ((3459, 3492), 'cv2.resize', 'cv2.resize', (['image', '(new_h, new_w)'], {}), '(image, (new_h, new_w))\n', (3469, 3492), False, 'import cv2\n'), ((4214, 4227), 'torchvision.transforms.Compose', 'T.Compose', (['ts'], {}), '(ts)\n', (4223, 4227), True, 'import torchvision.transforms as T\n'), ((1671, 1690), 'torchvision.transforms.Resize', 'T.Resize', ([], {'size': 'size'}), '(size=size)\n', (1679, 1690), True, 'import torchvision.transforms as T\n'), ((1710, 1722), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1720, 1722), True, 'import torchvision.transforms as T\n'), ((4158, 4170), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (4168, 4170), True, 'import torchvision.transforms as T\n'), ((409, 438), 'torch.cat', 'torch.cat', (['values[idx]'], {'dim': '(0)'}), '(values[idx], dim=0)\n', (418, 438), False, 'import torch\n'), ((1637, 1651), 'torchvision.transforms.ToPILImage', 'T.ToPILImage', ([], {}), '()\n', (1649, 1651), True, 'import torchvision.transforms as T\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 15:19:40 2022
@author: turnerp
"""
import traceback
import numpy as np
from skimage import exposure
import cv2
import tifffile
import os
from glob2 import glob
import pandas as pd
import mat4py
import datetime
import json
import matplotlib.pyplot as plt
import hashlib
from skimage import data
from skimage.registration import phase_cross_correlation
from skimage.registration._phase_cross_correlation import _upsampled_dft
from scipy.ndimage import fourier_shift
import scipy
def get_folder(files):
    """Infer the acquisition folder and its parent from the 'path' column.

    With several paths, walk the path components from the root until the
    first level where they disagree; the shared component just above that
    level is the folder, and the one above it the parent folder.  With a
    single path, simply take its last two directory names.  Returns
    ("", "") when multiple paths never diverge.
    """
    folder, parent_folder = "", ""
    paths = files["path"].tolist()
    if len(paths) > 1:
        # One row per depth level, one column per path.
        grid = np.array([p.split("\\") for p in paths]).T
        for depth in range(len(grid)):
            if len(set(grid[depth].tolist())) != 1:
                folder = str(grid[depth - 1][0])
                parent_folder = str(grid[depth - 2][0])
                break
    else:
        parts = paths[0].split("\\")
        folder = parts[-2]
        parent_folder = parts[-3]
    return folder, parent_folder
def read_nim_directory(path):
    """Index every NIM .tif file under *path* into a sorted DataFrame.

    For each .tif, the stage position, active laser and timestamp are read
    from the JSON stored in the TIFF 'ImageDescription' tag.  Files are then
    grouped into acquisitions (a repeated laser at the same stage position
    starts a new acquisition), truncated to the first 10 acquisitions, and
    annotated with the common folder/parent folder.
    """
    if isinstance(path, list) == False:
        path = [path]
    if len(path) == 1:
        path = os.path.abspath(path[0])
        if os.path.isfile(path) == True:
            file_paths = [path]
        else:
            file_paths = glob(path + "*\**\*.tif", recursive=True)
    else:
        file_paths = path
    # Keep only .tif files.
    file_paths = [file for file in file_paths if file.split(".")[-1] == "tif"]
    file_names = [path.split("\\")[-1] for path in file_paths]
    files = pd.DataFrame(columns=["path",
                                  "file_name",
                                  "folder",
                                  "parent_folder",
                                  "posX",
                                  "posY",
                                  "posZ",
                                  "laser",
                                  "timestamp"])
    for i in range(len(file_paths)):
        path = file_paths[i]
        path = os.path.abspath(path)
        file_name = path.split("\\")[-1]
        folder = os.path.abspath(path).split("\\")[-2]
        parent_folder = os.path.abspath(path).split("\\")[-3]
        # Collect all TIFF tags; the NIM metadata lives in ImageDescription.
        with tifffile.TiffFile(path) as tif:
            tif_tags = {}
            for tag in tif.pages[0].tags.values():
                name, value = tag.name, tag.value
                tif_tags[name] = value
        if "ImageDescription" in tif_tags:
            metadata = tif_tags["ImageDescription"]
            metadata = json.loads(metadata)
            laseractive = metadata["LaserActive"]
            laserpowers = metadata["LaserPowerPercent"]
            laserwavelength_nm = metadata["LaserWavelength_nm"]
            timestamp = metadata["timestamp_us"]
            posX, posY, posZ = metadata["StagePos_um"]
            if True in laseractive:
                laseractive = np.array(laseractive, dtype=bool)
                laserpowers = np.array(laserpowers, dtype=float)
                laserwavelength_nm = np.array(laserwavelength_nm, dtype=str)
                # finds maximum active power
                power = laserpowers[laseractive == True].max()
                laser_index = np.where(laserpowers == power)
                laser = laserwavelength_nm[laser_index][0]
            else:
                # No laser was on: brightfield image.
                laser = "White Light"
            file_name = path.split("\\")[-1]
            data = [path, file_name, posX, posY, posZ, laser, timestamp]
            files.loc[len(files)] = [path, file_name, folder, parent_folder, posX, posY, posZ, laser, timestamp]
    # Round positions so repeated visits to the same stage spot compare equal.
    files[["posX", "posY", "posZ"]] = files[["posX", "posY", "posZ"]].round(decimals=1)
    files = files.sort_values(by=['posX', 'posY', 'timestamp', 'laser'], ascending=True)
    files = files.reset_index(drop=True)
    files["aquisition"] = 0
    positions = files[['posX', 'posY']].drop_duplicates()
    channels = files["laser"].drop_duplicates().to_list()
    acquisition = 0
    lasers = []
    # Seeing the same laser again at a position means a new acquisition began.
    for i in range(len(positions)):
        posX = positions["posX"].iloc[i]
        posY = positions["posY"].iloc[i]
        data = files[(files["posX"] == posX) & (files["posY"] == posY)]
        indicies = data.index.values
        for index in indicies:
            laser = files.at[index, 'laser']
            if laser in lasers:
                acquisition += 1
                lasers = [laser]
            else:
                lasers.append(laser)
            files.at[index, 'aquisition'] = acquisition
    num_measurements = len(files.aquisition.unique())
    # NOTE(review): import_limit is hard-coded to 10 here, so the
    # "None" branch below can never be taken — confirm intent.
    import_limit = 10
    if import_limit == "None":
        import_limit = num_measurements
    else:
        if int(import_limit) > num_measurements:
            import_limit = num_measurements
    acquisitions = files.aquisition.unique()[:int(import_limit)]
    files = files[files['aquisition'] <= acquisitions[-1]]
    folder, parent_folder = get_folder(files)
    files["folder"] = folder
    files["parent_folder"] = parent_folder
    measurements = files.groupby(by=['aquisition'])
    channels = files["laser"].drop_duplicates().to_list()
    channel_num = str(len(files["laser"].unique()))
    print("Found " + str(len(measurements)) + " measurments in NIM Folder with " + channel_num + " channels.")
    return files
# Index the NIM acquisition folder on the group share.
path = r"\\CMDAQ4.physics.ox.ac.uk\AKGroup\Alison\20220227_multipleabx\cam2"
# NOTE(review): this glob result is immediately overwritten by the call
# below (read_nim_directory does its own globbing) — the line is redundant.
files = glob(path + "\**\*.tif")
files = read_nim_directory(path)
# # path = files[0]
# # image = tifffile.imread(path)
# for path in files:
#     with tifffile.TiffFile(path) as tif:
#         tif_tags = {}
#         for tag in tif.pages[0].tags.values():
#             name, value = tag.name, tag.value
#             tif_tags[name] = value
#     if "ImageDescription" in tif_tags:
#         metadata = tif.pages[0].tags["ImageDescription"].value
#         metadata = json.loads(metadata)
#     else:
#         print(path)
| [
"glob2.glob",
"tifffile.TiffFile",
"json.loads",
"numpy.where",
"os.path.isfile",
"numpy.array",
"pandas.DataFrame",
"os.path.abspath"
] | [((5494, 5520), 'glob2.glob', 'glob', (["(path + '\\\\**\\\\*.tif')"], {}), "(path + '\\\\**\\\\*.tif')\n", (5498, 5520), False, 'from glob2 import glob\n'), ((1575, 1695), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['path', 'file_name', 'folder', 'parent_folder', 'posX', 'posY', 'posZ',\n 'laser', 'timestamp']"}), "(columns=['path', 'file_name', 'folder', 'parent_folder',\n 'posX', 'posY', 'posZ', 'laser', 'timestamp'])\n", (1587, 1695), True, 'import pandas as pd\n'), ((1200, 1224), 'os.path.abspath', 'os.path.abspath', (['path[0]'], {}), '(path[0])\n', (1215, 1224), False, 'import os\n'), ((2047, 2068), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (2062, 2068), False, 'import os\n'), ((1237, 1257), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1251, 1257), False, 'import os\n'), ((1340, 1383), 'glob2.glob', 'glob', (["(path + '*\\\\**\\\\*.tif')"], {'recursive': '(True)'}), "(path + '*\\\\**\\\\*.tif', recursive=True)\n", (1344, 1383), False, 'from glob2 import glob\n'), ((2242, 2265), 'tifffile.TiffFile', 'tifffile.TiffFile', (['path'], {}), '(path)\n', (2259, 2265), False, 'import tifffile\n'), ((2589, 2609), 'json.loads', 'json.loads', (['metadata'], {}), '(metadata)\n', (2599, 2609), False, 'import json\n'), ((2965, 2998), 'numpy.array', 'np.array', (['laseractive'], {'dtype': 'bool'}), '(laseractive, dtype=bool)\n', (2973, 2998), True, 'import numpy as np\n'), ((3029, 3063), 'numpy.array', 'np.array', (['laserpowers'], {'dtype': 'float'}), '(laserpowers, dtype=float)\n', (3037, 3063), True, 'import numpy as np\n'), ((3101, 3140), 'numpy.array', 'np.array', (['laserwavelength_nm'], {'dtype': 'str'}), '(laserwavelength_nm, dtype=str)\n', (3109, 3140), True, 'import numpy as np\n'), ((3289, 3319), 'numpy.where', 'np.where', (['(laserpowers == power)'], {}), '(laserpowers == power)\n', (3297, 3319), True, 'import numpy as np\n'), ((2128, 2149), 'os.path.abspath', 'os.path.abspath', (['path'], {}), 
'(path)\n', (2143, 2149), False, 'import os\n'), ((2190, 2211), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (2205, 2211), False, 'import os\n')] |
import numpy as np
import datetime
from ..nets.lstm_network import ActorCritic
import torch
import torch.optim as optim
from tqdm import trange
from tensorboardX import SummaryWriter
class Agent(object):
    """PPO actor-critic agent with a recurrent (LSTM) policy network.

    When ``ppo_parameters`` is given, the agent is set up for training
    (optimizer, TensorBoard writer, PPO hyper-parameters); otherwise it can
    only act/evaluate with loaded weights.
    """

    def __init__(self, agent_name, input_channels, network_parameters, ppo_parameters=None, n_actions=3):
        """
        :param agent_name: name used for the agents/<name>/ save directory.
        :param input_channels: number of input channels for the network.
        :param network_parameters: dict forwarded to ActorCritic.
        :param ppo_parameters: optional dict of PPO hyper-parameters
            (GAMMA, GAE_LAMBDA, LEARNING_RATE, ...); training-only state is
            created only when this is provided.
        :param n_actions: size of the discrete action space.
        """
        self.name = agent_name
        self.save_path = 'agents/' + self.name + '/'
        self.n_actions = n_actions
        self.input_channels = input_channels
        self.action_space = [i for i in range(n_actions)]
        if ppo_parameters:
            self.gamma = float(ppo_parameters['GAMMA'])
            self.lam = float(ppo_parameters['GAE_LAMBDA'])
            self.alpha = float(ppo_parameters['LEARNING_RATE'])
            self.ppo_epsilon = float(ppo_parameters['PPO_EPSILON'])
            self.entropy_beta = float(ppo_parameters['ENTROPY_BETA'])
            self.minibatch_size = int(ppo_parameters['MINI_BATCH_SIZE'])
            self.ppo_epochs = int(ppo_parameters['PPO_EPOCHS'])
            self.critic_discount = float(ppo_parameters['CRITIC_DISCOUNT'])
            # Number of consecutive minibatch "slices" trained per sample
            # (see ppo_iter, which strides the batch accordingly).
            self.training_sequence_length = 4
            self.writer = SummaryWriter(logdir=self.save_path +'logs')
            self.epochs_trained = 0
        # Autodetect CUDA
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        print('Device:', self.device, '\n')
        self.network = ActorCritic(input_channels=self.input_channels, n_actions=self.n_actions, parameters=network_parameters).to(self.device)
        print(self.network, '\n')
        if ppo_parameters:
            self.optimizer = optim.Adam(self.network.parameters(), lr=self.alpha)

    def choose_action(self, observation, hidden):
        """Sample an action from the current policy.

        Returns (action, its log-probability, state value, new LSTM hidden).
        """
        dist, value, hidden = self.network(observation, hidden)
        action = dist.sample()
        log_prob = dist.log_prob(action)
        return action, log_prob, value, hidden

    def compute_gae(self, next_value, rewards, masks, values):
        """Compute Generalized Advantage Estimation returns.

        Walks the trajectory backwards; ``masks`` zeroes the bootstrap
        across episode boundaries.  Returns the list of GAE returns
        (advantage + value) aligned with ``rewards``.
        """
        values = values + [next_value]
        gae = 0
        returns = []
        for step in reversed(range(len(rewards))):
            delta = rewards[step] + self.gamma * values[step + 1] * masks[step] - values[step]
            gae = delta + self.gamma * self.lam * masks[step] * gae
            returns.insert(0, gae + values[step])
        return returns

    def ppo_iter(self, states, actions, log_probs, returns, advantages, hiddens):
        """Yield random minibatches of training sequences.

        Each yield is (sequence, hidden_state): the sequence is
        ``training_sequence_length`` slices taken at stride 3 from random
        start indices, plus the matching LSTM hidden state.
        NOTE(review): the ``batch_size-9`` upper bound presumably keeps the
        last ``i*3`` offset (i up to 3) in range — confirm against the
        rollout layout.
        """
        batch_size = states.size(0)
        (hiddens_0, hiddens_1) = hiddens
        for _ in range(batch_size // self.minibatch_size):
            rand_ids = np.random.randint(0, batch_size-9, self.minibatch_size)
            yield [(states[rand_ids+i*3, :], actions[rand_ids+i*3], log_probs[rand_ids+i*3], returns[rand_ids+i*3, :], advantages[rand_ids+i*3, :]) for i in range(self.training_sequence_length)], (hiddens_0[rand_ids, :], hiddens_1[rand_ids])

    def learn(self, frame_idx, states, actions, log_probs, returns, advantages, hiddens):
        """Run ``ppo_epochs`` PPO updates on the given rollout and log
        averaged statistics to TensorBoard under step ``frame_idx``."""
        count_steps = 0
        sum_returns = 0.0
        sum_advantage = 0.0
        sum_loss_actor = 0.0
        sum_loss_critic = 0.0
        sum_entropy = 0.0
        sum_loss_total = 0.0
        t = trange(self.ppo_epochs, desc=f'{self.name} is learning', unit='update', leave=False)
        for _ in t:
            for seq, hidden in self.ppo_iter(states, actions, log_probs, returns, advantages, hiddens):
                # Accumulate the clipped-surrogate loss over the sequence,
                # carrying the LSTM hidden state through its slices.
                loss = torch.zeros([]).to(self.device)
                for (state, action, old_log_probs, return_, advantage) in seq:
                    dist, value, hidden = self.network(state, hidden, grads=True)
                    entropy = dist.entropy().mean()
                    new_log_probs = dist.log_prob(action)
                    ratio = (new_log_probs - old_log_probs).exp()
                    surr1 = ratio * advantage
                    surr2 = torch.clamp(ratio, 1.0 - self.ppo_epsilon, 1.0 + self.ppo_epsilon) * advantage
                    actor_loss = - torch.min(surr1, surr2).mean()
                    critic_loss = (return_ - value).pow(2).mean()
                    loss += self.critic_discount * critic_loss + actor_loss - self.entropy_beta * entropy
                loss/=self.training_sequence_length
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # NOTE(review): return_/advantage/actor_loss/critic_loss here
                # refer to the *last* slice of the sequence only.
                sum_returns += return_.mean()
                sum_advantage += advantage.mean()
                sum_loss_actor += actor_loss
                sum_loss_critic += critic_loss
                sum_loss_total += loss
                sum_entropy += entropy
                count_steps+=1
        self.writer.add_scalar("returns", sum_returns / count_steps, frame_idx)
        self.writer.add_scalar("advantage", sum_advantage / count_steps, frame_idx)
        self.writer.add_scalar("loss_actor", sum_loss_actor / count_steps, frame_idx)
        self.writer.add_scalar("loss_critic", sum_loss_critic / count_steps, frame_idx)
        self.writer.add_scalar("entropy", sum_entropy / count_steps, frame_idx)
        self.writer.add_scalar("loss_total", sum_loss_total / count_steps, frame_idx)
        self.writer.flush()

    def save_model(self):
        """Persist network and optimizer weights under the agent's save path."""
        torch.save(self.network.state_dict(), self.save_path + 'saved_model/network_weights.pt')
        torch.save(self.optimizer.state_dict(), self.save_path + 'saved_model/optimizer_weights.pt')

    def load_model(self, testing=False):
        """Restore network weights; also restore the optimizer unless testing."""
        self.network.load_state_dict(torch.load(self.save_path + 'saved_model/network_weights.pt'))
        if not testing:
            self.optimizer.load_state_dict(torch.load(self.save_path + 'saved_model/optimizer_weights.pt'))
        print('Brain succesfully loaded\n')
"tensorboardX.SummaryWriter",
"torch.load",
"torch.min",
"numpy.random.randint",
"torch.cuda.is_available",
"torch.zeros",
"tqdm.trange",
"torch.clamp",
"torch.device"
] | [((1304, 1329), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1327, 1329), False, 'import torch\n'), ((1352, 1395), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (1364, 1395), False, 'import torch\n'), ((3243, 3331), 'tqdm.trange', 'trange', (['self.ppo_epochs'], {'desc': 'f"""{self.name} is learning"""', 'unit': '"""update"""', 'leave': '(False)'}), "(self.ppo_epochs, desc=f'{self.name} is learning', unit='update',\n leave=False)\n", (3249, 3331), False, 'from tqdm import trange\n'), ((1172, 1217), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'logdir': "(self.save_path + 'logs')"}), "(logdir=self.save_path + 'logs')\n", (1185, 1217), False, 'from tensorboardX import SummaryWriter\n'), ((2641, 2698), 'numpy.random.randint', 'np.random.randint', (['(0)', '(batch_size - 9)', 'self.minibatch_size'], {}), '(0, batch_size - 9, self.minibatch_size)\n', (2658, 2698), True, 'import numpy as np\n'), ((5557, 5618), 'torch.load', 'torch.load', (["(self.save_path + 'saved_model/network_weights.pt')"], {}), "(self.save_path + 'saved_model/network_weights.pt')\n", (5567, 5618), False, 'import torch\n'), ((5687, 5750), 'torch.load', 'torch.load', (["(self.save_path + 'saved_model/optimizer_weights.pt')"], {}), "(self.save_path + 'saved_model/optimizer_weights.pt')\n", (5697, 5750), False, 'import torch\n'), ((3475, 3490), 'torch.zeros', 'torch.zeros', (['[]'], {}), '([])\n', (3486, 3490), False, 'import torch\n'), ((3921, 3987), 'torch.clamp', 'torch.clamp', (['ratio', '(1.0 - self.ppo_epsilon)', '(1.0 + self.ppo_epsilon)'], {}), '(ratio, 1.0 - self.ppo_epsilon, 1.0 + self.ppo_epsilon)\n', (3932, 3987), False, 'import torch\n'), ((4037, 4060), 'torch.min', 'torch.min', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (4046, 4060), False, 'import torch\n')] |
import numpy as np
from torch.utils.data import Dataset
class GraphDataset(Dataset):
    """Dataset of dense graphs, zero-padded to the largest graph in the set.

    Each sample is a dict of float32 arrays: ``adj_matrix``
    (max_size x max_size), ``node_feature_matrix`` (max_size x num_features)
    and ``labels`` (shape (1,)).
    """

    def __init__(self, node_attributes, adj_matrices, labels):
        super(GraphDataset, self).__init__()
        # Everything is padded to the node count of the largest graph.
        self.max_size = max(m.shape[0] for m in adj_matrices)
        self.adj_matrices = self.pad_adj_matrices(adj_matrices)
        self.node_attributes = self.pad_node_attributes(node_attributes)
        self.target = [np.array([y]) for y in labels]
        self.num_features = self.node_attributes[0].shape[1]

    def pad_adj_matrices(self, adj_matrices):
        """Embed each adjacency matrix into the top-left corner of a
        max_size x max_size zero matrix."""
        padded = []
        for matrix in adj_matrices:
            n = matrix.shape[0]
            canvas = np.zeros((self.max_size, self.max_size))
            canvas[:n, :n] = matrix
            padded.append(canvas)
        return padded

    def pad_node_attributes(self, node_attributes):
        """Zero-pad each node-feature matrix to max_size rows."""
        padded = []
        for features in node_attributes:
            rows, cols = features.shape
            canvas = np.zeros((self.max_size, cols))
            canvas[:rows, :cols] = features
            padded.append(canvas)
        return padded

    def __len__(self):
        return len(self.target)

    def __getitem__(self, index):
        """Return the *index*-th graph as a dict of float32 arrays."""
        return {
            'adj_matrix': self.adj_matrices[index].astype('float32'),
            'node_feature_matrix': self.node_attributes[index].astype('float32'),
            'labels': self.target[index].astype('float32'),
        }
| [
"numpy.array",
"numpy.zeros"
] | [((531, 548), 'numpy.array', 'np.array', (['[label]'], {}), '([label])\n', (539, 548), True, 'import numpy as np\n'), ((828, 868), 'numpy.zeros', 'np.zeros', (['(self.max_size, self.max_size)'], {}), '((self.max_size, self.max_size))\n', (836, 868), True, 'import numpy as np\n'), ((1195, 1241), 'numpy.zeros', 'np.zeros', (['(self.max_size, attributes.shape[1])'], {}), '((self.max_size, attributes.shape[1]))\n', (1203, 1241), True, 'import numpy as np\n')] |
import re
import os
import sys
import random
import argparse
from datetime import datetime
import spacy
import msgpack, time
import numpy as np
import multiprocessing
import unicodedata
import collections
import torch
from torch.autograd import Variable
from apip import utils
from apip.model import DocReaderModel
# Build the CLI; utils.add_arguments registers all model/training flags.
parser = argparse.ArgumentParser(
    description='Train a Document Reader model.'
)
parser = utils.add_arguments(parser)
args = parser.parse_args()
# Disable dropout entirely when the drop_nn flag is off.
if not args.drop_nn:
    args.dropout_rate = 0.
# Toy reading-comprehension demo: each context pairs with one question and
# several acceptable answer spans, used to probe the model's alternative
# interpretations (one batch element per document).
squad_dir = 'SQuAD'
if args.squad == 2:
    squad_dir = 'SQuAD2'
docs = ['Maria had a big lunch today. She ate a sandwich. Maria ate a salad with coffee. Finally, she wandered into a store and ate an ice cream.',\
        'Parrot have learned how to reproduce human language. The Bird speaks Japanese now. In fact, the parrot speaks Russian too. And of course, british owner taught this bird how to speak English.',\
        'Manager was late for work and his boss was angry about it. It is because at first manager went to a bank. Then manager went to a friends house. Eventually, a manager went to the cafe.',
        'It is well known that dry air is mainly made up of nitrogen (78.09%) and oxygen (20.95%). However, many of us could not imagine that the rest of dry air is made of argon, carbon dioxide and other trace gases (0.94%).',
        'Africa has varied array of wild animals. The giraffes, the world\'s tallest animal, inhabit Africa. Also African elephants live here. The world\'s fastest land mammal, the cheetah, lives in Africa too.',
        'German language is wide spread in Europe. Obviously, it is mainly spoken in Germany. Moreover, it is one of the used languges in Switzerland and Austria as well.',
        'Town A is located 150 km away from town B. The towns are connected via rail system. A journey between these towns takes around 1 hour by train.',
        'Pulp Fiction is a an American crime film by <NAME>. In the movie <NAME> played Mia. Another main role was given to <NAME>. And lastly, <NAME> also played in the movie and it elevated his career.',
        'Bob keeps his postage marks in a case that is green colored. He have been collecting this marks since his childhood. The case is made of wood. The notable thing about it is that it is carved with waves.',
        'Alice was listening to Beatles yesterday. It was a sunny day, and the song \"Come Together\" fitted perfectly. Indeed, that song was very cheerful and bright.',
        ]
ques = ['What did Maria eat for lunch ?',\
        'What languages does parrot speak?',\
        'Where did manager go before the work?',
        'What does dry air comprise?',
        'What animals live in Africa?',
        'In which countries German language is spoken?',
        'How far are the two towns from each other?',
        'Who took a part in the movie?',
        'How does the Bob\'s case look like?',
        'What kind of song was Alisce listening?']
ans = [['sandwich', 'salad with coffee', 'ice cream'],\
       ['Japanese', 'Russian', 'English'],\
       ['bank', 'friend house', 'cafe'],
       ['nitrogen', 'oxygen', 'argon', 'carbon dioxide', 'trace gases'],
       ['giraffes', 'elephants', 'cheetah'],
       ['Germany', 'Switzerland', 'Austria'],
       ['150 km', '1 hour by train'],
       ['Thurman', 'Jackson', 'Travolta'],
       ['wood', 'green', 'carved with waves'],
       ['Come Together', 'cheerful', 'bright']]
# One batch element per demo document.
args.batch_size = len(docs)
def pre_proc(text):
    r"""Normalize spaces: collapse each whitespace run to a single space.

    The regex is now a raw string (r"\s+"); the original plain "\s+"
    relies on an invalid string escape that modern Python warns about
    (and a future version will reject).
    """
    text = re.sub(r'\s+', ' ', text)
    return text
def token2id(docs, vocab, unk_id=None):
    """Convert tokenized documents into lists of vocabulary indices.

    Tokens absent from *vocab* map to *unk_id*.
    """
    index = {word: position for position, word in enumerate(vocab)}
    return [[index.get(token, unk_id) for token in doc] for doc in docs]
def normalize_text(text):
    """Return *text* in canonically decomposed (Unicode NFD) form."""
    decomposed = unicodedata.normalize('NFD', text)
    return decomposed
def load_data(opt, args, contexts, questions, answers):
    """Tokenize and featurize raw contexts/questions into DrQA-style rows.

    Loads vocabulary, entity vocab and pretrained embeddings from the SQuAD
    meta.msgpack, runs spaCy over the texts, and builds per-context features
    (exact/lower/lemma question-match flags plus term frequency).

    Returns (dev rows, answers, embedding tensor, updated opt).
    """
    # max q len = 60
    # max c len = 767
    if opt['squad'] == 1:
        squad_dir = 'SQuAD'
    else:
        squad_dir = 'SQuAD2'
    with open(os.path.join(squad_dir, 'meta.msgpack'), 'rb') as f:
        meta = msgpack.load(f, encoding='utf8')
    vocab = meta['vocab']
    vocab_ent = meta['vocab_ent']
    ids_word = {i:w for i,w in enumerate(vocab)}
    embedding = torch.Tensor(meta['embedding'])
    opt['pretrained_words'] = True
    opt['vocab_size'] = embedding.size(0)
    opt['embedding_dim'] = embedding.size(1)
    if not opt['fix_embeddings']:
        # Re-randomize the <UNK> row when embeddings will be fine-tuned.
        embedding[1] = torch.normal(means=torch.zeros(opt['embedding_dim']), std=1.)
    nlp = spacy.load('en')
    context_text = [pre_proc(c) for c in contexts]
    question_text = [pre_proc(q) for q in questions]
    threads = multiprocessing.cpu_count()
    context_docs = [doc for doc in nlp.pipe(
        iter(context_text), batch_size=64, n_threads=threads)]
    question_docs = [doc for doc in nlp.pipe(
        iter(question_text), batch_size=64, n_threads=threads)]
    question_tokens = [[normalize_text(w.text) for w in doc] for doc in question_docs]
    context_tokens = [[normalize_text(w.text) for w in doc] for doc in context_docs]
    # Character spans of each context token, for mapping answers back to text.
    context_token_span = [[(w.idx, w.idx + len(w.text)) for w in doc] for doc in context_docs]
    context_tags = [[w.tag_ for w in doc] for doc in context_docs]
    context_ents = [[w.ent_type_ for w in doc] for doc in context_docs]
    context_features = []
    # For every context token: does it match a question token exactly, in
    # lowercase, or by lemma?
    for question, context in zip(question_docs, context_docs):
        question_word = {w.text for w in question}
        question_lower = {w.text.lower() for w in question}
        question_lemma = {w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower() for w in question}
        match_origin = [w.text in question_word for w in context]
        match_lower = [w.text.lower() in question_lower for w in context]
        match_lemma = [(w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower()) in question_lemma for w in context]
        context_features.append(list(zip(match_origin, match_lower, match_lemma)))
    log.info('tokens generated')
    question_ids = token2id(question_tokens, vocab, unk_id=1)
    context_ids = token2id(context_tokens, vocab, unk_id=1)
    # Per-document term frequency of each token, appended as a 4th feature.
    context_tf = []
    for doc in context_tokens:
        counter_ = collections.Counter(w.lower() for w in doc)
        total = sum(counter_.values())
        context_tf.append([counter_[w.lower()] / total for w in doc])
    context_features = [[list(w) + [tf] for w, tf in zip(doc, tfs)] for doc, tfs in
                        zip(context_features, context_tf)]
    context_tags = [[w.tag_ for w in doc] for doc in context_docs]
    vocab_tag = list(nlp.tagger.tag_names)
    context_tag_ids = token2id(context_tags, vocab_tag)
    context_ent_ids = token2id(context_ents, vocab_ent)
    # Every demo question is answerable.
    ans_exists = [1]*len(contexts)
    dev = list(zip(
        context_ids,
        context_features,
        context_tag_ids,
        context_ent_ids,
        question_ids,
        ans_exists,
        context_text,
        context_token_span
    ))
    dev_y = answers
    return dev, dev_y, embedding, opt
# set model dir
model_dir = args.model_dir
os.makedirs(model_dir, exist_ok=True)
model_dir = os.path.abspath(model_dir)
# NOTE(review): "%mm%dd" keeps literal 'm'/'d' characters after the numeric
# month/day fields (e.g. "03m15d_..."); looks intentional but confirm.
timestamp = time.strftime("%mm%dd_%H%M%S")
print("timestamp {}".format(timestamp))
current_dir = os.path.join(args.model_dir, timestamp)
os.makedirs(current_dir)
torch.set_printoptions(precision=10)
# save model configuration
s = "\nParameters:\n"
for k in sorted(args.__dict__):
    s += "{} = {} \n".format(k, args.__dict__[k])
with open(os.path.join(args.model_dir, timestamp, "about.txt"),"w") as txtf:
    txtf.write(s); print(s)
# set random seed
seed = args.seed if args.seed >= 0 else int(random.random()*1000)
print ('seed:', seed)
random.seed(seed)
torch.manual_seed(seed)
if args.cuda:
    torch.cuda.manual_seed(seed)
log = utils.setup_logger(__name__, os.path.join(current_dir,args.log_file))
def main():
    """Restore a trained Document Reader and probe its interpretations.

    Evaluates the restored model on the demo data (EM/F1), then, for each
    demo batch, forces every latent interpretation in turn and prints the
    answer produced under each one.
    """
    log.info('[program starts.]')
    dev, dev_y, embedding, opt = load_data(vars(args), args, docs, ques, ans)
    log.info('[Data loaded.ql_mask]')
    if args.resume:
        log.info('[loading previous model...]')
        checkpoint = torch.load(os.path.join(model_dir, args.restore_dir, args.resume))
        if args.resume_options:
            opt = checkpoint['config']
        state_dict = checkpoint['state_dict']
        model = DocReaderModel(opt, embedding, state_dict)
    else:
        raise RuntimeError('Include checkpoint of the trained model')
    if args.cuda:
        model.cuda()
    outputs = ""
    # evaluate restored model
    model.opt['interpret'] = False
    batches = utils.BatchGen(dev, batch_size=100, evaluation=True, gpu=args.cuda)
    predictions = []
    for i, batch in enumerate(batches):
        predictions.extend(model.predict(batch)[0])
    em, f1 = utils.score(predictions, dev_y)
    log.info("[dev EM: {} F1: {}]".format(em, f1))
    outputs += "[dev EM: {} F1: {}]\n".format(em, f1)
    with open(os.path.join(squad_dir,'meta.msgpack'), 'rb') as f:
        meta = msgpack.load(f, encoding='utf8')
    vocab = meta['vocab']
    ids_word = {i:w for i,w in enumerate(vocab)}
    # Decode a 1-D tensor of token ids back into a space-joined string.
    def to_text(inp):
        s = ""
        for ids in inp.numpy():
            s += ids_word[ids] + " "
        return s
    batches = utils.BatchGen(dev, batch_size=len(docs), evaluation=True, gpu=args.cuda)
    for i, batch in enumerate(batches):
        model.opt['interpret'] = False
        # collect predicted answers for various interpretations
        predictions, acts = model.predict_inter(batch)[:2]
        truth = np.take(dev_y, batches.indices[i], 0)
        for b in range(len(predictions)):
            em_v, f1_v = utils.score([predictions[b]], [truth[b]])
            log.warn("b={0} a={1} EM: {2:.3f} F1: {3:3f}".format(b, acts[b], em_v, f1_v))
        model.opt['interpret'] = True
        i_predictions = []
        # Force each latent interpretation `a` for the whole batch and record
        # the answers produced under it.
        for a in range(args.n_actions):
            latent_a = Variable(torch.ones(args.batch_size)*a).long().cuda()
            i_predictions.append(model.predict_inter(batch, latent_a=latent_a)[0])
        for b in range(args.batch_size):
            f1s = []
            for a in range(args.n_actions):
                em_v, f1_v = utils.score([i_predictions[a][b]], [truth[b]])
                f1s.append(f1_v)
            outputs += batch[-2][b] + '\n' + to_text(batch[5][b]) + '\n'
            outputs += "pred_a={} truth={}".format(acts[b], truth[b]) + '\n'
            for a in range(args.n_actions):
                outputs += i_predictions[a][b] + '\n'
            outputs += '\n'
    print(outputs)

if __name__ == '__main__':
    main()
| [
"apip.model.DocReaderModel",
"multiprocessing.cpu_count",
"argparse.ArgumentParser",
"torch.set_printoptions",
"spacy.load",
"apip.utils.add_arguments",
"apip.utils.score",
"numpy.take",
"random.random",
"unicodedata.normalize",
"msgpack.load",
"torch.Tensor",
"re.sub",
"torch.manual_seed"... | [((328, 397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a Document Reader model."""'}), "(description='Train a Document Reader model.')\n", (351, 397), False, 'import argparse\n'), ((413, 440), 'apip.utils.add_arguments', 'utils.add_arguments', (['parser'], {}), '(parser)\n', (432, 440), False, 'from apip import utils\n'), ((7094, 7131), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (7105, 7131), False, 'import os\n'), ((7144, 7170), 'os.path.abspath', 'os.path.abspath', (['model_dir'], {}), '(model_dir)\n', (7159, 7170), False, 'import os\n'), ((7183, 7213), 'time.strftime', 'time.strftime', (['"""%mm%dd_%H%M%S"""'], {}), "('%mm%dd_%H%M%S')\n", (7196, 7213), False, 'import msgpack, time\n'), ((7268, 7307), 'os.path.join', 'os.path.join', (['args.model_dir', 'timestamp'], {}), '(args.model_dir, timestamp)\n', (7280, 7307), False, 'import os\n'), ((7308, 7332), 'os.makedirs', 'os.makedirs', (['current_dir'], {}), '(current_dir)\n', (7319, 7332), False, 'import os\n'), ((7333, 7369), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'precision': '(10)'}), '(precision=10)\n', (7355, 7369), False, 'import torch\n'), ((7712, 7729), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7723, 7729), False, 'import random\n'), ((7730, 7753), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (7747, 7753), False, 'import torch\n'), ((3552, 3577), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'text'], {}), "('\\\\s+', ' ', text)\n", (3558, 3577), False, 'import re\n'), ((3810, 3844), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 'text'], {}), "('NFD', text)\n", (3831, 3844), False, 'import unicodedata\n'), ((4280, 4311), 'torch.Tensor', 'torch.Tensor', (["meta['embedding']"], {}), "(meta['embedding'])\n", (4292, 4311), False, 'import torch\n'), ((4565, 4581), 'spacy.load', 'spacy.load', 
(['"""en"""'], {}), "('en')\n", (4575, 4581), False, 'import spacy\n'), ((4701, 4728), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4726, 4728), False, 'import multiprocessing\n'), ((7772, 7800), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (7794, 7800), False, 'import torch\n'), ((7837, 7877), 'os.path.join', 'os.path.join', (['current_dir', 'args.log_file'], {}), '(current_dir, args.log_file)\n', (7849, 7877), False, 'import os\n'), ((8594, 8661), 'apip.utils.BatchGen', 'utils.BatchGen', (['dev'], {'batch_size': '(100)', 'evaluation': '(True)', 'gpu': 'args.cuda'}), '(dev, batch_size=100, evaluation=True, gpu=args.cuda)\n', (8608, 8661), False, 'from apip import utils\n'), ((8788, 8819), 'apip.utils.score', 'utils.score', (['predictions', 'dev_y'], {}), '(predictions, dev_y)\n', (8799, 8819), False, 'from apip import utils\n'), ((4121, 4153), 'msgpack.load', 'msgpack.load', (['f'], {'encoding': '"""utf8"""'}), "(f, encoding='utf8')\n", (4133, 4153), False, 'import msgpack, time\n'), ((7511, 7563), 'os.path.join', 'os.path.join', (['args.model_dir', 'timestamp', '"""about.txt"""'], {}), "(args.model_dir, timestamp, 'about.txt')\n", (7523, 7563), False, 'import os\n'), ((8331, 8373), 'apip.model.DocReaderModel', 'DocReaderModel', (['opt', 'embedding', 'state_dict'], {}), '(opt, embedding, state_dict)\n', (8345, 8373), False, 'from apip.model import DocReaderModel\n'), ((9007, 9039), 'msgpack.load', 'msgpack.load', (['f'], {'encoding': '"""utf8"""'}), "(f, encoding='utf8')\n", (9019, 9039), False, 'import msgpack, time\n'), ((9550, 9587), 'numpy.take', 'np.take', (['dev_y', 'batches.indices[i]', '(0)'], {}), '(dev_y, batches.indices[i], 0)\n', (9557, 9587), True, 'import numpy as np\n'), ((4053, 4092), 'os.path.join', 'os.path.join', (['squad_dir', '"""meta.msgpack"""'], {}), "(squad_dir, 'meta.msgpack')\n", (4065, 4092), False, 'import os\n'), ((7668, 7683), 'random.random', 'random.random', ([], 
{}), '()\n', (7681, 7683), False, 'import random\n'), ((8142, 8196), 'os.path.join', 'os.path.join', (['model_dir', 'args.restore_dir', 'args.resume'], {}), '(model_dir, args.restore_dir, args.resume)\n', (8154, 8196), False, 'import os\n'), ((8940, 8979), 'os.path.join', 'os.path.join', (['squad_dir', '"""meta.msgpack"""'], {}), "(squad_dir, 'meta.msgpack')\n", (8952, 8979), False, 'import os\n'), ((9655, 9696), 'apip.utils.score', 'utils.score', (['[predictions[b]]', '[truth[b]]'], {}), '([predictions[b]], [truth[b]])\n', (9666, 9696), False, 'from apip import utils\n'), ((4511, 4544), 'torch.zeros', 'torch.zeros', (["opt['embedding_dim']"], {}), "(opt['embedding_dim'])\n", (4522, 4544), False, 'import torch\n'), ((10188, 10234), 'apip.utils.score', 'utils.score', (['[i_predictions[a][b]]', '[truth[b]]'], {}), '([i_predictions[a][b]], [truth[b]])\n', (10199, 10234), False, 'from apip import utils\n'), ((9924, 9951), 'torch.ones', 'torch.ones', (['args.batch_size'], {}), '(args.batch_size)\n', (9934, 9951), False, 'import torch\n')] |
from abc import ABC, abstractmethod
import numpy as np
class Correlation(ABC):
    """Abstract base class for Kriging correlation (kernel) functions.

    Concrete correlations implement :meth:`c`; the static helpers below
    build the pairwise-difference stack and the derivative ingredients
    shared by the piecewise-linear style kernels.
    """

    @abstractmethod
    def c(self, x, s, params, dt=False, dx=False):
        """Evaluate the correlation; must be supplied by each concrete subclass."""
        pass

    @staticmethod
    def check_samples_and_return_stack(x, s):
        """Return a 3-D array whose ``[i, j, :]`` entry is ``x[i] - s[j]``."""
        x_mat, s_mat = np.atleast_2d(x), np.atleast_2d(s)
        n_x = np.size(x_mat, 0)
        n_s = np.size(s_mat, 0)
        # Replicate the x rows across the s axis and the s rows across the
        # x axis, then subtract block-wise.
        lhs = np.tile(np.swapaxes(np.atleast_3d(x_mat), 1, 2), (1, n_s, 1))
        rhs = np.tile(s_mat, (n_x, 1, 1))
        return lhs - rhs

    @staticmethod
    def derivatives(x_, s_, params):
        """Return ``(zeta, dR/dtheta terms, dR/dx terms)`` for the samples.

        ``zeta`` holds ``min(1, theta_j * |d_ij|)`` entry-wise; the two
        derivative arrays are masked to the entries where that minimum is
        strictly between 0 and 1 (elsewhere the derivative is zero).
        """
        diffs = Correlation.check_samples_and_return_stack(x_, s_)
        scaled = params * abs(diffs)
        caps = np.ones((np.size(x_, 0), np.size(s_, 0), np.size(s_, 1)))
        zeta_matrix_ = np.minimum(scaled, caps)
        # Indicator of strictly interior entries: derivative is zero both
        # where the cap of 1 was hit and where the product is exactly 0.
        interior = ((zeta_matrix_ != 0) & (zeta_matrix_ != 1)).astype(int)
        dtheta_derivs_ = interior * abs(diffs)
        dx_derivs_ = interior * params * np.sign(diffs)
        return zeta_matrix_, dtheta_derivs_, dx_derivs_
| [
"numpy.atleast_2d",
"numpy.minimum",
"numpy.size",
"numpy.sign",
"numpy.atleast_3d"
] | [((1213, 1252), 'numpy.minimum', 'np.minimum', (['after_parameters', 'comp_ones'], {}), '(after_parameters, comp_ones)\n', (1223, 1252), True, 'import numpy as np\n'), ((513, 529), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (526, 529), True, 'import numpy as np\n'), ((531, 547), 'numpy.atleast_2d', 'np.atleast_2d', (['s'], {}), '(s)\n', (544, 547), True, 'import numpy as np\n'), ((2050, 2064), 'numpy.sign', 'np.sign', (['stack'], {}), '(stack)\n', (2057, 2064), True, 'import numpy as np\n'), ((1087, 1101), 'numpy.size', 'np.size', (['x_', '(0)'], {}), '(x_, 0)\n', (1094, 1101), True, 'import numpy as np\n'), ((1103, 1117), 'numpy.size', 'np.size', (['s_', '(0)'], {}), '(s_, 0)\n', (1110, 1117), True, 'import numpy as np\n'), ((1119, 1133), 'numpy.size', 'np.size', (['s_', '(1)'], {}), '(s_, 1)\n', (1126, 1133), True, 'import numpy as np\n'), ((663, 680), 'numpy.atleast_3d', 'np.atleast_3d', (['x_'], {}), '(x_)\n', (676, 680), True, 'import numpy as np\n'), ((693, 707), 'numpy.size', 'np.size', (['s_', '(0)'], {}), '(s_, 0)\n', (700, 707), True, 'import numpy as np\n'), ((737, 751), 'numpy.size', 'np.size', (['x_', '(0)'], {}), '(x_, 0)\n', (744, 751), True, 'import numpy as np\n')] |
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import resnet
fX = theano.config.floatX
def test_zero_last_axis_partition_node():
    """With zero_ratio=0.5 along axis 0, the node must zero the last half of the input."""
    graph = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=(None,)),
         resnet._ZeroLastAxisPartitionNode("z", zero_ratio=0.5, axis=0)]
    ).network()
    fn = graph.function(["i"], ["s"])
    inputs = np.arange(10).astype(fX)
    expected = inputs.copy()
    expected[5:] = 0
    np.testing.assert_allclose(expected, fn(inputs)[0])
| [
"treeano.sandbox.nodes.resnet._ZeroLastAxisPartitionNode",
"treeano.nodes.InputNode",
"numpy.arange"
] | [((464, 477), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (473, 477), True, 'import numpy as np\n'), ((293, 325), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""i"""'], {'shape': '(None,)'}), "('i', shape=(None,))\n", (305, 325), True, 'import treeano.nodes as tn\n'), ((336, 398), 'treeano.sandbox.nodes.resnet._ZeroLastAxisPartitionNode', 'resnet._ZeroLastAxisPartitionNode', (['"""z"""'], {'zero_ratio': '(0.5)', 'axis': '(0)'}), "('z', zero_ratio=0.5, axis=0)\n", (369, 398), False, 'from treeano.sandbox.nodes import resnet\n')] |
"""Random select substitution; save substituted structure and JSON info"""
import warnings
warnings.simplefilter('ignore')
import errno
import functools
import glob
import math
import os
import random
import re
import signal
import sys
import numpy as np
import pandas as pd
import pymatgen
import shry
from ase import Atoms
from ase.io import read, write
from ase.spacegroup import Spacegroup
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.io.cif import CifParser
from pymatgen.util.string import formula_double_format
shry.const.DISABLE_PROGRESSBAR = True
from shry.core import (NeedSupercellError, PatchedSpacegroupAnalyzer,
Substitutor, TooBigError)
from shry.main import LabeledStructure
# --- Acceptance window on the number of symmetry-inequivalent structures ---
CONFIG_IRREDUCIBLE_MAX = 3e3
CONFIG_IRREDUCIBLE_MIN = 1e1
# --- Acceptance window on the raw (combinatorial) substitution count ---
TOTAL_SUBSTITUTION_MIN = 1e1
TOTAL_SUBSTITUTION_MAX = 1e4
# Seconds allowed for a single Substitutor.count() call (enforced by timeout()).
CONFIG_TIMEOUT = 120
# Supercell limits: maximum atoms per supercell, per-axis replication range.
CONFIG_CELL_MAX = 500
CONFIG_CELLVEC_MIN = 1
CONFIG_CELLVEC_MAX = 3
# Number of random attempts per CIF before giving up.
CONFIG_RETRY_N = 500
# Cap on how many distinct sites get substituted in one attempt.
MAX_SUBBED_MIN=3 # default 3
# Cap on how many species one site can be split into.
MAX_NSPIECIES_MIN=4 # default 4
SHRY_TOLERANCE=0.01 #angstrom
SHRY_ANGLE_TOLERANCE=5.0 #degree
# "Manual" periodic table for searching element within the same group
# (just so that it looks like make sense)
PT = [
    "H,Li,Na,K,Rb,Cs,Fr".split(","),  # Lazy writing
    "D,Li,Na,K,Rb,Cs,Fr".split(","),  # Workaround for Deuterium
    ",Be,Mg,Ca,Sr,Ba,Ra".split(","),
    ",,,Sc,Y,Lu,Lr".split(","),
    ",,,Ti,Zr,Hf,Rf".split(","),
    ",,,V,Nb,Ta,Db".split(","),
    ",,,Cr,Mo,W,Sg".split(","),
    ",,,Mn,Tc,Re,Bh".split(","),
    ",,,Fe,Ru,Os,Hs".split(","),
    ",,,Co,Rh,Ir,Mt".split(","),
    ",,,Ni,Pd,Pt,Ds".split(","),
    ",,,Cu,Ag,Au,Rg".split(","),
    ",,,Zn,Cd,Hg,Cn".split(","),
    ",B,Al,Ga,In,Tl,Nh".split(","),
    ",C,Si,Ge,Sn,Pb,Fl".split(","),
    ",N,P,As,Sb,Bi,Mc".split(","),
    ",O,S,Se,Te,Po,Lv".split(","),
    ",F,Cl,Br,I,At,Ts".split(","),
    "He,Ne,Ar,Kr,Xe,Rn,Og".split(","),
    # Here I just grouped like this; but anyway combinatorially identical
    "La,Ce,Pr,Nd,Pm,Sm,Eu,Gd,Tb,Dy,Ho,Er,Tm,Yb".split(
        ","
    ),  # Lu omitted because it's there
    "Ac,Th,Pa,U,Np,Pu,Am,Cm,Bk,Cf,Es,Fm,Md,No".split(","),  # Lr is up there
]
# One column per periodic-table group; "" marks empty slots.
PTDF = pd.DataFrame(PT, dtype=str).T
PTDF.fillna("", inplace=True)
PTDF = PTDF.convert_dtypes()
# Copied from SHRY
# Operates on chemical formula
# NOTE(review): the [A-z] class also matches the punctuation between Z and a
# ([, \, ], ^, _, `); probably meant [A-Za-z].
COMPONENT = re.compile(r"[A-z][a-z]*[0-9.\+\-]*[0-9.]*")
# Operates on single component
AMOUNT = re.compile(r"(?<=[\+\-A-Za-z])[0-9.]+(?![\+\-])")
# WARNING: Does not process string with spaces properly
SPECIES = re.compile(r"[A-Z][a-z]*")
# Oxidation-state token, e.g. "2+" or "1-".
OXSTATE = re.compile(r"[0-9][-+]*")
class TimeoutError(Exception):
    # Raised by the timeout() decorator's SIGALRM handler when a wrapped
    # call exceeds its time budget.
    # NOTE(review): shadows the builtin TimeoutError (Python 3.3+); renaming
    # would change the module interface, so only flagged here.
    pass
def timeout(seconds=CONFIG_TIMEOUT, error_message=os.strerror(errno.ETIME)):
    """Decorator factory: abort the wrapped call with TimeoutError after `seconds`.

    Uses SIGALRM + ITIMER_REAL, so it only works on Unix and only in the
    main thread.  `seconds` may be fractional (setitimer accepts floats).

    Fixes over the previous version: the pending timer is cancelled with
    setitimer (matching how it was armed, rather than alarm(0)), and the
    previously installed SIGALRM handler is restored afterwards so repeated
    or nested use does not permanently clobber it.
    """
    def decorator(func):
        def _handle_timeout(signum, frame):
            raise TimeoutError(error_message)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Remember the previous handler so we can restore it on exit.
            old_handler = signal.signal(signal.SIGALRM, _handle_timeout)
            signal.setitimer(signal.ITIMER_REAL, seconds)  # timer instead of alarm: sub-second support
            try:
                result = func(*args, **kwargs)
            finally:
                signal.setitimer(signal.ITIMER_REAL, 0)  # cancel any pending timer
                signal.signal(signal.SIGALRM, old_handler)  # restore prior handler
            return result
        return wrapper
    return decorator
# def random_select_from_group(number, howmuch, butnot):
def random_select_from_group(species, howmuch, exclude):
    """Pick `howmuch` elements from the periodic-table group containing `species`.

    Everything listed in `exclude` is skipped.  If the group has fewer
    eligible members than requested, fall back to sampling from the whole
    table (still honouring `exclude`).
    """
    col_index = np.where(PTDF == species)[1][0]
    column = PTDF.iloc[:, col_index]
    present = column != ""
    allowed = functools.reduce(
        lambda acc, cond: acc & cond,
        [column != excluded for excluded in exclude],
        [True] * len(present),
    )
    candidates = list(column[present & allowed])
    if howmuch > len(candidates):
        # Contingency: the group alone cannot satisfy the request, so draw
        # from every element in the table instead.
        flat = PTDF.to_numpy().flatten()
        present = flat != ""
        allowed = functools.reduce(
            lambda acc, cond: acc & cond,
            [flat != excluded for excluded in exclude],
            np.ones(present.shape, dtype=bool),
        )
        candidates = list(flat[present & allowed])
    return random.sample(candidates, howmuch)
def get_oxstate(string):
    """Parse an oxidation-state token like '2+' or '1-' from `string`; 0 if absent."""
    matches = OXSTATE.findall(string)
    if not matches:
        return 0
    token = matches[0]
    magnitude = int(token.strip("+-"))
    return -magnitude if "-" in token else magnitude
def to_oxstate_string(number):
    """Format a signed oxidation state as e.g. '2+' or '1-' (zero maps to '0+')."""
    sign = "+" if number >= 0 else "-"
    return f"{abs(number)}{sign}"
def remove_label(string):
    """Strip the oxidation-state suffix (digits plus +/-) from a site label."""
    return OXSTATE.sub("", string)
def random_scale_and_substitute(cif_filename):
    """
    Random select supercell and substitution.

    Up to CONFIG_RETRY_N attempts: pick a random supercell (within
    CONFIG_CELL_MAX atoms), randomly choose symmetry-equivalent sites to
    partially substitute with same-group elements, and accept the first
    configuration whose combinatorial substitution count lies within
    [TOTAL_SUBSTITUTION_MIN, TOTAL_SUBSTITUTION_MAX] and whose number of
    symmetry-inequivalent structures lies within
    [CONFIG_IRREDUCIBLE_MIN, CONFIG_IRREDUCIBLE_MAX].

    Returns a (SymmetrizedStructure, config-dict) pair; when all attempts
    fail, returns the unsubstituted structure with Note "autosub:failure".
    """
    @timeout()
    def get_count(ct_structure):
        # Count symmetry-inequivalent substituted structures; aborts with
        # TimeoutError after CONFIG_TIMEOUT seconds.
        substitutor = Substitutor(
            ct_structure,
            symprec=SHRY_TOLERANCE,
            angle_tolerance=SHRY_ANGLE_TOLERANCE
        )
        return substitutor.count()
    def beautiful_inted_formula(composition):
        # Integer-rounded formula string, elements sorted by electronegativity.
        inted_element_composition = composition.inted_composition.element_composition
        # Because buggy formula!
        sym_amt = inted_element_composition.get_el_amt_dict()
        syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X)
        formula = [s + formula_double_format(int(sym_amt[s]), False) for s in syms]
        return "".join(formula)
        # return composition.inted_composition.element_composition.formula.replace(
        #     " ", ""
        # )
    parser = CifParser(cif_filename)
    key = list(parser.as_dict().keys())[0]
    cif_dict = parser.as_dict()[key]
    chem_formula = cif_dict["_chemical_formula_sum"].replace(" ", "")
    structure = LabeledStructure.from_file(cif_filename)
    #print(structure)
    sga = PatchedSpacegroupAnalyzer(structure,
        symprec=SHRY_TOLERANCE,
        angle_tolerance=SHRY_ANGLE_TOLERANCE
        )
    # Cross-check the analyzed space group against the CIF metadata.
    if "_space_group_IT_number" in cif_dict:
        cif_sg_number = cif_dict["_space_group_IT_number"]
    else:
        cif_sg_number = cif_dict["_symmetry_Int_Tables_number"]
    assert sga.get_space_group_number() == int(cif_sg_number)
    lattice_type = sga.get_lattice_type()
    point_group = sga.get_point_group_symbol()
    space_group_num = sga.get_space_group_number()
    space_group = sga.get_space_group_symbol()
    #structure_id = "icsd-" + cif_dict["_database_code_ICSD"]
    structure_id = "cod-" + cif_dict["_cod_database_code"]
    for attempt in range(CONFIG_RETRY_N):
        # Select random supercell, that fits below max
        # Brute force; not taking time anyway
        print(f"---attempt={attempt}---")
        natom = len(structure)
        if natom <= CONFIG_CELL_MAX:
            while True:
                scaling_matrix = [random.randint(CONFIG_CELLVEC_MIN, CONFIG_CELLVEC_MAX) for i in range(3)]
                enlargement = functools.reduce(lambda x, y: x * y, scaling_matrix)
                if (enlargement * natom) <= CONFIG_CELL_MAX:
                    break
        else:
            # Unit cell is already at/over the cap: keep it unscaled.
            scaling_matrix = [1, 1, 1]
            enlargement = 1
        print(f"scaling_matrix={scaling_matrix}")
        #print([x.is_ordered for x in structure])
        fully_ordered = all(x.is_ordered for x in structure)
        if not fully_ordered:
            print("Partial occupancies in some of the sites.")
            raise ValueError
            # NOTE(review): unreachable — the raise above always fires.
            continue
        """
        chk_sup_structure = structure.copy()
        chk_sup_structure *= scaling_matrix
        try:
            substitutor = Substitutor(chk_sup_structure)
            count = substitutor.count()
        except (TimeoutError, TooBigError):
            continue
        print(f"inside fully_ordered.")
        print(f"count={count}")
        if count > CONFIG_IRREDUCIBLE_MAX:
            print(
                f"Scaling matrix {scaling_matrix} too big! "
                f"({count} structures without substitution)"
            )
            continue
        if count < CONFIG_IRREDUCIBLE_MIN:
            print(
                f"Scaling matrix {scaling_matrix} is not appropriate (probably too small)! "
                f"({count} structures without substitution)"
            )
            continue
        """
        # Use SymmetrizedStructure to get some properties
        sym_structure = PatchedSpacegroupAnalyzer(structure, symprec=SHRY_TOLERANCE, angle_tolerance=SHRY_ANGLE_TOLERANCE).get_symmetrized_structure()
        # Multiplicities of each equivalent sites
        multiplicities = [len(x) for x in sym_structure.equivalent_indices]
        supercell_mul = [enlargement * len(x) for x in sym_structure.equivalent_indices]
        # Index of which sites has _more than one_ multiplicities
        mulsites = [i for i, m in enumerate(supercell_mul) if m > 1]
        # Select random amount of positions to be substituted as index
        # From the index of m > 1 sites, randomly select 1-all sites to be substituted
        # Cap the selected sites to 3 as more tend to break the limit!
        # If no multiplicities: try other supercell
        if not len(mulsites):
            continue
        max_subbed = min((len(mulsites), MAX_SUBBED_MIN))
        #print(f"max_subbed={max_subbed}")
        subbed = random.sample(mulsites, random.randint(1, max_subbed))
        partitions = []
        for i in subbed:
            # Substitute to random N amount of final species
            max_nspecies = min((supercell_mul[i], MAX_NSPIECIES_MIN))
            nspecies = random.randint(2, max_nspecies) # limit to 4
            #print(f"i={i}, nspecies={nspecies}")
            # Random select each amount of final species
            # Split range(supercell_mul[i]) into nspecies non-empty
            # contiguous chunks whose lengths become the site partition.
            ranges = []
            left = 0 # Left part of the range
            pad = nspecies - 2 # Padding to ensure valid range is always selected
            for _ in range(nspecies - 1): # select but the last
                margin = supercell_mul[i] - pad
                right = random.randrange(left + 1, margin) # at least choose one
                ranges.append(range(left, right))
                pad -= 1
                left = right
            right = supercell_mul[i] # move right
            ranges.append(range(left, right))
            partitions.append([len(x) for x in ranges])
        # Convert the final ratios to fractions (or pymatgen will protest)
        fraced = [[x / sum(p) for x in p] for p in partitions]
        # Actually substitute
        # Keep track of species already present so substitutes are new elements.
        exclude_species = set()
        exclude_species |= set(SPECIES.findall(chem_formula.replace(" ", "")))
        for e, s in enumerate(subbed):
            lead_i = sym_structure.equivalent_indices[s][0]
            composition = sym_structure[lead_i].species
            composition_str = str(composition).replace(" ", "")
            # Better
            components = COMPONENT.findall(composition_str)
            specieses = [SPECIES.findall(x)[0] for x in components]
            oxstates = [get_oxstate(x) for x in components]
            oxstate_strings = [to_oxstate_string(x) for x in oxstates]
            if len(components) > 1:
                for species in specieses:
                    exclude_species -= {species}
                # Just choose first one
                species = specieses[0]
                oxstate_string = oxstate_strings[0]
            else:
                species = specieses[0]
                oxstate_string = oxstate_strings[0]
            # Don't exclude the initial species from current position
            exclude_species -= {species}
            sub_species = random_select_from_group(
                species, len(fraced[e]), exclude_species
            )
            # Perhaps (hopefully) doesn't matter
            # exclude_species |= set(sub_species)
            target_composition_dict = {
                get_el_sp(s + oxstate_string): f for s, f in zip(sub_species, fraced[e])
            }
            # Use SHRY's patch to Composition
            target_composition = Composition(target_composition_dict)
            # Apply the same partial occupancy to every member of the orbit.
            for i in sym_structure.equivalent_indices[s]:
                sym_structure.replace(
                    i, target_composition, properties=sym_structure[i].properties
                )
        # Multinomial count of raw substitution arrangements per site, with
        # the largest part left implicit (it is what remains at the end).
        combinations = []
        for p in partitions:
            n_part = sum(p)
            p_max = max(p)
            p_max_i = p.index(p_max)
            pc = p.copy()
            pc.pop(p_max_i)
            c = 1
            n_r = n_part
            for e in pc:
                c *= math.comb(n_r, e)
                n_r -= e
            combinations.append(c)
        substitutions = functools.reduce(lambda x, y: x * y, combinations)
        if substitutions >= TOTAL_SUBSTITUTION_MAX:
            print(f"Total substitutions = {substitutions:.3e} is too large >= {TOTAL_SUBSTITUTION_MAX:.3e}: rejected!", flush=True)
            print("============================================")
            # Instead return the substitution configuration
            continue
        elif substitutions <= TOTAL_SUBSTITUTION_MIN:
            print(f"Total substitutions = {substitutions:.3e} is too small <= {TOTAL_SUBSTITUTION_MIN:.3e}: rejected!", flush=True)
            print("============================================")
            # Instead return the substitution configuration
            continue
        else:
            print("TOTAL_SUBSTITUTION:")
            print(f"{TOTAL_SUBSTITUTION_MIN:.3e} < {substitutions:.3e} < {TOTAL_SUBSTITUTION_MAX:.3e}", flush=True)
        equivalent_labels = [
            list(x[0].properties["_atom_site_label"])[0]
            for x in sym_structure.equivalent_sites
        ]
        equivalent_formulas = [
            set(beautiful_inted_formula(sym_structure[i].species) for i in g)
            for g in sym_structure.equivalent_indices
        ]
        # Every orbit must carry a single composition after substitution.
        assert all(len(x) == 1 for x in equivalent_formulas)
        equivalent_formulas = [list(x)[0] for x in equivalent_formulas]
        wyckoffs = [
            f"{remove_label(equivalent_labels[x])} ({sym_structure.wyckoff_symbols[x]}) $\\rightarrow$ {equivalent_formulas[x]}"
            for x in subbed
        ]
        sub_chem_formula = beautiful_inted_formula(sym_structure.composition)
        config = {
            "Compound": chem_formula,
            "ID": structure_id,
            "LatticeType": lattice_type,
            "PointGroup": point_group,
            "SpaceGroup_No": space_group_num,
            "SpaceGroup": space_group,
            "Compositions": sub_chem_formula,
            "Wyckoffs": ", ".join(wyckoffs),
            "Supercell": "x".join(map(str, scaling_matrix)),
            "Substitutions": substitutions,
            "Note": "autosub:success",
            "Checked": None
        }
        try:
            large_structure = sym_structure.copy()
            large_structure *= scaling_matrix
            print(f"get_count starts...", flush=True)
            count = get_count(large_structure)
            #print(f"count = {count}")
            config["Equivalent Structures"] = count
        except (TimeoutError, TooBigError):
            # If too long, likely too big!
            print("TIMEOUT/ sub. was too long => likely too big")
            #print("============================================")
            continue
        except MemoryError:
            print("MemoryError/ memory overflow => likely too big")
            #print("============================================")
            continue
        except NeedSupercellError as e:
            print(config)
            print(enlargement)
            print(multiplicities)
            raise e
        if count <= CONFIG_IRREDUCIBLE_MAX and count >= CONFIG_IRREDUCIBLE_MIN:
            print(f"Expected {count:.3e} structures with this substitution: approved!")
            print("============================================")
            # Instead return the substitution configuration
            return sym_structure, config
        print(
            f"Expected {count:.3e} structures with this substitution: try again! (attempt {attempt})"
        )
        #print("============================================")
    # If all failed then return the default config: No supercell no substitution plain single structure
    config = {
        "Compound": chem_formula,
        "ID": structure_id,
        "LatticeType": lattice_type,
        "PointGroup": point_group,
        "SpaceGroup_No": space_group_num,
        "SpaceGroup": space_group,
        "Compositions": chem_formula,
        "Wyckoffs": "",
        "Supercell": "1x1x1",
        "Substitutions": 1,
        "Equivalent Structures": 1,
        "Note": "autosub:failure",
        "Checked": None
    }
    return sym_structure, config
def remove_glob(pathname, recursive=True):
    """Delete every regular file matching the glob `pathname`.

    Matching directories are left untouched; `recursive=True` lets the
    pattern use '**'.
    """
    matches = glob.glob(pathname, recursive=recursive)
    for match in filter(os.path.isfile, matches):
        os.remove(match)
def main():
    """Process each SG<n>/ directory given on the command line.

    For every *.cif in each directory, run random_scale_and_substitute(),
    write the substituted structure next to the input as *_partial.cif,
    and collect per-structure metadata into an Excel summary (written even
    on crash, via the finally block).
    """
    paths = sys.argv[1:]
    #paths = [f"SG{i}" for i in range(1,231)]
    #paths = [f"SG{i}" for i in range(10,11)]
    series = []
    try:
        for path in paths:
            #print(os.path.join(os.getcwd(),path))
            if os.path.isdir(path):
                root_dir = path
                # Drop outputs from any previous run before globbing inputs.
                remove_glob(os.path.join(root_dir,'*_partial.cif'))
                cifs = glob.glob(os.path.join(root_dir, "*.cif"))
            else:
                continue
            #root_dir = os.path.dirname(path)
            #cifs = [path]
            # NOTE(review): lstrip("SG") strips a *character set*, not a
            # prefix — fine for names like "SG10" but fragile in general.
            sg = Spacegroup(int(root_dir.lstrip("SG").rstrip("/")))
            print("============================================")
            print(f"Space group: {sg.no} ({sg.symbol})")
            print("============================================")
            for cif in cifs:
                print(f"cif = {cif}")
                cif_dir = os.path.dirname(cif)
                # NOTE(review): rstrip(".cif") strips trailing characters from
                # the set {.,c,i,f}, so names ending in those letters get
                # over-trimmed (e.g. "calcic.cif"); removesuffix would be safer.
                cif_filename = os.path.basename(cif).rstrip(".cif")
                out_cif = os.path.join(cif_dir, cif_filename + "_partial.cif")
                #cif_instance = LabeledStructure.from_file(cif)
                #reduced_str = pymatgen.symmetry.analyzer.SpacegroupAnalyzer(
                #    cif_instance
                #)
                #print("------------------------------------------")
                #print("This is the space group symmbol")
                #print(f"Space group= {reduced_str.get_space_group_symbol()}")
                #sym_str = reduced_str.get_symmetrized_structure()
                #print("This is the symmetrized structure")
                #print(sym_str)
                #print(f"=========================================")
                #print(f" ")
                #print(f" ")
                sub_str, subconfig = random_scale_and_substitute(cif)
                subconfig["File"] = cif
                serie = pd.Series(subconfig)
                series.append(serie)
                # Critical: do *NOT* refine_struct
                sub_str.to(filename=out_cif, symprec=0.01, refine_struct=False)
                out_parser = CifParser(out_cif)
                key = list(out_parser.as_dict().keys())[0]
                # Space group should _not_ change
                assert int(sg.no) == int(
                    out_parser.as_dict()[key]["_symmetry_Int_Tables_number"]
                )
                print("")
    finally:
        # Even if crash, the dataframe should be saved.
        # NOTE(review): `sg` is unbound here when no directory argument was
        # processed; that would raise NameError and mask the original error.
        df = pd.DataFrame(series)
        print(df)
        df.to_excel(f"autosub_SG{sg.no}.xls")
if __name__ == "__main__":
main()
| [
"re.compile",
"shry.main.LabeledStructure.from_file",
"shry.core.Substitutor",
"signal.alarm",
"os.strerror",
"os.remove",
"pymatgen.core.composition.Composition",
"numpy.where",
"pymatgen.io.cif.CifParser",
"functools.wraps",
"os.path.isdir",
"pandas.DataFrame",
"warnings.simplefilter",
"... | [((91, 122), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (112, 122), False, 'import warnings\n'), ((2390, 2435), 're.compile', 're.compile', (['"""[A-z][a-z]*[0-9.\\\\+\\\\-]*[0-9.]*"""'], {}), "('[A-z][a-z]*[0-9.\\\\+\\\\-]*[0-9.]*')\n", (2400, 2435), False, 'import re\n'), ((2475, 2527), 're.compile', 're.compile', (['"""(?<=[\\\\+\\\\-A-Za-z])[0-9.]+(?![\\\\+\\\\-])"""'], {}), "('(?<=[\\\\+\\\\-A-Za-z])[0-9.]+(?![\\\\+\\\\-])')\n", (2485, 2527), False, 'import re\n'), ((2591, 2616), 're.compile', 're.compile', (['"""[A-Z][a-z]*"""'], {}), "('[A-Z][a-z]*')\n", (2601, 2616), False, 'import re\n'), ((2628, 2652), 're.compile', 're.compile', (['"""[0-9][-+]*"""'], {}), "('[0-9][-+]*')\n", (2638, 2652), False, 'import re\n'), ((2238, 2265), 'pandas.DataFrame', 'pd.DataFrame', (['PT'], {'dtype': 'str'}), '(PT, dtype=str)\n', (2250, 2265), True, 'import pandas as pd\n'), ((2748, 2772), 'os.strerror', 'os.strerror', (['errno.ETIME'], {}), '(errno.ETIME)\n', (2759, 2772), False, 'import os\n'), ((4275, 4306), 'random.sample', 'random.sample', (['okgroup', 'howmuch'], {}), '(okgroup, howmuch)\n', (4288, 4306), False, 'import random\n'), ((4673, 4700), 're.sub', 're.sub', (['OXSTATE', '""""""', 'string'], {}), "(OXSTATE, '', string)\n", (4679, 4700), False, 'import re\n'), ((5602, 5625), 'pymatgen.io.cif.CifParser', 'CifParser', (['cif_filename'], {}), '(cif_filename)\n', (5611, 5625), False, 'from pymatgen.io.cif import CifParser\n'), ((5794, 5834), 'shry.main.LabeledStructure.from_file', 'LabeledStructure.from_file', (['cif_filename'], {}), '(cif_filename)\n', (5820, 5834), False, 'from shry.main import LabeledStructure\n'), ((5867, 5969), 'shry.core.PatchedSpacegroupAnalyzer', 'PatchedSpacegroupAnalyzer', (['structure'], {'symprec': 'SHRY_TOLERANCE', 'angle_tolerance': 'SHRY_ANGLE_TOLERANCE'}), '(structure, symprec=SHRY_TOLERANCE,\n angle_tolerance=SHRY_ANGLE_TOLERANCE)\n', (5892, 5969), False, 'from shry.core import 
NeedSupercellError, PatchedSpacegroupAnalyzer, Substitutor, TooBigError\n'), ((17010, 17050), 'glob.glob', 'glob.glob', (['pathname'], {'recursive': 'recursive'}), '(pathname, recursive=recursive)\n', (17019, 17050), False, 'import glob\n'), ((2900, 2921), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (2915, 2921), False, 'import functools\n'), ((4881, 4973), 'shry.core.Substitutor', 'Substitutor', (['ct_structure'], {'symprec': 'SHRY_TOLERANCE', 'angle_tolerance': 'SHRY_ANGLE_TOLERANCE'}), '(ct_structure, symprec=SHRY_TOLERANCE, angle_tolerance=\n SHRY_ANGLE_TOLERANCE)\n', (4892, 4973), False, 'from shry.core import NeedSupercellError, PatchedSpacegroupAnalyzer, Substitutor, TooBigError\n'), ((12823, 12873), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x * y)', 'combinations'], {}), '(lambda x, y: x * y, combinations)\n', (12839, 12873), False, 'import functools\n'), ((17063, 17080), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (17077, 17080), False, 'import os\n'), ((19622, 19642), 'pandas.DataFrame', 'pd.DataFrame', (['series'], {}), '(series)\n', (19634, 19642), True, 'import pandas as pd\n'), ((2972, 3018), 'signal.signal', 'signal.signal', (['signal.SIGALRM', '_handle_timeout'], {}), '(signal.SIGALRM, _handle_timeout)\n', (2985, 3018), False, 'import signal\n'), ((3031, 3076), 'signal.setitimer', 'signal.setitimer', (['signal.ITIMER_REAL', 'seconds'], {}), '(signal.ITIMER_REAL, seconds)\n', (3047, 3076), False, 'import signal\n'), ((3426, 3451), 'numpy.where', 'np.where', (['(PTDF == species)'], {}), '(PTDF == species)\n', (3434, 3451), True, 'import numpy as np\n'), ((4167, 4201), 'numpy.ones', 'np.ones', (['notnone.shape'], {'dtype': 'bool'}), '(notnone.shape, dtype=bool)\n', (4174, 4201), True, 'import numpy as np\n'), ((9483, 9512), 'random.randint', 'random.randint', (['(1)', 'max_subbed'], {}), '(1, max_subbed)\n', (9497, 9512), False, 'import random\n'), ((9718, 9749), 'random.randint', 'random.randint', 
(['(2)', 'max_nspecies'], {}), '(2, max_nspecies)\n', (9732, 9749), False, 'import random\n'), ((12195, 12231), 'pymatgen.core.composition.Composition', 'Composition', (['target_composition_dict'], {}), '(target_composition_dict)\n', (12206, 12231), False, 'from pymatgen.core.composition import Composition\n'), ((17094, 17106), 'os.remove', 'os.remove', (['p'], {}), '(p)\n', (17103, 17106), False, 'import os\n'), ((17367, 17386), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (17380, 17386), False, 'import os\n'), ((3209, 3224), 'signal.alarm', 'signal.alarm', (['(0)'], {}), '(0)\n', (3221, 3224), False, 'import signal\n'), ((6984, 7036), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x * y)', 'scaling_matrix'], {}), '(lambda x, y: x * y, scaling_matrix)\n', (7000, 7036), False, 'import functools\n'), ((8531, 8633), 'shry.core.PatchedSpacegroupAnalyzer', 'PatchedSpacegroupAnalyzer', (['structure'], {'symprec': 'SHRY_TOLERANCE', 'angle_tolerance': 'SHRY_ANGLE_TOLERANCE'}), '(structure, symprec=SHRY_TOLERANCE,\n angle_tolerance=SHRY_ANGLE_TOLERANCE)\n', (8556, 8633), False, 'from shry.core import NeedSupercellError, PatchedSpacegroupAnalyzer, Substitutor, TooBigError\n'), ((10163, 10197), 'random.randrange', 'random.randrange', (['(left + 1)', 'margin'], {}), '(left + 1, margin)\n', (10179, 10197), False, 'import random\n'), ((12029, 12058), 'pymatgen.core.periodic_table.get_el_sp', 'get_el_sp', (['(s + oxstate_string)'], {}), '(s + oxstate_string)\n', (12038, 12058), False, 'from pymatgen.core.periodic_table import get_el_sp\n'), ((12721, 12738), 'math.comb', 'math.comb', (['n_r', 'e'], {}), '(n_r, e)\n', (12730, 12738), False, 'import math\n'), ((18042, 18062), 'os.path.dirname', 'os.path.dirname', (['cif'], {}), '(cif)\n', (18057, 18062), False, 'import os\n'), ((18157, 18209), 'os.path.join', 'os.path.join', (['cif_dir', "(cif_filename + '_partial.cif')"], {}), "(cif_dir, cif_filename + '_partial.cif')\n", (18169, 18209), False, 'import 
os\n'), ((19033, 19053), 'pandas.Series', 'pd.Series', (['subconfig'], {}), '(subconfig)\n', (19042, 19053), True, 'import pandas as pd\n'), ((19252, 19270), 'pymatgen.io.cif.CifParser', 'CifParser', (['out_cif'], {}), '(out_cif)\n', (19261, 19270), False, 'from pymatgen.io.cif import CifParser\n'), ((6880, 6934), 'random.randint', 'random.randint', (['CONFIG_CELLVEC_MIN', 'CONFIG_CELLVEC_MAX'], {}), '(CONFIG_CELLVEC_MIN, CONFIG_CELLVEC_MAX)\n', (6894, 6934), False, 'import random\n'), ((17448, 17487), 'os.path.join', 'os.path.join', (['root_dir', '"""*_partial.cif"""'], {}), "(root_dir, '*_partial.cif')\n", (17460, 17487), False, 'import os\n'), ((17521, 17552), 'os.path.join', 'os.path.join', (['root_dir', '"""*.cif"""'], {}), "(root_dir, '*.cif')\n", (17533, 17552), False, 'import os\n'), ((5336, 5350), 'pymatgen.core.periodic_table.get_el_sp', 'get_el_sp', (['sym'], {}), '(sym)\n', (5345, 5350), False, 'from pymatgen.core.periodic_table import get_el_sp\n'), ((18094, 18115), 'os.path.basename', 'os.path.basename', (['cif'], {}), '(cif)\n', (18110, 18115), False, 'import os\n')] |
from __future__ import print_function
from stompy.grid import unstructured_grid
import numpy as np
import logging
log=logging.getLogger(__name__)
from shapely import geometry
import xarray as xr
# TODO: migrate to xarray
from ...io import qnc
from ... import utils
# for now, only supports 2D/3D grid - no mix with 1D
# First try - RGFGRID says it can't read it.
# okay - make sure we're outputting the same netcdf
# version...
# r17b_net.nc: first line just says netcdf r17b_net {s
# file says:
# r17b_net.nc: NetCDF Data Format data
# for default qnc output, file says its HDF5.
# okay - now it gets the nodes, but doesn't have any
# edges.
# even reading/writing the existing DFM grid does not
# get the edges.
# does including the projection definition help? nope.
def write_dfm(ug,nc_fn,overwrite=False):
    """Write unstructured grid `ug` to `nc_fn` as a D-Flow FM *_net.nc file.

    Parameters
    ----------
    ug : UnstructuredGrid
        Grid whose nodes['x'] holds (N,2) projected coordinates; optional
        'lon'/'lat'/'depth' node fields are written when present.
    nc_fn : str
        Output netCDF path. Written as NETCDF3_CLASSIC — RGFGRID does not
        read HDF5-based netCDF4 files (see notes at top of file).
    overwrite : bool
        Passed through to qnc.empty().

    Fix: NetNode_lat values were previously never written (only its
    attributes were set, unlike NetNode_lon); the latitude data is now
    stored as well.
    """
    nc=qnc.empty(fn=nc_fn,overwrite=overwrite,format='NETCDF3_CLASSIC')
    # schema copied from r17b_net.nc as written by rgfgrid
    nc.createDimension('nNetNode',ug.Nnodes())
    nc.createDimension('nNetLink',ug.Nedges())
    nc.createDimension('nNetLinkPts',2)
    node_x=nc.createVariable('NetNode_x','f8',('nNetNode'))
    node_x[:] = ug.nodes['x'][:,0]
    node_x.units='m'
    node_x.standard_name = "projection_x_coordinate"
    node_x.long_name="x-coordinate of net nodes"
    node_x.grid_mapping = "projected_coordinate_system"
    node_y=nc.createVariable('NetNode_y','f8',('nNetNode'))
    node_y[:] = ug.nodes['x'][:,1]
    node_y.units = "m"
    node_y.standard_name = "projection_y_coordinate"
    node_y.long_name = "y-coordinate of net nodes"
    node_y.grid_mapping = "projected_coordinate_system"
    if 1:
        # apparently this doesn't have to be correct -
        proj=nc.createVariable('projected_coordinate_system',
                               'i4',())
        proj.setncattr('name',"Unknown projected")
        proj.epsg = 28992
        proj.grid_mapping_name = "Unknown projected"
        proj.longitude_of_prime_meridian = 0.
        proj.semi_major_axis = 6378137.
        proj.semi_minor_axis = 6356752.314245
        proj.inverse_flattening = 298.257223563
        proj.proj4_params = ""
        proj.EPSG_code = "EPGS:28992"
        proj.projection_name = ""
        proj.wkt = ""
        proj.comment = ""
        proj.value = "value is equal to EPSG code"
        proj[...]=28992
    if ('lon' in ug.nodes.dtype.names) and ('lat' in ug.nodes.dtype.names):
        print("Will include longitude & latitude")
        node_lon=nc.createVariable('NetNode_lon','f8',('nNetNode'))
        node_lon[:]=ug.nodes['lon'][:]
        node_lon.units = "degrees_east"
        node_lon.standard_name = "longitude"
        node_lon.long_name = "longitude"
        node_lon.grid_mapping = "wgs84"
        node_lat=nc.createVariable('NetNode_lat','f8',('nNetNode'))
        node_lat[:]=ug.nodes['lat'][:]  # BUG FIX: latitude values were never written
        node_lat.units = "degrees_north"
        node_lat.standard_name = "latitude"
        node_lat.long_name = "latitude"
        node_lat.grid_mapping = "wgs84"
    if 1:
        wgs=nc.createVariable('wgs84','i4',())
        wgs.setncattr('name',"WGS84")
        wgs.epsg = 4326
        wgs.grid_mapping_name = "latitude_longitude"
        wgs.longitude_of_prime_meridian = 0.
        wgs.semi_major_axis = 6378137.
        wgs.semi_minor_axis = 6356752.314245
        wgs.inverse_flattening = 298.257223563
        wgs.proj4_params = ""
        wgs.EPSG_code = "EPGS:4326"
        wgs.projection_name = ""
        wgs.wkt = ""
        wgs.comment = ""
        wgs.value = "value is equal to EPSG code"
    if 'depth' in ug.nodes.dtype.names:
        node_z = nc.createVariable('NetNode_z','f8',('nNetNode'))
        node_z[:] = ug.nodes['depth'][:]
        node_z.units = "m"
        node_z.positive = "up"
        node_z.standard_name = "sea_floor_depth"
        node_z.long_name = "Bottom level at net nodes (flow element\'s corners)"
        node_z.coordinates = "NetNode_x NetNode_y"
        node_z.grid_mapping = "projected_coordinate_system"
    links = nc.createVariable('NetLink','i4',('nNetLink','nNetLinkPts'))
    links[:,:]=ug.edges['nodes'] + 1 # to 1-based!
    links.standard_name = "netlink"
    links.long_name = "link between two netnodes"
    link_types=nc.createVariable('NetLinkType','i4',('nNetLink'))
    link_types[:] = 2 # always seems to be 2 for these grids
    link_types.long_name = "type of netlink"
    link_types.valid_range = [0, 2]
    link_types.flag_values = [0, 1, 2]
    link_types.flag_meanings = "closed_link_between_2D_nodes link_between_1D_nodes link_between_2D_nodes"
    # global attributes - probably ought to allow passing in values for these...
    nc.institution = "SFEI et al"
    nc.references = "http://github.com/rustychris/stompy"
    nc.history = "stompy unstructured_grid"
    nc.source = "Deltares, D-Flow FM Version 1.1.135.38878MS, Feb 26 2015, 17:00:33, model"
    nc.Conventions = "CF-1.5:Deltares-0.1"
    if 1:
        # add the complines to encode islands
        lines=ug.boundary_linestrings()
        nc.createDimension('nNetCompLines',len(lines))
    # And add the cells:
    nc.createDimension('nNetElemMaxNode',ug.max_sides)
    nc.createDimension('nNetElem',ug.Ncells())
    missing=-2147483647 # DFM's preferred missing value
    cell_var=nc.createVariable('NetElemNode','i4',('nNetElem','nNetElemMaxNode'),
                               fill_value=missing)
    # what to do about missing nodes?
    cell_nodes=ug.cells['nodes'] + 1 #make it 1-based
    cell_nodes[ cell_nodes<1 ] = missing
    cell_var[:,:] =cell_nodes
    # Write the complines
    # NOTE(review): the dimension name uses i+1 while the variable names use
    # i (0-based); kept as-is to match files already written by this code.
    for i,line in enumerate(lines):
        dimname='nNetCompLineNode_%d'%(i+1)
        nc.createDimension(dimname,len(line))
        compline_x=nc.createVariable('NetCompLine_x_%d'%i,'f8',(dimname,))
        compline_y=nc.createVariable('NetCompLine_y_%d'%i,'f8',(dimname,))
        compline_x[:] = line[:,0]
        compline_y[:] = line[:,1]
    nc.close()
class DFMGrid(unstructured_grid.UnstructuredGrid):
def __init__(self,nc=None,fn=None,
cells_from_edges='auto',max_sides=6,cleanup=False):
"""
nc: An xarray dataset or path to netcdf file holding the grid
fn: path to netcdf file holding the grid (redundant with nc)
cells_from_edges: 'auto' create cells based on edges if cells do not exist in the dataset
specify True or False to force or disable this.
max_sides: maximum number of sides per cell, used both for initializing datastructures, and
for determining cells from edge connectivity.
cleanup: for grids created from multiple subdomains, there are sometime duplicate edges and nodes.
this will remove those duplicates, though there are no guarantees of indices.
"""
if nc is None:
assert fn
#nc=qnc.QDataset(fn)
# Trying out xarray instead
nc=xr.open_dataset(fn)
if isinstance(nc,str):
#nc=qnc.QDataset(nc)
nc=xr.open_dataset(nc)
#if isinstance(nc,xr.Dataset):
# raise Exception("Pass the filename or a qnc.QDataset. Not ready for xarray")
# Default names for fields
var_points_x='NetNode_x'
var_points_y='NetNode_y'
var_edges='NetLink'
var_cells='NetElemNode' # often have to infer the cells
meshes=[v for v in nc.data_vars if getattr(nc[v],'cf_role','none')=='mesh_topology']
if meshes:
mesh=nc[meshes[0]]
var_points_x,var_points_y = mesh.node_coordinates.split(' ')
var_edges=mesh.edge_node_connectivity
try:
var_cells=mesh.face_node_connectivity
except AttributeError:
var_cells='not specified'
cells_from_edges=True
# probably this ought to attempt to find a mesh variable
# with attributes that tell the correct names, and lacking
# that go with these as defaults
# seems we always get nodes and edges
edge_start_index=nc[var_edges].attrs.get('start_index',1)
kwargs=dict(points=np.array([nc[var_points_x].values,
nc[var_points_y].values]).T,
edges=nc[var_edges].values-edge_start_index)
# some nc files also have elements...
if var_cells in nc.variables:
cells=nc[var_cells].values.copy()
# missing values come back in different ways -
# might come in masked, might also have some huge negative values,
# and regardless it will be one-based.
if isinstance(cells,np.ma.MaskedArray):
cells=cells.filled(0)
if np.issubdtype(cells.dtype,np.float):
bad=np.isnan(cells)
cells=cells.astype(np.int32)
cells[bad]=0
# just to be safe, do this even if it came from Masked.
cell_start_index=nc[var_cells].attrs.get('start_index',1)
cells-=cell_start_index # force to 0-based
cells[ cells<0 ] = -1
kwargs['cells']=cells
if cells_from_edges=='auto':
cells_from_edges=False
var_depth='NetNode_z'
if var_depth in nc.variables: # have depth at nodes
kwargs['extra_node_fields']=[ ('depth','f4') ]
if cells_from_edges: # True or 'auto'
self.max_sides=max_sides
# Partition handling - at least the output of map_merge
# does *not* remap indices in edges and cells
if 'partitions_node_start' in nc.variables:
nodes_are_contiguous = np.all( np.diff(nc.partitions_node_start.values) == nc.partitions_node_count.values[:-1] )
assert nodes_are_contiguous, "Merged grids can only be handled when node indices are contiguous"
else:
nodes_are_contiguous=True
if 'partitions_edge_start' in nc.variables:
edges_are_contiguous = np.all( np.diff(nc.partitions_edge_start.values) == nc.partitions_edge_count.values[:-1] )
assert edges_are_contiguous, "Merged grids can only be handled when edge indices are contiguous"
else:
edges_are_contiguous=True
if 'partitions_face_start' in nc.variables:
faces_are_contiguous = np.all( np.diff(nc.partitions_face_start.values) == nc.partitions_face_count.values[:-1] )
assert faces_are_contiguous, "Merged grids can only be handled when face indices are contiguous"
if cleanup:
log.warning("Some MPI grids have duplicate cells, which cannot be cleaned, but cleanup=True")
else:
face_are_contiguous=True
if 0: # This is for hints to possibly handling non-contiguous indices in the future. caveat emptor.
node_offsets=nc.partitions_node_start.values-1
cell_missing=kwargs['cells']<0
if 'FlowElemDomain' in nc:
cell_domains=nc.FlowElemDomain.values # hope that's 0-based?
else:
HERE
cell_node_offsets=node_offsets[cell_domains]
kwargs['cells']+=cell_node_offsets[:,None]
# for part_i in range(nc.NumPartitionsInFile):
# edge_start=nc.partitions_edge_start.values[part_i]
# edge_count=nc.partitions_edge_count.values[part_i]
# node_start=nc.partitions_node_start.values[part_i]
# node_count=nc.partitions_node_count.values[part_i]
# cell_start=nc.partitions_face_start.values[part_i]
# cell_count=nc.partitions_face_count.values[part_i]
#
# kwargs['edges'][edge_start-1:edge_start-1+edge_count] += node_start-1
# kwargs['cells'][cell_start-1:cell_start-1+cell_count] += node_start-1
# Reset the missing nodes
kwargs['cells'][cell_missing]=-1
# And force valid values for over-the-top cells:
bad=kwargs['cells']>=len(kwargs['points'])
kwargs['cells'][bad]=0
super(DFMGrid,self).__init__(**kwargs)
if cells_from_edges:
print("Making cells from edges")
self.make_cells_from_edges()
if var_depth in nc.variables: # have depth at nodes
self.nodes['depth']=nc[var_depth].values.copy()
if cleanup:
cleanup_multidomains(self)
def cleanup_multidomains(grid):
"""
Given an unstructured grid which was the product of DFlow-FM
multiple domains stitched together, fix some of the extraneous
geometries left behind.
Grid doesn't have to have been read as a DFMGrid.
"""
log.info("Regenerating edges")
grid.make_edges_from_cells()
log.info("Removing orphaned nodes")
grid.delete_orphan_nodes()
log.info("Removing duplicate nodes")
grid.merge_duplicate_nodes()
log.info("Renumbering nodes")
grid.renumber_nodes()
log.info("Extracting grid boundary")
return grid
def polyline_to_boundary_edges(g,linestring,rrtol=3.0):
"""
Mimic FlowFM boundary edge selection from polyline to edges.
Currently does not get into any of the interpolation, just
identifies boundary edges which would be selected as part of the
boundary group.
g: UnstructuredGrid instance
linestring: [N,2] polyline data
rrtol: controls search distance away from boundary. Defaults to
roughly 3 cell length scales out from the boundary.
"""
linestring=np.asanyarray(linestring)
g.edge_to_cells()
boundary_edges=np.nonzero( np.any(g.edges['cells']<0,axis=1) )[0]
adj_cells=g.edges['cells'][boundary_edges].max(axis=1)
# some of this assumes that the grid is orthogonal, so we're not worrying
# about overridden cell centers
adj_centers=g.cells_center()[adj_cells]
edge_centers=g.edges_center()[boundary_edges]
cell_to_edge=edge_centers-adj_centers
cell_to_edge_dist=utils.dist(cell_to_edge)
outward=cell_to_edge / cell_to_edge_dist[:,None]
dis=np.maximum( 0.5*np.sqrt(g.cells_area()[adj_cells]),
cell_to_edge_dist )
probes=edge_centers+(2*rrtol*dis)[:,None]*outward
segs=np.array([adj_centers,probes]).transpose(1,0,2)
if 0: # plotting for verification
lcoll=collections.LineCollection(segs)
ax.add_collection(lcoll)
linestring_geom= geometry.LineString(linestring)
probe_geoms=[geometry.LineString(seg) for seg in segs]
hits=[idx
for idx,probe_geom in enumerate(probe_geoms)
if linestring_geom.intersects(probe_geom)]
edge_hits=boundary_edges[hits]
return edge_hits
| [
"logging.getLogger",
"numpy.diff",
"numpy.any",
"numpy.asanyarray",
"numpy.issubdtype",
"numpy.array",
"shapely.geometry.LineString",
"numpy.isnan",
"xarray.open_dataset"
] | [((118, 145), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (135, 145), False, 'import logging\n'), ((13680, 13705), 'numpy.asanyarray', 'np.asanyarray', (['linestring'], {}), '(linestring)\n', (13693, 13705), True, 'import numpy as np\n'), ((14564, 14595), 'shapely.geometry.LineString', 'geometry.LineString', (['linestring'], {}), '(linestring)\n', (14583, 14595), False, 'from shapely import geometry\n'), ((14614, 14638), 'shapely.geometry.LineString', 'geometry.LineString', (['seg'], {}), '(seg)\n', (14633, 14638), False, 'from shapely import geometry\n'), ((7008, 7027), 'xarray.open_dataset', 'xr.open_dataset', (['fn'], {}), '(fn)\n', (7023, 7027), True, 'import xarray as xr\n'), ((7108, 7127), 'xarray.open_dataset', 'xr.open_dataset', (['nc'], {}), '(nc)\n', (7123, 7127), True, 'import xarray as xr\n'), ((8838, 8874), 'numpy.issubdtype', 'np.issubdtype', (['cells.dtype', 'np.float'], {}), '(cells.dtype, np.float)\n', (8851, 8874), True, 'import numpy as np\n'), ((13764, 13800), 'numpy.any', 'np.any', (["(g.edges['cells'] < 0)"], {'axis': '(1)'}), "(g.edges['cells'] < 0, axis=1)\n", (13770, 13800), True, 'import numpy as np\n'), ((14376, 14407), 'numpy.array', 'np.array', (['[adj_centers, probes]'], {}), '([adj_centers, probes])\n', (14384, 14407), True, 'import numpy as np\n'), ((8895, 8910), 'numpy.isnan', 'np.isnan', (['cells'], {}), '(cells)\n', (8903, 8910), True, 'import numpy as np\n'), ((8245, 8305), 'numpy.array', 'np.array', (['[nc[var_points_x].values, nc[var_points_y].values]'], {}), '([nc[var_points_x].values, nc[var_points_y].values])\n', (8253, 8305), True, 'import numpy as np\n'), ((9802, 9842), 'numpy.diff', 'np.diff', (['nc.partitions_node_start.values'], {}), '(nc.partitions_node_start.values)\n', (9809, 9842), True, 'import numpy as np\n'), ((10142, 10182), 'numpy.diff', 'np.diff', (['nc.partitions_edge_start.values'], {}), '(nc.partitions_edge_start.values)\n', (10149, 10182), True, 'import numpy as 
np\n'), ((10482, 10522), 'numpy.diff', 'np.diff', (['nc.partitions_face_start.values'], {}), '(nc.partitions_face_start.values)\n', (10489, 10522), True, 'import numpy as np\n')] |
import torch
import torch.nn.functional as F
import numpy as np
import math
import random
import sys
sys.path.append("../")
from causal_graphs.variable_distributions import _random_categ
from causal_discovery.datasets import InterventionalDataset
class GraphFitting(object):
def __init__(self, model, graph, num_batches, num_graphs, theta_only_num_graphs, batch_size, lambda_sparse, max_graph_stacking=200):
"""
Creates a DistributionFitting object that summarizes all functionalities
for performing the graph fitting stage of ENCO.
Parameters
----------
model : MultivarMLP
PyTorch module of the neural networks that model the conditional
distributions.
graph : CausalDAG
Causal graph on which we want to perform causal structure learning.
num_batches : int
Number of batches to use per MC sample in the graph fitting stage.
Usually 1, only higher needed if GPU is running out of memory for
common batch sizes.
num_graphs : int
Number of graph samples to use for estimating the gradients in the
graph fitting stage. Usually in the range 20-100.
theta_only_num_graphs : int
Number of graph samples to use in the graph fitting stage if
gamma is frozen. Needs to be an even number, and usually 2 or 4.
batch_size : int
Size of the batches to use in the gradient estimators.
lambda_sparse : float
Sparsity regularizer value to use in the graph fitting stage.
max_graph_stacking : int
Number of graphs that can maximally evaluated in parallel on the device.
If you run out of GPU memory, try to lower this number. It will then
evaluate the graph sequentially, which can be slightly slower but uses
less memory.
"""
self.model = model
self.graph = graph
self.num_batches = num_batches
self.num_graphs = num_graphs
self.batch_size = batch_size
self.lambda_sparse = lambda_sparse
self.max_graph_stacking = max_graph_stacking
self.theta_only_num_graphs = theta_only_num_graphs
self.inter_vars = []
if self.graph.num_vars >= 100 or hasattr(self.graph, "data_int"):
self.dataset = InterventionalDataset(self.graph,
dataset_size=4096,
batch_size=self.batch_size)
def perform_update_step(self, gamma, theta, var_idx=-1, only_theta=False):
"""
Performs a full update step of the graph fitting stage. We first sample a batch of graphs,
evaluate them on a interventional data batch, and estimate the gradients for gamma and theta
based on the log-likelihoods.
Parameters
----------
gamma : nn.Parameter
Parameter tensor representing the gamma parameters in ENCO.
theta : nn.Parameter
Parameter tensor representing the theta parameters in ENCO.
var_idx : int
Variable on which should be intervened to obtain the update. If none is given, i.e.,
a negative value, the variable will be randomly selected.
only_theta : bool
If True, gamma is frozen and the gradients are only estimated for theta. See
Appendix D.2 in the paper for details on the gamma freezing stage.
"""
# Obtain log-likelihood estimates for randomly sampled graph structures
if not only_theta:
MC_samp = self.get_MC_samples(gamma, theta, num_batches=self.num_batches, num_graphs=self.num_graphs,
batch_size=self.batch_size, var_idx=var_idx, mirror_graphs=False)
else:
MC_samp = self.get_MC_samples(gamma, theta, num_batches=self.num_batches, num_graphs=self.theta_only_num_graphs,
batch_size=self.batch_size, var_idx=var_idx, mirror_graphs=True)
adj_matrices, log_likelihoods, var_idx = MC_samp
# Determine gradients for gamma and theta
gamma_grads, theta_grads, theta_mask = self.gradient_estimator(
adj_matrices, log_likelihoods, gamma, theta, var_idx)
gamma.grad = gamma_grads
theta.grad = theta_grads
return theta_mask, var_idx
@torch.no_grad()
def get_MC_samples(self, gamma, theta, num_batches, num_graphs, batch_size,
var_idx=-1, mirror_graphs=False):
"""
Samples and evaluates a batch of graph structures on a batch of interventional data.
Parameters
----------
gamma : nn.Parameter
Parameter tensor representing the gamma parameters in ENCO.
theta : nn.Parameter
Parameter tensor representing the theta parameters in ENCO.
num_batches : int
Number of batches to use per MC sample.
num_graphs : int
Number of graph structures to sample.
batch_size : int
Size of interventional data batches.
var_idx : int
Variable on which should be intervened to obtain the update. If none is given, i.e.,
a negative value, the variable will be randomly selected.
mirror_graphs : bool
This variable should be true if only theta is optimized. In this case, the first
half of the graph structure samples is identical to the second half, except that
the values of the outgoing edges of the intervened variable are flipped. This
allows for more efficient, low-variance gradient estimators. See details in
the paper.
"""
if mirror_graphs:
assert num_graphs % 2 == 0, "Number of graphs must be divisible by two for mirroring"
device = self.get_device()
# Sample data batch
if hasattr(self, "dataset"):
# Pre-sampled data
var_idx = self.sample_next_var_idx()
int_sample = torch.cat([self.dataset.get_batch(var_idx) for _ in range(num_batches)], dim=0).to(device)
else:
# If no dataset exists, data is newly sampled from the graph
intervention_dict, var_idx = self.sample_intervention(self.graph,
dataset_size=num_batches*batch_size,
var_idx=var_idx)
int_sample = self.graph.sample(interventions=intervention_dict,
batch_size=num_batches*batch_size,
as_array=True)
int_sample = torch.from_numpy(int_sample).long().to(device)
# Split number of graph samples acorss multiple iterations if not all can fit into memory
num_graphs_list = [min(self.max_graph_stacking, num_graphs-i*self.max_graph_stacking)
for i in range(math.ceil(num_graphs * 1.0 / self.max_graph_stacking))]
num_graphs_list = [(num_graphs_list[i], sum(num_graphs_list[:i])) for i in range(len(num_graphs_list))]
# Tensors needed for sampling
edge_prob = (torch.sigmoid(gamma) * torch.sigmoid(theta)).detach()
edge_prob_batch = edge_prob[None].expand(num_graphs, -1, -1)
# Inner function for sampling a batch of random adjacency matrices from current belief probabilities
def sample_adj_matrix():
sample_matrix = torch.bernoulli(edge_prob_batch)
sample_matrix = sample_matrix * (1 - torch.eye(sample_matrix.shape[-1], device=sample_matrix.device)[None])
if mirror_graphs: # First and second half of tensors are identical, except the intervened variable
sample_matrix[num_graphs//2:] = sample_matrix[:num_graphs//2]
sample_matrix[num_graphs//2:, var_idx] = 1 - sample_matrix[num_graphs//2:, var_idx]
sample_matrix[:, var_idx, var_idx] = 0.
return sample_matrix
# Evaluate log-likelihoods under sampled adjacency matrix and data
adj_matrices = []
log_likelihoods = []
for n_idx in range(num_batches):
batch = int_sample[n_idx*batch_size:(n_idx+1)*batch_size]
if n_idx == 0:
adj_matrix = sample_adj_matrix()
adj_matrices.append(adj_matrix)
for c_idx, (graph_count, start_idx) in enumerate(num_graphs_list):
adj_matrix_expanded = adj_matrix[start_idx:start_idx+graph_count,
None].expand(-1, batch_size, -1, -1).flatten(0, 1)
batch_exp = batch[None, :].expand(graph_count, -1, -1).flatten(0, 1)
nll = self.evaluate_likelihoods(batch_exp, adj_matrix_expanded, var_idx)
nll = nll.reshape(graph_count, batch_size, -1)
if n_idx == 0:
log_likelihoods.append(nll.mean(dim=1))
else:
log_likelihoods[c_idx] += nll.mean(dim=1)
# Combine all data
adj_matrices = torch.cat(adj_matrices, dim=0)
log_likelihoods = torch.cat(log_likelihoods, dim=0) / num_batches
return adj_matrices, log_likelihoods, var_idx
@torch.no_grad()
def gradient_estimator(self, adj_matrices, log_likelihoods, gamma, theta, var_idx):
"""
Returns the estimated gradients for gamma and theta. It uses the low-variance gradient estimators
proposed in Section 3.3 of the paper.
Parameters
----------
adj_matrices : torch.FloatTensor, shape [batch_size, num_vars, num_vars]
The adjacency matrices on which the interventional data has been evaluated on.
log_likelihoods : torch.FloatTensor, shape [batch_size, num_vars]
The average log-likelihood under the adjacency matrices for all variables
in the graph.
gamma : nn.Parameter
Parameter tensor representing the gamma parameters in ENCO.
theta : nn.Parameter
Parameter tensor representing the theta parameters in ENCO.
var_idx : int
Variable on which the intervention was performed.
"""
batch_size = adj_matrices.shape[0]
log_likelihoods = log_likelihoods.unsqueeze(dim=1)
orient_probs = torch.sigmoid(theta)
edge_probs = torch.sigmoid(gamma)
# Gradient calculation
num_pos = adj_matrices.sum(dim=0)
num_neg = batch_size - num_pos
mask = ((num_pos > 0) * (num_neg > 0)).float()
pos_grads = (log_likelihoods * adj_matrices).sum(dim=0) / num_pos.clamp_(min=1e-5)
neg_grads = (log_likelihoods * (1 - adj_matrices)).sum(dim=0) / num_neg.clamp_(min=1e-5)
gamma_grads = mask * edge_probs * (1 - edge_probs) * orient_probs * (pos_grads - neg_grads + self.lambda_sparse)
theta_grads = mask * orient_probs * (1 - orient_probs) * edge_probs * (pos_grads - neg_grads)
# Masking gamma for incoming edges to intervened variable
gamma_grads[:, var_idx] = 0.
gamma_grads[torch.arange(gamma_grads.shape[0]), torch.arange(gamma_grads.shape[1])] = 0.
# Masking all theta's except the ones with a intervened variable
theta_grads[:var_idx] = 0.
theta_grads[var_idx+1:] = 0.
theta_grads -= theta_grads.transpose(0, 1) # theta_ij = -theta_ji
# Creating a mask which theta's are actually updated for the optimizer
theta_mask = torch.zeros_like(theta_grads)
theta_mask[var_idx] = 1.
theta_mask[:, var_idx] = 1.
theta_mask[var_idx, var_idx] = 0.
return gamma_grads, theta_grads, theta_mask
def sample_next_var_idx(self):
"""
Returns next variable to intervene on. We iterate through the variables
in a shuffled order, like a standard dataset.
"""
if len(self.inter_vars) == 0: # If an epoch finished, reshuffle variables
self.inter_vars = [i for i in range(len(self.graph.variables))]
random.shuffle(self.inter_vars)
var_idx = self.inter_vars.pop()
return var_idx
def sample_intervention(self, graph, dataset_size, var_idx=-1):
"""
Returns a new data batch for an intervened variable.
"""
# Select variable to intervene on
if var_idx < 0:
var_idx = self.sample_next_var_idx()
var = graph.variables[var_idx]
# Soft, perfect intervention => replace p(X_n) by random categorical
# Scale is set to 0.0, which represents a uniform distribution.
int_dist = _random_categ(size=(var.prob_dist.num_categs,), scale=0.0, axis=-1)
# Sample from interventional distribution
value = np.random.multinomial(n=1, pvals=int_dist, size=(dataset_size,))
value = np.argmax(value, axis=-1) # One-hot to index
intervention_dict = {var.name: value}
return intervention_dict, var_idx
@torch.no_grad()
def evaluate_likelihoods(self, int_sample, adj_matrix, var_idx):
"""
Evaluates the negative log-likelihood of the interventional data batch (int_sample)
on the given graph structures (adj_matrix) and the intervened variable (var_idx).
"""
self.model.eval()
device = self.get_device()
int_sample = int_sample.to(device)
adj_matrix = adj_matrix.to(device)
# Transpose for mask because adj[i,j] means that i->j
mask_adj_matrix = adj_matrix.transpose(1, 2)
preds = self.model(int_sample, mask=mask_adj_matrix)
# Evaluate negative log-likelihood of predictions
preds = preds.flatten(0, 1)
labels = int_sample.clone()
labels[:, var_idx] = -1 # Perfect interventions => no predictions of the intervened variable
labels = labels.reshape(-1)
nll = F.cross_entropy(preds, labels, reduction='none', ignore_index=-1)
nll = nll.reshape(*int_sample.shape)
self.model.train()
return nll
def get_device(self):
return self.model.device
| [
"torch.bernoulli",
"math.ceil",
"random.shuffle",
"torch.eye",
"torch.sigmoid",
"numpy.argmax",
"torch.from_numpy",
"causal_graphs.variable_distributions._random_categ",
"numpy.random.multinomial",
"torch.arange",
"torch.nn.functional.cross_entropy",
"torch.no_grad",
"torch.zeros_like",
"c... | [((101, 123), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (116, 123), False, 'import sys\n'), ((4683, 4698), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4696, 4698), False, 'import torch\n'), ((9744, 9759), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9757, 9759), False, 'import torch\n'), ((13539, 13554), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13552, 13554), False, 'import torch\n'), ((9578, 9608), 'torch.cat', 'torch.cat', (['adj_matrices'], {'dim': '(0)'}), '(adj_matrices, dim=0)\n', (9587, 9608), False, 'import torch\n'), ((10888, 10908), 'torch.sigmoid', 'torch.sigmoid', (['theta'], {}), '(theta)\n', (10901, 10908), False, 'import torch\n'), ((10930, 10950), 'torch.sigmoid', 'torch.sigmoid', (['gamma'], {}), '(gamma)\n', (10943, 10950), False, 'import torch\n'), ((12053, 12082), 'torch.zeros_like', 'torch.zeros_like', (['theta_grads'], {}), '(theta_grads)\n', (12069, 12082), False, 'import torch\n'), ((13183, 13250), 'causal_graphs.variable_distributions._random_categ', '_random_categ', ([], {'size': '(var.prob_dist.num_categs,)', 'scale': '(0.0)', 'axis': '(-1)'}), '(size=(var.prob_dist.num_categs,), scale=0.0, axis=-1)\n', (13196, 13250), False, 'from causal_graphs.variable_distributions import _random_categ\n'), ((13317, 13381), 'numpy.random.multinomial', 'np.random.multinomial', ([], {'n': '(1)', 'pvals': 'int_dist', 'size': '(dataset_size,)'}), '(n=1, pvals=int_dist, size=(dataset_size,))\n', (13338, 13381), True, 'import numpy as np\n'), ((13398, 13423), 'numpy.argmax', 'np.argmax', (['value'], {'axis': '(-1)'}), '(value, axis=-1)\n', (13407, 13423), True, 'import numpy as np\n'), ((14436, 14501), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['preds', 'labels'], {'reduction': '"""none"""', 'ignore_index': '(-1)'}), "(preds, labels, reduction='none', ignore_index=-1)\n", (14451, 14501), True, 'import torch.nn.functional as F\n'), ((2568, 2653), 
'causal_discovery.datasets.InterventionalDataset', 'InterventionalDataset', (['self.graph'], {'dataset_size': '(4096)', 'batch_size': 'self.batch_size'}), '(self.graph, dataset_size=4096, batch_size=self.batch_size\n )\n', (2589, 2653), False, 'from causal_discovery.datasets import InterventionalDataset\n'), ((7954, 7986), 'torch.bernoulli', 'torch.bernoulli', (['edge_prob_batch'], {}), '(edge_prob_batch)\n', (7969, 7986), False, 'import torch\n'), ((9635, 9668), 'torch.cat', 'torch.cat', (['log_likelihoods'], {'dim': '(0)'}), '(log_likelihoods, dim=0)\n', (9644, 9668), False, 'import torch\n'), ((12612, 12643), 'random.shuffle', 'random.shuffle', (['self.inter_vars'], {}), '(self.inter_vars)\n', (12626, 12643), False, 'import random\n'), ((11654, 11688), 'torch.arange', 'torch.arange', (['gamma_grads.shape[0]'], {}), '(gamma_grads.shape[0])\n', (11666, 11688), False, 'import torch\n'), ((11690, 11724), 'torch.arange', 'torch.arange', (['gamma_grads.shape[1]'], {}), '(gamma_grads.shape[1])\n', (11702, 11724), False, 'import torch\n'), ((7433, 7486), 'math.ceil', 'math.ceil', (['(num_graphs * 1.0 / self.max_graph_stacking)'], {}), '(num_graphs * 1.0 / self.max_graph_stacking)\n', (7442, 7486), False, 'import math\n'), ((7660, 7680), 'torch.sigmoid', 'torch.sigmoid', (['gamma'], {}), '(gamma)\n', (7673, 7680), False, 'import torch\n'), ((7683, 7703), 'torch.sigmoid', 'torch.sigmoid', (['theta'], {}), '(theta)\n', (7696, 7703), False, 'import torch\n'), ((8036, 8099), 'torch.eye', 'torch.eye', (['sample_matrix.shape[-1]'], {'device': 'sample_matrix.device'}), '(sample_matrix.shape[-1], device=sample_matrix.device)\n', (8045, 8099), False, 'import torch\n'), ((7151, 7179), 'torch.from_numpy', 'torch.from_numpy', (['int_sample'], {}), '(int_sample)\n', (7167, 7179), False, 'import torch\n')] |
#this code is the workbench for q-learning
#it consists on a lifting particle that must reach a certain height
#it is only subjected to gravity
#Force applied to the particle might be fixed 9.9 or 9.7N
import numpy as np
import math
import random
import matplotlib.pyplot as plt
#INITIALIZE VARIABLES
######################
m=1 #1kg mass
g=9.80 #gravity
dt=0.05 #simulation time
Final_height=50 #1m
Final_vel=0
#STATES are discretized 0-1-2-3...50...-59-60 cm and speed is discretized in
n_pos=61
STATES=np.linspace(0,Final_height+10,Final_height+10+1)
#SPEEDS are discretized -10,-9,-8...0,1,2,3...,50cm/s.
n_speeds=61
SPEEDS=np.linspace(-10,50,n_speeds)
#ROWS= States (61*61=3721 rows)
#COLUMNS= Actions (9.9 , 9.7) two actions
Rows=n_pos*n_speeds
Columns=2
Actions=([9.9, 9.7])
#time steps
n_items=302
x=np.linspace(0,301,n_items)
#Initialize Q matrix
Q=np.ones((Rows,Columns))
#Q-learning variables
alpha=0.5
gamma=0.5
epsilon=0.15
goalCounter=0
Contador=0
#function to choose the Action
def ChooseAction (Columns,Q,state):
if np.random.uniform() < epsilon:
rand_action=np.random.permutation(Columns)
action=rand_action[1] #current action
F=Actions[action]
max_index=1
# if not select max action in Qtable (act greedy)
else:
QMax=max(Q[state])
max_indices=np.where(Q[state]==QMax)[0] # Identify all indexes where Q equals max
n_hits=len(max_indices) # Number of hits
max_index=int(max_indices[random.randint(0, n_hits-1)]) # If many hits, choose randomly
F=Actions[max_index]
return F, max_index
#function to apply the dynamic model
def ActionToState(F,g,m,dt,z_pos_old,z_vel_old,z_accel_old):
z_accel=(-g + F/m)*100
z_vel=z_vel_old + (z_accel+z_accel_old)/2*dt
z_pos=z_pos_old + (z_vel+z_vel_old)/2*dt
z_accel_old=z_accel
z_vel_old=z_vel
z_pos_old=z_pos
return z_accel,z_vel,z_pos,z_vel_old,z_pos_old
#BEGINNING of the algorithm
for episode in range(1,200000):
# initial state
z_pos=np.zeros(n_items)
z_vel=np.zeros(n_items)
z_accel=np.zeros(n_items)
z_pos_goal=np.zeros((1000, n_items))
z_vel_goal=np.zeros((1000, n_items))
z_acel_goal=np.zeros((1000, n_items))
z_accel_old=0
z_vel_old=0
z_pos_old=0 #initial conditions of the particle
state=11 #let's choose the initial state always height 0, speed 0cm/s
print("episode",episode) #check
for i in range(1,300):
## Choose sometimes the Force randomly
F,max_index = ChooseAction(Columns, Q, state)
#update the dynamic model
z_accel[i],z_vel[i],z_pos[i],z_vel_old,z_pos_old= ActionToState (F,g,m,dt,z_pos_old,z_vel_old,z_accel_old)
#if negative height or velocity values, reward it very negatively.
#If too big values, too
if (min(z_pos)<0 or min(z_vel)<SPEEDS[1] or max(z_vel)>SPEEDS[60] or max(z_pos)>0.99*n_pos):
Q[state,max_index]=-100 #penalty
break
else: #if positive values, do the loop
rounded_pos=round(z_pos[i]) #round the height
rounded_vel=round(z_vel[i]) #round the vel
#calculate which is my new state
index_1=np.where(STATES==rounded_pos)
index_2=np.where(SPEEDS==rounded_vel)
index_1=int(index_1[0])
index_2=int(index_2[0])
state=n_speeds*index_1 + index_2 #new state in Q matrix
QMax=max(Q[state]) #selects the highest value of the row
#REWARD
A1=math.exp(-abs(rounded_pos-Final_height)/(0.1*n_pos))
A2=math.exp(-abs(rounded_vel-Final_vel)/(0.1*14))
Reward=A1*A2*1000000 #takes into account pos and vel
#Q VALUE update
Q[state,max_index]=Q[state,max_index] + alpha*(Reward + gamma*(QMax - Q[state,max_index])) #update Q value
#checking
if (rounded_pos==Final_height or rounded_pos==Final_height-1 or rounded_pos==Final_height+1):
print("entra")
goalCounter=goalCounter+1
if (rounded_vel==Final_vel or rounded_vel==Final_vel+1):
Contador=Contador +1 #counter of successful hits
#saving of successful data
z_pos_goal[0:i,Contador]=z_pos[0:i]
z_vel_goal[0:i,Contador]=z_vel[0:i]
z_acel_goal[0:i,Contador]=z_accel[0:i]
state=11 #reinitialize
break
else:
break
| [
"numpy.ones",
"numpy.where",
"numpy.linspace",
"numpy.zeros",
"numpy.random.uniform",
"random.randint",
"numpy.random.permutation"
] | [((533, 589), 'numpy.linspace', 'np.linspace', (['(0)', '(Final_height + 10)', '(Final_height + 10 + 1)'], {}), '(0, Final_height + 10, Final_height + 10 + 1)\n', (544, 589), True, 'import numpy as np\n'), ((661, 691), 'numpy.linspace', 'np.linspace', (['(-10)', '(50)', 'n_speeds'], {}), '(-10, 50, n_speeds)\n', (672, 691), True, 'import numpy as np\n'), ((858, 886), 'numpy.linspace', 'np.linspace', (['(0)', '(301)', 'n_items'], {}), '(0, 301, n_items)\n', (869, 886), True, 'import numpy as np\n'), ((912, 936), 'numpy.ones', 'np.ones', (['(Rows, Columns)'], {}), '((Rows, Columns))\n', (919, 936), True, 'import numpy as np\n'), ((2123, 2140), 'numpy.zeros', 'np.zeros', (['n_items'], {}), '(n_items)\n', (2131, 2140), True, 'import numpy as np\n'), ((2152, 2169), 'numpy.zeros', 'np.zeros', (['n_items'], {}), '(n_items)\n', (2160, 2169), True, 'import numpy as np\n'), ((2183, 2200), 'numpy.zeros', 'np.zeros', (['n_items'], {}), '(n_items)\n', (2191, 2200), True, 'import numpy as np\n'), ((2217, 2242), 'numpy.zeros', 'np.zeros', (['(1000, n_items)'], {}), '((1000, n_items))\n', (2225, 2242), True, 'import numpy as np\n'), ((2259, 2284), 'numpy.zeros', 'np.zeros', (['(1000, n_items)'], {}), '((1000, n_items))\n', (2267, 2284), True, 'import numpy as np\n'), ((2302, 2327), 'numpy.zeros', 'np.zeros', (['(1000, n_items)'], {}), '((1000, n_items))\n', (2310, 2327), True, 'import numpy as np\n'), ((1107, 1126), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1124, 1126), True, 'import numpy as np\n'), ((1159, 1189), 'numpy.random.permutation', 'np.random.permutation', (['Columns'], {}), '(Columns)\n', (1180, 1189), True, 'import numpy as np\n'), ((1401, 1427), 'numpy.where', 'np.where', (['(Q[state] == QMax)'], {}), '(Q[state] == QMax)\n', (1409, 1427), True, 'import numpy as np\n'), ((3415, 3446), 'numpy.where', 'np.where', (['(STATES == rounded_pos)'], {}), '(STATES == rounded_pos)\n', (3423, 3446), True, 'import numpy as np\n'), ((3466, 3497), 
'numpy.where', 'np.where', (['(SPEEDS == rounded_vel)'], {}), '(SPEEDS == rounded_vel)\n', (3474, 3497), True, 'import numpy as np\n'), ((1556, 1585), 'random.randint', 'random.randint', (['(0)', '(n_hits - 1)'], {}), '(0, n_hits - 1)\n', (1570, 1585), False, 'import random\n')] |
from __future__ import print_function
import numpy as np
def IsPowerOfTwo(i):
"""Returns true if all entries of i are powers of two, False otherwise.
"""
return (i & (i - 1)) == 0 and i != 0
def Log2ofPowerof2(shape):
""" Returns powers of two exponent for each element of shape
"""
res = np.array(shape)
for i in range(res.size):
n = shape[i]
assert (IsPowerOfTwo(n)), "Invalid input"
ix = 0
while n > 1:
n //= 2
ix += 1
res[i] = ix
return res
def Freq(i, N):
"""Outputs the absolute integers frequencies [0,1,...,N/2,N/2-1,...,1]
in numpy fft convention as integer i runs from 0 to N-1.
Inputs can be numpy arrays e.g. i (i1,i2,i3) with N (N1,N2,N3)
or i (i1,i2,...) with N
Both inputs must be integers.
All entries of N must be even.
"""
assert (np.all(N % 2 == 0)), "This routine only for even numbers of points"
return i - 2 * (i >= (N / 2)) * (i % (N / 2))
def rfft2_reals(shape):
    """Indices of the purely real modes of a 2d rfft array, given the shape
    of the *real* map (not of the rfft array)."""
    N0, N1 = shape
    fx, fy = [0], [0]
    even0 = N0 % 2 == 0
    even1 = N1 % 2 == 0
    if even1:
        fx.append(0)
        fy.append(N1 // 2)
    if even0:
        fx.append(N0 // 2)
        fy.append(0)
    if even0 and even1:
        fx.append(N0 // 2)
        fy.append(N1 // 2)
    return np.array(fx), np.array(fy)
def upgrade_map(LD_map, HD_res):
    """
    Upgrade LD_map to a higher resolution map, using rfft and back.

    :param LD_map: 2d map; both shape entries must be powers of two.
    :param HD_res: per-axis log2 of the target shape (must be >= input res).
    :return: Same map at the higher resolution (spectrally zero-padded).
    """
    LD_res = Log2ofPowerof2(LD_map.shape)
    if np.all(LD_res == HD_res): return LD_map
    assert np.all(HD_res >= LD_res)
    HD_rshape = (2 ** HD_res[0], 2 ** (HD_res[1] - 1) + 1)
    HD_shape = (2 ** HD_res[0], 2 ** HD_res[1])
    rfft = np.fft.rfft2(LD_map)
    ret_rfft = np.zeros(HD_rshape, dtype=complex)
    # Bug fix: slice indices must be ints on Python 3; '/' produced floats.
    half0 = rfft.shape[0] // 2
    # positive frequencies
    ret_rfft[0:half0 + 1, 0:rfft.shape[1]] = rfft[0:half0 + 1, :]
    # negative frequencies :
    ret_rfft[HD_rshape[0] - rfft.shape[0] + half0:, 0:rfft.shape[1]] = rfft[half0:, :]
    # rfft normalisation factor between the two grid resolutions
    fac_LDrfft2HDrfft = 2 ** (HD_res[0] - LD_res[0] + HD_res[1] - LD_res[1])
    return np.fft.irfft2(ret_rfft, HD_shape) * fac_LDrfft2HDrfft
def subsample(HD_map, LD_res):
    """Simple subsampling of map: keep every 2**k-th pixel along each axis."""
    HD_res = Log2ofPowerof2(HD_map.shape)
    if np.all(LD_res == HD_res):
        return HD_map.copy()
    assert np.all(HD_res >= LD_res)
    step0 = 2 ** (HD_res[0] - LD_res[0])
    step1 = 2 ** (HD_res[1] - LD_res[1])
    return HD_map[::step0, ::step1]
def supersample(LD_map, HD_shape):
    """Simple hypersampling of map: each LD pixel is replicated over the
    corresponding HD sub-grid.

    :param LD_map: 2d array; HD_shape must be an integer multiple of its shape.
    :param HD_shape: target (rows, cols).
    :return: HD_shape array.
    """
    if LD_map.shape == HD_shape: return LD_map.copy()
    assert np.all(np.array(HD_shape) > np.array(LD_map.shape))
    assert np.all(np.array(HD_shape) % np.array(LD_map.shape) == 0.)
    HDmap = np.zeros(HD_shape)
    # Bug fix: '//' keeps the factors integer; plain '/' yields floats on
    # Python 3 and range(float) raises TypeError.
    fac0, fac1 = (HD_shape[0] // LD_map.shape[0], HD_shape[1] // LD_map.shape[1])
    for i in range(fac0):
        for j in range(fac1):
            HDmap[i::fac0, j::fac1] = LD_map
    return HDmap
def degrade(HD_map, LD_shape):
    """Degrade HD_map to LD_shape by averaging each regular sub-grid.

    :param HD_map: 2d array whose shape is an integer multiple of LD_shape.
    :param LD_shape: target (rows, cols).
    :return: LD_shape array of local means (same dtype as HD_map).
    """
    if np.all(HD_map.shape <= LD_shape): return HD_map.copy()
    # Bug fix: integer division — '/' gives floats on Python 3 and breaks range().
    fac0, fac1 = (HD_map.shape[0] // LD_shape[0], HD_map.shape[1] // LD_shape[1])
    assert fac0 * LD_shape[0] == HD_map.shape[0] and fac1 * LD_shape[1] == HD_map.shape[1], (
        (fac0, fac1), LD_shape, HD_map.shape)
    ret = np.zeros(LD_shape, dtype=HD_map.dtype)
    for _i in range(fac0):
        sl0 = slice(_i, HD_map.shape[0], fac0)
        for _j in range(fac1):
            sl1 = slice(_j, HD_map.shape[1], fac1)
            ret += HD_map[sl0, sl1]
    return ret * (1. / (fac0 * fac1))
def degrade_mask(mask, LD_shape):
    """Degrade a mask by block averaging (FIXME: thresholding may be needed)."""
    # FIXME :
    return degrade(mask, LD_shape)  # * (dmask >= 1.)
def udgrade_rfft2(rfft2map, shape, norm=False):
    """Up- or down-grade a 2d rfft array to the rfft layout matching a real
    map of the given *shape* (even entries only)."""
    assert norm == False, 'not implemented'  # norm. factor for rfft normalization maps. Here just shuffling indices.
    # Already at the requested resolution: nothing to do.
    if shape == (rfft2map.shape[0], 2 * (rfft2map.shape[1] - 1)):
        return rfft2map
    assert np.all([s % 2 == 0 for s in shape]), shape
    assert rfft2map.shape[0] % 2 == 0
    rshape = np.array((shape[0], shape[1] / 2 + 1))
    if np.all(np.array(rshape) >= rfft2map.shape):
        return _upgrade_rfft2(rfft2map, shape)
    if np.all(np.array(rshape) <= rfft2map.shape):
        return _degrade_rfft2(rfft2map, shape)
    assert 0, 'not implemented'
def _degrade_rfft2(rfft2map, LDshape):
    """Degrade an rfft2 array to the rfft layout of a real map of LDshape,
    keeping the lowest-|frequency| modes along both axes.
    """
    # Bug fix: the rfft2 of an (N0, N1) real map has shape (N0, N1 // 2 + 1);
    # the old code used LDshape[0] for the column count, correct only for
    # square maps.
    ret = np.zeros((LDshape[0], LDshape[1] // 2 + 1), dtype=complex)
    ret[0:LDshape[0] // 2 + 1, :] = rfft2map[0:LDshape[0] // 2 + 1, 0:ret.shape[1]]
    ret[LDshape[0] // 2::] = rfft2map[rfft2map.shape[0] - LDshape[0] // 2:, 0:ret.shape[1]]
    # Corrections for pure reals and (-k) = k* :
    ret[LDshape[0] // 2 + 1:, -1] = ret[1:LDshape[0] // 2, -1][::-1].conj()
    # Bug fix: fancy indexing returns a copy, so `ret[idx].imag = 0.` silently
    # did nothing; drop the imaginary parts of the pure-real modes explicitly.
    reals = rfft2_reals(LDshape)
    ret[reals] = ret[reals].real
    return ret
def _upgrade_rfft2(rfft2map, HDshape):
ret = np.zeros((HDshape[0], HDshape[0] / 2 + 1), dtype=complex)
# positive 0axis frequencies : (including N/2 + 1, which is pure real.
ret[0:rfft2map.shape[0] // 2 + 1, 0:rfft2map.shape[1]] = rfft2map[0:rfft2map.shape[0] // 2 + 1, 0:rfft2map.shape[1]]
# Negative 0axis freq.
ret[HDshape[0] - rfft2map.shape[0] // 2:HDshape[0], 0:rfft2map.shape[1]] \
= rfft2map[rfft2map.shape[0] // 2:, 0:rfft2map.shape[1]]
return ret
| [
"numpy.fft.irfft2",
"numpy.fft.rfft2",
"numpy.array",
"numpy.zeros",
"numpy.all"
] | [((320, 335), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (328, 335), True, 'import numpy as np\n'), ((923, 941), 'numpy.all', 'np.all', (['(N % 2 == 0)'], {}), '(N % 2 == 0)\n', (929, 941), True, 'import numpy as np\n'), ((1709, 1733), 'numpy.all', 'np.all', (['(LD_res == HD_res)'], {}), '(LD_res == HD_res)\n', (1715, 1733), True, 'import numpy as np\n'), ((1760, 1784), 'numpy.all', 'np.all', (['(HD_res >= LD_res)'], {}), '(HD_res >= LD_res)\n', (1766, 1784), True, 'import numpy as np\n'), ((1904, 1924), 'numpy.fft.rfft2', 'np.fft.rfft2', (['LD_map'], {}), '(LD_map)\n', (1916, 1924), True, 'import numpy as np\n'), ((1940, 1974), 'numpy.zeros', 'np.zeros', (['HD_rshape'], {'dtype': 'complex'}), '(HD_rshape, dtype=complex)\n', (1948, 1974), True, 'import numpy as np\n'), ((2496, 2520), 'numpy.all', 'np.all', (['(LD_res == HD_res)'], {}), '(LD_res == HD_res)\n', (2502, 2520), True, 'import numpy as np\n'), ((2554, 2578), 'numpy.all', 'np.all', (['(HD_res >= LD_res)'], {}), '(HD_res >= LD_res)\n', (2560, 2578), True, 'import numpy as np\n'), ((2943, 2961), 'numpy.zeros', 'np.zeros', (['HD_shape'], {}), '(HD_shape)\n', (2951, 2961), True, 'import numpy as np\n'), ((3200, 3232), 'numpy.all', 'np.all', (['(HD_map.shape <= LD_shape)'], {}), '(HD_map.shape <= LD_shape)\n', (3206, 3232), True, 'import numpy as np\n'), ((3486, 3524), 'numpy.zeros', 'np.zeros', (['LD_shape'], {'dtype': 'HD_map.dtype'}), '(LD_shape, dtype=HD_map.dtype)\n', (3494, 3524), True, 'import numpy as np\n'), ((4146, 4183), 'numpy.all', 'np.all', (['[(s % 2 == 0) for s in shape]'], {}), '([(s % 2 == 0) for s in shape])\n', (4152, 4183), True, 'import numpy as np\n'), ((4240, 4278), 'numpy.array', 'np.array', (['(shape[0], shape[1] / 2 + 1)'], {}), '((shape[0], shape[1] / 2 + 1))\n', (4248, 4278), True, 'import numpy as np\n'), ((4574, 4632), 'numpy.zeros', 'np.zeros', (['(LDshape[0], LDshape[0] // 2 + 1)'], {'dtype': 'complex'}), '((LDshape[0], LDshape[0] // 2 + 1), dtype=complex)\n', 
(4582, 4632), True, 'import numpy as np\n'), ((5040, 5097), 'numpy.zeros', 'np.zeros', (['(HDshape[0], HDshape[0] / 2 + 1)'], {'dtype': 'complex'}), '((HDshape[0], HDshape[0] / 2 + 1), dtype=complex)\n', (5048, 5097), True, 'import numpy as np\n'), ((1393, 1405), 'numpy.array', 'np.array', (['fx'], {}), '(fx)\n', (1401, 1405), True, 'import numpy as np\n'), ((1407, 1419), 'numpy.array', 'np.array', (['fy'], {}), '(fy)\n', (1415, 1419), True, 'import numpy as np\n'), ((2317, 2350), 'numpy.fft.irfft2', 'np.fft.irfft2', (['ret_rfft', 'HD_shape'], {}), '(ret_rfft, HD_shape)\n', (2330, 2350), True, 'import numpy as np\n'), ((2817, 2835), 'numpy.array', 'np.array', (['HD_shape'], {}), '(HD_shape)\n', (2825, 2835), True, 'import numpy as np\n'), ((2838, 2860), 'numpy.array', 'np.array', (['LD_map.shape'], {}), '(LD_map.shape)\n', (2846, 2860), True, 'import numpy as np\n'), ((4293, 4309), 'numpy.array', 'np.array', (['rshape'], {}), '(rshape)\n', (4301, 4309), True, 'import numpy as np\n'), ((2880, 2898), 'numpy.array', 'np.array', (['HD_shape'], {}), '(HD_shape)\n', (2888, 2898), True, 'import numpy as np\n'), ((2901, 2923), 'numpy.array', 'np.array', (['LD_map.shape'], {}), '(LD_map.shape)\n', (2909, 2923), True, 'import numpy as np\n'), ((4393, 4409), 'numpy.array', 'np.array', (['rshape'], {}), '(rshape)\n', (4401, 4409), True, 'import numpy as np\n')] |
import numpy as np
import pytest # noqa: F401
from pandas_datareader._utils import RemoteDataError
from epymetheus.datasets import fetch_usstocks
# --------------------------------------------------------------------------------
def test_toomanyasset():
    """fetch_usstocks must raise ValueError when asked for more assets
    than the dataset provides."""
    pytest.raises(ValueError, fetch_usstocks, n_assets=1000)
def test_usstocks():
    """A fetched universe should contain no NaN prices (skipped offline)."""
    try:
        universe = fetch_usstocks(n_assets=2)
    except RemoteDataError as e:
        # Network/data-source failures are not a test failure.
        print("Skip", e)
    else:
        assert not np.any(np.isnan(universe.values))
| [
"epymetheus.datasets.fetch_usstocks",
"pytest.raises",
"numpy.isnan"
] | [((359, 384), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (372, 384), False, 'import pytest\n'), ((394, 423), 'epymetheus.datasets.fetch_usstocks', 'fetch_usstocks', ([], {'n_assets': '(1000)'}), '(n_assets=1000)\n', (408, 423), False, 'from epymetheus.datasets import fetch_usstocks\n'), ((475, 501), 'epymetheus.datasets.fetch_usstocks', 'fetch_usstocks', ([], {'n_assets': '(2)'}), '(n_assets=2)\n', (489, 501), False, 'from epymetheus.datasets import fetch_usstocks\n'), ((521, 546), 'numpy.isnan', 'np.isnan', (['universe.values'], {}), '(universe.values)\n', (529, 546), True, 'import numpy as np\n')] |
import math
import numpy as np
import cv2
import sys
# # Implement the functions below.
def extract_red(image):
    """ Returns a copy of the red channel of the input image.

    Args:
        image (numpy.array): Input RGB (BGR in OpenCV) image; red is index 2.
    Returns:
        numpy.array: Output 2D array containing the red channel.
    """
    # Slice the last channel and copy so the caller cannot alias the input.
    return image[:, :, 2].copy()
def extract_green(image):
    """ Returns a copy of the green channel of the input image.

    Args:
        image (numpy.array): Input RGB (BGR in OpenCV) image; green is index 1.
    Returns:
        numpy.array: Output 2D array containing the green channel.
    """
    # Middle channel in BGR order; copy to avoid aliasing the input.
    return image[:, :, 1].copy()
def extract_blue(image):
    """ Returns a copy of the blue channel of the input image.

    Args:
        image (numpy.array): Input RGB (BGR in OpenCV) image; blue is index 0.
    Returns:
        numpy.array: Output 2D array containing the blue channel.
    """
    # First channel in BGR order; copy to avoid aliasing the input.
    return image[:, :, 0].copy()
def swap_green_blue(image):
    """ Returns an image with the green and blue channels of the input image
    swapped. The input is not modified.

    Args:
        image (numpy.array): Input RGB (BGR in OpenCV) image.
    Returns:
        numpy.array: Output 3D array with the green and blue channels swapped.
    """
    swapped = np.copy(image)
    # In BGR order blue is channel 0 and green is channel 1; exchange them.
    swapped[:, :, 0] = image[:, :, 1]
    swapped[:, :, 1] = image[:, :, 0]
    return swapped
def copy_paste_middle(src, dst, shape):
    """ Copies the middle region of size shape from src to the middle of dst.

    Note: Assumes that src and dst are monochrome images, i.e. 2d arrays.

    Note: Where 'middle' is ambiguous because of any difference in the oddness
    or evenness of the size of the copied region and the image size, the function
    rounds downwards. E.g. in copying a shape = (1,1) from a src image of size (2,2)
    into an dst image of size (3,3), the function copies the range [0:1,0:1] of
    the src into the range [1:2,1:2] of the dst.

    Args:
        src (numpy.array): 2D array where the rectangular shape will be copied from.
        dst (numpy.array): 2D array where the rectangular shape will be copied to.
        shape (tuple): Tuple containing the height (int) and width (int) of the section to be
                       copied.

    Returns:
        numpy.array: Output monochrome image (2D array)
    """
    src = np.copy(src)
    dst = np.copy(dst)
    copy_h, copy_w = shape
    src_rows, src_cols = src.shape
    dst_rows, dst_cols = dst.shape
    # Top-left corner of a centered region of size s inside an image of size R
    # is (R - s) // 2: identical to the old midpoint arithmetic for even s,
    # and — bug fix — it also handles odd s (the old code computed an empty
    # range for odd shapes, copying nothing, contrary to the docstring).
    r_src = (src_rows - copy_h) // 2
    c_src = (src_cols - copy_w) // 2
    r_dst = (dst_rows - copy_h) // 2
    c_dst = (dst_cols - copy_w) // 2
    dst[r_dst:r_dst + copy_h, c_dst:c_dst + copy_w] = \
        src[r_src:r_src + copy_h, c_src:c_src + copy_w]
    return dst
def image_stats(image):
    """ Returns the tuple (min, max, mean, stddev) for the input monochrome
    image, with each statistic promoted to a float.

    Args:
        image (numpy.array): Input 2D image.
    Returns:
        tuple: (min, max, mean, stddev) of the pixel values.
    """
    temp_image = np.copy(image)
    return (1. * temp_image.min(), 1. * temp_image.max(),
            1. * temp_image.mean(), 1. * temp_image.std())
def center_and_normalize(image, scale):
    """ Returns an image with the same mean as the original but with values
    rescaled about the mean so the standard deviation becomes *scale*.

    Note: no clipping is performed, so out-of-range pixel values may be
    produced; consider converting the input to float64 beforehand.

    Args:
        image (numpy.array): Input 2D image.
        scale (int or float): target standard deviation.
    Returns:
        numpy.array: Output 2D image.
    """
    _, _, mean_val, std_val = image_stats(image)
    # Standardize to zero mean / unit std, stretch to the requested spread,
    # then restore the original mean.
    standardized = (image - mean_val) / std_val
    return standardized * scale + mean_val
def shift_image_left(image, shift):
    """ Returns the input monochrome image shifted *shift* pixels to the left.

    The output keeps the original shape; the vacated right-hand columns are
    filled by replicating the border (cv2.BORDER_REPLICATE).

    Args:
        image (numpy.array): Input 2D image.
        shift (int): number of pixels to shift left (0 means no displacement).
    Returns:
        numpy.array: Output shifted 2D image.
    """
    cropped = np.copy(image)[:, shift:]
    # Pad only on the right so the result has the original width.
    return cv2.copyMakeBorder(cropped, 0, 0, 0, shift, cv2.BORDER_REPLICATE)
def difference_image(img1, img2):
    """ Returns the difference between the two input images (img1 - img2),
    min-max normalized and scaled to fit [0, 255].

    Args:
        img1 (numpy.array): Input 2D image.
        img2 (numpy.array): Input 2D image.
    Returns:
        numpy.array: Output 2D image containing the result of subtracting img2 from img1.
    """
    # Bug fix: np.float was removed in NumPy 1.24; use the explicit dtype.
    difference = img1.astype(np.float64) - img2.astype(np.float64)
    output_image = np.zeros(difference.shape)
    # cv2.normalize returns dst; assign it so the result is correct even if
    # OpenCV allocates a fresh array instead of writing in place.
    output_image = cv2.normalize(difference, output_image, alpha=0, beta=255,
                                 norm_type=cv2.NORM_MINMAX)
    return output_image
def add_noise(image, channel, sigma):
    """ Returns a copy of the input color image with zero-mean Gaussian noise
    of standard deviation *sigma* added to the given channel (0-2).

    The returned values are neither clipped nor rescaled, so they may fall
    outside [0, 255]; consider a float64 input.

    Args:
        image (numpy.array): input RGB (BGR in OpenCV) image.
        channel (int): Channel index value.
        sigma (float): Gaussian noise standard deviation.
    Returns:
        numpy.array: Output 3D array with noise added to the chosen channel.
    """
    # Float copy so adding real-valued noise does not truncate.
    noisy = np.copy(image) * 1.0
    gauss = np.random.randn(*image.shape) * sigma
    noisy[:, :, channel] += gauss[:, :, channel]
    return noisy
| [
"numpy.copy",
"numpy.mean",
"cv2.normalize",
"cv2.copyMakeBorder",
"numpy.std",
"numpy.floor",
"numpy.max",
"numpy.zeros",
"numpy.min",
"numpy.random.randn"
] | [((589, 612), 'numpy.copy', 'np.copy', (['image[:, :, 2]'], {}), '(image[:, :, 2])\n', (596, 612), True, 'import numpy as np\n'), ((1085, 1108), 'numpy.copy', 'np.copy', (['image[:, :, 1]'], {}), '(image[:, :, 1])\n', (1092, 1108), True, 'import numpy as np\n'), ((1588, 1611), 'numpy.copy', 'np.copy', (['image[:, :, 0]'], {}), '(image[:, :, 0])\n', (1595, 1611), True, 'import numpy as np\n'), ((2094, 2108), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (2101, 2108), True, 'import numpy as np\n'), ((3386, 3398), 'numpy.copy', 'np.copy', (['src'], {}), '(src)\n', (3393, 3398), True, 'import numpy as np\n'), ((3409, 3421), 'numpy.copy', 'np.copy', (['dst'], {}), '(dst)\n', (3416, 3421), True, 'import numpy as np\n'), ((7217, 7231), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (7224, 7231), True, 'import numpy as np\n'), ((7352, 7431), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['temp_image[:, shift:]', '(0)', '(0)', '(0)', 'shift', 'cv2.BORDER_REPLICATE'], {}), '(temp_image[:, shift:], 0, 0, 0, shift, cv2.BORDER_REPLICATE)\n', (7370, 7431), False, 'import cv2\n'), ((8084, 8110), 'numpy.zeros', 'np.zeros', (['difference.shape'], {}), '(difference.shape)\n', (8092, 8110), True, 'import numpy as np\n'), ((8115, 8205), 'cv2.normalize', 'cv2.normalize', (['difference', 'output_image'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX'}), '(difference, output_image, alpha=0, beta=255, norm_type=cv2.\n NORM_MINMAX)\n', (8128, 8205), False, 'import cv2\n'), ((9560, 9574), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (9567, 9574), True, 'import numpy as np\n'), ((3587, 3609), 'numpy.floor', 'np.floor', (['(shape[0] / 2)'], {}), '(shape[0] / 2)\n', (3595, 3609), True, 'import numpy as np\n'), ((3636, 3658), 'numpy.floor', 'np.floor', (['(shape[1] / 2)'], {}), '(shape[1] / 2)\n', (3644, 3658), True, 'import numpy as np\n'), ((3720, 3742), 'numpy.floor', 'np.floor', (['(src_rows / 2)'], {}), '(src_rows / 2)\n', (3728, 3742), True, 
'import numpy as np\n'), ((3767, 3789), 'numpy.floor', 'np.floor', (['(src_cols / 2)'], {}), '(src_cols / 2)\n', (3775, 3789), True, 'import numpy as np\n'), ((3853, 3875), 'numpy.floor', 'np.floor', (['(dst_rows / 2)'], {}), '(dst_rows / 2)\n', (3861, 3875), True, 'import numpy as np\n'), ((3901, 3923), 'numpy.floor', 'np.floor', (['(dst_cols / 2)'], {}), '(dst_cols / 2)\n', (3909, 3923), True, 'import numpy as np\n'), ((9505, 9534), 'numpy.random.randn', 'np.random.randn', (['*image.shape'], {}), '(*image.shape)\n', (9520, 9534), True, 'import numpy as np\n'), ((5275, 5288), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (5281, 5288), True, 'import numpy as np\n'), ((5293, 5306), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (5299, 5306), True, 'import numpy as np\n'), ((5311, 5325), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (5318, 5325), True, 'import numpy as np\n'), ((5330, 5343), 'numpy.std', 'np.std', (['image'], {}), '(image)\n', (5336, 5343), True, 'import numpy as np\n')] |
import numpy as np
class IBM:
    """Individual-based model step: vertical advection + random vertical
    mixing, with optional repositioning of particles stranded on land."""

    def __init__(self, config):
        # Vertical eddy diffusivity [m^2/s]; 0 disables the random mixing term.
        self.D = config["ibm"].get('vertical_mixing', 0)  # Vertical mixing [m*2/s]
        # Model time step [s] — presumably seconds; confirm against config.
        self.dt = config['dt']
        # Particle positions and ids from the previous step, used to detect
        # particles that did not move (assumed stranded on land).
        self.x = np.array([])
        self.y = np.array([])
        self.pid = np.array([])
        # 'reposition' (default) nudges stranded particles back inside their cell.
        self.land_collision = config["ibm"].get('land_collision', 'reposition')

    def update_ibm(self, grid, state, forcing):
        """Advance particle depths one time step and handle land collisions.

        Mutates `state.X`, `state.Y`, `state.Z` in place; `grid`, `state` and
        `forcing` are project objects (their exact contracts are not visible
        here — wvel/sample_depth assumed to be vectorized over particles).
        """
        # Vertical advection velocity
        W = forcing.forcing.wvel(state.X, state.Y, state.Z)
        # Vertical diffusion velocity: random walk with step std sqrt(2*D*dt),
        # expressed as a velocity sqrt(2*D/dt).
        rand = np.random.normal(size=len(state.X))
        W += rand * (2 * self.D / self.dt) ** 0.5
        # Update vertical position, using reflexive boundary condition at top
        state.Z += W * self.dt
        state.Z[state.Z < 0] *= -1
        # Reflexive boundary condition at bottom
        H = grid.sample_depth(state.X, state.Y)  # Water depth
        below_seabed = state.Z > H
        state.Z[below_seabed] = 2*H[below_seabed] - state.Z[below_seabed]
        if self.land_collision == "reposition":
            # If particles have not moved: Assume they ended up on land.
            # If that is the case, reposition them within the cell.
            # Match particles present in both the previous and current step.
            pid, pidx_old, pidx_new = np.intersect1d(self.pid, state.pid, return_indices=True)
            onland = ((self.x[pidx_old] == state.X[pidx_new]) &
                      (self.y[pidx_old] == state.Y[pidx_new]))
            num_onland = np.count_nonzero(onland)
            pidx_new_onland = pidx_new[onland]
            # Draw a uniform position inside the unit grid cell around the
            # particle's rounded coordinate.
            x_new = np.round(state.X[pidx_new_onland]) - 0.5 + np.random.rand(num_onland)
            y_new = np.round(state.Y[pidx_new_onland]) - 0.5 + np.random.rand(num_onland)
            state.X[pidx_new_onland] = x_new
            state.Y[pidx_new_onland] = y_new
            # Remember this step's positions/ids for the next stranding check.
            self.x = state.X
            self.y = state.Y
            self.pid = state.pid
| [
"numpy.intersect1d",
"numpy.random.rand",
"numpy.count_nonzero",
"numpy.array",
"numpy.round"
] | [((196, 208), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (204, 208), True, 'import numpy as np\n'), ((226, 238), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (234, 238), True, 'import numpy as np\n'), ((258, 270), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (266, 270), True, 'import numpy as np\n'), ((1233, 1289), 'numpy.intersect1d', 'np.intersect1d', (['self.pid', 'state.pid'], {'return_indices': '(True)'}), '(self.pid, state.pid, return_indices=True)\n', (1247, 1289), True, 'import numpy as np\n'), ((1442, 1466), 'numpy.count_nonzero', 'np.count_nonzero', (['onland'], {}), '(onland)\n', (1458, 1466), True, 'import numpy as np\n'), ((1577, 1603), 'numpy.random.rand', 'np.random.rand', (['num_onland'], {}), '(num_onland)\n', (1591, 1603), True, 'import numpy as np\n'), ((1667, 1693), 'numpy.random.rand', 'np.random.rand', (['num_onland'], {}), '(num_onland)\n', (1681, 1693), True, 'import numpy as np\n'), ((1534, 1568), 'numpy.round', 'np.round', (['state.X[pidx_new_onland]'], {}), '(state.X[pidx_new_onland])\n', (1542, 1568), True, 'import numpy as np\n'), ((1624, 1658), 'numpy.round', 'np.round', (['state.Y[pidx_new_onland]'], {}), '(state.Y[pidx_new_onland])\n', (1632, 1658), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 5 00:24:23 2021
@author: 34123
"""
import matplotlib.pyplot as plt
import numpy as np
import random
from scipy.stats import multivariate_normal
def plot_random_init_iris_sepal(df_full):
    """Scatter the sepal features and overlay three randomly initialized
    Gaussian cluster contours (means drawn from the data, shared covariance)."""
    data = np.array(df_full.iloc[:, 0:2])
    # Three random data points as initial means; same RNG call order as before.
    means = [random.choice(data) for _ in range(3)]
    cov = np.cov(np.transpose(data))
    gaussians = [multivariate_normal(m, cov) for m in means]
    xs = np.linspace(4, 8, 150)
    ys = np.linspace(1.5, 4.5, 150)
    X, Y = np.meshgrid(xs, ys)
    # Stack the grid into (rows, cols, 2) for multivariate_normal.pdf.
    pos = np.empty(X.shape + (2,))
    pos[:, :, 0] = X
    pos[:, :, 1] = Y
    plt.figure(figsize=(10, 10))
    plt.scatter(data[:, 0], data[:, 1], marker='o')
    for rv, color in zip(gaussians, ("r", "b", "g")):
        plt.contour(X, Y, rv.pdf(pos), colors=color, alpha=0.5)
    plt.axis('equal')
    plt.xlabel('Sepal Length', fontsize=16)
    plt.ylabel('Sepal Width', fontsize=16)
    plt.title('Initial Random Clusters(Sepal)', fontsize=22)
    plt.grid()
    plt.show()
def plot_random_init_iris_petal(df_full):
    """Scatter the petal features and overlay three randomly initialized
    Gaussian cluster contours (means drawn from the data, shared covariance)."""
    data = np.array(df_full.iloc[:, 2:4])
    # Three random data points as initial means; same RNG call order as before.
    means = [random.choice(data) for _ in range(3)]
    cov = np.cov(np.transpose(data))
    gaussians = [multivariate_normal(m, cov) for m in means]
    xs = np.linspace(-1, 7, 150)
    ys = np.linspace(-1, 4, 150)
    X, Y = np.meshgrid(xs, ys)
    # Stack the grid into (rows, cols, 2) for multivariate_normal.pdf.
    pos = np.empty(X.shape + (2,))
    pos[:, :, 0] = X
    pos[:, :, 1] = Y
    plt.figure(figsize=(10, 10))
    plt.scatter(data[:, 0], data[:, 1], marker='o')
    for rv, color in zip(gaussians, ("r", "b", "g")):
        plt.contour(X, Y, rv.pdf(pos), colors=color, alpha=0.5)
    plt.axis('equal')
    plt.xlabel('Petal Length', fontsize=16)
    plt.ylabel('Petal Width', fontsize=16)
    plt.title('Initial Random Clusters(Petal)', fontsize=22)
    plt.grid()
    plt.show()
def plot_cluster_iris_sepal(df_full, labels, centers):
    """Plot the final clusters over the sepal features.

    Matches each iris species block (samples 0-49, 50-99, 100-149 — assumes
    the usual setosa/versicolor/virginica ordering; confirm against caller)
    to its most frequent cluster label, then draws one Gaussian contour per
    cluster fitted to the points assigned to it.

    Args:
        df_full: iris dataframe; sepal columns are at positions 0:2.
        labels: per-sample cluster labels (list of ints).
        centers: cluster centers, one 4-vector (sepal l/w, petal l/w) each.
    """
    # finding mode: most common cluster label within each species block
    seto = max(set(labels[0:50]), key=labels[0:50].count) # 2
    vers = max(set(labels[50:100]), key=labels[50:100].count) # 1
    virg = max(set(labels[100:]), key=labels[100:].count) # 0
    # sepal components of the matched cluster centers
    s_mean_clus1 = np.array([centers[seto][0],centers[seto][1]])
    s_mean_clus2 = np.array([centers[vers][0],centers[vers][1]])
    s_mean_clus3 = np.array([centers[virg][0],centers[virg][1]])
    values = np.array(labels) #label
    # search all 3 species
    searchval_seto = seto
    searchval_vers = vers
    searchval_virg = virg
    # index of all 3 species
    ii_seto = np.where(values == searchval_seto)[0]
    ii_vers = np.where(values == searchval_vers)[0]
    ii_virg = np.where(values == searchval_virg)[0]
    ind_seto = list(ii_seto)
    ind_vers = list(ii_vers)
    ind_virg = list(ii_virg)
    sepal_df = df_full.iloc[:,0:2]
    # per-cluster sepal data, used to fit one covariance per cluster
    seto_df = sepal_df[sepal_df.index.isin(ind_seto)]
    vers_df = sepal_df[sepal_df.index.isin(ind_vers)]
    virg_df = sepal_df[sepal_df.index.isin(ind_virg)]
    cov_seto = np.cov(np.transpose(np.array(seto_df)))
    cov_vers = np.cov(np.transpose(np.array(vers_df)))
    cov_virg = np.cov(np.transpose(np.array(virg_df)))
    sepal_df = np.array(sepal_df)
    x1 = np.linspace(4,8,150)
    x2 = np.linspace(1.5,4.5,150)
    X, Y = np.meshgrid(x1,x2)
    Z1 = multivariate_normal(s_mean_clus1, cov_seto)
    Z2 = multivariate_normal(s_mean_clus2, cov_vers)
    Z3 = multivariate_normal(s_mean_clus3, cov_virg)
    # grid stacked to (rows, cols, 2) for multivariate_normal.pdf
    pos = np.empty(X.shape + (2,))
    pos[:, :, 0] = X; pos[:, :, 1] = Y
    plt.figure(figsize=(10,10))
    plt.scatter(sepal_df[:,0], sepal_df[:,1], marker='o')
    plt.contour(X, Y, Z1.pdf(pos), colors="r" ,alpha = 0.5)
    plt.contour(X, Y, Z2.pdf(pos), colors="b" ,alpha = 0.5)
    plt.contour(X, Y, Z3.pdf(pos), colors="g" ,alpha = 0.5)
    plt.axis('equal')
    plt.xlabel('Sepal Length', fontsize=16)
    plt.ylabel('Sepal Width', fontsize=16)
    plt.title('Final Clusters(Sepal)', fontsize=22)
    plt.grid()
    plt.show()
def plot_cluster_iris_petal(df_full, labels, centers):
    """Plot the final clusters over the petal features.

    Bug fix: this function previously read ``seto``/``vers``/``virg`` and
    ``ind_seto``/``ind_vers``/``ind_virg`` — locals of
    plot_cluster_iris_sepal — so calling it raised NameError. They are now
    recomputed here from ``labels`` with the same logic.

    Args:
        df_full: iris dataframe; petal columns are at positions 2:4.
        labels: per-sample cluster labels (list of ints), assumed ordered
            setosa (0-49), versicolor (50-99), virginica (100-149).
        centers: cluster centers, one 4-vector (sepal l/w, petal l/w) each.
    """
    # Most frequent cluster label within each species block (the mode).
    seto = max(set(labels[0:50]), key=labels[0:50].count)
    vers = max(set(labels[50:100]), key=labels[50:100].count)
    virg = max(set(labels[100:]), key=labels[100:].count)
    # petal components of the matched cluster centers
    p_mean_clus1 = np.array([centers[seto][2],centers[seto][3]])
    p_mean_clus2 = np.array([centers[vers][2],centers[vers][3]])
    p_mean_clus3 = np.array([centers[virg][2],centers[virg][3]])
    # sample indices assigned to each cluster
    values = np.array(labels)
    ind_seto = list(np.where(values == seto)[0])
    ind_vers = list(np.where(values == vers)[0])
    ind_virg = list(np.where(values == virg)[0])
    petal_df = df_full.iloc[:,2:4]
    # per-cluster petal data, used to fit one covariance per cluster
    seto_df = petal_df[petal_df.index.isin(ind_seto)]
    vers_df = petal_df[petal_df.index.isin(ind_vers)]
    virg_df = petal_df[petal_df.index.isin(ind_virg)]
    cov_seto = np.cov(np.transpose(np.array(seto_df)))
    cov_vers = np.cov(np.transpose(np.array(vers_df)))
    cov_virg = np.cov(np.transpose(np.array(virg_df)))
    petal_df = np.array(petal_df)
    x1 = np.linspace(0.5,7,150)
    x2 = np.linspace(-1,4,150)
    X, Y = np.meshgrid(x1,x2)
    Z1 = multivariate_normal(p_mean_clus1, cov_seto)
    Z2 = multivariate_normal(p_mean_clus2, cov_vers)
    Z3 = multivariate_normal(p_mean_clus3, cov_virg)
    # grid stacked to (rows, cols, 2) for multivariate_normal.pdf
    pos = np.empty(X.shape + (2,))
    pos[:, :, 0] = X; pos[:, :, 1] = Y
    plt.figure(figsize=(10,10))
    plt.scatter(petal_df[:,0], petal_df[:,1], marker='o')
    plt.contour(X, Y, Z1.pdf(pos), colors="r" ,alpha = 0.5)
    plt.contour(X, Y, Z2.pdf(pos), colors="b" ,alpha = 0.5)
    plt.contour(X, Y, Z3.pdf(pos), colors="g" ,alpha = 0.5)
    plt.axis('equal')
    plt.xlabel('Petal Length', fontsize=16)
    plt.ylabel('Petal Width', fontsize=16)
    plt.title('Final Clusters(Petal)', fontsize=22)
    plt.grid()
    plt.show()
| [
"random.choice",
"matplotlib.pyplot.title",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.where",
"scipy.stats.multivariate_normal",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.axis",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.empty",
"matplotlib.py... | [((287, 305), 'numpy.array', 'np.array', (['sepal_df'], {}), '(sepal_df)\n', (295, 305), True, 'import numpy as np\n'), ((320, 343), 'random.choice', 'random.choice', (['sepal_df'], {}), '(sepal_df)\n', (333, 343), False, 'import random\n'), ((353, 376), 'random.choice', 'random.choice', (['sepal_df'], {}), '(sepal_df)\n', (366, 376), False, 'import random\n'), ((386, 409), 'random.choice', 'random.choice', (['sepal_df'], {}), '(sepal_df)\n', (399, 409), False, 'import random\n'), ((551, 573), 'numpy.linspace', 'np.linspace', (['(4)', '(8)', '(150)'], {}), '(4, 8, 150)\n', (562, 573), True, 'import numpy as np\n'), ((583, 609), 'numpy.linspace', 'np.linspace', (['(1.5)', '(4.5)', '(150)'], {}), '(1.5, 4.5, 150)\n', (594, 609), True, 'import numpy as np\n'), ((619, 638), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (630, 638), True, 'import numpy as np\n'), ((649, 678), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['m1', 'cov1'], {}), '(m1, cov1)\n', (668, 678), False, 'from scipy.stats import multivariate_normal\n'), ((690, 719), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['m2', 'cov2'], {}), '(m2, cov2)\n', (709, 719), False, 'from scipy.stats import multivariate_normal\n'), ((729, 758), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['m3', 'cov3'], {}), '(m3, cov3)\n', (748, 758), False, 'from scipy.stats import multivariate_normal\n'), ((846, 870), 'numpy.empty', 'np.empty', (['(X.shape + (2,))'], {}), '(X.shape + (2,))\n', (854, 870), True, 'import numpy as np\n'), ((918, 946), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (928, 946), True, 'import matplotlib.pyplot as plt\n'), ((950, 1005), 'matplotlib.pyplot.scatter', 'plt.scatter', (['sepal_df[:, 0]', 'sepal_df[:, 1]'], {'marker': '"""o"""'}), "(sepal_df[:, 0], sepal_df[:, 1], marker='o')\n", (961, 1005), True, 'import matplotlib.pyplot as plt\n'), ((1228, 1245), 
'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1236, 1245), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1354), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sepal Length"""'], {'fontsize': '(16)'}), "('Sepal Length', fontsize=16)\n", (1325, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1397), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sepal Width"""'], {'fontsize': '(16)'}), "('Sepal Width', fontsize=16)\n", (1369, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1458), 'matplotlib.pyplot.title', 'plt.title', (['"""Initial Random Clusters(Sepal)"""'], {'fontsize': '(22)'}), "('Initial Random Clusters(Sepal)', fontsize=22)\n", (1411, 1458), True, 'import matplotlib.pyplot as plt\n'), ((1463, 1473), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1471, 1473), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1486, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1587, 1605), 'numpy.array', 'np.array', (['petal_df'], {}), '(petal_df)\n', (1595, 1605), True, 'import numpy as np\n'), ((1620, 1643), 'random.choice', 'random.choice', (['petal_df'], {}), '(petal_df)\n', (1633, 1643), False, 'import random\n'), ((1653, 1676), 'random.choice', 'random.choice', (['petal_df'], {}), '(petal_df)\n', (1666, 1676), False, 'import random\n'), ((1686, 1709), 'random.choice', 'random.choice', (['petal_df'], {}), '(petal_df)\n', (1699, 1709), False, 'import random\n'), ((1846, 1869), 'numpy.linspace', 'np.linspace', (['(-1)', '(7)', '(150)'], {}), '(-1, 7, 150)\n', (1857, 1869), True, 'import numpy as np\n'), ((1877, 1900), 'numpy.linspace', 'np.linspace', (['(-1)', '(4)', '(150)'], {}), '(-1, 4, 150)\n', (1888, 1900), True, 'import numpy as np\n'), ((1910, 1929), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (1921, 1929), True, 'import numpy as np\n'), ((1940, 1969), 'scipy.stats.multivariate_normal', 
'multivariate_normal', (['m1', 'cov1'], {}), '(m1, cov1)\n', (1959, 1969), False, 'from scipy.stats import multivariate_normal\n'), ((1981, 2010), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['m2', 'cov2'], {}), '(m2, cov2)\n', (2000, 2010), False, 'from scipy.stats import multivariate_normal\n'), ((2020, 2049), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['m3', 'cov3'], {}), '(m3, cov3)\n', (2039, 2049), False, 'from scipy.stats import multivariate_normal\n'), ((2061, 2085), 'numpy.empty', 'np.empty', (['(X.shape + (2,))'], {}), '(X.shape + (2,))\n', (2069, 2085), True, 'import numpy as np\n'), ((2133, 2161), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2143, 2161), True, 'import matplotlib.pyplot as plt\n'), ((2165, 2220), 'matplotlib.pyplot.scatter', 'plt.scatter', (['petal_df[:, 0]', 'petal_df[:, 1]'], {'marker': '"""o"""'}), "(petal_df[:, 0], petal_df[:, 1], marker='o')\n", (2176, 2220), True, 'import matplotlib.pyplot as plt\n'), ((2411, 2428), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2419, 2428), True, 'import matplotlib.pyplot as plt\n'), ((2434, 2473), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal Length"""'], {'fontsize': '(16)'}), "('Petal Length', fontsize=16)\n", (2444, 2473), True, 'import matplotlib.pyplot as plt\n'), ((2479, 2517), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal Width"""'], {'fontsize': '(16)'}), "('Petal Width', fontsize=16)\n", (2489, 2517), True, 'import matplotlib.pyplot as plt\n'), ((2522, 2578), 'matplotlib.pyplot.title', 'plt.title', (['"""Initial Random Clusters(Petal)"""'], {'fontsize': '(22)'}), "('Initial Random Clusters(Petal)', fontsize=22)\n", (2531, 2578), True, 'import matplotlib.pyplot as plt\n'), ((2583, 2593), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2591, 2593), True, 'import matplotlib.pyplot as plt\n'), ((2598, 2608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (2606, 2608), True, 'import matplotlib.pyplot as plt\n'), ((2919, 2965), 'numpy.array', 'np.array', (['[centers[seto][0], centers[seto][1]]'], {}), '([centers[seto][0], centers[seto][1]])\n', (2927, 2965), True, 'import numpy as np\n'), ((2984, 3030), 'numpy.array', 'np.array', (['[centers[vers][0], centers[vers][1]]'], {}), '([centers[vers][0], centers[vers][1]])\n', (2992, 3030), True, 'import numpy as np\n'), ((3049, 3095), 'numpy.array', 'np.array', (['[centers[virg][0], centers[virg][1]]'], {}), '([centers[virg][0], centers[virg][1]])\n', (3057, 3095), True, 'import numpy as np\n'), ((3113, 3129), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3121, 3129), True, 'import numpy as np\n'), ((3913, 3931), 'numpy.array', 'np.array', (['sepal_df'], {}), '(sepal_df)\n', (3921, 3931), True, 'import numpy as np\n'), ((3946, 3968), 'numpy.linspace', 'np.linspace', (['(4)', '(8)', '(150)'], {}), '(4, 8, 150)\n', (3957, 3968), True, 'import numpy as np\n'), ((3978, 4004), 'numpy.linspace', 'np.linspace', (['(1.5)', '(4.5)', '(150)'], {}), '(1.5, 4.5, 150)\n', (3989, 4004), True, 'import numpy as np\n'), ((4014, 4033), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (4025, 4033), True, 'import numpy as np\n'), ((4044, 4087), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['s_mean_clus1', 'cov_seto'], {}), '(s_mean_clus1, cov_seto)\n', (4063, 4087), False, 'from scipy.stats import multivariate_normal\n'), ((4099, 4142), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['s_mean_clus2', 'cov_vers'], {}), '(s_mean_clus2, cov_vers)\n', (4118, 4142), False, 'from scipy.stats import multivariate_normal\n'), ((4152, 4195), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['s_mean_clus3', 'cov_virg'], {}), '(s_mean_clus3, cov_virg)\n', (4171, 4195), False, 'from scipy.stats import multivariate_normal\n'), ((4207, 4231), 'numpy.empty', 'np.empty', (['(X.shape + (2,))'], {}), '(X.shape + (2,))\n', (4215, 4231), 
True, 'import numpy as np\n'), ((4279, 4307), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4289, 4307), True, 'import matplotlib.pyplot as plt\n'), ((4369, 4424), 'matplotlib.pyplot.scatter', 'plt.scatter', (['sepal_df[:, 0]', 'sepal_df[:, 1]'], {'marker': '"""o"""'}), "(sepal_df[:, 0], sepal_df[:, 1], marker='o')\n", (4380, 4424), True, 'import matplotlib.pyplot as plt\n'), ((4615, 4632), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (4623, 4632), True, 'import matplotlib.pyplot as plt\n'), ((4703, 4742), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sepal Length"""'], {'fontsize': '(16)'}), "('Sepal Length', fontsize=16)\n", (4713, 4742), True, 'import matplotlib.pyplot as plt\n'), ((4747, 4785), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sepal Width"""'], {'fontsize': '(16)'}), "('Sepal Width', fontsize=16)\n", (4757, 4785), True, 'import matplotlib.pyplot as plt\n'), ((4790, 4837), 'matplotlib.pyplot.title', 'plt.title', (['"""Final Clusters(Sepal)"""'], {'fontsize': '(22)'}), "('Final Clusters(Sepal)', fontsize=22)\n", (4799, 4837), True, 'import matplotlib.pyplot as plt\n'), ((4844, 4854), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4852, 4854), True, 'import matplotlib.pyplot as plt\n'), ((4859, 4869), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4867, 4869), True, 'import matplotlib.pyplot as plt\n'), ((4971, 5017), 'numpy.array', 'np.array', (['[centers[seto][2], centers[seto][3]]'], {}), '([centers[seto][2], centers[seto][3]])\n', (4979, 5017), True, 'import numpy as np\n'), ((5036, 5082), 'numpy.array', 'np.array', (['[centers[vers][2], centers[vers][3]]'], {}), '([centers[vers][2], centers[vers][3]])\n', (5044, 5082), True, 'import numpy as np\n'), ((5101, 5147), 'numpy.array', 'np.array', (['[centers[virg][2], centers[virg][3]]'], {}), '([centers[virg][2], centers[virg][3]])\n', (5109, 5147), True, 'import numpy as np\n'), ((5544, 5562), 
'numpy.array', 'np.array', (['petal_df'], {}), '(petal_df)\n', (5552, 5562), True, 'import numpy as np\n'), ((5578, 5602), 'numpy.linspace', 'np.linspace', (['(0.5)', '(7)', '(150)'], {}), '(0.5, 7, 150)\n', (5589, 5602), True, 'import numpy as np\n'), ((5612, 5635), 'numpy.linspace', 'np.linspace', (['(-1)', '(4)', '(150)'], {}), '(-1, 4, 150)\n', (5623, 5635), True, 'import numpy as np\n'), ((5645, 5664), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (5656, 5664), True, 'import numpy as np\n'), ((5675, 5718), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['p_mean_clus1', 'cov_seto'], {}), '(p_mean_clus1, cov_seto)\n', (5694, 5718), False, 'from scipy.stats import multivariate_normal\n'), ((5730, 5773), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['p_mean_clus2', 'cov_vers'], {}), '(p_mean_clus2, cov_vers)\n', (5749, 5773), False, 'from scipy.stats import multivariate_normal\n'), ((5783, 5826), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['p_mean_clus3', 'cov_virg'], {}), '(p_mean_clus3, cov_virg)\n', (5802, 5826), False, 'from scipy.stats import multivariate_normal\n'), ((5838, 5862), 'numpy.empty', 'np.empty', (['(X.shape + (2,))'], {}), '(X.shape + (2,))\n', (5846, 5862), True, 'import numpy as np\n'), ((5910, 5938), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (5920, 5938), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6054), 'matplotlib.pyplot.scatter', 'plt.scatter', (['petal_df[:, 0]', 'petal_df[:, 1]'], {'marker': '"""o"""'}), "(petal_df[:, 0], petal_df[:, 1], marker='o')\n", (6010, 6054), True, 'import matplotlib.pyplot as plt\n'), ((6245, 6262), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (6253, 6262), True, 'import matplotlib.pyplot as plt\n'), ((6314, 6353), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal Length"""'], {'fontsize': '(16)'}), "('Petal Length', fontsize=16)\n", (6324, 6353), True, 
'import matplotlib.pyplot as plt\n'), ((6358, 6396), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal Width"""'], {'fontsize': '(16)'}), "('Petal Width', fontsize=16)\n", (6368, 6396), True, 'import matplotlib.pyplot as plt\n'), ((6401, 6448), 'matplotlib.pyplot.title', 'plt.title', (['"""Final Clusters(Petal)"""'], {'fontsize': '(22)'}), "('Final Clusters(Petal)', fontsize=22)\n", (6410, 6448), True, 'import matplotlib.pyplot as plt\n'), ((6453, 6463), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6461, 6463), True, 'import matplotlib.pyplot as plt\n'), ((6468, 6478), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6476, 6478), True, 'import matplotlib.pyplot as plt\n'), ((429, 451), 'numpy.transpose', 'np.transpose', (['sepal_df'], {}), '(sepal_df)\n', (441, 451), True, 'import numpy as np\n'), ((471, 493), 'numpy.transpose', 'np.transpose', (['sepal_df'], {}), '(sepal_df)\n', (483, 493), True, 'import numpy as np\n'), ((513, 535), 'numpy.transpose', 'np.transpose', (['sepal_df'], {}), '(sepal_df)\n', (525, 535), True, 'import numpy as np\n'), ((1728, 1750), 'numpy.transpose', 'np.transpose', (['petal_df'], {}), '(petal_df)\n', (1740, 1750), True, 'import numpy as np\n'), ((1770, 1792), 'numpy.transpose', 'np.transpose', (['petal_df'], {}), '(petal_df)\n', (1782, 1792), True, 'import numpy as np\n'), ((1812, 1834), 'numpy.transpose', 'np.transpose', (['petal_df'], {}), '(petal_df)\n', (1824, 1834), True, 'import numpy as np\n'), ((3287, 3321), 'numpy.where', 'np.where', (['(values == searchval_seto)'], {}), '(values == searchval_seto)\n', (3295, 3321), True, 'import numpy as np\n'), ((3339, 3373), 'numpy.where', 'np.where', (['(values == searchval_vers)'], {}), '(values == searchval_vers)\n', (3347, 3373), True, 'import numpy as np\n'), ((3391, 3425), 'numpy.where', 'np.where', (['(values == searchval_virg)'], {}), '(values == searchval_virg)\n', (3399, 3425), True, 'import numpy as np\n'), ((3763, 3780), 'numpy.array', 'np.array', 
(['seto_df'], {}), '(seto_df)\n', (3771, 3780), True, 'import numpy as np\n'), ((3818, 3835), 'numpy.array', 'np.array', (['vers_df'], {}), '(vers_df)\n', (3826, 3835), True, 'import numpy as np\n'), ((3873, 3890), 'numpy.array', 'np.array', (['virg_df'], {}), '(virg_df)\n', (3881, 3890), True, 'import numpy as np\n'), ((5394, 5411), 'numpy.array', 'np.array', (['seto_df'], {}), '(seto_df)\n', (5402, 5411), True, 'import numpy as np\n'), ((5449, 5466), 'numpy.array', 'np.array', (['vers_df'], {}), '(vers_df)\n', (5457, 5466), True, 'import numpy as np\n'), ((5504, 5521), 'numpy.array', 'np.array', (['virg_df'], {}), '(virg_df)\n', (5512, 5521), True, 'import numpy as np\n')] |
# -----------------------------------------------------------------------------------------------------------
# Funções auxiliares para predições
# -----------------------------------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
createPredTable(dic_ŷ_train :: dict, dic_ŷ_test :: dict,
                train :: dataframe, test :: dataframe)
Retorna um dataframe com as coordenadas e as predições de cada modelo treinado.
Parâmetros:
- dic_ŷ_train : dicionário com as predições de cada modelo para o conjunto de treino
- dic_ŷ_test : dicionário com as predições de cada modelo para o conjunto de teste
- train : dataframe (t, p) representativo dos dados de treino
- test : dataframe (n-t, p) representativo dos dados de teste
Retorna:
- df_pred : dataframe(n, 9) com as coordenadas e as predições de cada modelo
"""
def createPredTable(dic_ŷ_train, dic_ŷ_test, train, test):
    """
    Assemble a dataframe holding the X/Y coordinates of every sample
    (train rows first, then test rows) plus one column per trained model
    with that model's predicted labels.
    """
    # Coordinates of the full dataset, train block stacked on top of test.
    df_pred = pd.concat([train[['X', 'Y']], test[['X', 'Y']]])
    # One prediction column per model: train predictions followed by test ones.
    for model in dic_ŷ_test:
        df_pred[model] = list(dic_ŷ_train[model]) + list(dic_ŷ_test[model])
    return df_pred
# -----------------------------------------------------------------------------------------------------------
"""
createMissClassifTable(df_pred :: dataframe, y_train :: narray, y_test :: narray)
Retorna um dataframe com as coordenadas e as inconsistências entre o mapa geológico e cada mapa preditivo.
As colunas de inconsistências por modelo são binárias, de modo que 1 simboliza inconsistência entre os mapas.
Parâmetros:
- df_pred : dataframe (n, 9) representativo das predições de cada modelo
- y_train : narray (t, ) representativo dos labels de treino
- y_test : narray (n-t, ) representativo dos labels de teste
Retorna:
- df_miss : dataframe(n, 9) com as coordenadas e as inconsistências apresentadas por cada modelo
"""
def createMissClassifTable(df_pred, y_train, y_test):
    """
    Build a dataframe flagging inconsistencies between the true labels and
    each model's predictions.

    Parameters:
    - df_pred : dataframe (n, 9) with X/Y coordinates followed by one
      prediction column per model
    - y_train : array-like (t,) of training labels
    - y_test : array-like (n-t,) of test labels

    Returns:
    - df_miss : dataframe (n, 9) with coordinates and, per model, a binary
      column where 1 marks a mismatch with the true label.
    """
    model_list = df_pred.columns[2:]
    true_labels = list(y_train) + list(y_test)
    # .copy() detaches the slice from df_pred: assigning new columns to a
    # plain slice triggers pandas' SettingWithCopyWarning and may not stick.
    df_miss = df_pred[['X', 'Y']].copy()
    for model in model_list:
        diff = true_labels - df_pred[model]
        # Vectorized replacement of the element-wise 0/1 loop:
        # any non-zero difference (including NaN) counts as a misclassification.
        df_miss[model] = (diff != 0).astype(int)
    return df_miss
# -----------------------------------------------------------------------------------------------------------
"""
createPredProbaTable(pr_ŷ_train :: narray, pr_ŷ_test :: narray,
train :: dataframe, test :: dataframe)
Retorna um dataframe com as probabilidades preditas para cada uma das 6 classes (unidades).
Parâmetros:
- pr_ŷ_train : narray (t, 6) representando as predições probabilísticas para cada uma das classes
no conjunto de treino
- pr_ŷ_test : narray (n-t, 6) representando as predições probabilísticas para cada uma das classes
no conjunto de teste
- train : dataframe (t, p) representativo dos dados de treino
- test : dataframe (n-t, p) representativo dos dados de teste
Retorna:
- df_proba_pred : dataframe (n, 8) com as coordenadas e probabilidades para cada uma das classes
"""
def createPredProbaTable(pr_ŷ_train, pr_ŷ_test, train, test):
    """
    Build a dataframe with the predicted probability of each of the 6
    lithological classes (units) for every sample.

    Parameters:
    - pr_ŷ_train : ndarray (t, 6) of class probabilities for the train set
    - pr_ŷ_test : ndarray (n-t, 6) of class probabilities for the test set
    - train : dataframe (t, p) of training data
    - test : dataframe (n-t, p) of test data

    Returns:
    - df_proba_pred : dataframe (n, 8) with X/Y coordinates and one
      probability column per class.
    """
    # Column order must match the class order of the probability arrays.
    litho_list = ['MAcgg', 'PP3csbg', 'PP34b', 'PP4esjc', 'PP4esb', 'PP4egm']
    df_proba_pred = pd.concat([train[['X', 'Y']], test[['X', 'Y']]])
    pr_ŷ = np.concatenate([pr_ŷ_train, pr_ŷ_test])
    # enumerate replaces the original manual index counter.
    for i, litho in enumerate(litho_list):
        df_proba_pred[litho] = pr_ŷ[:, i]
    return df_proba_pred
# -----------------------------------------------------------------------------------------------------------
"""
InformationEntropy(pr_ŷ_train :: narray, pr_ŷ_test :: narray,
train :: dataframe, test :: dataframe)
Retorna um dataframe com as coordenadas e valores de entropia da informação (de Shannon). Probabilidades
nulas são ignoradas para o cálculo da entropia.
Parâmetros:
- pr_ŷ_train : narray (t, 6) representando as predições probabilísticas para cada uma das classes
do conjunto de treino
- pr_ŷ_test : narray (n-t, 6) representando as predições probabilísticas para cada uma das classes
do conjunto de teste
- train : dataframe (t, p) representativo dos dados de treino
- test : dataframe (n-t, p) representativo dos dados de teste
Retorna:
- df_entropy : dataframe(n, 3) com as coordenadas e entropia
"""
def InformationEntropy(pr_ŷ_train, pr_ŷ_test, train, test):
    """
    Compute the Shannon information entropy of the predicted class
    probabilities for every sample; zero probabilities are skipped
    (their entropy contribution is taken as 0).

    Returns a dataframe (n, 3) with the X/Y coordinates and an
    'ENTROPY' column.
    """
    df_entropy = pd.concat([train[['X', 'Y']], test[['X', 'Y']]])
    pr_ŷ = np.concatenate([pr_ŷ_train, pr_ŷ_test])
    # H = -sum(p * log2(p)) over the non-zero probabilities of each row.
    df_entropy['ENTROPY'] = [
        -sum(p * np.log2(p) for p in row if p != 0)
        for row in pr_ŷ
    ]
    return df_entropy
| [
"numpy.log2",
"pandas.concat",
"numpy.concatenate"
] | [((1084, 1122), 'pandas.concat', 'pd.concat', (['[train_coords, test_coords]'], {}), '([train_coords, test_coords])\n', (1093, 1122), True, 'import pandas as pd\n'), ((3661, 3699), 'pandas.concat', 'pd.concat', (['[train_coords, test_coords]'], {}), '([train_coords, test_coords])\n', (3670, 3699), True, 'import pandas as pd\n'), ((3712, 3751), 'numpy.concatenate', 'np.concatenate', (['[pr_ŷ_train, pr_ŷ_test]'], {}), '([pr_ŷ_train, pr_ŷ_test])\n', (3726, 3751), True, 'import numpy as np\n'), ((4881, 4919), 'pandas.concat', 'pd.concat', (['[train_coords, test_coords]'], {}), '([train_coords, test_coords])\n', (4890, 4919), True, 'import pandas as pd\n'), ((4933, 4972), 'numpy.concatenate', 'np.concatenate', (['[pr_ŷ_train, pr_ŷ_test]'], {}), '([pr_ŷ_train, pr_ŷ_test])\n', (4947, 4972), True, 'import numpy as np\n'), ((5180, 5190), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (5187, 5190), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
from habitat_baselines.common.utils import Flatten
from habitat_baselines.rl.models.simple_cnn import SimpleCNN
class Contiguous(nn.Module):
    r"""Pass-through module that returns its input in contiguous memory
    layout (a no-op when the tensor is already contiguous).
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x.contiguous()
class SimpleAllCNN(SimpleCNN):
    r"""A Simple 3-Conv CNN followed by a fully connected layer. Takes in
    observations and produces an embedding of the rgb and/or depth components
    if they are present in the provided observations.
    Args:
        observation_space: The observation_space of the agent
        output_size: The size of the embedding vector
    """

    def __init__(self, observation_space, output_size):
        # Deliberately bypass SimpleCNN.__init__ (only nn.Module is set up
        # here); this class builds its own conv stack in _init_model below.
        nn.Module.__init__(self)
        # Channel count contributed by each modality (0 when absent).
        if "rgb" in observation_space.spaces:
            self._n_input_rgb = observation_space.spaces["rgb"].shape[2]
        else:
            self._n_input_rgb = 0
        if "depth" in observation_space.spaces:
            self._n_input_depth = observation_space.spaces["depth"].shape[2]
        else:
            self._n_input_depth = 0
        # Spatial (H, W) dimensions of the visual input; RGB wins when both
        # modalities are present. NOTE(review): this assumes rgb and depth
        # share the same spatial size — confirm against the env config.
        cnn_dims = None
        if self._n_input_rgb > 0:
            cnn_dims = np.array(
                observation_space.spaces["rgb"].shape[:2], dtype=np.float32
            )
        elif self._n_input_depth > 0:
            cnn_dims = np.array(
                observation_space.spaces["depth"].shape[:2], dtype=np.float32
            )
        self._init_model(cnn_dims, output_size)

    def _init_model(self, cnn_dims, output_size):
        r"""Build the 3-conv + linear embedding network.

        cnn_dims: initial cnn dimensions (H, W) of the visual input.
        """
        # No visual input at all (is_blind is inherited from SimpleCNN):
        # fall back to an empty Sequential so forward is a no-op.
        if self.is_blind:
            self.cnn = nn.Sequential()
            return
        # kernel size for different CNN layers
        self._cnn_layers_kernel_size = [(8, 8), (4, 4), (3, 3)]
        # strides for different CNN layers
        self._cnn_layers_stride = [(4, 4), (2, 2), (1, 1)]
        # Track the spatial output size through each conv layer (padding 0,
        # dilation 1) so the Linear layer below gets the correct input size.
        # _conv_output_dim is inherited from SimpleCNN.
        for kernel_size, stride in zip(
            self._cnn_layers_kernel_size, self._cnn_layers_stride
        ):
            cnn_dims = self._conv_output_dim(
                dimension=cnn_dims,
                padding=np.array([0, 0], dtype=np.float32),
                dilation=np.array([1, 1], dtype=np.float32),
                kernel_size=np.array(kernel_size, dtype=np.float32),
                stride=np.array(stride, dtype=np.float32),
            )
        # First conv consumes rgb and depth stacked along the channel axis
        # (in_channels is their sum).
        self.cnn = nn.Sequential(
            nn.Conv2d(
                in_channels=self._n_input_rgb + self._n_input_depth,
                out_channels=32,
                kernel_size=self._cnn_layers_kernel_size[0],
                stride=self._cnn_layers_stride[0],
            ),
            nn.ReLU(True),
            nn.Conv2d(
                in_channels=32,
                out_channels=64,
                kernel_size=self._cnn_layers_kernel_size[1],
                stride=self._cnn_layers_stride[1],
            ),
            nn.ReLU(True),
            nn.Conv2d(
                in_channels=64,
                out_channels=32,
                kernel_size=self._cnn_layers_kernel_size[2],
                stride=self._cnn_layers_stride[2],
            ),
            # Permuted NHWC->NCHW inputs are non-contiguous; make them
            # contiguous before flattening.
            Contiguous(),
            Flatten(),
            nn.Linear(32 * cnn_dims[0] * cnn_dims[1], output_size),
            nn.ReLU(True),
        )
        # Weight initialization inherited from SimpleCNN.
        self.layer_init()
class SimpleDepthCNN(SimpleAllCNN):
    r"""Depth-only variant of SimpleAllCNN: the network is built from, and
    fed with, only the "depth" observation, whatever other modalities the
    observation space provides.
    """

    def __init__(self, observation_space, output_size):
        nn.Module.__init__(self)
        assert "depth" in observation_space.spaces, "Depth input required to use SimpleDepthCNN"
        depth_shape = observation_space.spaces["depth"].shape
        # Force the RGB channel count to zero so only depth is wired in.
        self._n_input_depth = depth_shape[2]
        self._n_input_rgb = 0
        self._init_model(
            np.array(depth_shape[:2], dtype=np.float32), output_size
        )

    def forward(self, observations):
        # Reorder NHWC observations to the NCHW layout the conv stack expects.
        return self.cnn(observations["depth"].permute(0, 3, 1, 2))
class SimpleRGBCNN(SimpleAllCNN):
    r"""RGB-only variant of SimpleAllCNN: the network is built from, and
    fed with, only the "rgb" observation, whatever other modalities the
    observation space provides.
    """

    def __init__(self, observation_space, output_size):
        nn.Module.__init__(self)
        assert "rgb" in observation_space.spaces, "RGB input required to use SimpleRGBCNN"
        rgb_shape = observation_space.spaces["rgb"].shape
        # Force the depth channel count to zero so only RGB is wired in.
        self._n_input_depth = 0
        self._n_input_rgb = rgb_shape[2]
        self._init_model(
            np.array(rgb_shape[:2], dtype=np.float32), output_size
        )

    def forward(self, observations):
        # Reorder NHWC observations to NCHW, then scale 0-255 pixel values
        # into [0, 1] before feeding the conv stack.
        rgb = observations["rgb"].permute(0, 3, 1, 2)
        return self.cnn(rgb / 255.0)
"habitat_baselines.common.utils.Flatten",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"numpy.array",
"torch.nn.Linear",
"torch.nn.Module.__init__"
] | [((845, 869), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (863, 869), True, 'import torch.nn as nn\n'), ((3679, 3703), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (3697, 3703), True, 'import torch.nn as nn\n'), ((3948, 4019), 'numpy.array', 'np.array', (["observation_space.spaces['depth'].shape[:2]"], {'dtype': 'np.float32'}), "(observation_space.spaces['depth'].shape[:2], dtype=np.float32)\n", (3956, 4019), True, 'import numpy as np\n'), ((4564, 4588), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (4582, 4588), True, 'import torch.nn as nn\n'), ((4825, 4894), 'numpy.array', 'np.array', (["observation_space.spaces['rgb'].shape[:2]"], {'dtype': 'np.float32'}), "(observation_space.spaces['rgb'].shape[:2], dtype=np.float32)\n", (4833, 4894), True, 'import numpy as np\n'), ((1295, 1364), 'numpy.array', 'np.array', (["observation_space.spaces['rgb'].shape[:2]"], {'dtype': 'np.float32'}), "(observation_space.spaces['rgb'].shape[:2], dtype=np.float32)\n", (1303, 1364), True, 'import numpy as np\n'), ((1764, 1779), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1777, 1779), True, 'import torch.nn as nn\n'), ((2524, 2693), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(self._n_input_rgb + self._n_input_depth)', 'out_channels': '(32)', 'kernel_size': 'self._cnn_layers_kernel_size[0]', 'stride': 'self._cnn_layers_stride[0]'}), '(in_channels=self._n_input_rgb + self._n_input_depth, out_channels\n =32, kernel_size=self._cnn_layers_kernel_size[0], stride=self.\n _cnn_layers_stride[0])\n', (2533, 2693), True, 'import torch.nn as nn\n'), ((2776, 2789), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2783, 2789), True, 'import torch.nn as nn\n'), ((2803, 2930), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': 'self._cnn_layers_kernel_size[1]', 'stride': 'self._cnn_layers_stride[1]'}), 
'(in_channels=32, out_channels=64, kernel_size=self.\n _cnn_layers_kernel_size[1], stride=self._cnn_layers_stride[1])\n', (2812, 2930), True, 'import torch.nn as nn\n'), ((3018, 3031), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3025, 3031), True, 'import torch.nn as nn\n'), ((3045, 3172), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(32)', 'kernel_size': 'self._cnn_layers_kernel_size[2]', 'stride': 'self._cnn_layers_stride[2]'}), '(in_channels=64, out_channels=32, kernel_size=self.\n _cnn_layers_kernel_size[2], stride=self._cnn_layers_stride[2])\n', (3054, 3172), True, 'import torch.nn as nn\n'), ((3286, 3295), 'habitat_baselines.common.utils.Flatten', 'Flatten', ([], {}), '()\n', (3293, 3295), False, 'from habitat_baselines.common.utils import Flatten\n'), ((3309, 3363), 'torch.nn.Linear', 'nn.Linear', (['(32 * cnn_dims[0] * cnn_dims[1])', 'output_size'], {}), '(32 * cnn_dims[0] * cnn_dims[1], output_size)\n', (3318, 3363), True, 'import torch.nn as nn\n'), ((3377, 3390), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3384, 3390), True, 'import torch.nn as nn\n'), ((1456, 1527), 'numpy.array', 'np.array', (["observation_space.spaces['depth'].shape[:2]"], {'dtype': 'np.float32'}), "(observation_space.spaces['depth'].shape[:2], dtype=np.float32)\n", (1464, 1527), True, 'import numpy as np\n'), ((2238, 2272), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float32'}), '([0, 0], dtype=np.float32)\n', (2246, 2272), True, 'import numpy as np\n'), ((2299, 2333), 'numpy.array', 'np.array', (['[1, 1]'], {'dtype': 'np.float32'}), '([1, 1], dtype=np.float32)\n', (2307, 2333), True, 'import numpy as np\n'), ((2363, 2402), 'numpy.array', 'np.array', (['kernel_size'], {'dtype': 'np.float32'}), '(kernel_size, dtype=np.float32)\n', (2371, 2402), True, 'import numpy as np\n'), ((2427, 2461), 'numpy.array', 'np.array', (['stride'], {'dtype': 'np.float32'}), '(stride, dtype=np.float32)\n', (2435, 2461), True, 'import 
numpy as np\n')] |
import unittest
import os, csv, json
import matplotlib.image as mpimg
import numpy as np
from numpy.testing import assert_array_equal
from skimage.measure import compare_ssim as ssim
from src.ea import evolutionary_algorithm
from src.ea.chromosome import Chromosome
class TestEA(unittest.TestCase):
    """Tests for the evolutionary algorithm (EA) over classifier images.

    setUp wires two RGB fixture images (classes 16 and 1), one grayscale
    fixture, a pretrained Keras model and its label dictionary, plus the EA
    parameters from config.json. The tests then exercise the fitness
    function, the mutation operator (RGB and grayscale), and the champion
    selection against an external API's verdict.
    """

    def setUp(self):
        # Resolve all fixture paths relative to this test file.
        rel_path = os.path.dirname(__file__)
        image_path = os.path.join(rel_path, "test_images/00000.png")
        self.img_class_16 = mpimg.imread(image_path)
        # Keep only the first three channels (drops a possible alpha channel).
        self.img_class_16 = self.img_class_16[:, :, :3]
        image_path = os.path.join(rel_path, "test_images/00000_gray.png")
        self.img_class_16_gray = mpimg.imread(image_path)
        # Keep a single channel for the grayscale fixture.
        self.img_class_16_gray = self.img_class_16_gray[:, :, :1]
        image_path = os.path.join(rel_path, "test_images/00001.png")
        self.img_class_01 = mpimg.imread(image_path)
        self.img_class_01 = self.img_class_01[:, :, :3]
        # The EA expects a list of model paths.
        model = [os.path.join(rel_path, "../res/models/alex_e5_rgb/model.h5")]
        # Mapping from class index to human-readable label.
        with open(os.path.join(rel_path, "../res/index_label_dict.csv"), "r", encoding="utf8") as f:
            reader = csv.reader(f)
            index_to_label = {rows[0]: rows[1] for rows in reader}
        with open(os.path.join(rel_path, "../config.json"), "r", encoding="utf8") as f:
            data = json.load(f)
            ea_params = data["ea_params_other"]
        # EA instance over 3-channel (RGB) images.
        self.ea = evolutionary_algorithm.EvolutionaryAlgorithm(0, model, index_to_label,
                                                     ea_params=ea_params,
                                                     color_range=3)
        # Short run keeps the tests fast.
        self.ea._max_gen = 5
        # EA instance over 1-channel (grayscale) images.
        self.ea_gray = evolutionary_algorithm.EvolutionaryAlgorithm(0, model, index_to_label,
                                                          ea_params=ea_params,
                                                          color_range=1)
        self.ea_gray._max_gen = 5

    def test_fitness(self):
        # A correct target class on a matching image yields fitness near 1.
        self.ea.class_index = 16
        fitness = self.ea.fitness(self.img_class_16)
        self.assertGreater(fitness, 0.9)
        # A wrong target class yields near-zero fitness.
        self.ea.class_index = 17
        fitness = self.ea.fitness(self.img_class_16)
        self.assertLess(fitness, 0.001)
        # With _original set to the very same image the fitness collapses —
        # presumably a similarity penalty against the original; confirm in
        # EvolutionaryAlgorithm.fitness.
        self.ea.class_index = 16
        self.ea._original = self.img_class_16
        fitness = self.ea.fitness(self.img_class_16)
        self.assertLess(fitness, 1.0 / 1000)

    def test_mutate_rgb(self):
        # Mutation must change the RGB image (SSIM < 1.0) but not beyond
        # recognition (SSIM > 0.5).
        self.ea.class_index = 16
        chrom = Chromosome(self.img_class_16)
        mutated_chrom = self.ea._mutate(0, chrom)
        img_mutated_16 = mutated_chrom.data
        image_diff = ssim(img_mutated_16, self.img_class_16,
                          multichannel=True, data_range=1.0)
        self.assertGreater(image_diff, 0.5)
        self.assertLess(image_diff, 1.0)

    def test_mutate_gray(self):
        # Same bounds as the RGB case, on the single-channel fixture
        # (squeeze removes the channel axis for single-channel SSIM).
        self.ea_gray.class_index = 16
        chrom = Chromosome(self.img_class_16_gray)
        mutated_chrom = self.ea_gray._mutate(0, chrom)
        img_mutated_16 = mutated_chrom.data
        image_diff = ssim(np.squeeze(img_mutated_16), np.squeeze(self.img_class_16_gray),
                          multichannel=False, data_range=1.0)
        self.assertGreater(image_diff, 0.5)
        self.assertLess(image_diff, 1.0)

    def test_evaluate_api(self):
        self.ea.class_index = 16
        champion = Chromosome(self.img_class_16)
        api_champ = Chromosome(self.img_class_01)
        # case 1:
        # new champion is better than old champion
        api_champ.api_fitness = -1
        champion.fitness = 0.9
        new_champ, reset = self.ea._evaluate_api(champion, api_champ)
        self.assertFalse(reset)
        assert_array_equal(new_champ.data, self.img_class_16)
        # case 2:
        # new champion is only slightly worse than old champion
        api_champ.api_fitness = 0.999999999
        new_champ, reset = self.ea._evaluate_api(champion, api_champ)
        self.assertIsNone(new_champ)
        self.assertFalse(reset)
        # case 3:
        # new champion is way worse than api champion
        api_champ.api_fitness = 0.999999999
        champion.data = self.img_class_01
        api_champ.data = self.img_class_16
        new_champ, reset = self.ea._evaluate_api(champion, api_champ)
        self.assertIsNone(new_champ)
        self.assertTrue(reset)
| [
"src.ea.evolutionary_algorithm.EvolutionaryAlgorithm",
"skimage.measure.compare_ssim",
"matplotlib.image.imread",
"os.path.join",
"numpy.squeeze",
"os.path.dirname",
"src.ea.chromosome.Chromosome",
"json.load",
"csv.reader",
"numpy.testing.assert_array_equal"
] | [((343, 368), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (358, 368), False, 'import os, csv, json\n'), ((390, 437), 'os.path.join', 'os.path.join', (['rel_path', '"""test_images/00000.png"""'], {}), "(rel_path, 'test_images/00000.png')\n", (402, 437), False, 'import os, csv, json\n'), ((466, 490), 'matplotlib.image.imread', 'mpimg.imread', (['image_path'], {}), '(image_path)\n', (478, 490), True, 'import matplotlib.image as mpimg\n'), ((569, 621), 'os.path.join', 'os.path.join', (['rel_path', '"""test_images/00000_gray.png"""'], {}), "(rel_path, 'test_images/00000_gray.png')\n", (581, 621), False, 'import os, csv, json\n'), ((655, 679), 'matplotlib.image.imread', 'mpimg.imread', (['image_path'], {}), '(image_path)\n', (667, 679), True, 'import matplotlib.image as mpimg\n'), ((769, 816), 'os.path.join', 'os.path.join', (['rel_path', '"""test_images/00001.png"""'], {}), "(rel_path, 'test_images/00001.png')\n", (781, 816), False, 'import os, csv, json\n'), ((845, 869), 'matplotlib.image.imread', 'mpimg.imread', (['image_path'], {}), '(image_path)\n', (857, 869), True, 'import matplotlib.image as mpimg\n'), ((1399, 1509), 'src.ea.evolutionary_algorithm.EvolutionaryAlgorithm', 'evolutionary_algorithm.EvolutionaryAlgorithm', (['(0)', 'model', 'index_to_label'], {'ea_params': 'ea_params', 'color_range': '(3)'}), '(0, model, index_to_label,\n ea_params=ea_params, color_range=3)\n', (1443, 1509), False, 'from src.ea import evolutionary_algorithm\n'), ((1684, 1794), 'src.ea.evolutionary_algorithm.EvolutionaryAlgorithm', 'evolutionary_algorithm.EvolutionaryAlgorithm', (['(0)', 'model', 'index_to_label'], {'ea_params': 'ea_params', 'color_range': '(1)'}), '(0, model, index_to_label,\n ea_params=ea_params, color_range=1)\n', (1728, 1794), False, 'from src.ea import evolutionary_algorithm\n'), ((2503, 2532), 'src.ea.chromosome.Chromosome', 'Chromosome', (['self.img_class_16'], {}), '(self.img_class_16)\n', (2513, 2532), False, 'from 
src.ea.chromosome import Chromosome\n'), ((2648, 2722), 'skimage.measure.compare_ssim', 'ssim', (['img_mutated_16', 'self.img_class_16'], {'multichannel': '(True)', 'data_range': '(1.0)'}), '(img_mutated_16, self.img_class_16, multichannel=True, data_range=1.0)\n', (2652, 2722), True, 'from skimage.measure import compare_ssim as ssim\n'), ((2921, 2955), 'src.ea.chromosome.Chromosome', 'Chromosome', (['self.img_class_16_gray'], {}), '(self.img_class_16_gray)\n', (2931, 2955), False, 'from src.ea.chromosome import Chromosome\n'), ((3379, 3408), 'src.ea.chromosome.Chromosome', 'Chromosome', (['self.img_class_16'], {}), '(self.img_class_16)\n', (3389, 3408), False, 'from src.ea.chromosome import Chromosome\n'), ((3429, 3458), 'src.ea.chromosome.Chromosome', 'Chromosome', (['self.img_class_01'], {}), '(self.img_class_01)\n', (3439, 3458), False, 'from src.ea.chromosome import Chromosome\n'), ((3705, 3758), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['new_champ.data', 'self.img_class_16'], {}), '(new_champ.data, self.img_class_16)\n', (3723, 3758), False, 'from numpy.testing import assert_array_equal\n'), ((944, 1004), 'os.path.join', 'os.path.join', (['rel_path', '"""../res/models/alex_e5_rgb/model.h5"""'], {}), "(rel_path, '../res/models/alex_e5_rgb/model.h5')\n", (956, 1004), False, 'import os, csv, json\n'), ((1129, 1142), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1139, 1142), False, 'import os, csv, json\n'), ((1318, 1330), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1327, 1330), False, 'import os, csv, json\n'), ((3081, 3107), 'numpy.squeeze', 'np.squeeze', (['img_mutated_16'], {}), '(img_mutated_16)\n', (3091, 3107), True, 'import numpy as np\n'), ((3109, 3143), 'numpy.squeeze', 'np.squeeze', (['self.img_class_16_gray'], {}), '(self.img_class_16_gray)\n', (3119, 3143), True, 'import numpy as np\n'), ((1025, 1078), 'os.path.join', 'os.path.join', (['rel_path', '"""../res/index_label_dict.csv"""'], {}), "(rel_path, 
'../res/index_label_dict.csv')\n", (1037, 1078), False, 'import os, csv, json\n'), ((1229, 1269), 'os.path.join', 'os.path.join', (['rel_path', '"""../config.json"""'], {}), "(rel_path, '../config.json')\n", (1241, 1269), False, 'import os, csv, json\n')] |
#!/usr/bin/python
"""
Utility script with functions used in lr classifier and cnn classifier.
For data preparation:
- get_train_test(): from dataframe, and specified columns, get train and test data and labels
- tokenize_text(): tokenize a list of texts, and return tokenized texts
- pad_texts(): add padding to texts, for them to have equal lengths
For CNN model output:
- int_to_labels(): turn enumerated labels into corresponding text labels, e.g. 0 = season1,
- classification_matrix(): plot classification matrix, i.e. confusion matrix
- unique_path(): enumerates filename, if file exists already
- save_model_info(): save information of model layers and visualisation of model
- save_model_history(): save plot of model training history
- save_model_report(): save txt file of classification report
- save_model_matrix(): save classification matrix
"""
# LIBRARIES ------------
# Basics
import os
import numpy as np
import pandas as pd
from contextlib import redirect_stdout
# Sklearn ML
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
# Tensorflow CNN
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import plot_model
# Visualisations
import matplotlib.pyplot as plt
import seaborn as sns
# DATA PREPARATION --------------------------------------
def get_train_test(df, text_column, label_column, test_size):
    """
    Split a dataframe's documents and labels into train and test sets.

    Input:
    - df: dataframe holding documents and labels
    - text_column: name of the column of df containing the documents (X)
    - label_column: name of the column of df containing the labels (y)
    - test_size: fraction of the data reserved for the test split
    Returns:
    - X_train, X_test, y_train, y_test
    """
    # Pull the raw arrays out of the dataframe and hand them to sklearn;
    # the fixed random_state keeps splits reproducible across runs.
    return train_test_split(
        df[text_column].values,
        df[label_column].values,
        test_size=test_size,
        random_state=42,
    )
def binarize_labels(y_train, y_test):
    """
    One-hot (binary) encode the train and test labels.

    Input:
    - y_train, y_test: training and test labels as names
    Returns:
    - y_train_binary, y_test_binary: binarized label arrays
    - label_names: sorted list of the unique training label names
    """
    # Unique label names, sorted so their order matches the binarizer's classes.
    label_names = sorted(set(y_train))
    binarizer = LabelBinarizer()
    y_train_binary = binarizer.fit_transform(y_train)
    y_test_binary = binarizer.fit_transform(y_test)
    return y_train_binary, y_test_binary, label_names
def tokenize_texts(X_train, X_test, num_words):
    """
    Tokenize the documents and report the vocabulary size.

    Input:
    - X_train: array of texts used for training (also fits the tokenizer)
    - X_test: array of texts used for testing
    - num_words: maximum number of most common words to keep
    Returns:
    - tokenized training texts, tokenized test texts, vocabulary size,
      and the tokenizer's word -> index mapping
    """
    vectorizer = Tokenizer(num_words=num_words)
    # Fit on the training split only so the test split cannot leak vocabulary.
    vectorizer.fit_on_texts(X_train)
    word_index = vectorizer.word_index
    # +1 because index 0 is reserved by keras.
    vocab_size = len(word_index) + 1
    return (
        vectorizer.texts_to_sequences(X_train),
        vectorizer.texts_to_sequences(X_test),
        vocab_size,
        word_index,
    )
def pad_texts(X_train_toks, X_test_toks):
    """
    Zero-pad the tokenized texts so every document has the same length.

    The target length is the longest document across both splits; shorter
    documents get zeros appended at the end ('post' padding).

    Input:
    - X_train_toks: array of tokenized training texts
    - X_test_toks: array of tokenized test texts
    Returns:
    - padded training array, padded test array, and the common length
    """
    # Longest document over both splits determines the common length.
    max_len = max(max(map(len, X_train_toks)), max(map(len, X_test_toks)))
    padded_train = pad_sequences(X_train_toks, padding='post', maxlen=max_len)
    padded_test = pad_sequences(X_test_toks, padding='post', maxlen=max_len)
    return padded_train, padded_test, max_len
# CNN EMBEDDINGS --------------------------------------
def create_embedding_matrix(filepath, word_index, embedding_dim):
    """
    Read GloVe embeddings and build an embedding matrix for the vocabulary.

    Input:
    - filepath: path to the GloVe embeddings text file
    - word_index: word -> index mapping, extracted from the tokenizer
    - embedding_dim: dimension of the keras embedding and the GloVe vectors
    Returns:
    - matrix of shape (vocab_size, embedding_dim); rows for words absent
      from the GloVe file stay zero
    """
    vocab_size = len(word_index) + 1  # +1 because index 0 is reserved
    embedding_matrix = np.zeros((vocab_size, embedding_dim))
    # GloVe files are UTF-8; be explicit so the platform default encoding
    # (e.g. cp1252 on Windows) cannot corrupt the read.
    with open(filepath, encoding='utf-8') as f:
        for line in f:
            word, *vector = line.split()
            if word in word_index:
                idx = word_index[word]
                # Truncate to embedding_dim in case the file carries more
                # components than the keras embedding expects.
                embedding_matrix[idx] = np.array(
                    vector, dtype=np.float32)[:embedding_dim]
    return embedding_matrix
# CNN MODEL OUTPUTS --------------------------------------
def int_to_labels(actual_binary, predictions_binary, label_names):
    """
    Map binarised labels back to their original text names.

    Input:
    - actual_binary: array of true, binarised labels
    - predictions_binary: array of predicted label scores
    - label_names: sorted list of unique label names
    Returns:
    - two arrays with the corresponding original label names
    """
    # argmax recovers the integer class of each one-hot/score row.
    actual_idx = actual_binary.argmax(axis=1)
    pred_idx = predictions_binary.argmax(axis=1)
    # Look the integers up in the (sorted) name list.
    labels_actual = np.array([label_names[i] for i in actual_idx])
    labels_pred = np.array([label_names[i] for i in pred_idx])
    return labels_actual, labels_pred
def classification_matrix(actual, predictions):
    """
    Build a row-normalised confusion matrix and render it as a heatmap.

    Input:
    - actual: array of actual label names
    - predictions: array of predicted label names
    Returns:
    - the matplotlib figure containing the heatmap
    """
    # Row-normalised cross-tabulation of true vs predicted labels.
    confusion = pd.crosstab(
        actual,
        predictions,
        rownames=['Actual'],
        colnames=['Predicted'],
        normalize='index',
    )
    plt.figure(figsize=(10, 10))
    axis = sns.heatmap(confusion, annot=True, fmt=".2f", cbar=False)
    return axis.get_figure()
def unique_path(filepath):
    """
    Return a filepath that does not collide with an existing file.

    If filepath is free it is returned unchanged; otherwise a counter is
    inserted before the extension (name_1.ext, name_2.ext, ...) until an
    unused name is found.
    """
    if not os.path.exists(filepath):
        return filepath
    stem, extension = os.path.splitext(filepath)
    counter = 1
    candidate = "{}_{}{}".format(stem, counter, extension)
    # Keep counting until a free name is found.
    while os.path.exists(candidate):
        counter += 1
        candidate = "{}_{}{}".format(stem, counter, extension)
    return candidate
def save_model_info(model, output_directory, filename_summary, filename_plot):
    """
    Persist the model summary (.txt) and an architecture diagram (.png).

    Input:
    - model: compiled keras model
    - output_directory: path to the output directory
    - filename_summary: filename for the textual summary
    - filename_plot: filename for the architecture diagram
    """
    # model.summary() prints to stdout, so capture it into the file.
    summary_path = unique_path(os.path.join(output_directory, filename_summary))
    with open(summary_path, "w") as out_file:
        with redirect_stdout(out_file):
            model.summary()
    plot_path = unique_path(os.path.join(output_directory, filename_plot))
    plot_model(model, to_file=plot_path, show_shapes=True, show_layer_names=True)
def save_model_history(history, epochs, output_directory, filename):
    """
    Plot training/validation loss and accuracy over epochs and save it.

    Input:
    - history: keras training history object
    - epochs: number of epochs the model was trained on
    - output_directory: desired output directory
    - filename: name of the file to save the plot in
    """
    out_history = unique_path(os.path.join(output_directory, filename))
    epoch_axis = np.arange(0, epochs)
    plt.style.use("fivethirtyeight")
    plt.figure()
    # Same four curves as before, drawn in the same order.
    for metric, label in [("loss", "train_loss"),
                          ("val_loss", "val_loss"),
                          ("accuracy", "train_acc"),
                          ("val_accuracy", "val_acc")]:
        plt.plot(epoch_axis, history.history[metric], label=label)
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.tight_layout()
    plt.savefig(out_history)
def save_model_report(report, input_file, output_directory, filename):
    """
    Write the classification report to a text file in the output directory.

    Input:
    - report: model classification report (string)
    - input_file: name of the dataset the model was trained on (for the header)
    - output_directory: final output directory
    - filename: name of the file to save the report in
    """
    report_out = unique_path(os.path.join(output_directory, filename))
    header = f"Classification report for model trained on {input_file}:\n"
    with open(report_out, 'w', encoding='utf-8') as out_file:
        out_file.writelines(header)
        out_file.writelines(report)
def save_model_matrix(matrix, output_directory, filename):
    """
    Save the classification-matrix figure to the output directory.

    Input:
    - matrix: matplotlib figure of the classification matrix
    - output_directory: path to the output directory
    - filename: desired filename
    """
    destination = unique_path(os.path.join(output_directory, filename))
    matrix.savefig(destination)
if __name__=="__main__":
pass | [
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.utils.plot_model",
"numpy.array",
"numpy.arange",
"sklearn.preprocessing.LabelBinarizer",
"os.path.exists",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"contextlib.redirect_stdout"... | [((2120, 2180), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': '(42)'}), '(X, y, test_size=test_size, random_state=42)\n', (2136, 2180), False, 'from sklearn.model_selection import train_test_split\n'), ((2552, 2568), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (2566, 2568), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((3382, 3412), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'num_words'}), '(num_words=num_words)\n', (3391, 3412), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((4614, 4673), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X_train_toks'], {'padding': '"""post"""', 'maxlen': 'max_len'}), "(X_train_toks, padding='post', maxlen=max_len)\n", (4627, 4673), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((4691, 4749), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X_test_toks'], {'padding': '"""post"""', 'maxlen': 'max_len'}), "(X_test_toks, padding='post', maxlen=max_len)\n", (4704, 4749), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5347, 5384), 'numpy.zeros', 'np.zeros', (['(vocab_size, embedding_dim)'], {}), '((vocab_size, embedding_dim))\n', (5355, 5384), True, 'import numpy as np\n'), ((6732, 6755), 'numpy.array', 'np.array', (['labels_actual'], {}), '(labels_actual)\n', (6740, 6755), True, 'import numpy as np\n'), ((6774, 6795), 'numpy.array', 'np.array', (['labels_pred'], {}), '(labels_pred)\n', (6782, 6795), True, 'import numpy as np\n'), ((7136, 7237), 'pandas.crosstab', 'pd.crosstab', (['actual', 'predictions'], {'rownames': "['Actual']", 'colnames': "['Predicted']", 'normalize': '"""index"""'}), "(actual, predictions, rownames=['Actual'], colnames=['Predicted'\n ], normalize='index')\n", (7147, 7237), 
True, 'import pandas as pd\n'), ((7292, 7320), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (7302, 7320), True, 'import matplotlib.pyplot as plt\n'), ((7378, 7428), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '""".2f"""', 'cbar': '(False)'}), "(cm, annot=True, fmt='.2f', cbar=False)\n", (7389, 7428), True, 'import seaborn as sns\n'), ((9152, 9228), 'tensorflow.keras.utils.plot_model', 'plot_model', (['model'], {'to_file': 'out_plot', 'show_shapes': '(True)', 'show_layer_names': '(True)'}), '(model, to_file=out_plot, show_shapes=True, show_layer_names=True)\n', (9162, 9228), False, 'from tensorflow.keras.utils import plot_model\n'), ((9730, 9762), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (9743, 9762), True, 'import matplotlib.pyplot as plt\n'), ((9767, 9779), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9777, 9779), True, 'import matplotlib.pyplot as plt\n'), ((10114, 10153), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy"""'], {}), "('Training Loss and Accuracy')\n", (10123, 10153), True, 'import matplotlib.pyplot as plt\n'), ((10158, 10179), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (10168, 10179), True, 'import matplotlib.pyplot as plt\n'), ((10184, 10211), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), "('Loss/Accuracy')\n", (10194, 10211), True, 'import matplotlib.pyplot as plt\n'), ((10216, 10228), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10226, 10228), True, 'import matplotlib.pyplot as plt\n'), ((10233, 10251), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10249, 10251), True, 'import matplotlib.pyplot as plt\n'), ((10256, 10280), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_history'], {}), '(out_history)\n', (10267, 10280), True, 'import matplotlib.pyplot 
as plt\n'), ((7817, 7841), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (7831, 7841), False, 'import os\n'), ((8020, 8046), 'os.path.splitext', 'os.path.splitext', (['filepath'], {}), '(filepath)\n', (8036, 8046), False, 'import os\n'), ((8198, 8222), 'os.path.exists', 'os.path.exists', (['new_path'], {}), '(new_path)\n', (8212, 8222), False, 'import os\n'), ((8793, 8841), 'os.path.join', 'os.path.join', (['output_directory', 'filename_summary'], {}), '(output_directory, filename_summary)\n', (8805, 8841), False, 'import os\n'), ((9063, 9108), 'os.path.join', 'os.path.join', (['output_directory', 'filename_plot'], {}), '(output_directory, filename_plot)\n', (9075, 9108), False, 'import os\n'), ((9659, 9699), 'os.path.join', 'os.path.join', (['output_directory', 'filename'], {}), '(output_directory, filename)\n', (9671, 9699), False, 'import os\n'), ((9793, 9813), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (9802, 9813), True, 'import numpy as np\n'), ((9873, 9893), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (9882, 9893), True, 'import numpy as np\n'), ((9955, 9975), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (9964, 9975), True, 'import numpy as np\n'), ((10038, 10058), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (10047, 10058), True, 'import numpy as np\n'), ((10636, 10676), 'os.path.join', 'os.path.join', (['output_directory', 'filename'], {}), '(output_directory, filename)\n', (10648, 10676), False, 'import os\n'), ((11181, 11221), 'os.path.join', 'os.path.join', (['output_directory', 'filename'], {}), '(output_directory, filename)\n', (11193, 11221), False, 'import os\n'), ((8938, 8959), 'contextlib.redirect_stdout', 'redirect_stdout', (['file'], {}), '(file)\n', (8953, 8959), False, 'from contextlib import redirect_stdout\n'), ((5595, 5629), 'numpy.array', 'np.array', (['vector'], {'dtype': 'np.float32'}), '(vector, 
dtype=np.float32)\n', (5603, 5629), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 11 15:52:17 2022
@author: sylvain
"""
import numpy as np
from calendar import monthrange
import pandas as pd
# Hypotheses
eta_pp = 0.7 # Average efficiency of the plunger pumps
eta_surpr = 0.4 # average efficiency of the "surpresseur" (booster) pumps
C_day = 250 # average daily water consumption in m³
depth = 70 # Depth of the wells
p_nom = 5e5 # nominal absolute pressure of the water distribution
V_dot_nom = C_day/10 # nominal flow rate of the pumps in m³/h
n_days=150 # number of days in the year where the 250m³ are needed (shadowed inside the monthly loop below)
# Average daily consumption:
# lifting energy (rho*g*depth*volume / eta_pp) plus pressurisation above 1 bar
C_joules = 1000 * 9.81 * depth * C_day / eta_pp + C_day * (p_nom - 1E5)/eta_surpr
# in kWh:
C_kwh = C_joules/3.6e6
print('daily electricity consumption for water pumping:' + str(C_kwh) + ' kWh')
print('monthly electricity consumption for water pumping:' + str(30*C_kwh) + ' kWh')
print('yearly electricity consumption for water pumping:' + str(150*C_kwh) + ' kWh')
# Nominal electricity consumption of the pumps:
V_dot_nom_m3s = V_dot_nom/3600
# NOTE(review): the pressurisation term divides by eta_pp here, whereas the
# daily-consumption formula above divides it by eta_surpr — confirm which
# efficiency is intended.
W_dot_nom = (1000 * 9.81 * depth * V_dot_nom_m3s / eta_pp + V_dot_nom_m3s * (p_nom - 1E5)/eta_pp )/1000
print('Nominal power of the pumps: ' + str(W_dot_nom) + ' kW')
# Ratio between water consumption and electricity consumption:
water_elec_cost = C_kwh/C_day
print('Specific electricity of water: ' + str(water_elec_cost) + ' kWh/m³')
# Reference daily load shapes (lighting, water use in wet/dry season)
daily_profiles = pd.read_csv('PV_casamance - daily profiles.csv',index_col=0)
daily_profiles.plot()
# Hourly frames covering the whole of 2015
profile = pd.DataFrame(index=pd.date_range(start='2015-01-01 00:00',end='2015-12-31 23:59',freq='1h'))
water = pd.Series(index=pd.date_range(start='2015-01-01 00:00',end='2015-12-31 23:59',freq='1h'))
# Monthly electricity consumption (kWh), January..December
conso = pd.Series(index=range(1,13),data=[8580, 10316, 7429, 16165, 10114, 5089, 2851, 2108, 2138, 3463, 6492, 8500])
profile['Lighting and others'] = 0
profile['Plunger Pump'] = 0
profile['Plunger Pump day'] = 0
profile['Surpresseurs'] = 0
profile_flat = profile
conso_base = 2100  # monthly baseline (non-pumping) consumption in kWh
for m in range(1,13):
    n_days = monthrange(2015, m)[1]
    # Electricity attributable to pumping this month, converted to a water volume
    conso_pp = conso[m] - conso_base
    idx_month = (profile.index.month == m)
    conso_w_month = conso_pp/water_elec_cost
    print('Water consumption for month ' + str(m) + ': ' + str(conso_w_month) + ' m³')
    conso_w_day = conso_w_month/n_days
    n_hours_pumping = conso_w_day/V_dot_nom
    conso_left = conso_w_day
    daily_cons = np.zeros(24)
    hh = 0
    # Spread the daily pumping symmetrically around midnight (hours 0 and 1,
    # then 23 and 2, ...), two hours per iteration at nominal flow
    while conso_left>0:
        if conso_left > 2 * V_dot_nom:
            daily_cons[-hh] = V_dot_nom
            daily_cons[1+hh] = V_dot_nom
            conso_left -= 2 * V_dot_nom
        else:
            daily_cons[-hh] = conso_left/2
            daily_cons[1+hh] = conso_left/2
            conso_left = 0
        hh += 1
    # loop through each day
    for n in range(1,n_days+1):
        idx = (profile.index.month == m) & (profile.index.day==n)
        profile.loc[idx,'Lighting and others'] = daily_profiles['Lighting & other'].values
        # Wet season: January-May and November-December
        if m<6 or m >10:
            water[idx] = daily_profiles['Water cons wet season'].values
        else:
            water[idx] = daily_profiles['Water cons dry season'].values
        profile.loc[idx,'Plunger Pump'] = daily_cons * 1000 * 9.81 * depth / eta_pp / 3.6E6
        # Day-time variant: same 24h schedule shifted by 12 hours
        profile.loc[idx,'Plunger Pump day'] = daily_cons[range(-12,12)] * 1000 * 9.81 * depth / eta_pp / 3.6E6
    # Rescale the water profile to match the monthly total, derive the
    # booster-pump load from it, and rescale the baseline load likewise
    water[idx_month] = water[idx_month] * conso_w_month/water[idx_month].sum()
    profile.loc[idx_month,'Surpresseurs'] = water[idx_month].values * (p_nom - 1E5)/eta_surpr/3.6E6
    profile.loc[idx_month,'Lighting and others'] = profile.loc[idx_month,'Lighting and others'] * conso_base/profile.loc[idx_month,'Lighting and others'].sum()
# Aggregate demand with night-time vs day-time pumping schedules
demand_night = profile['Lighting and others'] + profile['Plunger Pump'] + profile['Surpresseurs']
demand_day = profile['Lighting and others'] + profile['Plunger Pump day'] + profile['Surpresseurs']
profile.to_csv('PV_casamance - yearly profiles.csv')
| [
"numpy.zeros",
"calendar.monthrange",
"pandas.read_csv",
"pandas.date_range"
] | [((1467, 1528), 'pandas.read_csv', 'pd.read_csv', (['"""PV_casamance - daily profiles.csv"""'], {'index_col': '(0)'}), "('PV_casamance - daily profiles.csv', index_col=0)\n", (1478, 1528), True, 'import pandas as pd\n'), ((2437, 2449), 'numpy.zeros', 'np.zeros', (['(24)'], {}), '(24)\n', (2445, 2449), True, 'import numpy as np\n'), ((1580, 1654), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2015-01-01 00:00"""', 'end': '"""2015-12-31 23:59"""', 'freq': '"""1h"""'}), "(start='2015-01-01 00:00', end='2015-12-31 23:59', freq='1h')\n", (1593, 1654), True, 'import pandas as pd\n'), ((1678, 1752), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2015-01-01 00:00"""', 'end': '"""2015-12-31 23:59"""', 'freq': '"""1h"""'}), "(start='2015-01-01 00:00', end='2015-12-31 23:59', freq='1h')\n", (1691, 1752), True, 'import pandas as pd\n'), ((2073, 2092), 'calendar.monthrange', 'monthrange', (['(2015)', 'm'], {}), '(2015, m)\n', (2083, 2092), False, 'from calendar import monthrange\n')] |
from qgis.core import *
from osgeo import gdal
import math
import numpy as np
import os
MARGIN = 0.01
def weightedFunction(x, y, x0, y0, weight):
    # Weighted distance: Euclidean distance from (x0, y0), divided by the
    # site's weight so that heavier sites claim larger Voronoi cells.
    dx = x - x0
    dy = y - y0
    return math.sqrt(dx ** 2 + dy ** 2) / weight
#Get the points vector layer
# NOTE(review): 'sys' is never imported in this script (it is available in the
# QGIS Python console), and QgsVectorLayer is handed the whole sys.argv list —
# presumably sys.argv[1] (the file path) was intended. Confirm before running
# standalone.
pointsVector = QgsVectorLayer(sys.argv, 'points', 'ogr')
#Add the vector layer to the map layer registry
QgsProject.instance().addMapLayer(pointsVector)
#get layer extents with a small margin to avoid ignoring points on bounding box's limit
bounding_box = pointsVector.extent()
# Build the gdal_rasterize -te (target extent) argument string
extent_args = "-te " + str(bounding_box.xMinimum() - MARGIN) \
              + " " + str(bounding_box.yMinimum() - MARGIN) \
              + " " + str(bounding_box.xMaximum() + MARGIN) \
              + " " + str(bounding_box.yMaximum() + MARGIN)
# Burn the 'z' attribute of each point into a 1000x1000 raster
os.system('gdal_rasterize -a z -ts 1000 1000 ' + extent_args + ' -l points "' + sys.argv + '" "./rasterPoints"')
rasterPoints=QgsRasterLayer('./rasterPoints', 'rasterPoints')
QgsProject.instance().addMapLayer(rasterPoints)
dataset = gdal.Open('./rasterPoints')
numpy_array = dataset.ReadAsArray()
# NOTE(review): ReadAsArray returns (rows, cols), so these names appear
# swapped relative to their meaning; the script still works because the
# raster is square (1000x1000). Verify if the -ts size is ever changed.
width, height = numpy_array.shape
points = []
#get all the weighted points from the raster
print("get the points with their weights from raster")
for row in range(height):
    for col in range(width):
        if(numpy_array[row, col] != 0):
            print(str(numpy_array[row, col]) + " at point : " + str(row) + " , " + str(col))
            points.append([row, col, numpy_array[row,col]])
print("compute the weighted distance grid for each point")
# Brute-force weighted Voronoi: each cell stores the index of the nearest
# (weighted) site
distanceGrid = np.zeros(shape = (height, width))
for row in range(height):
    for col in range(width):
        index = 0
        min_distance = weightedFunction(row, col, points[0][0], points[0][1], points[0][2])
        for i in range(1, (len(points))):
            weightedDistance = weightedFunction(row, col, points[i][0], points[i][1], points[i][2])
            if(weightedDistance < min_distance):
                min_distance = weightedDistance
                index = i
        distanceGrid[row, col] = index
#save the distance grid as an output raster
#output file name ( path to where to save the raster file )
print("save distance grid as raster GTiff")
outFileName = './rasterVoronoi.tiff'
#call the driver for the chosen format from GDAL
driver = gdal.GetDriverByName('GTiff')
#Create the file with dimensions of the input raster ( rasterized points )
output = driver.Create(outFileName, height, width, 1, gdal.GDT_Byte)
#set the Raster transformation of the resulting raster
output.SetGeoTransform(dataset.GetGeoTransform())
#set the projection of the resulting raster
output.SetProjection(dataset.GetProjection())
#insert data to the resulting raster in band 1 from the weighted distance grid
output.GetRasterBand(1).WriteArray(distanceGrid)
#Call the raster output file
rasterVoronoi = QgsRasterLayer('./rasterVoronoi.tiff', 'weighted Raster')
#Add it to the map layer registry ( display it on the map)
QgsProject.instance().addMapLayer(rasterVoronoi)
#polygonize the result raster
print("convert raster to shapefile")
os.system('gdal_polygonize.bat ./rasterVoronoi.tiff ./WeightedVoronoi.shp -b 1 -f "ESRI Shapefile" weighted')
weightedVoronoiVector = QgsVectorLayer('./WeightedVoronoi.shp', 'weighted voronoi', 'ogr')
#load the vector weighted voronoi diagram
QgsProject.instance().addMapLayer(weightedVoronoiVector)
print("End of script") | [
"osgeo.gdal.Open",
"math.sqrt",
"numpy.zeros",
"os.system",
"osgeo.gdal.GetDriverByName"
] | [((806, 922), 'os.system', 'os.system', (['(\'gdal_rasterize -a z -ts 1000 1000 \' + extent_args + \' -l points "\' + sys.\n argv + \'" "./rasterPoints"\')'], {}), '(\'gdal_rasterize -a z -ts 1000 1000 \' + extent_args +\n \' -l points "\' + sys.argv + \'" "./rasterPoints"\')\n', (815, 922), False, 'import os\n'), ((1042, 1069), 'osgeo.gdal.Open', 'gdal.Open', (['"""./rasterPoints"""'], {}), "('./rasterPoints')\n", (1051, 1069), False, 'from osgeo import gdal\n'), ((1551, 1582), 'numpy.zeros', 'np.zeros', ([], {'shape': '(height, width)'}), '(shape=(height, width))\n', (1559, 1582), True, 'import numpy as np\n'), ((2229, 2258), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (2249, 2258), False, 'from osgeo import gdal\n'), ((3005, 3124), 'os.system', 'os.system', (['"""gdal_polygonize.bat ./rasterVoronoi.tiff ./WeightedVoronoi.shp -b 1 -f "ESRI Shapefile" weighted"""'], {}), '(\n \'gdal_polygonize.bat ./rasterVoronoi.tiff ./WeightedVoronoi.shp -b 1 -f "ESRI Shapefile" weighted\'\n )\n', (3014, 3124), False, 'import os\n'), ((229, 269), 'math.sqrt', 'math.sqrt', (['((x - x0) ** 2 + (y - y0) ** 2)'], {}), '((x - x0) ** 2 + (y - y0) ** 2)\n', (238, 269), False, 'import math\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.stats import probplot, pearsonr
class PreparedData:
    """Clean the raw energy dataset and split its columns into features/targets."""

    def __init__(self, inn):
        """
        Input:
        - inn: raw dataframe with a 'date' column and the rv1/rv2 noise columns
        """
        self.original_data = inn
        prepared = inn.copy(deep=True)
        # Index the frame by its timestamp, then drop the now-redundant
        # 'date' column and the two random noise columns.
        prepared.index = pd.to_datetime(inn['date'])
        prepared.drop(columns=['date', 'rv1', 'rv2'], inplace=True)
        # Targets: indoor temperature columns — any column containing 'T'
        # except the outdoor temperature and the dew point.
        targets = [
            name for name in inn.columns
            if 'T' in name and '_out' not in name and 'Tdewpoint' not in name
        ]
        self.prepared_data = prepared
        self.target_labels = targets
        # Everything that is not a target is a feature.
        self.feature_labels = set(prepared.columns) - set(targets)
def compute_univariate_statistics(inn):
    """
    Return a dataframe with univariate statistics for every column of inn.

    For each variable the output holds pandas' describe() statistics plus
    skewness, kurtosis and the proportion of outliers according to Tukey's
    fences (values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]).

    Input:
    - inn: dataframe of numeric variables
    Returns:
    - dataframe of statistics, indexed by variable name
    """
    # Compute univariate statistics.
    statistics = inn.describe().T
    # Compute skewness.
    statistics['skewness'] = inn.skew().values
    # Compute kurtosis.
    statistics['kurtosis'] = inn.kurtosis().values
    # Inter-quartile range, used only for the fences below (the original
    # code added it as an 'IQR' column and dropped it again at the end).
    iqr = statistics['75%'] - statistics['25%']
    proportion_of_outliers = []
    for variable in inn.columns:
        # Tukey's fences are Q1 - 1.5*IQR and Q3 + 1.5*IQR; the original
        # code used 1.5*Q1 / 1.5*Q3 here, which is not Tukey's rule.
        lower = statistics.loc[variable, '25%'] - 1.5 * iqr[variable]
        upper = statistics.loc[variable, '75%'] + 1.5 * iqr[variable]
        column = inn[variable]
        outliers = column[(column < lower) | (column > upper)]
        proportion_of_outliers.append(outliers.count() / len(inn))
    statistics['proportion_of_outliers'] = np.around(
        np.array(proportion_of_outliers), decimals=2
    )
    return statistics
if __name__ == '__main__':
    # Declare file handles.
    RAW_DATA = \
        r'M:\Projects\003_univariate_and_multivariate_data_analysis\1_data' \
        + r'\energydata_complete.csv'
    # Get data.
    raw_data = pd.read_csv(RAW_DATA, sep=',')
    # Prepare data.
    data_preparation = PreparedData(inn=raw_data)
    prepared_data = data_preparation.prepared_data
    # Compute univariate statistics.
    univariate_statistics = compute_univariate_statistics(inn=prepared_data)
    # Plot time-series.
    # Declare a figure with a 2x3 grid of subplots.
    fig = plt.figure()
    grid = gridspec.GridSpec(2, 3)
    # Plot time series of T1 across the full top row.
    ax1 = plt.subplot(grid[0, :3])
    ax1.plot(prepared_data['T1'])
    # Plot normal probability plot.
    ordered_series = np.array(prepared_data['T1'].sort_values(ascending=True))
    # Theoretical quantiles: sorted draws from a standard normal of equal size.
    normal_distribution = np.sort(
        np.random.normal(loc=0, scale=1, size=len(ordered_series))
    )
    ax2 = plt.subplot(grid[1, 0])
    ax2.scatter(x=normal_distribution, y=ordered_series, s=1)
    # Reference line through the extreme points of both distributions.
    ax2.plot(
        [np.min(normal_distribution), np.max(normal_distribution)],
        [np.min(ordered_series), np.max(ordered_series)],
        c='red'
    )
    ax2.set_xlabel('Theoretical Distribution')
    ax2.set_ylabel('Observed Distribution')
    ax2.set_title('Normal Probability Plot')
    # Plot histogram.
    ax3 = plt.subplot(grid[1, 1])
    ax3.hist(prepared_data['T1'], bins=30)
    ax3.set_title('Histogram')
    # Plot univariate statistics table.
    # NOTE(review): 'ax3' is reused here for a different subplot, shadowing
    # the histogram axis above.
    ax3 = plt.subplot(grid[1, 2])
    row_labels = list(univariate_statistics.columns)
    content = [[item] for item in list(np.around(univariate_statistics.loc['T1', :].values, decimals=4))]
    ax3.table(
        cellText=content,
        rowLabels=row_labels,
        loc='right',
        colWidths=[0.5, 0.5], bbox=None, fontsize=10).scale(.8, .8)
    ax3.axis('off')
    ax3.axis('tight')
    ax3.set_title('Univarite Statistics')
| [
"pandas.read_csv",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.around",
"numpy.min",
"matplotlib.pyplot.subplot",
"pandas.to_datetime"
] | [((2381, 2411), 'pandas.read_csv', 'pd.read_csv', (['RAW_DATA'], {'sep': '""","""'}), "(RAW_DATA, sep=',')\n", (2392, 2411), True, 'import pandas as pd\n'), ((2709, 2721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2719, 2721), True, 'import matplotlib.pyplot as plt\n'), ((2733, 2756), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(3)'], {}), '(2, 3)\n', (2750, 2756), True, 'import matplotlib.gridspec as gridspec\n'), ((2791, 2815), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[0, :3]'], {}), '(grid[0, :3])\n', (2802, 2815), True, 'import matplotlib.pyplot as plt\n'), ((3083, 3106), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[1, 0]'], {}), '(grid[1, 0])\n', (3094, 3106), True, 'import matplotlib.pyplot as plt\n'), ((3499, 3522), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[1, 1]'], {}), '(grid[1, 1])\n', (3510, 3522), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3665), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[1, 2]'], {}), '(grid[1, 2])\n', (3653, 3665), True, 'import matplotlib.pyplot as plt\n'), ((438, 465), 'pandas.to_datetime', 'pd.to_datetime', (["inn['date']"], {}), "(inn['date'])\n", (452, 465), True, 'import pandas as pd\n'), ((2020, 2052), 'numpy.array', 'np.array', (['proportion_of_outliers'], {}), '(proportion_of_outliers)\n', (2028, 2052), True, 'import numpy as np\n'), ((3192, 3219), 'numpy.min', 'np.min', (['normal_distribution'], {}), '(normal_distribution)\n', (3198, 3219), True, 'import numpy as np\n'), ((3221, 3248), 'numpy.max', 'np.max', (['normal_distribution'], {}), '(normal_distribution)\n', (3227, 3248), True, 'import numpy as np\n'), ((3260, 3282), 'numpy.min', 'np.min', (['ordered_series'], {}), '(ordered_series)\n', (3266, 3282), True, 'import numpy as np\n'), ((3284, 3306), 'numpy.max', 'np.max', (['ordered_series'], {}), '(ordered_series)\n', (3290, 3306), True, 'import numpy as np\n'), ((3758, 3822), 'numpy.around', 'np.around', 
(["univariate_statistics.loc['T1', :].values"], {'decimals': '(4)'}), "(univariate_statistics.loc['T1', :].values, decimals=4)\n", (3767, 3822), True, 'import numpy as np\n')] |
import pkg_resources
import pathlib
import random
import numpy
import pandas
import json
import yaml
from collections import defaultdict
def define_amplicon(tmp, amplicons, reference_genome):
    """
    Return the genome index range unique to the chosen amplicon.

    Starts from the region strictly between the chosen amplicon's primers
    (end_left is the last base of the left primer, so it is excluded), then
    removes any positions shared with the interior of any other amplicon,
    and returns the min/max surviving nucleotide indices as a pandas Series.
    """
    chosen = tmp['name']
    index = reference_genome.nucleotide_index

    def interior(amplicon_row):
        # Positions strictly between the left primer's end and the right
        # primer's start (the primers themselves are excluded).
        return (index > int(amplicon_row['end_left'])) & \
               (index < int(amplicon_row['start_right']))

    mask = interior(amplicons[amplicons.name == chosen])
    for _, other in amplicons.iterrows():
        if other['name'] == chosen:
            continue
        # Drop positions that also fall inside this other amplicon.
        mask &= numpy.logical_not(interior(other) & mask)
    return pandas.Series([index[mask].min(), index[mask].max()])
def load_variant_definitions(path):
    """
    Load every variant YAML definition found under path/variant_yaml/.

    Only definitions carrying a non-None 'who-label' are kept; they are
    returned keyed by the lower-cased WHO label.
    """
    definitions = {}
    yaml_dir = pathlib.Path(path) / "variant_yaml/"
    for yaml_file in yaml_dir.glob('**/*.yml'):
        with open(yaml_file) as handle:
            parsed = yaml.safe_load(handle)
        if 'who-label' in parsed.keys() and parsed['who-label'] is not None:
            definitions[parsed['who-label'].lower()] = parsed
    return definitions
def mutate_read(read, error_rate=0, snps=0, debug_mutations=None):
    """Return a copy of *read* with some bases substituted.

    Exactly one mutation mode must be active, chosen in this priority order:
      * debug_mutations -- dict of {0-based position: replacement base},
        applied verbatim (deterministic);
      * snps            -- introduce exactly this many random substitutions;
      * error_rate      -- mutate each base independently with this probability.

    Raises ValueError when no mode is active.
    """
    assert error_rate <= 1
    if debug_mutations is not None:
        mask = numpy.isin(numpy.arange(0, len(read)), list(debug_mutations.keys()))
    elif snps > 0:
        positions = random.sample(range(len(read)), snps)
        mask = numpy.isin(numpy.arange(0, len(read)), positions)
    elif error_rate > 0:
        mask = numpy.random.uniform(size=len(read)) < error_rate
    else:
        raise ValueError('Read will not be mutated!')
    # only if there are more than zero mutations
    if numpy.sum(mask) == 0:
        return read
    bases = {'A', 'T', 'C', 'G'}
    # convert the read into an array of chars so masked positions can be replaced
    r = numpy.array(list(read))
    if debug_mutations is None:
        # BUGFIX: random.sample() no longer accepts a set (TypeError on
        # Python >= 3.11), so sample from a sorted list instead; sorting also
        # makes the draw reproducible under a fixed random seed.
        new_bases = [random.sample(sorted(bases ^ set(i)), 1)[0] for i in r[mask]]
    else:
        # (dropped a leftover debug print of new_bases here)
        new_bases = list(debug_mutations.values())
    # set the new values
    r[mask] = numpy.array(new_bases)
    # recreate the sequence
    return ''.join(i for i in r)
def load_lineages_dataframe():
    """Return the packaged cov-lineages.csv as a pandas DataFrame."""
    csv_path = pkg_resources.resource_filename(
        'gpas_covid_synthetic_reads', 'data/cov-lineages.csv'
    )
    return pandas.read_csv(csv_path)
def load_pango_definitions(path):
    """Load constellation JSON definitions for each supported pango lineage.

    Skips the epsilon cB.1.427 entry, and skips any omicron sub-lineage
    other than the four constellations shipped with explicit definitions.
    Returns a dict keyed by pango lineage name.
    """
    lineages_reference = load_lineages_dataframe()
    constellations_path = pathlib.Path(path) / "constellations/definitions"
    supported_omicron = ('cB.1.1.529', 'cBA.1', 'cBA.2', 'cBA.3')
    pango_definitions = {}
    for lineage in lineages_reference['pango_lineage']:
        who_label = lineages_reference[lineages_reference['pango_lineage'] == lineage]['who_label'].values[0].lower()
        if who_label == 'epsilon' and lineage == 'cB.1.427':
            continue
        if who_label == 'omicron' and lineage not in supported_omicron:
            continue
        with open(constellations_path / (lineage + '.json')) as INPUT:
            pango_definitions[lineage] = json.load(INPUT)
    return pango_definitions
def create_amino_acid_to_codon(genome):
    """Build a mapping amino acid -> list of codons for the S (spike) gene.

    Codons containing any of the ambiguous characters 'x', 'z' or 'o'
    are ignored.
    """
    spike = genome.build_gene('S')
    amino_acid_to_codon = defaultdict(list)
    for codon, amino_acid in spike.codon_to_amino_acid.items():
        if any(ambiguous in codon for ambiguous in ('x', 'z', 'o')):
            continue
        amino_acid_to_codon[amino_acid].append(codon)
    return amino_acid_to_codon
def determine_closet_codon(current_codon, possible_codons):
    """Return the codon in possible_codons needing the fewest base changes from current_codon.

    Ties are broken in favour of the earliest candidate. (Name keeps the
    historical 'closet' typo for API compatibility.)

    Raises ValueError if possible_codons is empty -- previously this crashed
    with an opaque UnboundLocalError at the return statement.
    """
    new_codon = None
    max_bases = 4  # codons are 3 bases long, so any candidate beats this
    for putative_codon in possible_codons:
        bases_to_change = sum(i != j for i, j in zip(current_codon, putative_codon))
        if bases_to_change < max_bases:
            new_codon = putative_codon
            max_bases = bases_to_change
    if new_codon is None:
        raise ValueError('possible_codons must not be empty')
    return new_codon
| [
"pandas.read_csv",
"pathlib.Path",
"numpy.logical_not",
"pkg_resources.resource_filename",
"numpy.sum",
"numpy.array",
"yaml.safe_load",
"collections.defaultdict",
"json.load"
] | [((2890, 2980), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""gpas_covid_synthetic_reads"""', '"""data/cov-lineages.csv"""'], {}), "('gpas_covid_synthetic_reads',\n 'data/cov-lineages.csv')\n", (2921, 2980), False, 'import pkg_resources\n'), ((3002, 3031), 'pandas.read_csv', 'pandas.read_csv', (['cov_lineages'], {}), '(cov_lineages)\n', (3017, 3031), False, 'import pandas\n'), ((4124, 4141), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4135, 4141), False, 'from collections import defaultdict\n'), ((950, 983), 'numpy.logical_not', 'numpy.logical_not', (['overlap_region'], {}), '(overlap_region)\n', (967, 983), False, 'import numpy\n'), ((1214, 1232), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (1226, 1232), False, 'import pathlib\n'), ((2170, 2185), 'numpy.sum', 'numpy.sum', (['mask'], {}), '(mask)\n', (2179, 2185), False, 'import numpy\n'), ((2716, 2738), 'numpy.array', 'numpy.array', (['new_bases'], {}), '(new_bases)\n', (2727, 2738), False, 'import numpy\n'), ((3206, 3224), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (3218, 3224), False, 'import pathlib\n'), ((1436, 1457), 'yaml.safe_load', 'yaml.safe_load', (['INPUT'], {}), '(INPUT)\n', (1450, 1457), False, 'import yaml\n'), ((3974, 3990), 'json.load', 'json.load', (['INPUT'], {}), '(INPUT)\n', (3983, 3990), False, 'import json\n')] |
import warnings
from math import ceil
import numpy as np
import openmdao.api as om
from wisdem.landbosse.model.Manager import Manager
from wisdem.landbosse.model.DefaultMasterInputDict import DefaultMasterInputDict
from wisdem.landbosse.landbosse_omdao.OpenMDAODataframeCache import OpenMDAODataframeCache
from wisdem.landbosse.landbosse_omdao.WeatherWindowCSVReader import read_weather_window
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import pandas as pd
use_default_component_data = -1.0
class LandBOSSE(om.Group):
    """OpenMDAO group that supplies WISDEM-level input defaults and wraps LandBOSSE_API."""
    def setup(self):
        """Set default values for BOS model inputs and add the LandBOSSE_API subsystem."""
        # Add a tower section height variable. The default value of 30 m is for transportable tower sections.
        self.set_input_defaults("tower_section_length_m", 30.0, units="m")
        # use_default_component_data (-1.0) is a sentinel meaning "keep the project-data default"
        self.set_input_defaults("blade_drag_coefficient", use_default_component_data) # Unitless
        self.set_input_defaults("blade_lever_arm", use_default_component_data, units="m")
        self.set_input_defaults("blade_install_cycle_time", use_default_component_data, units="h")
        self.set_input_defaults("blade_offload_hook_height", use_default_component_data, units="m")
        self.set_input_defaults("blade_offload_cycle_time", use_default_component_data, units="h")
        self.set_input_defaults("blade_drag_multiplier", use_default_component_data) # Unitless
        self.set_input_defaults("turbine_spacing_rotor_diameters", 4)
        self.set_input_defaults("row_spacing_rotor_diameters", 10)
        self.set_input_defaults("commissioning_pct", 0.01)
        self.set_input_defaults("decommissioning_pct", 0.15)
        self.set_input_defaults("trench_len_to_substation_km", 50.0, units="km")
        self.set_input_defaults("interconnect_voltage_kV", 130.0, units="kV")
        self.set_input_defaults("foundation_height", 0.0, units="m")
        self.set_input_defaults("blade_mass", 8000.0, units="kg")
        self.set_input_defaults("hub_mass", 15.4e3, units="kg")
        self.set_input_defaults("nacelle_mass", 50e3, units="kg")
        self.set_input_defaults("tower_mass", 240e3, units="kg")
        # NOTE(review): default given as 1500 kW for the MW-named variable;
        # OpenMDAO should reconcile this by unit conversion -- confirm.
        self.set_input_defaults("turbine_rating_MW", 1500.0, units="kW")
        self.add_subsystem("landbosse", LandBOSSE_API(), promotes=["*"])
class LandBOSSE_API(om.ExplicitComponent):
    """OpenMDAO ExplicitComponent that assembles LandBOSSE inputs, runs its Manager, and exposes BOS cost outputs."""
    def setup(self):
        """Declare all continuous/discrete inputs and outputs; clears the shared dataframe cache first."""
        # Clear the cache
        OpenMDAODataframeCache._cache = {}
        self.setup_inputs()
        self.setup_outputs()
        self.setup_discrete_outputs()
        self.setup_discrete_inputs_that_are_not_dataframes()
        self.setup_discrete_inputs_that_are_dataframes()
    def setup_inputs(self):
        """
        This method sets up the inputs.
        """
        # use_default_component_data (-1.0) is a sentinel meaning "keep the project-data default"
        self.add_input("blade_drag_coefficient", use_default_component_data) # Unitless
        self.add_input("blade_lever_arm", use_default_component_data, units="m")
        self.add_input("blade_install_cycle_time", use_default_component_data, units="h")
        self.add_input("blade_offload_hook_height", use_default_component_data, units="m")
        self.add_input("blade_offload_cycle_time", use_default_component_data, units="h")
        self.add_input("blade_drag_multiplier", use_default_component_data) # Unitless
        # Even though LandBOSSE doesn't use foundation height, TowerSE does,
        # and foundation height can be used with hub height to calculate
        # tower height.
        self.add_input("foundation_height", 0.0, units="m")
        self.add_input("tower_section_length_m", 30.0, units="m")
        self.add_input("nacelle_mass", 0.0, units="kg")
        self.add_input("tower_mass", 0.0, units="kg")
        # A discrete input below, number_of_blades, gives the number of blades
        # on the rotor.
        #
        # The total mass of the rotor nacelle assembly (RNA) is the following
        # sum:
        #
        # (blade_mass * number_of_blades) + nac_mass + hub_mass
        self.add_input("blade_mass", use_default_component_data, units="kg", desc="The mass of one rotor blade.")
        self.add_input("hub_mass", use_default_component_data, units="kg", desc="Mass of the rotor hub")
        self.add_input(
            "crane_breakdown_fraction",
            val=0.0,
            desc="0 means the crane is never broken down. 1 means it is broken down every turbine.",
        )
        self.add_input("construct_duration", val=9, desc="Total project construction time (months)")
        self.add_input("hub_height_meters", val=80, units="m", desc="Hub height m")
        self.add_input("rotor_diameter_m", val=77, units="m", desc="Rotor diameter m")
        self.add_input("wind_shear_exponent", val=0.2, desc="Wind shear exponent")
        self.add_input("turbine_rating_MW", val=1.5, units="MW", desc="Turbine rating MW")
        self.add_input("fuel_cost_usd_per_gal", val=1.5, desc="Fuel cost USD/gal")
        self.add_input(
            "breakpoint_between_base_and_topping_percent", val=0.8, desc="Breakpoint between base and topping (percent)"
        )
        # Could not place units in turbine_spacing_rotor_diameters
        self.add_input("turbine_spacing_rotor_diameters", desc="Turbine spacing (times rotor diameter)", val=4)
        self.add_input("depth", units="m", desc="Foundation depth m", val=2.36)
        self.add_input("rated_thrust_N", units="N", desc="Rated Thrust (N)", val=5.89e5)
        # Can't set units
        self.add_input("bearing_pressure_n_m2", desc="Bearing Pressure (n/m2)", val=191521)
        self.add_input("gust_velocity_m_per_s", units="m/s", desc="50-year Gust Velocity (m/s)", val=59.5)
        self.add_input("road_length_adder_m", units="m", desc="Road length adder (m)", val=5000)
        # Can't set units
        self.add_input("fraction_new_roads", desc="Percent of roads that will be constructed (0.0 - 1.0)", val=0.33)
        self.add_input("road_quality", desc="Road Quality (0-1)", val=0.6)
        self.add_input("line_frequency_hz", units="Hz", desc="Line Frequency (Hz)", val=60)
        # Can't set units
        self.add_input("row_spacing_rotor_diameters", desc="Row spacing (times rotor diameter)", val=10)
        self.add_input(
            "trench_len_to_substation_km", units="km", desc="Combined Homerun Trench Length to Substation (km)", val=50
        )
        self.add_input("distance_to_interconnect_mi", units="mi", desc="Distance to interconnect (miles)", val=5)
        self.add_input("interconnect_voltage_kV", units="kV", desc="Interconnect Voltage (kV)", val=130)
        self.add_input(
            "critical_speed_non_erection_wind_delays_m_per_s",
            units="m/s",
            desc="Non-Erection Wind Delay Critical Speed (m/s)",
            val=15,
        )
        self.add_input(
            "critical_height_non_erection_wind_delays_m",
            units="m",
            desc="Non-Erection Wind Delay Critical Height (m)",
            val=10,
        )
        # NOTE(review): "winnd" looks like a typo for "wind"; key kept as-is for
        # backward compatibility -- prepare_master_input_dictionary hard-codes
        # road_distributed_wind = False regardless. Confirm before renaming.
        self.add_discrete_input("road_distributed_winnd", val=False)
        self.add_input("road_width_ft", units="ft", desc="Road width (ft)", val=20)
        self.add_input("road_thickness", desc="Road thickness (in)", val=8)
        self.add_input("crane_width", units="m", desc="Crane width (m)", val=12.2)
        self.add_input("overtime_multiplier", desc="Overtime multiplier", val=1.4)
        self.add_input("markup_contingency", desc="Markup contingency", val=0.03)
        self.add_input("markup_warranty_management", desc="Markup warranty management", val=0.0002)
        self.add_input("markup_sales_and_use_tax", desc="Markup sales and use tax", val=0)
        self.add_input("markup_overhead", desc="Markup overhead", val=0.05)
        self.add_input("markup_profit_margin", desc="Markup profit margin", val=0.05)
        # NOTE(review): unusual variable name containing a space; presumably a
        # component-mass passthrough in tonnes -- confirm against callers.
        self.add_input("Mass tonne", val=(1.0,), desc="", units="t")
        self.add_input(
            "development_labor_cost_usd", val=1e6, desc="The cost of labor in the development phase", units="USD"
        )
        # Disabled due to Pandas conflict right now.
        self.add_input("labor_cost_multiplier", val=1.0, desc="Labor cost multiplier")
        self.add_input("commissioning_pct", 0.01)
        self.add_input("decommissioning_pct", 0.15)
    def setup_discrete_inputs_that_are_not_dataframes(self):
        """
        This method sets up the discrete inputs that aren't dataframes.
        """
        self.add_discrete_input("num_turbines", val=100, desc="Number of turbines in project")
        # Since 3 blades are so common on rotors, that is a reasonable default
        # value that will not need to be checked during component list
        # assembly.
        self.add_discrete_input("number_of_blades", val=3, desc="Number of blades on the rotor")
        self.add_discrete_input(
            "user_defined_home_run_trench", val=0, desc="Flag for user-defined home run trench length (0 = no; 1 = yes)"
        )
        self.add_discrete_input(
            "allow_same_flag",
            val=False,
            desc="Allow same crane for base and topping (True or False)",
        )
        self.add_discrete_input(
            "hour_day",
            desc="Dictionary of normal and long hours for construction in a day in the form of {'long': 24, 'normal': 10}",
            val={"long": 24, "normal": 10},
        )
        self.add_discrete_input(
            "time_construct",
            desc="One of the keys in the hour_day dictionary to specify how many hours per day construction happens.",
            val="normal",
        )
        self.add_discrete_input(
            "user_defined_distance_to_grid_connection",
            desc="Flag for user-defined home run trench length (True or False)",
            val=False,
        )
        # Could not place units in rate_of_deliveries
        self.add_discrete_input("rate_of_deliveries", val=10, desc="Rate of deliveries (turbines per week)")
        self.add_discrete_input("new_switchyard", desc="New Switchyard (True or False)", val=True)
        self.add_discrete_input("num_hwy_permits", desc="Number of highway permits", val=10)
        self.add_discrete_input("num_access_roads", desc="Number of access roads", val=2)
    def setup_discrete_inputs_that_are_dataframes(self):
        """
        This sets up the default inputs that are dataframes. They are separate
        because they hold the project data and the way we need to hold their
        data is different. They have defaults loaded at the top of the file
        which can be overridden outside by setting the properties listed
        below.
        """
        # Read in default sheets for project data
        default_project_data = OpenMDAODataframeCache.read_all_sheets_from_xlsx("ge15_public")
        self.add_discrete_input(
            "site_facility_building_area_df",
            val=default_project_data["site_facility_building_area"],
            desc="site_facility_building_area DataFrame",
        )
        self.add_discrete_input(
            "components",
            val=default_project_data["components"],
            desc="Dataframe of components for tower, blade, nacelle",
        )
        self.add_discrete_input(
            "crane_specs", val=default_project_data["crane_specs"], desc="Dataframe of specifications of cranes"
        )
        self.add_discrete_input(
            "weather_window",
            val=read_weather_window(default_project_data["weather_window"]),
            desc="Dataframe of wind toolkit data",
        )
        self.add_discrete_input("crew", val=default_project_data["crew"], desc="Dataframe of crew configurations")
        self.add_discrete_input(
            "crew_price",
            val=default_project_data["crew_price"],
            desc="Dataframe of costs per hour for each type of worker.",
        )
        self.add_discrete_input(
            "equip", val=default_project_data["equip"], desc="Collections of equipment to perform erection operations."
        )
        self.add_discrete_input(
            "equip_price", val=default_project_data["equip_price"], desc="Prices for various type of equipment."
        )
        self.add_discrete_input("rsmeans", val=default_project_data["rsmeans"], desc="RSMeans price data")
        self.add_discrete_input(
            "cable_specs", val=default_project_data["cable_specs"], desc="cable specs for collection system"
        )
        self.add_discrete_input(
            "material_price",
            val=default_project_data["material_price"],
            desc="Prices of materials for foundations and roads",
        )
        self.add_discrete_input("project_data", val=default_project_data, desc="Dictionary of all dataframes of data")
    def setup_outputs(self):
        """
        This method sets up the continuous outputs. This is where total costs
        and installation times go.
        To see how cost totals are calculated see, the compute_total_bos_costs
        method below.
        """
        self.add_output(
            "bos_capex", 0.0, units="USD", desc="Total BOS CAPEX not including commissioning or decommissioning."
        )
        self.add_output(
            "bos_capex_kW",
            0.0,
            units="USD/kW",
            desc="Total BOS CAPEX per kW not including commissioning or decommissioning.",
        )
        self.add_output(
            "total_capex", 0.0, units="USD", desc="Total BOS CAPEX including commissioning and decommissioning."
        )
        self.add_output(
            "total_capex_kW",
            0.0,
            units="USD/kW",
            desc="Total BOS CAPEX per kW including commissioning and decommissioning.",
        )
        self.add_output("installation_capex", 0.0, units="USD", desc="Total foundation and erection installation cost.")
        self.add_output(
            "installation_capex_kW", 0.0, units="USD", desc="Total foundation and erection installation cost per kW."
        )
        self.add_output("installation_time_months", 0.0, desc="Total balance of system installation time (months).")
    def setup_discrete_outputs(self):
        """
        This method sets up discrete outputs.
        """
        self.add_discrete_output(
            "landbosse_costs_by_module_type_operation", desc="The costs by module, type and operation", val=None
        )
        self.add_discrete_output(
            "landbosse_details_by_module",
            desc="The details from the run of LandBOSSE. This includes some costs, but mostly other things",
            val=None,
        )
        self.add_discrete_output("erection_crane_choice", desc="The crane choices for erection.", val=None)
        self.add_discrete_output(
            "erection_component_name_topvbase",
            desc="List of components and whether they are a topping or base operation",
            val=None,
        )
        self.add_discrete_output(
            "erection_components", desc="List of components with their values modified from the defaults.", val=None
        )
    def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
        """
        This runs the ErectionCost module using the inputs and outputs into and
        out of this module.
        Note: inputs, discrete_inputs are not dictionaries. They do support
        [] notation. inputs is of class 'openmdao.vectors.default_vector.DefaultVector'
        discrete_inputs is of class openmdao.core.component._DictValues. Other than
        [] brackets, they do not behave like dictionaries. See the following
        documentation for details.
        http://openmdao.org/twodocs/versions/latest/_srcdocs/packages/vectors/default_vector.html
        https://mdolab.github.io/OpenAeroStruct/_modules/openmdao/core/component.html
        Parameters
        ----------
        inputs : openmdao.vectors.default_vector.DefaultVector
            A dictionary-like object with NumPy arrays that hold float
            inputs. Note that since these are NumPy arrays, they
            need indexing to pull out simple float64 values.
        outputs : openmdao.vectors.default_vector.DefaultVector
            A dictionary-like object to store outputs.
        discrete_inputs : openmdao.core.component._DictValues
            A dictionary-like with the non-numeric inputs (like
            pandas.DataFrame)
        discrete_outputs : openmdao.core.component._DictValues
            A dictionary-like for non-numeric outputs (like
            pandas.DataFrame)
        """
        # Put the inputs together and run all the modules
        master_output_dict = dict()
        master_input_dict = self.prepare_master_input_dictionary(inputs, discrete_inputs)
        manager = Manager(master_input_dict, master_output_dict)
        result = manager.execute_landbosse("WISDEM")
        # Check if everything executed correctly
        if result != 0:
            raise Exception("LandBOSSE didn't execute correctly")
        # Gather the cost and detail outputs
        costs_by_module_type_operation = self.gather_costs_from_master_output_dict(master_output_dict)
        discrete_outputs["landbosse_costs_by_module_type_operation"] = costs_by_module_type_operation
        details = self.gather_details_from_master_output_dict(master_output_dict)
        discrete_outputs["landbosse_details_by_module"] = details
        # This is where we have access to the modified components, so put those
        # in the outputs of the component
        discrete_outputs["erection_components"] = master_input_dict["components"]
        # Now get specific outputs. These have been refactored to methods that work
        # with each module so as to keep this method as compact as possible.
        self.gather_specific_erection_outputs(master_output_dict, outputs, discrete_outputs)
        # Compute the total BOS costs
        self.compute_total_bos_costs(costs_by_module_type_operation, master_output_dict, inputs, outputs)
    def prepare_master_input_dictionary(self, inputs, discrete_inputs):
        """
        This prepares a master input dictionary by applying all the necessary
        modifications to the inputs.
        Parameters
        ----------
        inputs : openmdao.vectors.default_vector.DefaultVector
            A dictionary-like object with NumPy arrays that hold float
            inputs. Note that since these are NumPy arrays, they
            need indexing to pull out simple float64 values.
        discrete_inputs : openmdao.core.component._DictValues
            A dictionary-like with the non-numeric inputs (like
            pandas.DataFrame)
        Returns
        -------
        dict
            The prepared master input to go to the Manager.
        """
        inputs_dict = {key: inputs[key][0] for key in inputs.keys()}
        discrete_inputs_dict = {key: value for key, value in discrete_inputs.items()}
        incomplete_input_dict = {**inputs_dict, **discrete_inputs_dict}
        # Modify the default component data if needed and copy it into the
        # appropriate values of the input dictionary.
        modified_components = self.modify_component_lists(inputs, discrete_inputs)
        incomplete_input_dict["project_data"]["components"] = modified_components
        incomplete_input_dict["components"] = modified_components
        # FoundationCost needs to have all the component data split into separate
        # NumPy arrays.
        incomplete_input_dict["component_data"] = modified_components
        for component in incomplete_input_dict["component_data"].keys():
            incomplete_input_dict[component] = np.array(incomplete_input_dict["component_data"][component])
        # These are aliases because parts of the code call the same thing by
        # difference names.
        incomplete_input_dict["crew_cost"] = discrete_inputs["crew_price"]
        incomplete_input_dict["cable_specs_pd"] = discrete_inputs["cable_specs"]
        # read in RSMeans per diem:
        crew_cost = discrete_inputs["crew_price"]
        crew_cost = crew_cost.set_index("Labor type ID", drop=False)
        incomplete_input_dict["rsmeans_per_diem"] = crew_cost.loc["RSMeans", "Per diem USD per day"]
        # Calculate project size in megawatts
        incomplete_input_dict["project_size_megawatts"] = float(
            discrete_inputs["num_turbines"] * inputs["turbine_rating_MW"]
        )
        # Needed to avoid distributed wind keys
        incomplete_input_dict["road_distributed_wind"] = False
        defaults = DefaultMasterInputDict()
        master_input_dict = defaults.populate_input_dict(incomplete_input_dict)
        return master_input_dict
    def gather_costs_from_master_output_dict(self, master_output_dict):
        """
        This method extract all the cost_by_module_type_operation lists for
        output in an Excel file.
        It finds values for the keys ending in '_module_type_operation'. It
        then concatenates them together so they can be easily written to
        a .csv or .xlsx
        On every row, it includes the:
        Rotor diameter m
        Turbine rating MW
        Number of turbines
        This enables easy mapping of new columns if need be. The columns have
        spaces in the names so that they can be easily written to a user-friendly
        output.
        Parameters
        ----------
        master_output_dict : dict
            The master output dict with the finished module output in it.
        Returns
        -------
        list
            List of dicts to write to the .csv.
        """
        line_items = []
        # Gather the lists of costs
        cost_lists = [value for key, value in master_output_dict.items() if key.endswith("_module_type_operation")]
        # Flatten the list of lists that is the result of the gathering
        for cost_list in cost_lists:
            line_items.extend(cost_list)
        # Filter out the keys needed and rename them to meaningful values
        final_costs = []
        for line_item in line_items:
            item = {
                "Module": line_item["module"],
                "Type of cost": line_item["type_of_cost"],
                "Cost / kW": line_item["usd_per_kw_per_project"],
                "Cost / project": line_item["cost_per_project"],
                "Cost / turbine": line_item["cost_per_turbine"],
                "Number of turbines": line_item["num_turbines"],
                "Rotor diameter (m)": line_item["rotor_diameter_m"],
                "Turbine rating (MW)": line_item["turbine_rating_MW"],
                "Project ID with serial": line_item["project_id_with_serial"],
            }
            final_costs.append(item)
        return final_costs
    def gather_details_from_master_output_dict(self, master_output_dict):
        """
        This extracts the detail lists from all the modules to output
        the detailed non-cost data from the model run.
        Parameters
        ----------
        master_output_dict : dict
            The master output dict with the finished module output in it.
        Returns
        -------
        list
            List of dicts with detailed data.
        """
        line_items = []
        # Gather the lists of costs
        details_lists = [value for key, value in master_output_dict.items() if key.endswith("_csv")]
        # Flatten the list of lists
        for details_list in details_lists:
            line_items.extend(details_list)
        return line_items
    def gather_specific_erection_outputs(self, master_output_dict, outputs, discrete_outputs):
        """
        This method gathers specific outputs from the ErectionCost module and places
        them on the outputs.
        The method does not return anything. Rather, it places the outputs directly
        on the continuous of discrete outputs.
        Parameters
        ----------
        master_output_dict: dict
            The master output dictionary out of LandBOSSE
        outputs : openmdao.vectors.default_vector.DefaultVector
            A dictionary-like object to store outputs.
        discrete_outputs : openmdao.core.component._DictValues
            A dictionary-like for non-numeric outputs (like
            pandas.DataFrame)
        """
        discrete_outputs["erection_crane_choice"] = master_output_dict["crane_choice"]
        discrete_outputs["erection_component_name_topvbase"] = master_output_dict["component_name_topvbase"]
    def compute_total_bos_costs(self, costs_by_module_type_operation, master_output_dict, inputs, outputs):
        """
        This computes the total BOS costs from the master output dictionary
        and places them on the necessary outputs.
        Parameters
        ----------
        costs_by_module_type_operation: List[Dict[str, Any]]
            The lists of costs by module, type and operation.
        master_output_dict: Dict[str, Any]
            The master output dictionary from the run. Used to obtain the
            construction time,
        outputs : openmdao.vectors.default_vector.DefaultVector
            The outputs in which to place the results of the computations
        """
        bos_per_kw = 0.0
        bos_per_project = 0.0
        installation_per_project = 0.0
        installation_per_kW = 0.0
        for row in costs_by_module_type_operation:
            bos_per_kw += row["Cost / kW"]
            bos_per_project += row["Cost / project"]
            if row["Module"] in ["ErectionCost", "FoundationCost"]:
                installation_per_project += row["Cost / project"]
                installation_per_kW += row["Cost / kW"]
        commissioning_pct = inputs["commissioning_pct"]
        decommissioning_pct = inputs["decommissioning_pct"]
        # Commissioning and decommissioning are flat percentages of the BOS subtotal
        commissioning_per_project = bos_per_project * commissioning_pct
        decomissioning_per_project = bos_per_project * decommissioning_pct
        commissioning_per_kW = bos_per_kw * commissioning_pct
        decomissioning_per_kW = bos_per_kw * decommissioning_pct
        outputs["total_capex_kW"] = np.round(bos_per_kw + commissioning_per_kW + decomissioning_per_kW, 0)
        outputs["total_capex"] = np.round(bos_per_project + commissioning_per_project + decomissioning_per_project, 0)
        outputs["bos_capex"] = round(bos_per_project, 0)
        outputs["bos_capex_kW"] = round(bos_per_kw, 0)
        outputs["installation_capex"] = round(installation_per_project, 0)
        outputs["installation_capex_kW"] = round(installation_per_kW, 0)
        actual_construction_months = master_output_dict["actual_construction_months"]
        outputs["installation_time_months"] = round(actual_construction_months, 0)
    def modify_component_lists(self, inputs, discrete_inputs):
        """
        This method modifies the previously loaded default component lists with
        data about blades, tower sections, if they have been provided as input
        to the component.
        It only modifies the project component data if default data for the proper
        inputs have been overridden.
        The default blade data is assumed to be the first component that begins
        with the word "Blade"
        This should take mass from the tower in WISDEM. Ideally, this should have
        an input for transportable tower 4.3, large diameter steel tower LDST 6.2m, or
        unconstrained key stone tower. Or give warnings about the boundaries
        that we assume.
        Parameters
        ----------
        inputs : openmdao.vectors.default_vector.DefaultVector
            A dictionary-like object with NumPy arrays that hold float
            inputs. Note that since these are NumPy arrays, they
            need indexing to pull out simple float64 values.
        discrete_inputs : openmdao.core.component._DictValues
            A dictionary-like with the non-numeric inputs (like
            pandas.DataFrame)
        Returns
        -------
        pd.DataFrame
            The dataframe with the modified components.
        """
        input_components = discrete_inputs["components"]
        # This list is a sequence of pd.Series instances that have the
        # specifications of each component.
        output_components_list = []
        # Need to convert kg to tonnes
        kg_per_tonne = 1000
        # Get the hub height
        hub_height_meters = inputs["hub_height_meters"][0]
        # Make the nacelle. This does not include the hub or blades.
        nacelle_mass_kg = inputs["nacelle_mass"][0]
        nacelle = input_components[input_components["Component"].str.startswith("Nacelle")].iloc[0].copy()
        if inputs["nacelle_mass"] != use_default_component_data:
            nacelle["Mass tonne"] = nacelle_mass_kg / kg_per_tonne
            nacelle["Component"] = "Nacelle"
        nacelle["Lift height m"] = hub_height_meters
        output_components_list.append(nacelle)
        # Make the hub
        hub_mass_kg = inputs["hub_mass"][0]
        hub = input_components[input_components["Component"].str.startswith("Hub")].iloc[0].copy()
        hub["Lift height m"] = hub_height_meters
        if hub_mass_kg != use_default_component_data:
            hub["Mass tonne"] = hub_mass_kg / kg_per_tonne
        output_components_list.append(hub)
        # Make blades
        blade = input_components[input_components["Component"].str.startswith("Blade")].iloc[0].copy()
        # There is always a hub height, so use that as the lift height
        blade["Lift height m"] = hub_height_meters
        if inputs["blade_drag_coefficient"][0] != use_default_component_data:
            blade["Coeff drag"] = inputs["blade_drag_coefficient"][0]
        if inputs["blade_lever_arm"][0] != use_default_component_data:
            blade["Lever arm m"] = inputs["blade_lever_arm"][0]
        if inputs["blade_install_cycle_time"][0] != use_default_component_data:
            blade["Cycle time installation hrs"] = inputs["blade_install_cycle_time"][0]
        if inputs["blade_offload_hook_height"][0] != use_default_component_data:
            blade["Offload hook height m"] = hub_height_meters
        if inputs["blade_offload_cycle_time"][0] != use_default_component_data:
            # NOTE(review): unlike the other overrides this stores the full
            # array rather than element [0] -- confirm intended.
            blade["Offload cycle time hrs"] = inputs["blade_offload_cycle_time"]
        if inputs["blade_drag_multiplier"][0] != use_default_component_data:
            # NOTE(review): full array stored here too, not [0] -- confirm intended.
            blade["Multiplier drag rotor"] = inputs["blade_drag_multiplier"]
        if inputs["blade_mass"][0] != use_default_component_data:
            blade["Mass tonne"] = inputs["blade_mass"][0] / kg_per_tonne
        # Assume that number_of_blades always has a reasonable value. It's
        # default count when the discrete input is declared of 3 is always
        # reasonable unless overridden by another input.
        number_of_blades = discrete_inputs["number_of_blades"]
        for i in range(number_of_blades):
            component = f"Blade {i + 1}"
            blade_i = blade.copy()
            blade_i["Component"] = component
            output_components_list.append(blade_i)
        # Make tower sections
        tower_mass_tonnes = inputs["tower_mass"][0] / kg_per_tonne
        tower_height_m = hub_height_meters - inputs["foundation_height"][0]
        default_tower_section = input_components[input_components["Component"].str.startswith("Tower")].iloc[0]
        tower_sections = self.make_tower_sections(tower_mass_tonnes, tower_height_m, default_tower_section)
        output_components_list.extend(tower_sections)
        # Make the output component dataframe and return it.
        output_components = pd.DataFrame(output_components_list)
        return output_components
    @staticmethod
    def make_tower_sections(tower_mass_tonnes, tower_height_m, default_tower_section):
        """
        This makes tower sections for a transportable tower.
        Approximations:
        - Weight is distributed uniformly among the sections
        - The number of sections is either the maximum allowed by mass or
        the maximum allowed by height, to maintain transportability.
        For each tower section, calculate:
        - lift height
        - lever arm
        - surface area
        The rest of values should remain at their defaults.
        Note: Tower sections are constrained in maximum diameter to 4.5 m.
        However, their surface area is calculated with a 1.3 m radius
        to agree more closely with empirical data. Also, tower sections
        are approximated as cylinders.
        Parameters
        ----------
        tower_mass_tonnes: float
            The total tower mass in tonnes
        tower_height_m: float
            The total height of the tower in meters.
        default_tower_section: pd.Series
            There are a number of values that are kept constant in creating
            the tower sections. This series holds the values.
        Returns
        -------
        List[pd.Series]
            A list of series to be appended onto an output component list.
            It is not a dataframe, because it is faster to append to a list
            and make a dataframe once.
        """
        tower_radius = 1.3
        # Sections are capped at 30 m tall and 80 tonnes each for transportability
        number_of_sections = max(ceil(tower_height_m / 30), ceil(tower_mass_tonnes / 80))
        tower_section_height_m = tower_height_m / number_of_sections
        tower_section_mass = tower_mass_tonnes / number_of_sections
        tower_section_surface_area_m2 = np.pi * tower_section_height_m * (tower_radius ** 2)
        sections = []
        for i in range(number_of_sections):
            lift_height_m = (i * tower_section_height_m) + tower_section_height_m
            lever_arm = (i * tower_section_height_m) + (0.5 * tower_section_height_m)
            name = f"Tower {i + 1}"
            section = default_tower_section.copy()
            section["Component"] = name
            section["Mass tonne"] = tower_section_mass
            section["Lift height m"] = lift_height_m
            section["Surface area sq m"] = tower_section_surface_area_m2
            section["Section height m"] = tower_section_height_m
            section["Lever arm m"] = lever_arm
            sections.append(section)
        return sections
| [
"wisdem.landbosse.landbosse_omdao.OpenMDAODataframeCache.OpenMDAODataframeCache.read_all_sheets_from_xlsx",
"wisdem.landbosse.landbosse_omdao.WeatherWindowCSVReader.read_weather_window",
"math.ceil",
"wisdem.landbosse.model.Manager.Manager",
"warnings.catch_warnings",
"wisdem.landbosse.model.DefaultMaster... | [((401, 426), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (424, 426), False, 'import warnings\n'), ((432, 501), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""numpy.ufunc size changed"""'}), "('ignore', message='numpy.ufunc size changed')\n", (455, 501), False, 'import warnings\n'), ((10681, 10744), 'wisdem.landbosse.landbosse_omdao.OpenMDAODataframeCache.OpenMDAODataframeCache.read_all_sheets_from_xlsx', 'OpenMDAODataframeCache.read_all_sheets_from_xlsx', (['"""ge15_public"""'], {}), "('ge15_public')\n", (10729, 10744), False, 'from wisdem.landbosse.landbosse_omdao.OpenMDAODataframeCache import OpenMDAODataframeCache\n'), ((16732, 16778), 'wisdem.landbosse.model.Manager.Manager', 'Manager', (['master_input_dict', 'master_output_dict'], {}), '(master_input_dict, master_output_dict)\n', (16739, 16778), False, 'from wisdem.landbosse.model.Manager import Manager\n'), ((20545, 20569), 'wisdem.landbosse.model.DefaultMasterInputDict.DefaultMasterInputDict', 'DefaultMasterInputDict', ([], {}), '()\n', (20567, 20569), False, 'from wisdem.landbosse.model.DefaultMasterInputDict import DefaultMasterInputDict\n'), ((26151, 26221), 'numpy.round', 'np.round', (['(bos_per_kw + commissioning_per_kW + decomissioning_per_kW)', '(0)'], {}), '(bos_per_kw + commissioning_per_kW + decomissioning_per_kW, 0)\n', (26159, 26221), True, 'import numpy as np\n'), ((26255, 26344), 'numpy.round', 'np.round', (['(bos_per_project + commissioning_per_project + decomissioning_per_project)', '(0)'], {}), '(bos_per_project + commissioning_per_project +\n decomissioning_per_project, 0)\n', (26263, 26344), True, 'import numpy as np\n'), ((31687, 31723), 'pandas.DataFrame', 'pd.DataFrame', (['output_components_list'], {}), '(output_components_list)\n', (31699, 31723), True, 'import pandas as pd\n'), ((19637, 19697), 'numpy.array', 'np.array', 
(["incomplete_input_dict['component_data'][component]"], {}), "(incomplete_input_dict['component_data'][component])\n", (19645, 19697), True, 'import numpy as np\n'), ((33319, 33344), 'math.ceil', 'ceil', (['(tower_height_m / 30)'], {}), '(tower_height_m / 30)\n', (33323, 33344), False, 'from math import ceil\n'), ((33346, 33374), 'math.ceil', 'ceil', (['(tower_mass_tonnes / 80)'], {}), '(tower_mass_tonnes / 80)\n', (33350, 33374), False, 'from math import ceil\n'), ((11391, 11450), 'wisdem.landbosse.landbosse_omdao.WeatherWindowCSVReader.read_weather_window', 'read_weather_window', (["default_project_data['weather_window']"], {}), "(default_project_data['weather_window'])\n", (11410, 11450), False, 'from wisdem.landbosse.landbosse_omdao.WeatherWindowCSVReader import read_weather_window\n')] |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from logger import setup_logger
from model import BiSeNet
from face_dataset import FaceMask
from loss import OhemCELoss
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.distributed as dist
import os
import os.path as osp
import logging
import time
import numpy as np
from tqdm import tqdm
import math
from PIL import Image
import torchvision.transforms as transforms
import cv2
def vis_parsing_maps(im, parsing_anno, stride, save_im=False, save_path='vis_results/parsing_map_on_im.jpg'):
    """Blend a colorized face-parsing label map onto an image.

    im           -- source image (anything np.array accepts), assumed RGB
    parsing_anno -- 2-D array of per-pixel class labels
    stride       -- upscale factor applied to the label map (nearest neighbour)
    save_im      -- when True, write the blended BGR result to save_path as JPEG
    """
    # One color per class index; index 0 (background) is never painted.
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
                   [255, 0, 85], [255, 0, 170],
                   [0, 255, 0], [85, 255, 0], [170, 255, 0],
                   [0, 255, 85], [0, 255, 170],
                   [0, 0, 255], [85, 0, 255], [170, 0, 255],
                   [0, 85, 255], [0, 170, 255],
                   [255, 255, 0], [255, 255, 85], [255, 255, 170],
                   [255, 0, 255], [255, 85, 255], [255, 170, 255],
                   [0, 255, 255], [85, 255, 255], [170, 255, 255]]
    base_img = np.array(im).copy().astype(np.uint8)
    anno = cv2.resize(parsing_anno.copy().astype(np.uint8), None,
                      fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
    # Start from a white canvas and paint every class present in the map.
    color_map = np.zeros((anno.shape[0], anno.shape[1], 3)) + 255
    for cls in range(1, np.max(anno) + 1):
        color_map[anno == cls] = part_colors[cls]
    color_map = color_map.astype(np.uint8)
    # 40/60 alpha blend of the (BGR-converted) image and the color overlay.
    blended = cv2.addWeighted(cv2.cvtColor(base_img, cv2.COLOR_RGB2BGR), 0.4, color_map, 0.6, 0)
    # Save result or not
    if save_im:
        cv2.imwrite(save_path, blended, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
    # return blended
def evaluate(respth='./res/test_res', dspth='./data', cp='model_final_diss.pth'):
    """Evaluate a trained BiSeNet checkpoint and visualize its parsing maps.

    Computes the mean validation loss over the CelebAMask-HQ 'val' split,
    appends it to <respth>/loss.log, then runs the net over every image in
    dspth and saves colorized parsing overlays under <respth>/<iteration>/.

    respth -- root results directory; checkpoint is loaded from respth/cp/<cp>
    dspth  -- directory of raw images to parse and visualize
    cp     -- checkpoint file name; its leading "<n>_" token is parsed as
              the iteration number
    """
    if not os.path.exists(respth):
        os.makedirs(respth)
    n_classes = 19
    net = BiSeNet(n_classes=n_classes)
    net.cuda()
    save_pth = osp.join(respth, 'cp', cp)
    net.load_state_dict(torch.load(save_pth))
    net.eval()
    # Iteration number is encoded as the first '_'-separated token of cp.
    no_iter = str(int(cp.split('_')[0]))
    org_respth = respth[:]
    respth = os.path.join(respth, no_iter)
    if not os.path.exists(respth):
        os.makedirs(respth)
    # Mean/std here are the standard ImageNet normalization constants.
    to_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    ''' added '''
    # --- Validation-loss pass over the CelebAMask-HQ dataset ---
    cropsize = [448, 448]
    n_img_per_gpu = 16
    data_root = '/home/jihyun/workspace/face_parsing/dataset/CelebAMask-HQ/'
    ds = FaceMask(data_root, cropsize=cropsize, mode='val')
    dl = DataLoader(ds, batch_size=16, shuffle=False, drop_last=True)
    # n_min is passed to OhemCELoss; presumably the minimum number of
    # hard pixels the OHEM loss keeps per batch — confirm against loss.py.
    n_min = n_img_per_gpu * cropsize[0] * cropsize[1] // 16
    score_thres = 0.7
    ignore_idx = -100
    loss_avg = []
    # One loss per network output: main head plus two auxiliary heads.
    LossP = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
    Loss2 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
    Loss3 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
    with torch.no_grad():
        for i, sample in enumerate(dl):
            im, lb = sample
            im = im.cuda()
            lb = lb.cuda()
            # Drop the singleton channel dim expected by the CE losses.
            lb = torch.squeeze(lb, 1)
            out, out16, out32 = net(im)
            lossp = LossP(out, lb)
            loss2 = Loss2(out16, lb)
            loss3 = Loss3(out32, lb)
            loss = lossp + loss2 + loss3
            loss_avg.append(loss.item())
    loss_avg = sum(loss_avg) / len(loss_avg)
    # Append the mean validation loss to the shared log file.
    f = open(osp.join(org_respth, 'loss.log'), 'a')
    f.write(' eval_loss: ' + str(loss_avg) + '\n')
    f.close()
    # --- Visualization pass: parse every raw image in dspth at 512x512 ---
    for image_path in os.listdir(dspth):
        img = Image.open(osp.join(dspth, image_path))
        image = img.resize((512, 512), Image.BILINEAR)
        img = to_tensor(image)
        img = torch.unsqueeze(img, 0)
        img = img.cuda()
        out, out16, out32 = net(img)
        # argmax over the class channel yields the per-pixel label map.
        parsing = out.squeeze(0).cpu().numpy().argmax(0)
        vis_parsing_maps(image, parsing, stride=1, save_im=True, save_path=osp.join(respth, image_path))
if __name__ == "__main__":
    # Script entry point: configure logging under ./res, then run the
    # evaluation with its default paths and checkpoint name.
    setup_logger('./res')
    evaluate()
| [
"loss.OhemCELoss",
"logger.setup_logger",
"numpy.array",
"torch.squeeze",
"os.path.exists",
"model.BiSeNet",
"os.listdir",
"numpy.where",
"torch.unsqueeze",
"numpy.max",
"torchvision.transforms.ToTensor",
"cv2.cvtColor",
"torchvision.transforms.Normalize",
"cv2.resize",
"face_dataset.Fac... | [((1175, 1187), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1183, 1187), True, 'import numpy as np\n'), ((1311, 1405), 'cv2.resize', 'cv2.resize', (['vis_parsing_anno', 'None'], {'fx': 'stride', 'fy': 'stride', 'interpolation': 'cv2.INTER_NEAREST'}), '(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.\n INTER_NEAREST)\n', (1321, 1405), False, 'import cv2\n'), ((1524, 1548), 'numpy.max', 'np.max', (['vis_parsing_anno'], {}), '(vis_parsing_anno)\n', (1530, 1548), True, 'import numpy as np\n'), ((2263, 2291), 'model.BiSeNet', 'BiSeNet', ([], {'n_classes': 'n_classes'}), '(n_classes=n_classes)\n', (2270, 2291), False, 'from model import BiSeNet\n'), ((2322, 2348), 'os.path.join', 'osp.join', (['respth', '"""cp"""', 'cp'], {}), "(respth, 'cp', cp)\n", (2330, 2348), True, 'import os.path as osp\n'), ((2492, 2521), 'os.path.join', 'os.path.join', (['respth', 'no_iter'], {}), '(respth, no_iter)\n', (2504, 2521), False, 'import os\n'), ((2893, 2943), 'face_dataset.FaceMask', 'FaceMask', (['data_root'], {'cropsize': 'cropsize', 'mode': '"""val"""'}), "(data_root, cropsize=cropsize, mode='val')\n", (2901, 2943), False, 'from face_dataset import FaceMask\n'), ((2953, 3013), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'batch_size': '(16)', 'shuffle': '(False)', 'drop_last': '(True)'}), '(ds, batch_size=16, shuffle=False, drop_last=True)\n', (2963, 3013), False, 'from torch.utils.data import DataLoader\n'), ((3150, 3215), 'loss.OhemCELoss', 'OhemCELoss', ([], {'thresh': 'score_thres', 'n_min': 'n_min', 'ignore_lb': 'ignore_idx'}), '(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)\n', (3160, 3215), False, 'from loss import OhemCELoss\n'), ((3228, 3293), 'loss.OhemCELoss', 'OhemCELoss', ([], {'thresh': 'score_thres', 'n_min': 'n_min', 'ignore_lb': 'ignore_idx'}), '(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)\n', (3238, 3293), False, 'from loss import OhemCELoss\n'), ((3306, 3371), 'loss.OhemCELoss', 
'OhemCELoss', ([], {'thresh': 'score_thres', 'n_min': 'n_min', 'ignore_lb': 'ignore_idx'}), '(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)\n', (3316, 3371), False, 'from loss import OhemCELoss\n'), ((4511, 4532), 'logger.setup_logger', 'setup_logger', (['"""./res"""'], {}), "('./res')\n", (4523, 4532), False, 'from logger import setup_logger\n'), ((1430, 1497), 'numpy.zeros', 'np.zeros', (['(vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)'], {}), '((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3))\n', (1438, 1497), True, 'import numpy as np\n'), ((1608, 1640), 'numpy.where', 'np.where', (['(vis_parsing_anno == pi)'], {}), '(vis_parsing_anno == pi)\n', (1616, 1640), True, 'import numpy as np\n'), ((1868, 1907), 'cv2.cvtColor', 'cv2.cvtColor', (['vis_im', 'cv2.COLOR_RGB2BGR'], {}), '(vis_im, cv2.COLOR_RGB2BGR)\n', (1880, 1907), False, 'import cv2\n'), ((2181, 2203), 'os.path.exists', 'os.path.exists', (['respth'], {}), '(respth)\n', (2195, 2203), False, 'import os\n'), ((2213, 2232), 'os.makedirs', 'os.makedirs', (['respth'], {}), '(respth)\n', (2224, 2232), False, 'import os\n'), ((2373, 2393), 'torch.load', 'torch.load', (['save_pth'], {}), '(save_pth)\n', (2383, 2393), False, 'import torch\n'), ((2534, 2556), 'os.path.exists', 'os.path.exists', (['respth'], {}), '(respth)\n', (2548, 2556), False, 'import os\n'), ((2566, 2585), 'os.makedirs', 'os.makedirs', (['respth'], {}), '(respth)\n', (2577, 2585), False, 'import os\n'), ((3382, 3397), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3395, 3397), False, 'import torch\n'), ((4015, 4032), 'os.listdir', 'os.listdir', (['dspth'], {}), '(dspth)\n', (4025, 4032), False, 'import os\n'), ((2632, 2653), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2651, 2653), True, 'import torchvision.transforms as transforms\n'), ((2663, 2729), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 
0.456, 0.406), (0.229, 0.224, 0.225))\n', (2683, 2729), True, 'import torchvision.transforms as transforms\n'), ((3547, 3567), 'torch.squeeze', 'torch.squeeze', (['lb', '(1)'], {}), '(lb, 1)\n', (3560, 3567), False, 'import torch\n'), ((3876, 3908), 'os.path.join', 'osp.join', (['org_respth', '"""loss.log"""'], {}), "(org_respth, 'loss.log')\n", (3884, 3908), True, 'import os.path as osp\n'), ((4204, 4227), 'torch.unsqueeze', 'torch.unsqueeze', (['img', '(0)'], {}), '(img, 0)\n', (4219, 4227), False, 'import torch\n'), ((4063, 4090), 'os.path.join', 'osp.join', (['dspth', 'image_path'], {}), '(dspth, image_path)\n', (4071, 4090), True, 'import os.path as osp\n'), ((4440, 4468), 'os.path.join', 'osp.join', (['respth', 'image_path'], {}), '(respth, image_path)\n', (4448, 4468), True, 'import os.path as osp\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 21 17:26:11 2019
@author: samghosal
"""
from __future__ import division
"""----------------------------------------------------------------------------------------------
README: Simple Python Code for Testing and evaluating the trained CNN model on MNIST test data
----------------------------------------------------------------------------------------------
For each section of the code, there are Headings depicting what the subsequent lines of code
do.
Evaluation Metrics: Test Loss, Test Accuracy, Confusion Matrix, Precision, Recall and F1-Score
Test Accuracy achieved: 99.23%.
PLease check README within the training code - 'mnist_train.py' for other details.
"""
###################
# IMPORT PACKAGES #
###################
import numpy as np
import gzip
import matplotlib.pyplot as plt
import keras
import tensorflow as tf
from keras.utils import to_categorical
# from keras.layers.normalization import BatchNormalization
from keras import backend as K
from model import load_model
# Probe for GPUs (return value intentionally discarded) and configure a
# TF session limited to one GPU with on-demand memory growth.
K.tensorflow_backend._get_available_gpus()
config = tf.ConfigProto(device_count = {'GPU': 1})
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
np.random.seed(1337) # set seed for reproducability
###########################################
# LOADING THE TEST DATA and PREPROCESSING #
###########################################
# Paths to the gzipped MNIST test set in the original IDX format.
filename3 = "MNIST/t10k-images-idx3-ubyte.gz"
filename4 = "MNIST/t10k-labels-idx1-ubyte.gz"
# image parameters
img_size = 784 # size (vectorized sample)
# input image dimensions
img_rows, img_cols = 28, 28 # information obtained from metadata
# IDX image file: 16-byte header, then raw uint8 pixels.
with gzip.open(filename3, 'rb') as f:
    test_img = np.frombuffer(f.read(), np.uint8, offset=16)
    test_img = test_img.reshape(-1, img_size)
f.close()  # no-op: the with-block already closed the file
# IDX label file: 8-byte header, then one uint8 label per sample.
with gzip.open(filename4, 'rb') as f:
    test_lab = np.frombuffer(f.read(), np.uint8, offset=8)
f.close()  # no-op: the with-block already closed the file
X_test = test_img
Y_test_pre = test_lab
num_classes = 10
# Transform to Categorical Variables (One-Hot Encode)
Y_test = to_categorical(Y_test_pre, num_classes=num_classes)
# Reshape to NCHW or NHWC depending on the active Keras backend layout.
if K.image_data_format() == 'channels_first':
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Normalize the data and cast to float32
X_test = X_test/255
X_test = X_test.astype('float32')
print('Test Data shape...................')
print('Test data shape:', X_test.shape)
##########################
# Load Model and weights #
##########################
model = load_model(num_classes, True)
#####################################
# Evaluating loaded model & weights #
#####################################
# Overall test loss and accuracy as reported by Keras.
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Getting Test Predictions
y_test_predictions = model.predict(x=X_test)
# NOTE(review): y_test_class is computed but never used below.
y_test_class = np.argmax(Y_test, axis=1)
y_pred_class = np.argmax(y_test_predictions, axis=1)
# Evaluation Metrics
from sklearn.metrics import confusion_matrix, classification_report
import itertools
# Print Classification Report
print('Printing Classification Report (per class Precision, Recall and F1-Score): ')
print(classification_report(Y_test_pre, y_pred_class))
# Plot Confusion Matrix
cm = confusion_matrix(Y_test_pre, y_pred_class)
# Normalize each row so entries are per-true-class rates.
cm = cm.astype('float') / cm.sum(axis = 1)[:, np.newaxis]
print('Printing Confusion Matrix: ')
print(cm)
plt.figure()
plt.imshow(cm, cmap = 'plasma')
plt.title('Confusion matrix')
plt.colorbar()
tick_marks = np.arange(num_classes)
plt.xticks(tick_marks, np.arange(num_classes), rotation=45)
plt.yticks(tick_marks, np.arange(num_classes))
# Annotate every cell; values above half the max are drawn in red.
fmt = '.1f'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
    plt.text(j, i, format(cm[i, j], fmt), fontsize=6,
             horizontalalignment="center",
             color="red" if cm[i, j] > thresh else "white")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig('results/cm.png')
| [
"matplotlib.pyplot.ylabel",
"gzip.open",
"sklearn.metrics.classification_report",
"keras.utils.to_categorical",
"numpy.arange",
"matplotlib.pyplot.imshow",
"keras.backend.image_data_format",
"tensorflow.Session",
"matplotlib.pyplot.xlabel",
"numpy.random.seed",
"tensorflow.ConfigProto",
"sklea... | [((1102, 1144), 'keras.backend.tensorflow_backend._get_available_gpus', 'K.tensorflow_backend._get_available_gpus', ([], {}), '()\n', (1142, 1144), True, 'from keras import backend as K\n'), ((1154, 1193), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 1}"}), "(device_count={'GPU': 1})\n", (1168, 1193), True, 'import tensorflow as tf\n'), ((1243, 1268), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1253, 1268), True, 'import tensorflow as tf\n'), ((1270, 1301), 'keras.backend.set_session', 'keras.backend.set_session', (['sess'], {}), '(sess)\n', (1295, 1301), False, 'import keras\n'), ((1304, 1324), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (1318, 1324), True, 'import numpy as np\n'), ((2164, 2215), 'keras.utils.to_categorical', 'to_categorical', (['Y_test_pre'], {'num_classes': 'num_classes'}), '(Y_test_pre, num_classes=num_classes)\n', (2178, 2215), False, 'from keras.utils import to_categorical\n'), ((2804, 2833), 'model.load_model', 'load_model', (['num_classes', '(True)'], {}), '(num_classes, True)\n', (2814, 2833), False, 'from model import load_model\n'), ((3171, 3196), 'numpy.argmax', 'np.argmax', (['Y_test'], {'axis': '(1)'}), '(Y_test, axis=1)\n', (3180, 3196), True, 'import numpy as np\n'), ((3212, 3249), 'numpy.argmax', 'np.argmax', (['y_test_predictions'], {'axis': '(1)'}), '(y_test_predictions, axis=1)\n', (3221, 3249), True, 'import numpy as np\n'), ((3558, 3600), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y_test_pre', 'y_pred_class'], {}), '(Y_test_pre, y_pred_class)\n', (3574, 3600), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((3708, 3720), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3718, 3720), True, 'import matplotlib.pyplot as plt\n'), ((3721, 3750), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'cmap': '"""plasma"""'}), "(cm, cmap='plasma')\n", (3731, 3750), 
True, 'import matplotlib.pyplot as plt\n'), ((3753, 3782), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion matrix"""'], {}), "('Confusion matrix')\n", (3762, 3782), True, 'import matplotlib.pyplot as plt\n'), ((3783, 3797), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3795, 3797), True, 'import matplotlib.pyplot as plt\n'), ((3811, 3833), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (3820, 3833), True, 'import numpy as np\n'), ((4206, 4224), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4222, 4224), True, 'import matplotlib.pyplot as plt\n'), ((4225, 4249), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (4235, 4249), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4279), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (4260, 4279), True, 'import matplotlib.pyplot as plt\n'), ((4280, 4309), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/cm.png"""'], {}), "('results/cm.png')\n", (4291, 4309), True, 'import matplotlib.pyplot as plt\n'), ((1772, 1798), 'gzip.open', 'gzip.open', (['filename3', '"""rb"""'], {}), "(filename3, 'rb')\n", (1781, 1798), False, 'import gzip\n'), ((1935, 1961), 'gzip.open', 'gzip.open', (['filename4', '"""rb"""'], {}), "(filename4, 'rb')\n", (1944, 1961), False, 'import gzip\n'), ((2259, 2280), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (2278, 2280), True, 'from keras import backend as K\n'), ((3479, 3526), 'sklearn.metrics.classification_report', 'classification_report', (['Y_test_pre', 'y_pred_class'], {}), '(Y_test_pre, y_pred_class)\n', (3500, 3526), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((3857, 3879), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (3866, 3879), True, 'import numpy as np\n'), ((3917, 3939), 'numpy.arange', 'np.arange', (['num_classes'], 
{}), '(num_classes)\n', (3926, 3939), True, 'import numpy as np\n')] |
from portfolio import Portfolio, PM
import datetime as dt
from collections import OrderedDict
import utility
import copy
import numpy as np
class Backtester:
    """Walk-forward backtester over a universe object.

    Repeatedly runs a strategy on a sliding window of visible data,
    builds a Portfolio from the strategy's output at each rebalance
    date, and records everything in an OrderedDict history.
    """
    def __init__(self, universeObj, start=None, end=None):
        """Store the universe and resolve the backtest date range.

        start/end default to the universe's own dateRange; any endpoint
        still missing is filled from PM.getPortfolioDateRange over the
        universe's assets.
        """
        if start is None:
            start = universeObj.dateRange[0]
        if end is None:
            end = universeObj.dateRange[1]
        self.universe = universeObj
        # Can still be None, None, which will be checked later
        self.dateRange = [start, end]
        if self.dateRange[0] is None or self.dateRange[1] is None:
            start, end = PM.getPortfolioDateRange(universeObj.assets)
            if self.dateRange[0] is None:
                self.dateRange[0] = start
            if self.dateRange[1] is None:
                self.dateRange[1] = end
        # Freeze as an immutable tuple once both endpoints are resolved.
        self.dateRange = tuple(copy.deepcopy(self.dateRange))
    def load(self):
        """Materialize universe data and keep a pristine deep copy so
        visibility windows can be re-cut from the originals later."""
        self.universe.make()
        self.originals = copy.deepcopy(self.universe.data)
    def reduce(self, frame):
        """Return the rows of frame whose index lies inside the current
        visibility window (inclusive on both ends)."""
        return frame.loc[(frame.index >= self.visibility[0]) & (frame.index <= self.visibility[1])]
    def reduceVisibility(self, dateRange):
        """Restrict every frame in the universe to dateRange, re-cutting
        each one from the untouched originals."""
        self.visibility = (np.datetime64(dateRange[0]), np.datetime64(dateRange[1]))
        for key in self.universe.data.keys():
            self.universe.data[key] = self.reduce(self.originals[key].copy())
    def backtest(self, strategy, frequency, daysBeforeVisibility=360, inverse=False):
        """Run the walk-forward loop.

        strategy -- callable (universe, kwdict) -> (assets, allocation,
                    direction, kwdict); it only sees data inside the
                    current visibility window
        frequency -- days between rebalances (snapped to a weekday)
        daysBeforeVisibility -- lookback window length in days; also the
                    amount of extra history loaded before the start
        inverse -- if True, flip the direction vector (short the strategy)

        Returns an OrderedDict keyed by rebalance date holding the
        chosen assets, allocation, direction and evaluated Portfolio.
        """
        history = OrderedDict()
        # Load extra history before the official start so the first
        # strategy call already has a full lookback window.
        firstDayData = self.dateRange[0] - dt.timedelta(days=daysBeforeVisibility)
        self.universe.dateRange = (firstDayData, self.dateRange[1])
        print("Loading universe data from {0} to {1}.".format(self.universe.dateRange[0], self.universe.dateRange[1]))
        self.load()
        print("Backtesting from {0} to {1}".format(self.dateRange[0], self.dateRange[1]))
        prevPi = None
        kwdict = {}
        currentDay = self.dateRange[0]
        while currentDay < self.dateRange[1]:
            startVisibility = currentDay - dt.timedelta(days=daysBeforeVisibility)
            self.reduceVisibility((startVisibility, currentDay))
            print("")
            print('[BACKTESTER]: Running strategy with data from: \n {0} to {1}'.format(startVisibility, currentDay))
            assets, allocation, direction, kwdict = strategy(self.universe, kwdict)
            if inverse:
                direction *= -1.0
            nextRun = min(utility.next_weekday(currentDay + dt.timedelta(days=frequency)), self.dateRange[1])
            print('[BACKTESTER]: Checking Portfolio from: \n {0} to {1}'.format(currentDay, nextRun))
            print("")
            pi = Portfolio(assets, allocation, direction, self.universe.riskFree)
            if prevPi is None:
                pi.setDateRange(currentDay, nextRun)
            else:
                # Chain periods: start where the previous portfolio's
                # return series left off to avoid gaps or overlaps.
                pi.setDateRange(prevPi.pReturn.index.max(), nextRun)
            pi.factors = self.universe.factors
            pi.make()
            pi.summarize()
            history[currentDay] = {'assets': assets,
                                   'n_assets': len(assets),
                                   'allocation': allocation,
                                   'direction': direction,
                                   'pi': pi}
            currentDay = nextRun
            prevPi = pi
        return history
| [
"collections.OrderedDict",
"portfolio.Portfolio",
"copy.deepcopy",
"portfolio.PM.getPortfolioDateRange",
"numpy.datetime64",
"datetime.timedelta"
] | [((937, 970), 'copy.deepcopy', 'copy.deepcopy', (['self.universe.data'], {}), '(self.universe.data)\n', (950, 970), False, 'import copy\n'), ((1771, 1784), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1782, 1784), False, 'from collections import OrderedDict\n'), ((588, 632), 'portfolio.PM.getPortfolioDateRange', 'PM.getPortfolioDateRange', (['universeObj.assets'], {}), '(universeObj.assets)\n', (612, 632), False, 'from portfolio import Portfolio, PM\n'), ((830, 859), 'copy.deepcopy', 'copy.deepcopy', (['self.dateRange'], {}), '(self.dateRange)\n', (843, 859), False, 'import copy\n'), ((1173, 1200), 'numpy.datetime64', 'np.datetime64', (['dateRange[0]'], {}), '(dateRange[0])\n', (1186, 1200), True, 'import numpy as np\n'), ((1202, 1229), 'numpy.datetime64', 'np.datetime64', (['dateRange[1]'], {}), '(dateRange[1])\n', (1215, 1229), True, 'import numpy as np\n'), ((1828, 1867), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'daysBeforeVisibility'}), '(days=daysBeforeVisibility)\n', (1840, 1867), True, 'import datetime as dt\n'), ((2980, 3044), 'portfolio.Portfolio', 'Portfolio', (['assets', 'allocation', 'direction', 'self.universe.riskFree'], {}), '(assets, allocation, direction, self.universe.riskFree)\n', (2989, 3044), False, 'from portfolio import Portfolio, PM\n'), ((2337, 2376), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'daysBeforeVisibility'}), '(days=daysBeforeVisibility)\n', (2349, 2376), True, 'import datetime as dt\n'), ((2787, 2815), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'frequency'}), '(days=frequency)\n', (2799, 2815), True, 'import datetime as dt\n')] |
# Graphics for Exploratory Analysis Script
# ==============================================================================
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# ^^^ pyforest auto-imports - don't write above this line
# ==============================================================================
# Auto Import Dependencies
# ==============================================================================
# pyforest imports dependencies according to use in the notebook
# ==============================================================================
# Dependencies not Included in Auto Import*
# ==============================================================================
import matplotlib.ticker as ticker
# Disribution of Target Variable
# ==============================================================================
def Target_Distribution(df, target):
    """Plot KDE distributions of the target column (raw and sqrt-transformed)
    and save the figure under ../reports/figures/.

    df     -- DataFrame holding the data
    target -- name of the target column to plot
    """
    # Three stacked panels are allocated, but the third (log transform)
    # is currently disabled below, so axes[2] stays empty.
    fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(15, 10))
    sns.distplot(
        df[target],
        hist = False,
        rug = True,
        color = "navy",
        kde_kws = {'shade': True, 'linewidth': 1},
        ax = axes[0]
    )
    axes[0].set_title("Original layout", fontsize = 'medium')
    axes[0].set_xlabel(f'{target}', fontsize='small')
    axes[0].tick_params(labelsize = 6)
    sns.distplot(
        np.sqrt(df[target]),
        hist = False,
        rug = True,
        color = "purple",
        kde_kws = {'shade': True, 'linewidth': 1},
        ax = axes[1]
    )
    axes[1].set_title("Square root transformation", fontsize = 'medium')
    # NOTE(review): label is the literal 'sqrt(var)' (no interpolation) —
    # possibly intended to be f'sqrt({target})'.
    axes[1].set_xlabel(f'sqrt(var)', fontsize='small')
    axes[1].tick_params(labelsize = 6)
    # Disabled log-transform panel: the triple-quoted string below is a
    # no-op statement, not executed code.
    """sns.distplot(
        np.log(df[target]),
        hist = False,
        rug = True,
        color = "coral",
        kde_kws = {'shade': True, 'linewidth': 1},
        ax = axes[2]
    )
    axes[2].set_title("Logarithmic transformation", fontsize = 'medium')
    axes[2].set_xlabel(f'log({target})', fontsize='small')
    axes[2].tick_params(labelsize = 6)
    """
    fig.suptitle(f'Distribution of the {target} Variable', fontsize = 30, fontweight = "bold")
    fig.tight_layout()
    plt.savefig(f"../reports/figures/{target}_Distribution_Variable.png")
# Distribution graph for each numerical variable
# ==============================================================================
# Adjust number of subplots based on the number of columns
def Numerical_Distribution(df, var, name, cols, rows):
    """Draw a count histogram with KDE for every numeric column of df
    except var, on a cols x rows subplot grid, and save the figure to
    ../reports/figures/ using name in the title and file name.
    """
    fig, axes = plt.subplots(ncols=cols, nrows=rows, figsize=(cols*5, rows*5))
    panels = axes.flat
    numeric_cols = df.select_dtypes(include=['float64', 'int']).columns.drop(f'{var}')
    # Cycle the default color palette twice so long column lists still
    # get a color each.
    palette = list(plt.rcParams['axes.prop_cycle']) * 2
    for idx, col_name in enumerate(numeric_cols):
        panel = panels[idx]
        sns.histplot(data=df, x=col_name, stat="count", kde=True,
                     color=palette[idx]["color"],
                     line_kws={'linewidth': 2}, alpha=0.3, ax=panel)
        panel.set_title(col_name, fontsize=16, fontweight="bold")
        panel.tick_params(labelsize=16)
        panel.set_xlabel("")
    fig.tight_layout()
    plt.subplots_adjust(top=0.9)
    fig.suptitle(f'Distribution Numerical Variable {name}', fontsize=cols*4, fontweight="bold")
    plt.savefig(f'../reports/figures/Distribution_Numerical_Variable_{name}.png')
# Correlation & Distribution graph for each numerical variable
# ==============================================================================
# Adjust number of subplots based on the number of columns
def Numerical_Correlation(df, target, drop ,cols, rows):
    """Scatter + regression-line plot of target against every numeric
    column (minus the ones in drop), on a cols x rows grid, saved to
    ../reports/figures/.

    df     -- DataFrame holding the data
    target -- name of the dependent variable (y axis of every panel)
    drop   -- label or list of labels to exclude from the x variables
    cols, rows -- subplot grid dimensions
    """
    fig, axes = plt.subplots(ncols=cols, nrows=rows, figsize=(cols*5, rows*5))
    axes = axes.flat
    columnas_numeric = df.select_dtypes(include=['float64', 'int']).columns
    columnas_numeric = columnas_numeric.drop(drop)
    for i, colum in enumerate(columnas_numeric):
        sns.regplot(
            x = df[colum],
            y = df[target],
            color = "navy",
            marker = '.',
            scatter_kws = {"alpha":0.4},
            line_kws = {"color":"r","alpha":0.7},
            ax = axes[i]
        )
        axes[i].set_title(f"{target} vs {colum}", fontsize = 16, fontweight = "bold")
        #axes[i].ticklabel_format(style='sci', scilimits=(-4,4), axis='both')
        # Engineering notation (k, M, ...) keeps large tick labels readable.
        axes[i].yaxis.set_major_formatter(ticker.EngFormatter())
        axes[i].xaxis.set_major_formatter(ticker.EngFormatter())
        axes[i].tick_params(labelsize = 16)
        axes[i].set_xlabel("")
        axes[i].set_ylabel("")
        #if (i-1 >= len(columnas_numeric)-1): break
    # Empty axes are removed
    # (disabled: the triple-quoted string below is a no-op statement)
    """for i in [8]:
        fig.delaxes(axes[i])"""
    fig.tight_layout()
    plt.subplots_adjust(top=0.9)
    fig.suptitle(f'Correlation with {target}', fontsize = cols*4, fontweight = "bold")
    plt.savefig(f"../reports/figures/Correlation_Each_Numerical_Variable_with_{target}.png")
# Correlation between numeric columns
# ==============================================================================
def tidy_corr_matrix(df):
    """Return the pairwise Pearson correlations of df's numeric columns
    in long (tidy) format.

    The result has columns variable_1, variable_2, r and abs_r, excludes
    the diagonal self-correlations, and is sorted by abs_r descending.
    Both (a, b) and (b, a) orderings of each pair are kept.
    """
    numeric = df.select_dtypes(include=['float64', 'int'])
    tidy = numeric.corr(method='pearson').stack().reset_index()
    tidy.columns = ['variable_1', 'variable_2', 'r']
    # Drop the diagonal (a variable correlated with itself).
    off_diagonal = tidy['variable_1'] != tidy['variable_2']
    tidy = tidy.loc[off_diagonal, :]
    tidy['abs_r'] = tidy['r'].abs()
    return tidy.sort_values('abs_r', ascending=False)
# Heatmap matrix of correlations
# ==============================================================================
def heat_map(df, name):
    """Plot the lower-triangle heatmap of the Pearson correlation matrix
    of df's numeric columns and save it to ../reports/figures/.

    df   -- DataFrame whose float64/int columns are correlated
    name -- label used in the figure title and output file name
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
    # Fix: the original chained ".corr(method='pearson').corr()", which
    # plotted the correlations *of the correlation matrix* rather than of
    # the data (tidy_corr_matrix in this module uses a single .corr()).
    corr = df.select_dtypes(include=['float64', 'int']).corr(method='pearson')
    # Mask the upper triangle so each variable pair appears only once.
    matrix = np.triu(corr)
    sns.heatmap(corr,
                annot=True,
                mask=matrix,
                cmap=sns.diverging_palette(150, 275, s=80, l=55, n=9),
                annot_kws = {"size": 10})
    ax.set_xticklabels(
        ax.get_xticklabels(),
        rotation = 45,
        horizontalalignment = 'right',
    )
    ax.set_yticklabels(
        ax.get_yticklabels(),
        rotation = 0,
        horizontalalignment = 'right',
    )
    ax.tick_params(labelsize = 15)
    fig.suptitle(f'Heatmap Correlation Matrix {name}', fontsize = 30, fontweight = "bold")
    plt.savefig(f"../reports/figures/Heatmap_Matrix_Correlations_{name}.png")
# Graph for each qualitative variable
# ==============================================================================
# Adjust number of subplots based on the number of columns
def Qualitative_Distribution(df, name, rows, cols):
    """Horizontal bar chart of the value counts of every object-dtype
    (categorical) column of df, on a rows x cols grid, saved under
    ../reports/figures/.
    """
    fig, grid = plt.subplots(nrows=rows, ncols=cols, figsize=(rows*10, rows*50))
    grid = grid.flat
    categorical_cols = df.select_dtypes(include=['object']).columns
    for idx, col_name in enumerate(categorical_cols):
        panel = grid[idx]
        df[col_name].value_counts().plot.barh(ax=panel)
        panel.set_title(col_name, fontsize=16, fontweight="bold")
        panel.tick_params(labelsize=11)
        panel.set_xlabel("")
    fig.tight_layout()
    plt.subplots_adjust(top=0.9)
    fig.suptitle(f'Qualitative variable distribution {name}',
                 fontsize=30, fontweight="bold")
    # NOTE: the file name keeps the historical "Qualtitative" spelling so
    # any downstream consumer of the saved figures keeps working.
    plt.savefig(f"../reports/figures/Each_Qualtitative_Variable_{name}.png")
# Graph relationship between the Target and each qualitative variables
# ==============================================================================
# Adjust number of subplots based on the number of columns
def Qualitative_Relationship(df, var, rows, cols):
    """Violin plot of the target var grouped by each object-dtype
    (categorical) column of df, on a rows x cols grid, saved under
    ../reports/figures/.
    """
    fig, grid = plt.subplots(nrows=rows, ncols=cols, figsize=(100, 60))
    grid = grid.flat
    categorical_cols = df.select_dtypes(include=['object']).columns
    for idx, col_name in enumerate(categorical_cols):
        panel = grid[idx]
        sns.violinplot(x=col_name, y=var, data=df, color="coral", ax=panel)
        panel.set_title(f"{col_name} vs {var}", fontsize=30, fontweight="bold")
        # Engineering notation keeps large y tick labels readable.
        panel.yaxis.set_major_formatter(ticker.EngFormatter())
        panel.tick_params(labelsize=22)
        panel.set_xticklabels(panel.get_xticklabels(),
                              rotation=45, horizontalalignment='right')
        panel.set_xlabel("")
        panel.set_ylabel("")
    fig.tight_layout()
    plt.subplots_adjust(top=0.9)
    fig.suptitle(f'{var} distribution by group', fontsize=60, fontweight="bold")
    plt.savefig(f"../reports/figures/Target_vs_Qualitative_Variable_{var}.png")
# Graph adjusted intertia BestK for KMeans
# ==============================================================================
def inertia(results):
    """Line-plot the adjusted inertia for each candidate K (elbow chart)
    and save the figure for the KMeans cluster-count analysis."""
    fig = plt.figure(figsize=(14, 8))
    plt.plot(results, '-o')
    axis = fig.gca()
    axis.set_title('Adjusted Inertia for each K')
    axis.set_xlabel('K')
    axis.set_ylabel('Adjusted Inertia')
    axis.set_xticks(range(2, len(results), 1))
    fig.savefig("../../reports/figures/BestK_for_KMeans.png")
# Graph PCA
# ==============================================================================
def pca(pca):
    """Bar-plot the explained-variance ratio of each principal component
    and save the chart under ../../reports/figures/.

    pca -- a fitted PCA-like object exposing n_components_ and
           explained_variance_ratio_ (e.g. sklearn.decomposition.PCA)
    """
    # 1-based component indices for display.
    PC = range(1, pca.n_components_+1)
    plt.figure(figsize=(12,6))
    plt.bar(PC, pca.explained_variance_ratio_, color=('navy','b','g','r','coral','c','m','y','k','gray'))
    plt.xlabel('Principal Components')
    plt.ylabel('Variance %')
    plt.title('Principal Components Variance')
    plt.xticks(PC);
    # NOTE(review): the file name uses pca.n_components (constructor arg)
    # while the loop uses pca.n_components_ (fitted attribute) — confirm
    # both exist on the object passed in.
    plt.savefig(f"../../reports/figures/Principal_Components{pca.n_components}.png"); | [
"numpy.abs",
"seaborn.regplot",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.xticks",
"seaborn.distplot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"seaborn.diverging_palette",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"seab... | [((949, 997), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(1)', 'figsize': '(15, 10)'}), '(nrows=3, ncols=1, figsize=(15, 10))\n', (961, 997), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1120), 'seaborn.distplot', 'sns.distplot', (['df[target]'], {'hist': '(False)', 'rug': '(True)', 'color': '"""navy"""', 'kde_kws': "{'shade': True, 'linewidth': 1}", 'ax': 'axes[0]'}), "(df[target], hist=False, rug=True, color='navy', kde_kws={\n 'shade': True, 'linewidth': 1}, ax=axes[0])\n", (1014, 1120), True, 'import seaborn as sns\n'), ((2233, 2302), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../reports/figures/{target}_Distribution_Variable.png"""'], {}), "(f'../reports/figures/{target}_Distribution_Variable.png')\n", (2244, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2633), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'cols', 'nrows': 'rows', 'figsize': '(cols * 5, rows * 5)'}), '(ncols=cols, nrows=rows, figsize=(cols * 5, rows * 5))\n', (2579, 2633), True, 'import matplotlib.pyplot as plt\n'), ((3333, 3361), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (3352, 3361), True, 'import matplotlib.pyplot as plt\n'), ((3469, 3546), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../reports/figures/Distribution_Numerical_Variable_{name}.png"""'], {}), "(f'../reports/figures/Distribution_Numerical_Variable_{name}.png')\n", (3480, 3546), True, 'import matplotlib.pyplot as plt\n'), ((3826, 3892), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'cols', 'nrows': 'rows', 'figsize': '(cols * 5, rows * 5)'}), '(ncols=cols, nrows=rows, figsize=(cols * 5, rows * 5))\n', (3838, 3892), True, 'import matplotlib.pyplot as plt\n'), ((4959, 4987), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (4978, 4987), True, 'import matplotlib.pyplot as plt\n'), ((5079, 5177), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../reports/figures/Correlation_Each_Numerical_Variable_with_{target}.png"""'], {}), "(\n f'../reports/figures/Correlation_Each_Numerical_Variable_with_{target}.png'\n )\n", (5090, 5177), True, 'import matplotlib.pyplot as plt\n'), ((5698, 5719), 'numpy.abs', 'np.abs', (["corr_mat['r']"], {}), "(corr_mat['r'])\n", (5704, 5719), True, 'import numpy as np\n'), ((5962, 6010), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(15, 15)'}), '(nrows=1, ncols=1, figsize=(15, 15))\n', (5974, 6010), True, 'import matplotlib.pyplot as plt\n'), ((6199, 6212), 'numpy.triu', 'np.triu', (['corr'], {}), '(corr)\n', (6206, 6212), True, 'import numpy as np\n'), ((6803, 6876), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../reports/figures/Heatmap_Matrix_Correlations_{name}.png"""'], {}), "(f'../reports/figures/Heatmap_Matrix_Correlations_{name}.png')\n", (6814, 6876), True, 'import matplotlib.pyplot as plt\n'), ((7131, 7199), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'rows', 'ncols': 'cols', 'figsize': '(rows * 10, rows * 50)'}), '(nrows=rows, ncols=cols, figsize=(rows * 10, rows * 50))\n', (7143, 7199), True, 'import matplotlib.pyplot as plt\n'), ((7648, 7676), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (7667, 7676), True, 'import matplotlib.pyplot as plt\n'), ((7792, 7864), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../reports/figures/Each_Qualtitative_Variable_{name}.png"""'], {}), "(f'../reports/figures/Each_Qualtitative_Variable_{name}.png')\n", (7803, 7864), True, 'import matplotlib.pyplot as plt\n'), ((8151, 8206), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'rows', 'ncols': 'cols', 'figsize': '(100, 60)'}), '(nrows=rows, ncols=cols, figsize=(100, 60))\n', (8163, 8206), True, 'import matplotlib.pyplot as plt\n'), ((8983, 9011), 'matplotlib.pyplot.subplots_adjust', 
'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (9002, 9011), True, 'import matplotlib.pyplot as plt\n'), ((9101, 9176), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../reports/figures/Target_vs_Qualitative_Variable_{var}.png"""'], {}), "(f'../reports/figures/Target_vs_Qualitative_Variable_{var}.png')\n", (9112, 9176), True, 'import matplotlib.pyplot as plt\n'), ((9357, 9384), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 8)'}), '(figsize=(14, 8))\n', (9367, 9384), True, 'import matplotlib.pyplot as plt\n'), ((9388, 9411), 'matplotlib.pyplot.plot', 'plt.plot', (['results', '"""-o"""'], {}), "(results, '-o')\n", (9396, 9411), True, 'import matplotlib.pyplot as plt\n'), ((9415, 9455), 'matplotlib.pyplot.title', 'plt.title', (['"""Adjusted Inertia for each K"""'], {}), "('Adjusted Inertia for each K')\n", (9424, 9455), True, 'import matplotlib.pyplot as plt\n'), ((9460, 9475), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""K"""'], {}), "('K')\n", (9470, 9475), True, 'import matplotlib.pyplot as plt\n'), ((9480, 9510), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Adjusted Inertia"""'], {}), "('Adjusted Inertia')\n", (9490, 9510), True, 'import matplotlib.pyplot as plt\n'), ((9555, 9612), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../../reports/figures/BestK_for_KMeans.png"""'], {}), "('../../reports/figures/BestK_for_KMeans.png')\n", (9566, 9612), True, 'import matplotlib.pyplot as plt\n'), ((9774, 9801), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (9784, 9801), True, 'import matplotlib.pyplot as plt\n'), ((9805, 9919), 'matplotlib.pyplot.bar', 'plt.bar', (['PC', 'pca.explained_variance_ratio_'], {'color': "('navy', 'b', 'g', 'r', 'coral', 'c', 'm', 'y', 'k', 'gray')"}), "(PC, pca.explained_variance_ratio_, color=('navy', 'b', 'g', 'r',\n 'coral', 'c', 'm', 'y', 'k', 'gray'))\n", (9812, 9919), True, 'import matplotlib.pyplot as plt\n'), ((9911, 9945), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Principal Components"""'], {}), "('Principal Components')\n", (9921, 9945), True, 'import matplotlib.pyplot as plt\n'), ((9950, 9974), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Variance %"""'], {}), "('Variance %')\n", (9960, 9974), True, 'import matplotlib.pyplot as plt\n'), ((9979, 10021), 'matplotlib.pyplot.title', 'plt.title', (['"""Principal Components Variance"""'], {}), "('Principal Components Variance')\n", (9988, 10021), True, 'import matplotlib.pyplot as plt\n'), ((10026, 10040), 'matplotlib.pyplot.xticks', 'plt.xticks', (['PC'], {}), '(PC)\n', (10036, 10040), True, 'import matplotlib.pyplot as plt\n'), ((10046, 10131), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../../reports/figures/Principal_Components{pca.n_components}.png"""'], {}), "(f'../../reports/figures/Principal_Components{pca.n_components}.png'\n )\n", (10057, 10131), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1396), 'numpy.sqrt', 'np.sqrt', (['df[target]'], {}), '(df[target])\n', (1384, 1396), True, 'import numpy as np\n'), ((4096, 4248), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'df[colum]', 'y': 'df[target]', 'color': '"""navy"""', 'marker': '"""."""', 'scatter_kws': "{'alpha': 0.4}", 'line_kws': "{'color': 'r', 'alpha': 0.7}", 'ax': 'axes[i]'}), "(x=df[colum], y=df[target], color='navy', marker='.',\n scatter_kws={'alpha': 0.4}, line_kws={'color': 'r', 'alpha': 0.7}, ax=\n axes[i])\n", (4107, 4248), True, 'import seaborn as sns\n'), ((8352, 8418), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': 'colum', 'y': 'var', 'data': 'df', 'color': '"""coral"""', 'ax': 'axes[i]'}), "(x=colum, y=var, data=df, color='coral', ax=axes[i])\n", (8366, 8418), True, 'import seaborn as sns\n'), ((4593, 4614), 'matplotlib.ticker.EngFormatter', 'ticker.EngFormatter', ([], {}), '()\n', (4612, 4614), True, 'import matplotlib.ticker as ticker\n'), ((4658, 4679), 'matplotlib.ticker.EngFormatter', 'ticker.EngFormatter', ([], {}), '()\n', 
(4677, 4679), True, 'import matplotlib.ticker as ticker\n'), ((6352, 6400), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(150)', '(275)'], {'s': '(80)', 'l': '(55)', 'n': '(9)'}), '(150, 275, s=80, l=55, n=9)\n', (6373, 6400), True, 'import seaborn as sns\n'), ((8636, 8657), 'matplotlib.ticker.EngFormatter', 'ticker.EngFormatter', ([], {}), '()\n', (8655, 8657), True, 'import matplotlib.ticker as ticker\n')] |
import os
import csv
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import cv2
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout
import pickle
from keras.callbacks import TensorBoard, ModelCheckpoint
BASE_DIR = '/home/workspace/CarND-Behavioral-Cloning-P3/img'


def get_data(filename):
    """Read the driving-log CSV and split its rows 80/20.

    Args:
        filename: CSV file name, resolved relative to BASE_DIR.

    Returns:
        Tuple (train_samples, validation_samples) of raw CSV rows,
        with the header row excluded.
    """
    csv_path = os.path.join(BASE_DIR, filename)
    with open(csv_path) as csvfile:
        rows = list(csv.reader(csvfile))
    rows = rows[1:]  # drop the CSV header row
    print('The dataset is {} records'.format(len(rows)))
    return train_test_split(rows, test_size=0.2)
def generator(samples, batch_size=32):
    """Yield endless, shuffled batches of (images, steering angles).

    Each CSV row contributes two examples: the center-camera image and
    its horizontal mirror with the steering angle negated, so every
    yielded batch holds up to 2 * batch_size examples.

    Args:
        samples: list of CSV rows; column 0 holds the image path,
            column 1 the steering angle.
        batch_size: number of source rows consumed per yielded batch.

    Yields:
        Tuple (X, y) of shuffled numpy arrays for one batch.
    """
    num_samples = len(samples)
    while True:
        # sklearn.utils.shuffle returns a shuffled COPY; the original code
        # discarded the result, so the rows were never reshuffled between
        # epochs.  Rebind to actually pick up the shuffled order.
        samples = shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                name = BASE_DIR + '/IMG/' + batch_sample[0].split('/')[-1]
                image = cv2.imread(name)
                angle = float(batch_sample[1])
                images.append(image)
                angles.append(angle)
                # Augmentation: mirrored frame with negated steering angle.
                images.append(cv2.flip(image, 1))
                angles.append(angle * -1.0)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield shuffle(X_train, y_train)
def train(train_samples, validation_samples, callbacks_list):
    """Build the NVIDIA-style CNN, train it on generator batches, and
    pickle the training history to ./history.pickle.

    Args:
        train_samples: CSV rows used for training.
        validation_samples: CSV rows used for validation.
        callbacks_list: Keras callbacks passed through to fit_generator.
    """
    batch_size = 32
    train_generator = generator(train_samples, batch_size=batch_size)
    validation_generator = generator(validation_samples, batch_size=batch_size)

    model = Sequential()
    # Normalize pixels to [-0.5, 0.5]; crop 70 sky rows and 25 hood rows.
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
    model.add(Cropping2D(cropping=((70, 25), (0, 0))))
    # 'strides' is the Keras 2 name for the deprecated Keras 1 'subsample'.
    model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
    model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
    model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dropout(0.2))
    model.add(Dense(50))
    model.add(Dropout(0.2))
    model.add(Dense(10))
    model.add(Dropout(0.2))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')

    # The generator yields one batch per call, so the step counts must be
    # the number of BATCHES per epoch.  The original passed len(samples),
    # which made each "epoch" iterate the dataset ~batch_size times over.
    steps_per_epoch = int(np.ceil(len(train_samples) / float(batch_size)))
    validation_steps = int(np.ceil(len(validation_samples) / float(batch_size)))
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps,
                                  epochs=5, verbose=1,
                                  callbacks=callbacks_list)

    with open('./history.pickle', 'wb') as file_pi:
        pickle.dump(history.history, file_pi)
if __name__ == '__main__':
    filename = 'driving.csv'
    train_samples, validation_samples = get_data(filename)
    # NOTE: the original additionally called keras.callbacks.TensorBoard(...)
    # here, but the bare 'keras' module was never imported (NameError at
    # runtime) and the instance was discarded anyway.  The TensorBoard
    # callback below is the one actually wired into training.
    callback_list = [ModelCheckpoint(filepath='model_final.h5',
                                     monitor='val_loss',
                                     save_best_only=True),
                     TensorBoard(log_dir='./logs', histogram_freq=0,
                                 write_graph=True, write_images=False)]
    train(train_samples, validation_samples, callback_list)
| [
"keras.layers.Conv2D",
"pickle.dump",
"keras.layers.Flatten",
"keras.callbacks.ModelCheckpoint",
"cv2.flip",
"sklearn.model_selection.train_test_split",
"sklearn.utils.shuffle",
"keras.layers.Lambda",
"os.path.join",
"keras.models.Sequential",
"keras.callbacks.TensorBoard",
"numpy.array",
"k... | [((735, 775), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (751, 775), False, 'from sklearn.model_selection import train_test_split\n'), ((1974, 1986), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1984, 1986), False, 'from keras.models import Sequential\n'), ((517, 536), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (527, 536), False, 'import csv\n'), ((924, 940), 'sklearn.utils.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (931, 940), False, 'from sklearn.utils import shuffle\n'), ((2001, 2061), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (2007, 2061), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2077, 2116), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (2087, 2116), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2130, 2185), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5, 5)'], {'subsample': '(2, 2)', 'activation': '"""relu"""'}), "(24, (5, 5), subsample=(2, 2), activation='relu')\n", (2136, 2185), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2200, 2255), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5, 5)'], {'subsample': '(2, 2)', 'activation': '"""relu"""'}), "(36, (5, 5), subsample=(2, 2), activation='relu')\n", (2206, 2255), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2269, 2324), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5, 5)'], {'subsample': '(2, 2)', 'activation': '"""relu"""'}), "(48, (5, 5), subsample=(2, 2), activation='relu')\n", (2275, 2324), False, 'from keras.layers import Flatten, 
Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2338, 2375), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (2344, 2375), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2390, 2427), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (2396, 2427), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2442, 2451), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2449, 2451), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2467, 2477), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (2472, 2477), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2493, 2505), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2500, 2505), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2521, 2530), 'keras.layers.Dense', 'Dense', (['(50)'], {}), '(50)\n', (2526, 2530), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2546, 2558), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2553, 2558), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2574, 2583), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (2579, 2583), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2599, 2611), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2606, 2611), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((2627, 2635), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2632, 
2635), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPooling2D, Cropping2D, Dropout\n'), ((3062, 3099), 'pickle.dump', 'pickle.dump', (['history.history', 'file_pi'], {}), '(history.history, file_pi)\n', (3073, 3099), False, 'import pickle\n'), ((3364, 3451), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""model_final.h5"""', 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(filepath='model_final.h5', monitor='val_loss',\n save_best_only=True)\n", (3379, 3451), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint\n'), ((3539, 3628), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""./logs"""', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(False)'}), "(log_dir='./logs', histogram_freq=0, write_graph=True,\n write_images=False)\n", (3550, 3628), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint\n'), ((454, 486), 'os.path.join', 'os.path.join', (['BASE_DIR', 'filename'], {}), '(BASE_DIR, filename)\n', (466, 486), False, 'import os\n'), ((1635, 1651), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1643, 1651), True, 'import numpy as np\n'), ((1674, 1690), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (1682, 1690), True, 'import numpy as np\n'), ((1266, 1282), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (1276, 1282), False, 'import cv2\n'), ((1364, 1382), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (1372, 1382), False, 'import cv2\n'), ((1722, 1747), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (1729, 1747), False, 'from sklearn.utils import shuffle\n')] |
from numpy import genfromtxt
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
''' ResNet-56 '''
# Per-epoch curves exported as CSV; columns are (wall time, epoch, value).
# Model 52 = plain ResNet-56, model 53 = ResNet-56 with ReZero (alpha).
train_error_52 = genfromtxt('./epoch_error_train_52.csv', delimiter=',')
valid_error_52 = genfromtxt('./epoch_error_valid_52.csv', delimiter=',')
train_error_53 = genfromtxt('./epoch_error_train_53.csv', delimiter=',')
valid_error_53 = genfromtxt('./epoch_error_valid_53.csv', delimiter=',')

# resnet56 (model 52)
# Training time: 127m 41s, 281 epochs
# Best [Valid] | epoch: 221 - loss: 0.3129 - acc: 0.9396
# [Test] loss 0.3042 - acc: 0.9356 - acc_topk: 0.9795
#
# resnet56 alpha (model 53)
# Training time: 134m 44s, 303 epochs
# Best [Valid] | epoch: 243 - loss: 0.2934 - acc: 0.9369
# [Test] loss 0.3062 - acc: 0.9345 - acc_topk: 0.9788

fig = plt.figure()
plt.xlabel('epoch', fontsize=14)
plt.ylabel('error (%)', fontsize=14)
plt.title('ResNet-56', fontsize=16)
# [1:] skips the CSV header row; 100* converts error fraction to percent.
plt.plot(train_error_53[1:, 1], 100 * train_error_53[1:, 2], label='train (ReZero)', zorder=1)
plt.plot(valid_error_53[1:, 1], 100 * valid_error_53[1:, 2], label='valid (ReZero)', zorder=2)
plt.plot(train_error_52[1:, 1], 100 * train_error_52[1:, 2], 'c', label='train', zorder=3)
plt.plot(valid_error_52[1:, 1], 100 * valid_error_52[1:, 2], 'm', label='valid', zorder=4)
plt.ticklabel_format(axis='y', style='sci')
plt.grid(True)
plt.legend(loc='upper right', fontsize='x-large')  # upper right, lower right, lower left
# Alternate zoomed view (first 30 epochs):
# plt.ylim(5, 80)
# plt.xlim(1, 30)
# plt.savefig('resnet56_error_0_30.png', bbox_inches='tight')
plt.ylim(-2, 35)
plt.xlim(0, 250)
# Fixed typo: the savefig keyword is 'bbox_inches', not 'box_inches';
# the misspelling meant figures were saved without tight cropping.
plt.savefig('resnet56_error.png', bbox_inches='tight')
plt.show()

train_loss_52 = genfromtxt('./epoch_loss_train_52.csv', delimiter=',')
valid_loss_52 = genfromtxt('./epoch_loss_valid_52.csv', delimiter=',')
train_loss_53 = genfromtxt('./epoch_loss_train_53.csv', delimiter=',')
valid_loss_53 = genfromtxt('./epoch_loss_valid_53.csv', delimiter=',')

fig = plt.figure()
plt.xlabel('epoch', fontsize=14)
plt.ylabel('loss', fontsize=14)
plt.title('ResNet-56', fontsize=16)
plt.plot(train_loss_53[1:, 1], train_loss_53[1:, 2], label='train (ReZero)', zorder=1)
plt.plot(valid_loss_53[1:, 1], valid_loss_53[1:, 2], label='valid (ReZero)', zorder=2)
plt.plot(train_loss_52[1:, 1], train_loss_52[1:, 2], 'c', label='train', zorder=3)
plt.plot(valid_loss_52[1:, 1], valid_loss_52[1:, 2], 'm', label='valid', zorder=4)
plt.ticklabel_format(axis='y', style='sci')
plt.grid(True)
plt.legend(loc='upper right', fontsize='x-large')  # upper right, lower right, lower left
plt.ylim(.2, 2.5)
plt.xlim(1, 30)
plt.savefig('resnet56_loss_0_30.png', bbox_inches='tight')
# Alternate full view:
# plt.ylim(-.1, 1.2)
# plt.xlim(0, 250)
# plt.savefig('resnet56_loss.png', bbox_inches='tight')
plt.show()
''' ResNet-20 '''
# Per-epoch curves exported as CSV; columns are (wall time, epoch, value).
# Model 54 = ResNet-20 with ReZero (alpha), model 55 = plain ResNet-20.
train_error_54 = genfromtxt('./epoch_error_train_54.csv', delimiter=',')
valid_error_54 = genfromtxt('./epoch_error_valid_54.csv', delimiter=',')
train_error_55 = genfromtxt('./epoch_error_train_55.csv', delimiter=',')
valid_error_55 = genfromtxt('./epoch_error_valid_55.csv', delimiter=',')

# resnet-20 alpha (model 54)
# Training time: 63m 9s, 327
# Best [Valid] | epoch: 267 - loss: 0.3237 - acc: 0.9256
# [Test] loss 0.3491 - acc: 0.9206 - acc_topk: 0.9723
#
# resnet-20 (model 55)
# Training time: 70m 3s, 398
# Best [Valid] | epoch: 338 - loss: 0.3026 - acc: 0.9237
# [Test] loss 0.3055 - acc: 0.9202 - acc_topk: 0.9742

fig = plt.figure()
plt.xlabel('epoch', fontsize=14)
plt.ylabel('error (%)', fontsize=14)
plt.title('ResNet-20', fontsize=16)
# [1:] skips the CSV header row; 100* converts error fraction to percent.
plt.plot(train_error_54[1:, 1], 100 * train_error_54[1:, 2], label='train (ReZero)', zorder=1)
plt.plot(valid_error_54[1:, 1], 100 * valid_error_54[1:, 2], label='valid (ReZero)', zorder=2)
plt.plot(train_error_55[1:, 1], 100 * train_error_55[1:, 2], 'c', label='train', zorder=3)
plt.plot(valid_error_55[1:, 1], 100 * valid_error_55[1:, 2], 'm', label='valid', zorder=4)
plt.ticklabel_format(axis='y', style='sci')
plt.grid(True)
plt.legend(loc='upper right', fontsize='x-large')  # upper right, lower right, lower left
plt.ylim(5, 60)
plt.xlim(1, 30)
# Fixed typo: the savefig keyword is 'bbox_inches', not 'box_inches';
# the misspelling meant figures were saved without tight cropping.
plt.savefig('resnet20_error_0_30.png', bbox_inches='tight')
# Alternate full view:
# plt.ylim(-2, 35)
# plt.xlim(0, 300)
# plt.savefig('resnet20_error.png', bbox_inches='tight')
plt.show()

train_loss_54 = genfromtxt('./epoch_loss_train_54.csv', delimiter=',')
valid_loss_54 = genfromtxt('./epoch_loss_valid_54.csv', delimiter=',')
train_loss_55 = genfromtxt('./epoch_loss_train_55.csv', delimiter=',')
valid_loss_55 = genfromtxt('./epoch_loss_valid_55.csv', delimiter=',')

fig = plt.figure()
plt.xlabel('epoch', fontsize=14)
plt.ylabel('loss', fontsize=14)
plt.title('ResNet-20', fontsize=16)
plt.plot(train_loss_54[1:, 1], train_loss_54[1:, 2], label='train (ReZero)', zorder=1)
plt.plot(valid_loss_54[1:, 1], valid_loss_54[1:, 2], label='valid (ReZero)', zorder=2)
plt.plot(train_loss_55[1:, 1], train_loss_55[1:, 2], 'c', label='train', zorder=3)
plt.plot(valid_loss_55[1:, 1], valid_loss_55[1:, 2], 'm', label='valid', zorder=4)
plt.ticklabel_format(axis='y', style='sci')
plt.grid(True)
plt.legend(loc='upper right', fontsize='x-large')  # upper right, lower right, lower left
plt.ylim(.2, 1.6)
plt.xlim(1, 30)
plt.savefig('resnet20_loss_0_30.png', bbox_inches='tight')
# Alternate full view:
# plt.ylim(-.1, 1.2)
# plt.xlim(0, 300)
# plt.savefig('resnet20_loss.png', bbox_inches='tight')
plt.show()
''' ResNet-110 '''
# Per-epoch curves exported as CSV; columns are (wall time, epoch, value).
# Model 58 = ResNet-110 with ReZero (alpha), model 59 = plain ResNet-110.
train_error_58 = genfromtxt('./epoch_error_train_58.csv', delimiter=',')
valid_error_58 = genfromtxt('./epoch_error_valid_58.csv', delimiter=',')
train_error_59 = genfromtxt('./epoch_error_train_59.csv', delimiter=',')
valid_error_59 = genfromtxt('./epoch_error_valid_59.csv', delimiter=',')

# resnet-110 alpha (model 58)
# Training time: 301m 19s, 410 epochs
# Best [Valid] | epoch: 350 - loss: 0.2925 - acc: 0.9416
# [Test] loss 0.2796 - acc: 0.9412 - acc_topk: 0.9816
#
# resnet-110 (model 59)
# Training time: 240m 53s, 313 epochs
# Best [Valid] | epoch: 253 - loss: 0.3480 - acc: 0.9400
# [Test] loss 0.3392 - acc: 0.9361 - acc_topk: 0.9809

fig = plt.figure()
plt.xlabel('epoch', fontsize=14)
plt.ylabel('error (%)', fontsize=14)
plt.title('ResNet-110', fontsize=16)
# [1:] skips the CSV header row; 100* converts error fraction to percent.
plt.plot(train_error_58[1:, 1], 100 * train_error_58[1:, 2], label='train (ReZero)', zorder=1)
plt.plot(valid_error_58[1:, 1], 100 * valid_error_58[1:, 2], label='valid (ReZero)', zorder=2)
plt.plot(train_error_59[1:, 1], 100 * train_error_59[1:, 2], 'c', label='train', zorder=3)
plt.plot(valid_error_59[1:, 1], 100 * valid_error_59[1:, 2], 'm', label='valid', zorder=4)
plt.ticklabel_format(axis='y', style='sci')
plt.grid(True)
plt.legend(loc='upper right', fontsize='x-large')  # upper right, lower right, lower left
# Alternate zoomed view (first 30 epochs):
# plt.ylim(5, 70)
# plt.xlim(1, 30)
# plt.savefig('resnet110_error_0_30.png', bbox_inches='tight')
plt.ylim(-2, 40)
plt.xlim(0, 300)
# Fixed typo: the savefig keyword is 'bbox_inches', not 'box_inches';
# the misspelling meant figures were saved without tight cropping.
plt.savefig('resnet110_error.png', bbox_inches='tight')
plt.show()

train_loss_58 = genfromtxt('./epoch_loss_train_58.csv', delimiter=',')
valid_loss_58 = genfromtxt('./epoch_loss_valid_58.csv', delimiter=',')
train_loss_59 = genfromtxt('./epoch_loss_train_59.csv', delimiter=',')
valid_loss_59 = genfromtxt('./epoch_loss_valid_59.csv', delimiter=',')

fig = plt.figure()
plt.xlabel('epoch', fontsize=14)
plt.ylabel('loss', fontsize=14)
plt.title('ResNet-110', fontsize=16)
plt.plot(train_loss_58[1:, 1], train_loss_58[1:, 2], label='train (ReZero)', zorder=1)
plt.plot(valid_loss_58[1:, 1], valid_loss_58[1:, 2], label='valid (ReZero)', zorder=2)
plt.plot(train_loss_59[1:, 1], train_loss_59[1:, 2], 'c', label='train', zorder=3)
plt.plot(valid_loss_59[1:, 1], valid_loss_59[1:, 2], 'm', label='valid', zorder=4)
plt.ticklabel_format(axis='y', style='sci')
plt.grid(True)
plt.legend(loc='upper right', fontsize='x-large')  # upper right, lower right, lower left
# Alternate zoomed view (first 30 epochs):
# plt.ylim(.2, 1.8)
# plt.xlim(1, 30)
# plt.savefig('resnet110_loss_0_30.png', bbox_inches='tight')
plt.ylim(-.1, 1.4)
plt.xlim(0, 300)
plt.savefig('resnet110_loss.png', bbox_inches='tight')
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ticklabel_format",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"numpy.genfrom... | [((186, 227), 'numpy.genfromtxt', 'genfromtxt', (['train_error_52'], {'delimiter': '""","""'}), "(train_error_52, delimiter=',')\n", (196, 227), False, 'from numpy import genfromtxt\n'), ((291, 332), 'numpy.genfromtxt', 'genfromtxt', (['valid_error_52'], {'delimiter': '""","""'}), "(valid_error_52, delimiter=',')\n", (301, 332), False, 'from numpy import genfromtxt\n'), ((397, 438), 'numpy.genfromtxt', 'genfromtxt', (['train_error_53'], {'delimiter': '""","""'}), "(train_error_53, delimiter=',')\n", (407, 438), False, 'from numpy import genfromtxt\n'), ((502, 543), 'numpy.genfromtxt', 'genfromtxt', (['valid_error_53'], {'delimiter': '""","""'}), "(valid_error_53, delimiter=',')\n", (512, 543), False, 'from numpy import genfromtxt\n'), ((905, 917), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (915, 917), True, 'import matplotlib.pyplot as plt\n'), ((918, 950), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {'fontsize': '(14)'}), "('epoch', fontsize=14)\n", (928, 950), True, 'import matplotlib.pyplot as plt\n'), ((951, 987), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error (%)"""'], {'fontsize': '(14)'}), "('error (%)', fontsize=14)\n", (961, 987), True, 'import matplotlib.pyplot as plt\n'), ((988, 1023), 'matplotlib.pyplot.title', 'plt.title', (['"""ResNet-56"""'], {'fontsize': '(16)'}), "('ResNet-56', fontsize=16)\n", (997, 1023), True, 'import matplotlib.pyplot as plt\n'), ((1025, 1124), 'matplotlib.pyplot.plot', 'plt.plot', (['train_error_53[1:, 1]', '(100 * train_error_53[1:, 2])'], {'label': '"""train (ReZero)"""', 'zorder': '(1)'}), "(train_error_53[1:, 1], 100 * train_error_53[1:, 2], label=\n 'train (ReZero)', zorder=1)\n", (1033, 1124), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1232), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_error_53[1:, 1]', '(100 * valid_error_53[1:, 2])'], {'label': '"""valid (ReZero)"""', 'zorder': '(2)'}), "(valid_error_53[1:, 1], 100 * valid_error_53[1:, 2], label=\n 
'valid (ReZero)', zorder=2)\n", (1141, 1232), True, 'import matplotlib.pyplot as plt\n'), ((1241, 1336), 'matplotlib.pyplot.plot', 'plt.plot', (['train_error_52[1:, 1]', '(100 * train_error_52[1:, 2])', '"""c"""'], {'label': '"""train"""', 'zorder': '(3)'}), "(train_error_52[1:, 1], 100 * train_error_52[1:, 2], 'c', label=\n 'train', zorder=3)\n", (1249, 1336), True, 'import matplotlib.pyplot as plt\n'), ((1345, 1440), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_error_52[1:, 1]', '(100 * valid_error_52[1:, 2])', '"""m"""'], {'label': '"""valid"""', 'zorder': '(4)'}), "(valid_error_52[1:, 1], 100 * valid_error_52[1:, 2], 'm', label=\n 'valid', zorder=4)\n", (1353, 1440), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1493), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""y"""', 'style': '"""sci"""'}), "(axis='y', style='sci')\n", (1470, 1493), True, 'import matplotlib.pyplot as plt\n'), ((1494, 1508), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1502, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1509, 1558), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '"""x-large"""'}), "(loc='upper right', fontsize='x-large')\n", (1519, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1695, 1711), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2)', '(35)'], {}), '(-2, 35)\n', (1703, 1711), True, 'import matplotlib.pyplot as plt\n'), ((1712, 1728), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(250)'], {}), '(0, 250)\n', (1720, 1728), True, 'import matplotlib.pyplot as plt\n'), ((1729, 1782), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""resnet56_error.png"""'], {'box_inches': '"""tight"""'}), "('resnet56_error.png', box_inches='tight')\n", (1740, 1782), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1791, 1793), True, 'import matplotlib.pyplot as plt\n'), ((1855, 1895), 'numpy.genfromtxt', 
'genfromtxt', (['train_loss_52'], {'delimiter': '""","""'}), "(train_loss_52, delimiter=',')\n", (1865, 1895), False, 'from numpy import genfromtxt\n'), ((1956, 1996), 'numpy.genfromtxt', 'genfromtxt', (['valid_loss_52'], {'delimiter': '""","""'}), "(valid_loss_52, delimiter=',')\n", (1966, 1996), False, 'from numpy import genfromtxt\n'), ((2058, 2098), 'numpy.genfromtxt', 'genfromtxt', (['train_loss_53'], {'delimiter': '""","""'}), "(train_loss_53, delimiter=',')\n", (2068, 2098), False, 'from numpy import genfromtxt\n'), ((2159, 2199), 'numpy.genfromtxt', 'genfromtxt', (['valid_loss_53'], {'delimiter': '""","""'}), "(valid_loss_53, delimiter=',')\n", (2169, 2199), False, 'from numpy import genfromtxt\n'), ((2207, 2219), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2217, 2219), True, 'import matplotlib.pyplot as plt\n'), ((2220, 2252), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {'fontsize': '(14)'}), "('epoch', fontsize=14)\n", (2230, 2252), True, 'import matplotlib.pyplot as plt\n'), ((2253, 2284), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {'fontsize': '(14)'}), "('loss', fontsize=14)\n", (2263, 2284), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2320), 'matplotlib.pyplot.title', 'plt.title', (['"""ResNet-56"""'], {'fontsize': '(16)'}), "('ResNet-56', fontsize=16)\n", (2294, 2320), True, 'import matplotlib.pyplot as plt\n'), ((2322, 2412), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss_53[1:, 1]', 'train_loss_53[1:, 2]'], {'label': '"""train (ReZero)"""', 'zorder': '(1)'}), "(train_loss_53[1:, 1], train_loss_53[1:, 2], label='train (ReZero)',\n zorder=1)\n", (2330, 2412), True, 'import matplotlib.pyplot as plt\n'), ((2424, 2514), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss_53[1:, 1]', 'valid_loss_53[1:, 2]'], {'label': '"""valid (ReZero)"""', 'zorder': '(2)'}), "(valid_loss_53[1:, 1], valid_loss_53[1:, 2], label='valid (ReZero)',\n zorder=2)\n", (2432, 2514), True, 'import matplotlib.pyplot as 
plt\n'), ((2526, 2612), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss_52[1:, 1]', 'train_loss_52[1:, 2]', '"""c"""'], {'label': '"""train"""', 'zorder': '(3)'}), "(train_loss_52[1:, 1], train_loss_52[1:, 2], 'c', label='train',\n zorder=3)\n", (2534, 2612), True, 'import matplotlib.pyplot as plt\n'), ((2624, 2710), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss_52[1:, 1]', 'valid_loss_52[1:, 2]', '"""m"""'], {'label': '"""valid"""', 'zorder': '(4)'}), "(valid_loss_52[1:, 1], valid_loss_52[1:, 2], 'm', label='valid',\n zorder=4)\n", (2632, 2710), True, 'import matplotlib.pyplot as plt\n'), ((2723, 2766), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""y"""', 'style': '"""sci"""'}), "(axis='y', style='sci')\n", (2743, 2766), True, 'import matplotlib.pyplot as plt\n'), ((2767, 2781), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2775, 2781), True, 'import matplotlib.pyplot as plt\n'), ((2782, 2831), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '"""x-large"""'}), "(loc='upper right', fontsize='x-large')\n", (2792, 2831), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2889), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.2)', '(2.5)'], {}), '(0.2, 2.5)\n', (2879, 2889), True, 'import matplotlib.pyplot as plt\n'), ((2889, 2904), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(30)'], {}), '(1, 30)\n', (2897, 2904), True, 'import matplotlib.pyplot as plt\n'), ((2905, 2962), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""resnet56_loss_0_30.png"""'], {'box_inches': '"""tight"""'}), "('resnet56_loss_0_30.png', box_inches='tight')\n", (2916, 2962), True, 'import matplotlib.pyplot as plt\n'), ((3058, 3068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3066, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3152, 3193), 'numpy.genfromtxt', 'genfromtxt', (['train_error_54'], {'delimiter': '""","""'}), "(train_error_54, delimiter=',')\n", (3162, 3193), 
False, 'from numpy import genfromtxt\n'), ((3257, 3298), 'numpy.genfromtxt', 'genfromtxt', (['valid_error_54'], {'delimiter': '""","""'}), "(valid_error_54, delimiter=',')\n", (3267, 3298), False, 'from numpy import genfromtxt\n'), ((3363, 3404), 'numpy.genfromtxt', 'genfromtxt', (['train_error_55'], {'delimiter': '""","""'}), "(train_error_55, delimiter=',')\n", (3373, 3404), False, 'from numpy import genfromtxt\n'), ((3468, 3509), 'numpy.genfromtxt', 'genfromtxt', (['valid_error_55'], {'delimiter': '""","""'}), "(valid_error_55, delimiter=',')\n", (3478, 3509), False, 'from numpy import genfromtxt\n'), ((3855, 3867), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3865, 3867), True, 'import matplotlib.pyplot as plt\n'), ((3868, 3900), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {'fontsize': '(14)'}), "('epoch', fontsize=14)\n", (3878, 3900), True, 'import matplotlib.pyplot as plt\n'), ((3901, 3937), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error (%)"""'], {'fontsize': '(14)'}), "('error (%)', fontsize=14)\n", (3911, 3937), True, 'import matplotlib.pyplot as plt\n'), ((3938, 3973), 'matplotlib.pyplot.title', 'plt.title', (['"""ResNet-20"""'], {'fontsize': '(16)'}), "('ResNet-20', fontsize=16)\n", (3947, 3973), True, 'import matplotlib.pyplot as plt\n'), ((3975, 4074), 'matplotlib.pyplot.plot', 'plt.plot', (['train_error_54[1:, 1]', '(100 * train_error_54[1:, 2])'], {'label': '"""train (ReZero)"""', 'zorder': '(1)'}), "(train_error_54[1:, 1], 100 * train_error_54[1:, 2], label=\n 'train (ReZero)', zorder=1)\n", (3983, 4074), True, 'import matplotlib.pyplot as plt\n'), ((4083, 4182), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_error_54[1:, 1]', '(100 * valid_error_54[1:, 2])'], {'label': '"""valid (ReZero)"""', 'zorder': '(2)'}), "(valid_error_54[1:, 1], 100 * valid_error_54[1:, 2], label=\n 'valid (ReZero)', zorder=2)\n", (4091, 4182), True, 'import matplotlib.pyplot as plt\n'), ((4191, 4286), 'matplotlib.pyplot.plot', 
'plt.plot', (['train_error_55[1:, 1]', '(100 * train_error_55[1:, 2])', '"""c"""'], {'label': '"""train"""', 'zorder': '(3)'}), "(train_error_55[1:, 1], 100 * train_error_55[1:, 2], 'c', label=\n 'train', zorder=3)\n", (4199, 4286), True, 'import matplotlib.pyplot as plt\n'), ((4295, 4390), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_error_55[1:, 1]', '(100 * valid_error_55[1:, 2])', '"""m"""'], {'label': '"""valid"""', 'zorder': '(4)'}), "(valid_error_55[1:, 1], 100 * valid_error_55[1:, 2], 'm', label=\n 'valid', zorder=4)\n", (4303, 4390), True, 'import matplotlib.pyplot as plt\n'), ((4400, 4443), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""y"""', 'style': '"""sci"""'}), "(axis='y', style='sci')\n", (4420, 4443), True, 'import matplotlib.pyplot as plt\n'), ((4444, 4458), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4452, 4458), True, 'import matplotlib.pyplot as plt\n'), ((4459, 4508), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '"""x-large"""'}), "(loc='upper right', fontsize='x-large')\n", (4469, 4508), True, 'import matplotlib.pyplot as plt\n'), ((4548, 4563), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(5)', '(60)'], {}), '(5, 60)\n', (4556, 4563), True, 'import matplotlib.pyplot as plt\n'), ((4564, 4579), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(30)'], {}), '(1, 30)\n', (4572, 4579), True, 'import matplotlib.pyplot as plt\n'), ((4580, 4638), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""resnet20_error_0_30.png"""'], {'box_inches': '"""tight"""'}), "('resnet20_error_0_30.png', box_inches='tight')\n", (4591, 4638), True, 'import matplotlib.pyplot as plt\n'), ((4733, 4743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4741, 4743), True, 'import matplotlib.pyplot as plt\n'), ((4805, 4845), 'numpy.genfromtxt', 'genfromtxt', (['train_loss_54'], {'delimiter': '""","""'}), "(train_loss_54, delimiter=',')\n", (4815, 4845), False, 'from numpy 
import genfromtxt\n'), ((4906, 4946), 'numpy.genfromtxt', 'genfromtxt', (['valid_loss_54'], {'delimiter': '""","""'}), "(valid_loss_54, delimiter=',')\n", (4916, 4946), False, 'from numpy import genfromtxt\n'), ((5008, 5048), 'numpy.genfromtxt', 'genfromtxt', (['train_loss_55'], {'delimiter': '""","""'}), "(train_loss_55, delimiter=',')\n", (5018, 5048), False, 'from numpy import genfromtxt\n'), ((5109, 5149), 'numpy.genfromtxt', 'genfromtxt', (['valid_loss_55'], {'delimiter': '""","""'}), "(valid_loss_55, delimiter=',')\n", (5119, 5149), False, 'from numpy import genfromtxt\n'), ((5157, 5169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5167, 5169), True, 'import matplotlib.pyplot as plt\n'), ((5170, 5202), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {'fontsize': '(14)'}), "('epoch', fontsize=14)\n", (5180, 5202), True, 'import matplotlib.pyplot as plt\n'), ((5203, 5234), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {'fontsize': '(14)'}), "('loss', fontsize=14)\n", (5213, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5235, 5270), 'matplotlib.pyplot.title', 'plt.title', (['"""ResNet-20"""'], {'fontsize': '(16)'}), "('ResNet-20', fontsize=16)\n", (5244, 5270), True, 'import matplotlib.pyplot as plt\n'), ((5272, 5362), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss_54[1:, 1]', 'train_loss_54[1:, 2]'], {'label': '"""train (ReZero)"""', 'zorder': '(1)'}), "(train_loss_54[1:, 1], train_loss_54[1:, 2], label='train (ReZero)',\n zorder=1)\n", (5280, 5362), True, 'import matplotlib.pyplot as plt\n'), ((5374, 5464), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss_54[1:, 1]', 'valid_loss_54[1:, 2]'], {'label': '"""valid (ReZero)"""', 'zorder': '(2)'}), "(valid_loss_54[1:, 1], valid_loss_54[1:, 2], label='valid (ReZero)',\n zorder=2)\n", (5382, 5464), True, 'import matplotlib.pyplot as plt\n'), ((5476, 5562), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss_55[1:, 1]', 'train_loss_55[1:, 2]', '"""c"""'], 
{'label': '"""train"""', 'zorder': '(3)'}), "(train_loss_55[1:, 1], train_loss_55[1:, 2], 'c', label='train',\n zorder=3)\n", (5484, 5562), True, 'import matplotlib.pyplot as plt\n'), ((5574, 5660), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss_55[1:, 1]', 'valid_loss_55[1:, 2]', '"""m"""'], {'label': '"""valid"""', 'zorder': '(4)'}), "(valid_loss_55[1:, 1], valid_loss_55[1:, 2], 'm', label='valid',\n zorder=4)\n", (5582, 5660), True, 'import matplotlib.pyplot as plt\n'), ((5673, 5716), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""y"""', 'style': '"""sci"""'}), "(axis='y', style='sci')\n", (5693, 5716), True, 'import matplotlib.pyplot as plt\n'), ((5717, 5731), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5725, 5731), True, 'import matplotlib.pyplot as plt\n'), ((5732, 5781), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '"""x-large"""'}), "(loc='upper right', fontsize='x-large')\n", (5742, 5781), True, 'import matplotlib.pyplot as plt\n'), ((5821, 5839), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.2)', '(1.6)'], {}), '(0.2, 1.6)\n', (5829, 5839), True, 'import matplotlib.pyplot as plt\n'), ((5839, 5854), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(30)'], {}), '(1, 30)\n', (5847, 5854), True, 'import matplotlib.pyplot as plt\n'), ((5855, 5912), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""resnet20_loss_0_30.png"""'], {'box_inches': '"""tight"""'}), "('resnet20_loss_0_30.png', box_inches='tight')\n", (5866, 5912), True, 'import matplotlib.pyplot as plt\n'), ((6008, 6018), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6016, 6018), True, 'import matplotlib.pyplot as plt\n'), ((6103, 6144), 'numpy.genfromtxt', 'genfromtxt', (['train_error_58'], {'delimiter': '""","""'}), "(train_error_58, delimiter=',')\n", (6113, 6144), False, 'from numpy import genfromtxt\n'), ((6208, 6249), 'numpy.genfromtxt', 'genfromtxt', (['valid_error_58'], 
{'delimiter': '""","""'}), "(valid_error_58, delimiter=',')\n", (6218, 6249), False, 'from numpy import genfromtxt\n'), ((6314, 6355), 'numpy.genfromtxt', 'genfromtxt', (['train_error_59'], {'delimiter': '""","""'}), "(train_error_59, delimiter=',')\n", (6324, 6355), False, 'from numpy import genfromtxt\n'), ((6419, 6460), 'numpy.genfromtxt', 'genfromtxt', (['valid_error_59'], {'delimiter': '""","""'}), "(valid_error_59, delimiter=',')\n", (6429, 6460), False, 'from numpy import genfromtxt\n'), ((6826, 6838), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6836, 6838), True, 'import matplotlib.pyplot as plt\n'), ((6839, 6871), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {'fontsize': '(14)'}), "('epoch', fontsize=14)\n", (6849, 6871), True, 'import matplotlib.pyplot as plt\n'), ((6872, 6908), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error (%)"""'], {'fontsize': '(14)'}), "('error (%)', fontsize=14)\n", (6882, 6908), True, 'import matplotlib.pyplot as plt\n'), ((6909, 6945), 'matplotlib.pyplot.title', 'plt.title', (['"""ResNet-110"""'], {'fontsize': '(16)'}), "('ResNet-110', fontsize=16)\n", (6918, 6945), True, 'import matplotlib.pyplot as plt\n'), ((6947, 7046), 'matplotlib.pyplot.plot', 'plt.plot', (['train_error_58[1:, 1]', '(100 * train_error_58[1:, 2])'], {'label': '"""train (ReZero)"""', 'zorder': '(1)'}), "(train_error_58[1:, 1], 100 * train_error_58[1:, 2], label=\n 'train (ReZero)', zorder=1)\n", (6955, 7046), True, 'import matplotlib.pyplot as plt\n'), ((7055, 7154), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_error_58[1:, 1]', '(100 * valid_error_58[1:, 2])'], {'label': '"""valid (ReZero)"""', 'zorder': '(2)'}), "(valid_error_58[1:, 1], 100 * valid_error_58[1:, 2], label=\n 'valid (ReZero)', zorder=2)\n", (7063, 7154), True, 'import matplotlib.pyplot as plt\n'), ((7163, 7258), 'matplotlib.pyplot.plot', 'plt.plot', (['train_error_59[1:, 1]', '(100 * train_error_59[1:, 2])', '"""c"""'], {'label': '"""train"""', 
'zorder': '(3)'}), "(train_error_59[1:, 1], 100 * train_error_59[1:, 2], 'c', label=\n 'train', zorder=3)\n", (7171, 7258), True, 'import matplotlib.pyplot as plt\n'), ((7267, 7362), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_error_59[1:, 1]', '(100 * valid_error_59[1:, 2])', '"""m"""'], {'label': '"""valid"""', 'zorder': '(4)'}), "(valid_error_59[1:, 1], 100 * valid_error_59[1:, 2], 'm', label=\n 'valid', zorder=4)\n", (7275, 7362), True, 'import matplotlib.pyplot as plt\n'), ((7372, 7415), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""y"""', 'style': '"""sci"""'}), "(axis='y', style='sci')\n", (7392, 7415), True, 'import matplotlib.pyplot as plt\n'), ((7416, 7430), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7424, 7430), True, 'import matplotlib.pyplot as plt\n'), ((7431, 7480), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '"""x-large"""'}), "(loc='upper right', fontsize='x-large')\n", (7441, 7480), True, 'import matplotlib.pyplot as plt\n'), ((7618, 7634), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2)', '(40)'], {}), '(-2, 40)\n', (7626, 7634), True, 'import matplotlib.pyplot as plt\n'), ((7635, 7651), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(300)'], {}), '(0, 300)\n', (7643, 7651), True, 'import matplotlib.pyplot as plt\n'), ((7652, 7706), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""resnet110_error.png"""'], {'box_inches': '"""tight"""'}), "('resnet110_error.png', box_inches='tight')\n", (7663, 7706), True, 'import matplotlib.pyplot as plt\n'), ((7707, 7717), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7715, 7717), True, 'import matplotlib.pyplot as plt\n'), ((7779, 7819), 'numpy.genfromtxt', 'genfromtxt', (['train_loss_58'], {'delimiter': '""","""'}), "(train_loss_58, delimiter=',')\n", (7789, 7819), False, 'from numpy import genfromtxt\n'), ((7880, 7920), 'numpy.genfromtxt', 'genfromtxt', (['valid_loss_58'], {'delimiter': 
'""","""'}), "(valid_loss_58, delimiter=',')\n", (7890, 7920), False, 'from numpy import genfromtxt\n'), ((7982, 8022), 'numpy.genfromtxt', 'genfromtxt', (['train_loss_59'], {'delimiter': '""","""'}), "(train_loss_59, delimiter=',')\n", (7992, 8022), False, 'from numpy import genfromtxt\n'), ((8083, 8123), 'numpy.genfromtxt', 'genfromtxt', (['valid_loss_59'], {'delimiter': '""","""'}), "(valid_loss_59, delimiter=',')\n", (8093, 8123), False, 'from numpy import genfromtxt\n'), ((8131, 8143), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8141, 8143), True, 'import matplotlib.pyplot as plt\n'), ((8144, 8176), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {'fontsize': '(14)'}), "('epoch', fontsize=14)\n", (8154, 8176), True, 'import matplotlib.pyplot as plt\n'), ((8177, 8208), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {'fontsize': '(14)'}), "('loss', fontsize=14)\n", (8187, 8208), True, 'import matplotlib.pyplot as plt\n'), ((8209, 8245), 'matplotlib.pyplot.title', 'plt.title', (['"""ResNet-110"""'], {'fontsize': '(16)'}), "('ResNet-110', fontsize=16)\n", (8218, 8245), True, 'import matplotlib.pyplot as plt\n'), ((8247, 8337), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss_58[1:, 1]', 'train_loss_58[1:, 2]'], {'label': '"""train (ReZero)"""', 'zorder': '(1)'}), "(train_loss_58[1:, 1], train_loss_58[1:, 2], label='train (ReZero)',\n zorder=1)\n", (8255, 8337), True, 'import matplotlib.pyplot as plt\n'), ((8349, 8439), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss_58[1:, 1]', 'valid_loss_58[1:, 2]'], {'label': '"""valid (ReZero)"""', 'zorder': '(2)'}), "(valid_loss_58[1:, 1], valid_loss_58[1:, 2], label='valid (ReZero)',\n zorder=2)\n", (8357, 8439), True, 'import matplotlib.pyplot as plt\n'), ((8451, 8537), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss_59[1:, 1]', 'train_loss_59[1:, 2]', '"""c"""'], {'label': '"""train"""', 'zorder': '(3)'}), "(train_loss_59[1:, 1], train_loss_59[1:, 2], 'c', 
label='train',\n zorder=3)\n", (8459, 8537), True, 'import matplotlib.pyplot as plt\n'), ((8549, 8635), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss_59[1:, 1]', 'valid_loss_59[1:, 2]', '"""m"""'], {'label': '"""valid"""', 'zorder': '(4)'}), "(valid_loss_59[1:, 1], valid_loss_59[1:, 2], 'm', label='valid',\n zorder=4)\n", (8557, 8635), True, 'import matplotlib.pyplot as plt\n'), ((8648, 8691), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""y"""', 'style': '"""sci"""'}), "(axis='y', style='sci')\n", (8668, 8691), True, 'import matplotlib.pyplot as plt\n'), ((8692, 8706), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8700, 8706), True, 'import matplotlib.pyplot as plt\n'), ((8707, 8756), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '"""x-large"""'}), "(loc='upper right', fontsize='x-large')\n", (8717, 8756), True, 'import matplotlib.pyplot as plt\n'), ((8895, 8914), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.4)'], {}), '(-0.1, 1.4)\n', (8903, 8914), True, 'import matplotlib.pyplot as plt\n'), ((8914, 8930), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(300)'], {}), '(0, 300)\n', (8922, 8930), True, 'import matplotlib.pyplot as plt\n'), ((8931, 8984), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""resnet110_loss.png"""'], {'box_inches': '"""tight"""'}), "('resnet110_loss.png', box_inches='tight')\n", (8942, 8984), True, 'import matplotlib.pyplot as plt\n'), ((8985, 8995), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8993, 8995), True, 'import matplotlib.pyplot as plt\n')] |
"""Collection of classes for processed datasets."""
import os
import random
from glob import glob
from typing import List, Tuple
import numpy as np
import torch.utils.data
import torchvision.transforms
from facenet_pytorch import fixed_image_standardization
from torch import Tensor
from src.features import transform
class TripletDataset(torch.utils.data.Dataset):
    """Dataset that serves image triplets loaded from a pre-built .npy file."""

    def __init__(self, triplet_file_id: int, transforms: bool = False):
        """
        Build the dataset backed by the triplet file with the given id.

        :param triplet_file_id: id of the file of triplets
        :param transforms: when True, add random horizontal flipping
        :raises ValueError: if no triplet file matches the id
        """
        super().__init__()
        self._filepath = self._get_filepath(triplet_file_id)
        if self._filepath is None:
            raise ValueError(f"{triplet_file_id} is an invalid file id")
        self._triplets = np.load(self._filepath)
        pipeline = [fixed_image_standardization]
        if transforms:
            pipeline.append(torchvision.transforms.RandomHorizontalFlip(0.5))
        self._transforms = torchvision.transforms.Compose(pipeline)

    def __getitem__(self, idx: int) -> Tuple[List[Tensor], List[str]]:
        row = self._triplets[idx]
        paths, classes = row[:3], row[3:]
        tensors = transform.images_to_tensors(*paths)
        transformed = [self._transforms(tensor) for tensor in tensors]
        return transformed, classes

    def __len__(self):
        return len(self._triplets)

    def get_triplet(self, idx: int) -> Tuple[List[str], List[str]]:
        """
        Get paths and classes of a triplet instead of tensors and classes.

        :param idx: id of the triplet.
        """
        row = self._triplets[idx]
        return row[:3], row[3:]

    @staticmethod
    def _get_filepath(triplet_file_id: int) -> str:
        """
        Retrieve the path to the triplet file given its id.

        :param triplet_file_id: id of the file of triplets
        :return: path to the triplet file, or None when no file matches
        """
        triplet_dir = os.path.join("data", "processed", "triplets")
        candidates = glob(os.path.join(triplet_dir, "*.npy"))
        prefix = str(triplet_file_id).zfill(2)
        return next(
            (f for f in candidates if os.path.basename(f).startswith(prefix)),
            None,
        )

    def get_name(self) -> str:
        """
        Extract the name of the dataset from the name of the triplet file on which it is based.
        """
        stem, _ = os.path.splitext(os.path.basename(self._filepath))
        return "_".join(stem.split("_")[1:-1])
class FolderDataset(torchvision.datasets.ImageFolder):
    """ImageFolder-backed dataset with fixed standardization and optional flip/shuffle."""
    def __init__(self, image_path, transform=False, shuffle=False):
        # NOTE(review): the bool parameter `transform` shadows the imported
        # `transform` module inside this method; the module is still reachable
        # from __getitem__, where it is actually used.
        super().__init__(image_path)
        self.root = image_path
        # Work on a copy of ImageFolder's (path, class) pairs so shuffling
        # does not disturb the parent class's `samples` list.
        self.images = self.samples.copy()
        if shuffle:
            random.shuffle(self.images)
        transformations = [fixed_image_standardization]
        if transform:
            transformations.append(torchvision.transforms.RandomHorizontalFlip(0.5))
        self._transforms = torchvision.transforms.Compose(transformations)
    def __getitem__(self, index):
        image, class_ = self.images[index]
        # `image` is a file path here; presumably images_to_tensors loads and
        # converts it to a tensor — TODO confirm against src.features.transform.
        image = transform.images_to_tensors(image)
        image = self._transforms(image)
        return image, class_
    def __len__(self):
        return len(self.images)
| [
"random.shuffle",
"os.path.join",
"os.path.splitext",
"src.features.transform.images_to_tensors",
"os.path.basename",
"numpy.load"
] | [((827, 850), 'numpy.load', 'np.load', (['self._filepath'], {}), '(self._filepath)\n', (834, 850), True, 'import numpy as np\n'), ((1257, 1293), 'src.features.transform.images_to_tensors', 'transform.images_to_tensors', (['*images'], {}), '(*images)\n', (1284, 1293), False, 'from src.features import transform\n'), ((2038, 2083), 'os.path.join', 'os.path.join', (['"""data"""', '"""processed"""', '"""triplets"""'], {}), "('data', 'processed', 'triplets')\n", (2050, 2083), False, 'import os\n'), ((2569, 2601), 'os.path.basename', 'os.path.basename', (['self._filepath'], {}), '(self._filepath)\n', (2585, 2601), False, 'import os\n'), ((2624, 2650), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (2640, 2650), False, 'import os\n'), ((3353, 3387), 'src.features.transform.images_to_tensors', 'transform.images_to_tensors', (['image'], {}), '(image)\n', (3380, 3387), False, 'from src.features import transform\n'), ((2113, 2148), 'os.path.join', 'os.path.join', (['triplet_path', '"""*.npy"""'], {}), "(triplet_path, '*.npy')\n", (2125, 2148), False, 'import os\n'), ((2992, 3019), 'random.shuffle', 'random.shuffle', (['self.images'], {}), '(self.images)\n', (3006, 3019), False, 'import random\n'), ((2278, 2300), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (2294, 2300), False, 'import os\n')] |
'''
Created on Apr 15, 2016
Evaluate the performance of Top-K recommendation:
Protocol: leave-1-out evaluation
Measures: Hit Ratio and NDCG
(more details are in: <NAME>, et al. Fast Matrix Factorization for Online Recommendation with Implicit Feedback. SIGIR'16)
@author: hexiangnan
'''
import math
import heapq # for retrieval topK
from multiprocessing import cpu_count
from multiprocessing import Pool
import numpy as np
from time import time
#from numba import jit, autojit
# Global variables that are shared across processes
_model = None            # model object whose scoring tensors are evaluated
_sess = None             # session used for the sess.run(..., feed_dict=...) calls
_dataset = None          # dataset (or tuple of datasets for the Multi_* models)
_K = None                # top-K cutoff, taken from args.topK
_DictList = None         # per-test-user feed dicts built by init_evaluate_model
_gtItem = None           # ground-truth items (reset by eval/eval_FISM)
_user_prediction = None  # cached per-user predictions (reset by eval/eval_FISM)
_model_name = None       # args.model string; selects the scoring branch
_feed_dict = None        # feed dict for the current FISM test case
def init_evaluate_model(model, dataset, args):
    """Precompute one feed dict per test rating, ranking against every item.

    The ground-truth item is appended after the full item list; CMF models
    feed items through `item_input_buy`, all others through `item_input`.
    """
    feed_dicts = []
    item_placeholder = (model.item_input_buy if args.model == 'CMF'
                        else model.item_input)
    for user, gt_item in dataset.testRatings:
        candidates = range(dataset.num_items)  # rank on all items (Python 2: list)
        candidates.append(gt_item)
        user_column = np.full(len(candidates), user, dtype='int32')[:, None]
        item_column = np.array(candidates)[:, None]
        feed_dicts.append({model.user_input: user_column,
                           item_placeholder: item_column})
    return feed_dicts
def gen_feed_dict(dataset):
    """Build per-test-user feed dicts keyed by tensor names (for restored graphs)."""
    feed_dicts = []
    for user, gt_item in dataset.testRatings:
        candidates = range(dataset.num_items)  # rank on all items (Python 2: list)
        candidates.append(gt_item)
        user_column = np.full(len(candidates), user, dtype='int32')[:, None]
        item_column = np.array(candidates)[:, None]
        feed_dicts.append({'input_data/user_input:0': user_column,
                           'input_data/item_input:0': item_column})
    return feed_dicts
def eval(model, sess, dataset, DictList, args, behave_type = None):
    """
    Leave-one-out evaluation over all test users.

    Publishes the model/session/config into module globals consumed by
    _eval_one_rating, then scores every test user in turn.

    :param model: model exposing the scoring tensors run by _eval_one_rating
    :param sess: session used to execute the graph
    :param dataset: dataset (tuple of datasets for the Multi_* models)
    :param DictList: per-user feed dicts built by init_evaluate_model
    :param args: parsed arguments (uses args.topK, args.model, args.dataset)
    :param behave_type: behavior to score ('ipv', 'cart', 'buy' or None)
    :return: (hits50, hits100, hits200, ndcgs50, ndcgs100, ndcgs200, ranks),
             one entry per test user
    """
    global _model
    global _K
    global _DictList
    global _sess
    global _dataset
    global _gtItem
    global _user_prediction
    global _model_name
    global _feed_dict
    _dataset = dataset
    _model = model
    _sess = sess
    _K = args.topK
    _model_name = args.model
    # NOTE(review): on the 'ali2' dataset, 'cart' evaluation falls back to the
    # 'buy' scorer — confirm the rationale with the data owner.
    if (args.dataset == 'ali2') and (behave_type == 'cart'):
        behave_type = 'buy'
    hits50,hits100,hits200, ndcgs50,ndcgs100,ndcgs200, ranks, _gtItem, _user_prediction = [], [], [], [], [],[],[],[],[]
    # give predictions on users
    # for idx in xrange(len(_DictList)):
    #     if args.model == 'Multi_GMF':
    #         _gtItem.append(_dataset[0].testRatings[idx][1])
    #         _user_prediction.append(_sess.run(_model.score_buy, feed_dict = _DictList[idx]))
    #     else:
    #         _gtItem.append(_dataset.testRatings[idx][1])
    #         _user_prediction.append(_sess.run(_model.output, feed_dict = _DictList[idx]))
    # cpu_num = 4
    # pool = Pool(cpu_num)
    # res = pool.map(_eval_one_rating, range(len(_DictList)))
    # pool.close()
    # pool.join()
    # hits = [r[0] for r in res]
    # ndcgs = [r[1] for r in res]
    _DictList = DictList
    # Evaluate sequentially; each call returns hit/NDCG at cutoffs 50/100/200
    # plus the 1-based rank of the ground-truth item.
    for idx in xrange(len(_DictList)):
        (hr50,hr100,hr200, ndcg50,ndcg100,ndcg200, rank) = _eval_one_rating(idx, behave_type)
        hits50.append(hr50)
        ndcgs50.append(ndcg50)
        hits100.append(hr100)
        ndcgs100.append(ndcg100)
        hits200.append(hr200)
        ndcgs200.append(ndcg200)
        ranks.append(rank)
    return (hits50,hits100,hits200, ndcgs50,ndcgs100,ndcgs200, ranks)
def eval_FISM(model, sess, dataset, args, behave_type = None):
    """
    Leave-one-out evaluation for the FISM model.

    Unlike eval(), feed dicts are built on the fly because FISM additionally
    needs each user's rated-item history (item_rate) and its length (item_num).

    :param model: FISM model exposing user/item/item_rate/item_num placeholders
    :param sess: session used to execute the graph
    :param dataset: dataset with testRatings, trainDict, num_items, max_rate
    :param args: parsed arguments (uses args.topK, args.model)
    :param behave_type: unused here; kept for signature parity with eval()
    :return: (hits, ndcgs, ranks), one entry per test user
    """
    global _model
    global _K
    global _DictList
    global _sess
    global _dataset
    global _gtItem
    global _user_prediction
    global _model_name
    global _feed_dict
    _dataset = dataset
    _model = model
    _sess = sess
    _K = args.topK
    _model_name = args.model
    hits, ndcgs, ranks, _gtItem, _user_prediction = [], [], [], [], []
    for idx in xrange(len(dataset.testRatings)):
        t1 = time()  # NOTE(review): timing value is never used
        user, gtItem = dataset.testRatings[idx]
        items = range(dataset.num_items) # rank on all items
        items.append(gtItem)
        user_input = np.full(len(items), user, dtype='int32')[:, None]
        item_input = np.array(items)[:,None]
        # item rate / item_num
        item_rate, item_num = [], []
        item_rate_1 = dataset.trainDict[user]['buy']
        for i in items:
            # Exclude the candidate item itself from the user's history,
            # then pad the history to max_rate with the padding id num_items.
            # (Python 2: filter returns a list.)
            item_rate_2 = filter(lambda x:x != i, item_rate_1)
            item_num.append(len(item_rate_2))
            item_rate_2 = item_rate_2 + [dataset.num_items]*(dataset.max_rate - len(item_rate_2))
            item_rate.append(item_rate_2)
            assert len(item_rate_2) == dataset.max_rate
        feed_dict = {model.user_input: user_input, model.item_input: item_input,
                     model.item_rate: np.array(item_rate), model.item_num: np.array(item_num).reshape(-1, 1)}
        # Publish the feed dict through the module global read by
        # _eval_one_rating_FISM.
        _feed_dict = feed_dict
        (hr, ndcg, rank) = _eval_one_rating_FISM(idx)
        hits.append(hr)
        ndcgs.append(ndcg)
        ranks.append(rank)
    return (hits, ndcgs, ranks)
def _eval_one_rating_FISM(idx):
    """Rank the held-out item of one test case under the FISM model.

    Reads the module globals (_dataset, _sess, _model, _feed_dict, _K)
    published by eval_FISM. Returns (hr, ndcg, 1-based rank).
    """
    target = _dataset.testRatings[idx][1]
    scores = _sess.run(_model.output, feed_dict=_feed_dict)
    target_score = scores[target]
    rank = sum(1 for score in scores if score > target_score)
    # Original "anomaly" guard: if the target's score ties the first three
    # scores, the prediction is treated as degenerate and pushed to the bottom.
    if scores[target] == scores[0] and scores[target] == scores[1] and scores[target] == scores[2]:
        rank = 99999
    if rank < _K:
        hr, ndcg = 1, math.log(2) / math.log(rank + 2)
    else:
        hr, ndcg = 0, 0
    # Callers expect the 1-based rank.
    return (hr, ndcg, rank + 1)
def _hit_ndcg(rank, cutoff):
    """Return (hit, ndcg) for a zero-based *rank* at the given *cutoff*.

    hit is 1 when the target ranks inside the cutoff; ndcg is the standard
    log-discounted gain log(2)/log(rank + 2) in that case, else both are 0.
    """
    if rank < cutoff:
        return 1, math.log(2) / math.log(rank + 2)
    return 0, 0

def _eval_one_rating(idx, behave_type):
    """Score one leave-one-out test case against the whole item catalogue.

    Reads the module globals published by eval(). Items already seen in the
    training data are skipped when counting how many items outrank the
    ground-truth item.

    :param idx: index of the test rating (also used as the user key for CMF)
    :param behave_type: behavior head to score ('ipv', 'cart', 'buy' or None)
    :return: (hr50, hr100, hr200, ndcg50, ndcg100, ndcg200, rank_real) where
             rank_real is the 1-based rank of the ground-truth item
    """
    # Pick the ground-truth item, the training interactions to skip, and the
    # prediction tensor matching the model family / behavior type.
    if _model_name in ['Multi_GMF', 'Multi_MLP', 'Multi_NCF']:
        gtItem = _dataset[0].testRatings[idx][1]
        Train_k = _dataset[0].trainMatrix
        if behave_type == 'ipv':
            predictions = _sess.run(_model.score_ipv, feed_dict=_DictList[idx])
        elif behave_type == 'cart':
            predictions = _sess.run(_model.score_cart, feed_dict=_DictList[idx])
        else:
            predictions = _sess.run(_model.score_buy, feed_dict=_DictList[idx])
    elif _model_name == 'CMF':
        gtItem = _dataset.testRatings[idx][1]
        Train_k = _dataset.trainDict[idx]['buy']
        predictions = _sess.run(_model.output_buy, feed_dict=_DictList[idx])
    else:
        gtItem = _dataset.testRatings[idx][1]
        Train_k = _dataset.trainMatrix
        if behave_type == 'ipv':
            predictions = _sess.run(_model.score_ipv, feed_dict=_DictList[idx])
        elif behave_type == 'cart':
            predictions = _sess.run(_model.score_cart, feed_dict=_DictList[idx])
        elif behave_type == 'buy':
            predictions = _sess.run(_model.score_buy, feed_dict=_DictList[idx])
        else:
            predictions = _sess.run(_model.output, feed_dict=_DictList[idx])
    # Count the items scoring above the target, skipping training items.
    # CMF keeps training items in a per-user collection; the other models use
    # a (user, item)-keyed matrix (Python 2 dict, hence has_key).
    rank = 0
    rank_score = predictions[gtItem]
    tk = 0
    if _model_name == 'CMF':
        for score in predictions:
            if score > rank_score:
                if tk not in Train_k:
                    rank += 1
            tk += 1
    else:
        for score in predictions:
            if score > rank_score:
                if not Train_k.has_key((idx, tk)):
                    rank += 1
            tk += 1
    # Degenerate predictions (target tied with the first three scores) are
    # pushed to the bottom of the ranking.
    if (predictions[gtItem] == predictions[0]) and (predictions[gtItem] == predictions[1]) and (predictions[gtItem] == predictions[2]):
        rank = 99999
    # Hit ratio / NDCG at the three fixed cutoffs, via the shared helper
    # (replaces three copy-pasted computations).
    hr50, ndcg50 = _hit_ndcg(rank, 50)
    hr100, ndcg100 = _hit_ndcg(rank, 100)
    hr200, ndcg200 = _hit_ndcg(rank, 200)
    # real ranking should be this
    return (hr50, hr100, hr200, ndcg50, ndcg100, ndcg200, rank + 1)
| [
"numpy.array",
"time.time",
"math.log"
] | [((4202, 4208), 'time.time', 'time', ([], {}), '()\n', (4206, 4208), False, 'from time import time\n'), ((1067, 1082), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (1075, 1082), True, 'import numpy as np\n'), ((1767, 1782), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (1775, 1782), True, 'import numpy as np\n'), ((4444, 4459), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (4452, 4459), True, 'import numpy as np\n'), ((5052, 5071), 'numpy.array', 'np.array', (['item_rate'], {}), '(item_rate)\n', (5060, 5071), True, 'import numpy as np\n'), ((5864, 5875), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (5872, 5875), False, 'import math\n'), ((5878, 5896), 'math.log', 'math.log', (['(rank + 2)'], {}), '(rank + 2)\n', (5886, 5896), False, 'import math\n'), ((8148, 8159), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (8156, 8159), False, 'import math\n'), ((8162, 8180), 'math.log', 'math.log', (['(rank + 2)'], {}), '(rank + 2)\n', (8170, 8180), False, 'import math\n'), ((8300, 8311), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (8308, 8311), False, 'import math\n'), ((8314, 8332), 'math.log', 'math.log', (['(rank + 2)'], {}), '(rank + 2)\n', (8322, 8332), False, 'import math\n'), ((8454, 8465), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (8462, 8465), False, 'import math\n'), ((8468, 8486), 'math.log', 'math.log', (['(rank + 2)'], {}), '(rank + 2)\n', (8476, 8486), False, 'import math\n'), ((5089, 5107), 'numpy.array', 'np.array', (['item_num'], {}), '(item_num)\n', (5097, 5107), True, 'import numpy as np\n')] |
import numpy as np
import scipy.io as spio
from . import calc_R1_function_python_GEN
def calculate_r1_factor(proj, proj_angles, atom_positions, atomic_spec, atomic_numbers,
                        resolution, z_direction, b_factor, h_factor, axis_convention):
    """Run the generated R1 routine and return the second element of its result."""
    b = np.array(b_factor)
    h = np.array(h_factor)
    axes = np.array(axis_convention)
    outputs = calc_R1_function_python_GEN.calc_R1_function_indivFA_python(
        atom_positions, atomic_spec, proj, proj_angles, resolution,
        b, h, axes, atomic_numbers, z_direction)
    return outputs[1]
| [
"numpy.array"
] | [((403, 421), 'numpy.array', 'np.array', (['b_factor'], {}), '(b_factor)\n', (411, 421), True, 'import numpy as np\n'), ((423, 441), 'numpy.array', 'np.array', (['h_factor'], {}), '(h_factor)\n', (431, 441), True, 'import numpy as np\n'), ((443, 468), 'numpy.array', 'np.array', (['axis_convention'], {}), '(axis_convention)\n', (451, 468), True, 'import numpy as np\n')] |
import json
import numpy as np
import matplotlib.pyplot as plt
def to_seconds(s):
    """Convert an 'HH:MM:SS[.fraction]' timestamp string to seconds.

    :param s: colon-separated timestamp, e.g. "0:01:23.456789000"
    :return: total number of seconds as a float
    """
    # Descriptive names; the original locals shadowed the built-in `min`.
    hours, minutes, seconds = (float(part) for part in s.split(':'))
    return hours * 3600 + minutes * 60 + seconds
def extract(gst_log, script_log, debug=False):
    """Parse a GStreamer debug log and a capture-script log into time series.

    Results are written to ``script_log + ".json"`` with keys:
    st/ts (sync/output timestamps per camera id), lf (lost-frame events),
    n/p/o ("Samples_", "Pulled_" and "Overrun_" events from the script log).
    """
    with open(gst_log, "r") as f:
        lines = f.readlines()
    # Markers searched for in each GStreamer log line.
    id_s = "create:<v4l2src"
    st_s = "sync to "
    ts_s = "out ts "
    lf_s = "lost frames detected: count = "
    # One series per device id (index 0 / 1) for each marker.
    st_l, ts_l, lf_l = [[], []], [[], []], [[], []]
    for line in lines:
        id_p = line.find(id_s)
        st_p = line.find(st_s)
        ts_p = line.find(ts_s)
        lf_p = line.find(lf_s)
        if id_p > 0:
            # Device id is the single character right after the marker; it is
            # reused for later lines until the next "create:<v4l2src" line.
            id_p += len(id_s)
            id = int(line[id_p:id_p+1])
        if st_p > 0 and ts_p > 0:
            st_p += len(st_s)
            ts_p += len(ts_s)
            # 17-char slice — assumed to cover the full H:MM:SS.fffffffff
            # timestamp; TODO confirm against the actual log format.
            st = to_seconds(line[st_p:st_p+17])
            ts = to_seconds(line[ts_p:ts_p+17])
            if debug:
                print(id, st, ts)
            st_l[id].append(st)
            ts_l[id].append(ts)
        if lf_p > 0:
            lf_p += len(lf_s)
            line = line[lf_p:]
            lf = int(line.split()[0])  # lost-frame count
            p = line.find("ts: ")
            t = to_seconds(line[p+4:])
            if debug:
                print("lf", id, lf, t)
            lf_l[id].append((lf, t))
    st, ts, lf = st_l, ts_l, lf_l
    # print(st)
    # print(ts)
    with open(script_log, "r") as f:
        lines = f.readlines()
    # Markers in the script log; the character after the marker is the id.
    new_s = "Samples_"
    pull_s = "Pulled_"
    over_s = "Overrun_"
    n, p, o = [[], []], [[], []], [[], []]
    for line in lines:
        new_p = line.find(new_s)
        pull_p = line.find(pull_s)
        over_p = line.find(over_s)
        if new_p >= 0:
            new_p += len(new_s)
            id = int(line[new_p])
            n[id].append([float(x) for x in line[new_p + 2:].split()])
        if pull_p >= 0:
            pull_p += len(pull_s)
            id = int(line[pull_p])
            # Skip the literal word "at" that appears in "Pulled_" lines.
            p[id].append([float(x) for x in line[pull_p + 2:].split() if x != "at"])
        if over_p >= 0:
            over_p += len(over_s)
            id = int(line[over_p])
            o[id].append([float(x) for x in line[over_p + 2:].split()])
    # print(n)
    # print(p)
    if debug:
        print(o)
    # Persist everything next to the script log for later plotting via load().
    with open(script_log + ".json", "w") as f:
        d = {"st" : st, "ts" : ts, "lf" : lf, "n" : n, "p" : p, "o" : o}
        json.dump(d, f, indent=4)
def load(json_filename):
    """Deserialize the extraction results previously dumped by extract()."""
    with open(json_filename, "r") as handle:
        payload = json.load(handle)
    return payload
def plot(d, ids):
    """Plot the extracted timing data for *ids* camera streams.

    Draws two figures: "v4l2src" (sync vs. output timestamps plus lost
    frames) and "appsink" (new samples, pulls and overruns).  Mutates *d*
    in place by converting its lists to numpy arrays.
    """
    st, ts, lf, n, p, o = d["st"], d["ts"], d["lf"], d["n"], d["p"], d["o"]
    plt.figure("v4l2src")
    for id in range(ids):
        lf[id] = np.array(lf[id])
        # plt.figure(str(id))
        plt.plot(ts[id], st[id], ".-", label="good_"+str(id))
        if len(lf[id].shape) > 1:
            # Lost-frame records exist for this stream: (count, ts) pairs.
            plt.plot(lf[id][:, 1], lf[id][:, 0], ".-", label="lost_"+str(id))
    plt.legend()
    plt.tight_layout()
    plt.figure("appsink")
    for id in range(ids):
        n[id], p[id], o[id] = np.array(n[id]), np.array(p[id]), np.array(o[id])
        # plt.figure(str(id))
        plt.plot(n[id][:, 1], n[id][:, 0] / 10., ".-", label="new_"+str(id))
        plt.plot(p[id][:, 2], p[id][:, 1], ".-", label="pull_"+str(id))
        if len(o[id].shape) > 1:
            plt.plot(o[id][:, 1], o[id][:, 0] / 10., ".-", label="drop_"+str(id))
    plt.legend()
    plt.tight_layout()
if __name__ == "__main__":
    gst_log = "/tmp/gst_logs"
    script_log = "/home/reip/software/experiments/log"
    # For this run both log types live in the same combined file.
    gst_log = script_log = "/home/reip/software/experiments/log_both"
    # extract(gst_log, script_log, debug=True)
    # script_log += "_overrun"
    # script_log += "_lost"
    d = load(script_log + ".json")
    plot(d, ids=2)
    plt.show()
| [
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"json.load",
"json.dump",
"matplotlib.pyplot.show"
] | [((2646, 2667), 'matplotlib.pyplot.figure', 'plt.figure', (['"""v4l2src"""'], {}), "('v4l2src')\n", (2656, 2667), True, 'import matplotlib.pyplot as plt\n'), ((2937, 2949), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2947, 2949), True, 'import matplotlib.pyplot as plt\n'), ((2954, 2972), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2970, 2972), True, 'import matplotlib.pyplot as plt\n'), ((2978, 2999), 'matplotlib.pyplot.figure', 'plt.figure', (['"""appsink"""'], {}), "('appsink')\n", (2988, 2999), True, 'import matplotlib.pyplot as plt\n'), ((3405, 3417), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3415, 3417), True, 'import matplotlib.pyplot as plt\n'), ((3422, 3440), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3438, 3440), True, 'import matplotlib.pyplot as plt\n'), ((3798, 3808), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3806, 3808), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2388), 'json.dump', 'json.dump', (['d', 'f'], {'indent': '(4)'}), '(d, f, indent=4)\n', (2372, 2388), False, 'import json\n'), ((2470, 2482), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2479, 2482), False, 'import json\n'), ((2711, 2727), 'numpy.array', 'np.array', (['lf[id]'], {}), '(lf[id])\n', (2719, 2727), True, 'import numpy as np\n'), ((3056, 3071), 'numpy.array', 'np.array', (['n[id]'], {}), '(n[id])\n', (3064, 3071), True, 'import numpy as np\n'), ((3073, 3088), 'numpy.array', 'np.array', (['p[id]'], {}), '(p[id])\n', (3081, 3088), True, 'import numpy as np\n'), ((3090, 3105), 'numpy.array', 'np.array', (['o[id]'], {}), '(o[id])\n', (3098, 3105), True, 'import numpy as np\n')] |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Load the pickled dict of score tables ("insert"/"delete"/"irof") and draw
# one distribution subplot per score metric.
data = np.load("scores.npy", allow_pickle=True).item()
fig, axs = plt.subplots(3, 1, figsize=(20, 20))
for i, score in enumerate(["insert", "delete", "irof"]):
    ax = axs[i]
    df = data[score]
    for key in df:
        # Emphasize the flip-detection method and the mean baseline.
        if key=="rbm_flip_detection" or key=="mean":
            d = dict(linewidth=5, linestyle="--")
        else:
            d = dict(linewidth=2)
        sns.distplot(df[key], ax=ax, kde_kws=d)
    # sns.distplot(df["mean"], ax=ax)
    ax.legend(df.keys())
    ax.set_title(score)
plt.show()
"seaborn.distplot",
"numpy.load",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((141, 177), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(20, 20)'}), '(3, 1, figsize=(20, 20))\n', (153, 177), True, 'import matplotlib.pyplot as plt\n'), ((582, 592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (590, 592), True, 'import matplotlib.pyplot as plt\n'), ((81, 121), 'numpy.load', 'np.load', (['"""scores.npy"""'], {'allow_pickle': '(True)'}), "('scores.npy', allow_pickle=True)\n", (88, 121), True, 'import numpy as np\n'), ((450, 489), 'seaborn.distplot', 'sns.distplot', (['df[key]'], {'ax': 'ax', 'kde_kws': 'd'}), '(df[key], ax=ax, kde_kws=d)\n', (462, 489), True, 'import seaborn as sns\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 11 14:45:29 2019
@author: txuslopez
"""
'''
This Script is a RUN function which uses the cellular automation defined in 'biosystem.py' to classify data from the popular Iris Flower dataset. Error between predicted results is then calculated and compared to other models.
'''
#import os
#os.system("%matplotlib inline")
from skmultiflow.trees.hoeffding_tree import HoeffdingTree
from skmultiflow.lazy.knn import KNN
from copy import deepcopy
from skmultiflow.drift_detection.adwin import ADWIN
#from collections import deque
from skmultiflow.drift_detection.eddm import EDDM
from skmultiflow.drift_detection.ddm import DDM
from skmultiflow.bayes import NaiveBayes
from skmultiflow.drift_detection.page_hinkley import PageHinkley
#from sklearn import preprocessing
from timeit import default_timer as timer
from sklearn.model_selection import RandomizedSearchCV,GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
#from skmultiflow.data import DataStream
#from pylatex import Document, LongTable, MultiColumn
from CA_VonNeumann_estimator import CA_VonNeumann_Classifier
from sklearn import preprocessing
#import matplotlib.animation as animat; animat.writers.list()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
import warnings
import pickle
import psutil
import sys
import traceback
import logging
# Global matplotlib style: LaTeX text rendering, serif font, 12 pt base size.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams.update({'font.size': 12})
#mpl.rcParams['lines.linewidth'] = 2.0
#style.use('seaborn-dark') #sets the size of the charts
#style.use('ggplot')
#==============================================================================
# CLASSES
#==============================================================================
#==============================================================================
# FUNCTIONS
#==============================================================================
def empty_mutant(b):
    """Build a nested list with the shape given by *b* whose innermost
    cells are all ``[0]`` (used to track the last mutation time per cell)."""
    grid = [0]
    # Grow the structure from the innermost dimension outwards.
    for dim in np.flip(b, axis=0):
        layer = deepcopy(grid)
        grid = [layer] * int(dim)
    # Round-trip through numpy so every sub-list is a distinct object.
    return np.array(grid).tolist()
def empties(b):
    """Build a nested list with the shape given by *b* whose innermost
    cells are all empty lists (an empty cellular-automaton grid)."""
    grid = []
    # Grow the structure from the innermost dimension outwards.
    for dim in np.flip(b, axis=0):
        layer = deepcopy(grid)
        grid = [layer] * int(dim)
    # Round-trip through numpy so every sub-list is a distinct object.
    return np.array(grid).tolist()
def plot_CA_boundaries_allCAs(cellular_aut,ca_names,punto,num_automs,buch_X,buch_y,X_columns,y_columns,mutant_cs,mutants_t,mutants_d):#mutants_w
    """Render each automaton's cell grid as an image next to a scatter of
    the learned instances, annotate cells with their last mutation time,
    and save the figure as ``current_image_<punto>.svg``.

    Supports one or two automata (num_automs).  mutant_cs / mutants_d are
    currently unused.
    """
    images=[]
    for ca in range(num_automs):
        dim=cellular_aut[ca].dimensions
        # Create image arrays
        img = deepcopy(empties(dim))
        # Set variables to model results
        cells = cellular_aut[ca].cells
        # NOTE(review): both loops use len(cells) — assumes a square grid.
        for i in range(0, len(cells)):
            for j in range(0, len(cells)):
                if cells[i][j]:
                    # Color by the species (class) of the first occupant.
                    s = cells[i][j][0].species
                    if int(s)==0:
                        rgb = [255, 157, 137]#254,232,138-99,194,131
                    else:
                        rgb = [255, 82, 115]#196,121,0-99,100,139
                    img[i][j] = rgb
                else:
                    img[i][j] = [255,255,255]
        # Convert image arrays to appropriate data types
        rotated_img= np.rot90(img, 1)
        img = np.array(rotated_img, dtype='uint8')
        images.append(img)
    # Show the results
    # fig, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(14, 7))
    fig = plt.figure(figsize=(30, 15))
    # ax1 = fig.add_subplot(1,5,1, aspect=1.0)
    buch_pd_X=pd.DataFrame(buch_X)
    buch_pd_X.columns=X_columns
    buch_pd_y=pd.DataFrame(buch_y)
    buch_pd_y.columns=[y_columns]
    todo=pd.concat([buch_pd_X,buch_pd_y],axis=1)
    # Split instances by class for coloring.
    X1=todo[todo[y_columns]==0]
    X2=todo[todo[y_columns]==1]
    # X3=todo[todo['class']==2]
    # Data Subplot
    ax1 = fig.add_subplot(1,5,1,aspect=0.8)
    # ax1.set_xlim([0.0,1.0])
    # ax1.set_ylim([0.0,1.0])
    ax1.set_xlabel('$x_1$',fontsize=22)
    ax1.set_ylabel('$x_2$',fontsize=22)
    ax1.title.set_text('Learned instances')
    ax1.scatter(X1.iloc[:,0], X1.iloc[:,1], color='#ff9d89', marker='.',edgecolors='k',linewidths=0.0, s=200)#FEE88A-#63c283
    ax1.scatter(X2.iloc[:,0], X2.iloc[:,1], color='#ff5273', marker='.',edgecolors='k',linewidths=0.0, s=200)#C47900-#63648b
    if num_automs==1:
        ax2_t = fig.add_subplot(1,5,2)
    elif num_automs==2:
        ax2_t = fig.add_subplot(1,5,2)
        ax3_t = fig.add_subplot(1,5,3)
    if num_automs==1:
        ax2_t.set_xticks([], [])
        ax2_t.set_yticks([], [])
        ax2_t.title.set_text('CURIE 2x10')
        ax2_t.imshow(images[0])
        # Flip/rotate the mutation-time grid to match the image orientation.
        flipped_mutants_t=np.flip(mutants_t[0],0)
        rotated_mutant_t= np.rot90(flipped_mutants_t, 2)
    elif num_automs==2:
        ax2_t.set_xticks([], [])
        ax2_t.set_yticks([], [])
        ax2_t.title.set_text('CURIE 2x10')
        ax2_t.imshow(images[0])
        flipped_mutants_t=np.flip(mutants_t[0],0)
        rotated_mutant_t= np.rot90(flipped_mutants_t, 2)
        # Annotate each cell with its last mutation timestamp.
        for i in range(0, len(rotated_mutant_t)):
            for j in range(0, len(rotated_mutant_t)):
                ax2_t.text(i,j,rotated_mutant_t[i][j][0],ha='center',va='center')
        ax3_t.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off')
        ax3_t.title.set_text(ca_names[1]+': t last mut')
        ax3_t.imshow(images[1])
        flipped_mutants_t=np.flip(mutants_t[1],0)
        rotated_mutant_t= np.rot90(flipped_mutants_t, 2)
        for i in range(0, len(rotated_mutant_t)):
            for j in range(0, len(rotated_mutant_t)):
                ax3_t.text(i,j,rotated_mutant_t[i][j][0],ha='center',va='center')
    fig.tight_layout()
    plt.savefig('current_image_'+str(punto)+'.svg')
    plt.show()
def prequential_acc(predicted_class,Y_tst,PREQ_ACCS,t,f):
    """Update the prequential (test-then-train) accuracy at step *t*.

    *f* is the step of the last reset (e.g. last detected drift), so the
    running average effectively restarts after each drift.  Returns 1 at
    t == 0, otherwise the incrementally updated accuracy.
    """
    hit = 1 if predicted_class == Y_tst else 0
    if t == 0:
        return 1
    previous = PREQ_ACCS[-1]
    return previous + float((hit - previous) / (t - f + 1))
#def cellular_automatas_naming(cellular_automatas):
#
# ca_names=[str()]*len(cellular_automatas)
# for ca in range(len(cellular_automatas)):
# ca_names[ca]=r'\texttt{sCA}$'
#
# return ca_names
#def automatas_Texttable(cellular_automatas,automatas_results_mean,automatas_results_std,bd,ad,drift_position,measure_position_after_drift,t_automatas,title,names):
#
# bd_automatas_mean=[[]]*len(cellular_automatas)
# bd_automatas_std=[[]]*len(cellular_automatas)
# for h in range(len(cellular_automatas)):
# bd_automatas_mean[h]=np.round((automatas_results_mean[h][bd]),3)
# bd_automatas_std[h]=np.round((automatas_results_std[h][bd]),3)
#
# d_automatas_mean=[[]]*len(cellular_automatas)
# d_automatas_std=[[]]*len(cellular_automatas)
# for h in range(len(cellular_automatas)):
# d_automatas_mean[h]=np.round((automatas_results_mean[h][measure_position_after_drift]),3)
# d_automatas_std[h]=np.round((automatas_results_std[h][measure_position_after_drift]),3)
#
# ad_automatas_mean=[[]]*len(cellular_automatas)
# ad_automatas_std=[[]]*len(cellular_automatas)
# for h in range(len(cellular_automatas)):
# ad_automatas_mean[h]=np.round((automatas_results_mean[h][ad]),3)
# ad_automatas_std[h]=np.round((automatas_results_std[h][ad]),3)
#
# for h in range(len(cellular_automatas)):
# t_automatas.add_rows([['AUTOMATAS_'+title, 'Accuracy BD','Accuracy D','Accuracy AD'],[str(names[h]),str(bd_automatas_mean[h])+str('+-')+str(bd_automatas_std[h]),str(d_automatas_mean[h])+str('+-')+str(d_automatas_std[h]),str(ad_automatas_mean[h])+str('+-')+str(ad_automatas_std[h])]])
#
# print (t_automatas.draw())
def plot_automatas(size_X,size_Y,color,font_size,title,ca_name,no_scores,drift_pos,mean_scores):
    """Plot a prequential-accuracy curve with the preparatory interval
    shaded and a vertical line at each known drift position.

    *color* is currently unused (curve is drawn in blue).
    """
    fig, axes = plt.subplots(1,1,figsize=(size_X,size_Y))
    plt.title(title,size=font_size)
    axes.set_xlabel(r't',size=font_size)
    axes.set_ylabel(r'Prequential accuracy',size=font_size)
    plt.ylim(0.0,1.0)
    axes.set_xlim(0,len(mean_scores))
    axes.plot(mean_scores,color='b',label=ca_name,linestyle='-')
    # Shade the warm-up interval where no scores were produced.
    axes.axvspan(0, no_scores, alpha=0.5, color='#C47900')
    for ps in range(len(drift_pos)):
        axes.axvline(x=drift_pos[ps],color='k', linestyle='-')
    plt.show()
#def plot_learners(size_X,size_Y,color,font_size,title,learner_name,no_scores,drift_pos,mean_scores,stds_scores):
#
# fig, axes = plt.subplots(1,1,figsize=(size_X,size_Y))
#
# plt.title(title,size=font_size)
# axes.set_xlabel(r't',size=font_size)
# axes.set_ylabel(r'Prequential accuracy',size=font_size)
# plt.ylim(0.0,1.0)
# axes.set_xlim(0,len(mean_scores))
#
# axes.plot(mean_scores,color='b',label=learner_name,linestyle='-')
# axes.fill_between(range(len(mean_scores)), mean_scores-stds_scores, mean_scores+stds_scores,facecolor='#C47900', alpha=0.1)
#
# axes.axvspan(0, no_scores, alpha=0.5, color='#C47900')
#
# for ps in range(len(drift_pos)):
# axes.axvline(x=drift_pos[ps],color='k', linestyle='-')
#
# plt.show()
def get_neighbourhood(matrix, coordinates, distance):
    """Collect the von Neumann neighbourhood (cells within Manhattan
    radius *distance*) of the cell at *coordinates* in the nested-list
    *matrix* of arbitrary dimensionality.

    The centre cell itself is excluded and out-of-range indices are
    skipped; neighbours are returned in depth-first index order.
    """
    n_dims = len(coordinates)
    neighbours = []

    def _walk(node, dim=0, budget=distance, at_centre=True):
        # Past the last dimension: *node* is a cell; keep it unless it is
        # the centre cell itself.
        if dim == n_dims:
            if not at_centre:
                neighbours.append(node)
            return
        centre = coordinates[dim]
        if not (0 <= centre < len(node)):
            return
        # Spend the remaining Manhattan budget along this axis.
        for idx in range(centre - budget, centre + budget + 1):
            if 0 <= idx < len(node):
                _walk(node[idx],
                      dim + 1,
                      budget - abs(centre - idx),
                      at_centre and idx == centre)

    _walk(matrix)
    return neighbours
#def prequential_mut_calc(m,alpha,t,prev_fading_sum,prev_fading_increment):
#
# f_sum=m+(alpha*prev_fading_sum)
# f_increment=1+(alpha*prev_fading_increment)
# preq_mut=f_sum/f_increment
#
# return preq_mut
def hyperparametertuning_classifiers(learn,X,y,knn_max_w_size):
    """Grid-search hyperparameters for a stream learner on the preparatory
    data and return ``(fitted_learner, best_params)``.

    Supports KNN (tuned via sklearn's KNeighborsClassifier, then copied
    into a streaming KNN with window size *knn_max_w_size*), HoeffdingTree
    and NaiveBayes (no tunable grid).
    """
    cl_name=learn.__class__.__name__
    # print (cl_name)
    scor='balanced_accuracy'
    cv=10
    if cl_name=='KNN':
        KNN_grid = {'n_neighbors': [3,5,7,10,15],
                    'leaf_size': [3,5,7,10,15],
                    'algorithm':['kd_tree']
                    }
        grid_cv_KNN = GridSearchCV(estimator=KNeighborsClassifier(), cv=cv,scoring=scor,param_grid=KNN_grid)
        # grid_cv_KNN = RandomizedSearchCV(estimator=KNeighborsClassifier(), cv=cv,scoring=scor,param_distributions=KNN_grid)
        # NOTE(review): DataFrame.as_matrix() was removed in pandas 0.25 —
        # requires .values / .to_numpy() on modern pandas.
        grid_cv_KNN.fit(X.as_matrix(),y.as_matrix().ravel())
        # print('grid_cv_KNN.best_params_: ',grid_cv_KNN.best_params_)
        n_neighbors=grid_cv_KNN.best_params_['n_neighbors']
        leaf_size=grid_cv_KNN.best_params_['leaf_size']
        # Transfer the tuned values to the streaming KNN.
        tuned_params = {'n_neighbors': n_neighbors,'leaf_size': leaf_size,'max_window_size':knn_max_w_size}
        tuned_learn=KNN()
        tuned_learn.set_params(**tuned_params)
        tuned_learn.fit(X.as_matrix(), y.as_matrix().ravel())
    elif cl_name=='HoeffdingTree':
        grace_period_range=np.array([25,75,150,300])
        tie_threshold_range=np.linspace(0.001,1.0,5)
        split_confidence_range=np.linspace(0.000000001,0.1,5)
        split_criterion_range=['gini','info_gain', 'hellinger']
        leaf_prediction_range=['mc','nb', 'nba']
        HT_grid = {
            'grace_period': grace_period_range,
            'tie_threshold': tie_threshold_range,
            'split_confidence': split_confidence_range,
            'split_criterion':split_criterion_range,
            'leaf_prediction':leaf_prediction_range
        }
        grid_cv_HT=GridSearchCV(estimator=learn,scoring=scor,cv=cv,param_grid=HT_grid)
        # grid_cv_HT=RandomizedSearchCV(estimator=learn,scoring=scor,cv=cv,param_distributions=HT_grid)
        grid_cv_HT.fit(X.as_matrix(), y.as_matrix().ravel())
        # print('grid_cv_HT.best_params_: ',grid_cv_HT.best_params_)
        tuned_params=grid_cv_HT.best_params_
        tuned_learn=grid_cv_HT.best_estimator_
    elif cl_name=='NaiveBayes':
        # NaiveBayes has no grid to search; fit with defaults.
        tuned_params = {'nominal_attributes': None}
        tuned_learn=NaiveBayes()
        tuned_learn.set_params(**tuned_params)
        tuned_learn.fit(X.as_matrix(), y.as_matrix().ravel())
    # print('Final tuned algorithm: ',tuned_learn)
    return tuned_learn,tuned_params
def progressbar(it, prefix="", size=60, file=sys.stdout):
    """Yield the items of *it* while drawing a textual progress bar.

    *it* must support ``len()``.  *size* is the bar width in characters;
    *prefix* is printed before the bar; output goes to *file*.
    """
    count = len(it)

    def show(j):
        # Guard against an empty iterable: the original int(size*j/count)
        # raised ZeroDivisionError; draw a full bar instead.
        filled = int(size * j / count) if count else size
        file.write("%s[%s%s] %i/%i\r" % (prefix, "#" * filled, "." * (size - filled), j, count))
        file.flush()

    show(0)
    for i, item in enumerate(it):
        yield item
        show(i + 1)
    file.write("\n")
    file.flush()
#def genenerate_LatexTable():
# geometry_options = {
# "margin": "2.54cm",
# "includeheadfoot": True
# }
# doc = Document(page_numbers=True, geometry_options=geometry_options)
#
#
# # Generate data table
# with doc.create(LongTable("l l l")) as data_table:
#
# data_table.add_hline()
# data_table.add_row(["header 1", "header 2", "header 3"])
# data_table.add_hline()
# data_table.end_table_header()
# data_table.add_hline()
# data_table.add_row((MultiColumn(3, align='r',data='Continued on Next Page'),))
# data_table.add_hline()
# data_table.end_table_footer()
# data_table.add_hline()
# data_table.add_row((MultiColumn(3, align='r',data='Not Continued on Next Page'),))
# data_table.add_hline()
# data_table.end_table_last_footer()
#
# row = ["Content1", "9", "Longer String"]
# for i in range(3):
# data_table.add_row(row)
#
# doc.generate_pdf('ejemplo', clean_tex=False)
#
# doc.generate_pdf('synteticos', clean_tex=False)
#==============================================================================
# DATASETS REALES
#==============================================================================
#==============================================================================
# DATASETS SINTETICOS
#==============================================================================
#TXUS
#name_data='txus'
# Synthetic stream datasets and drift profiles to evaluate.
datasets=['sine','rt','mixed','sea','stagger']#['noaa']#['gmsc']#['poker']
tipos=['abrupto','gradual']#['real']
#noise=0.0
#==============================================================================
# VARIABLES
#==============================================================================
#CA
# CURIE (cellular automaton detector) settings.
bins_margin=0.001
mutation_period=10#5,10,20,50
num_mutantneighs_fordetection=2#2-synthetics,4-real
preparatory_size=50#50. Para real=500
sliding_window_size=preparatory_size#50
radius=2#2
#row_ref=2
#column_ref=3
#==============================================================================
# MAIN
#==============================================================================
# Ignore warnings
warnings.simplefilter("ignore")
path_saving_results='//home//txuslopez//Insync//<EMAIL>//Google Drive//Dropbox//jlopezlobo//Publicaciones//ECML_2021//Results//F2//'
# Top-level result accumulators, nested as [dataset][tipo][learner][detector].
DAT_SCORES=[]
DAT_TIMES=[]
DAT_RAMS=[]
DAT_DETECTIONS=[]
# Main experiment: for every dataset x drift-type x learner x detector
# combination, run a test-then-train pass collecting prequential accuracy,
# time, RAM and detected drift positions.
for dats in datasets:

    TIPO_SCORES=[]
    TIPO_TIMES=[]
    TIPO_RAMS=[]
    TIPO_DETECTIONS=[]

    for tipo in tipos:

        # Per-dataset configuration: drift function sequence, column names,
        # source CSV name and per-feature bin count for CURIE's grid.
        if dats=='sine':
            functions_order=[3,2,1,0]
            functions_name_file=[3,2,1,0]
            columns=['X1','X2','class']
            file_name=str(dats)+'_'+str(functions_name_file[0])+str(functions_name_file[1])+str(functions_name_file[2])+str(functions_name_file[3])+'_'+str(tipo)
            n_bins=20# bins per feature
        elif dats=='rt':
            functions_order=[2563,7896,9856,8873]
            functions_name_file=[2563,7896,9856,8873]
            columns=['X1','X2','class']
            file_name=str(dats)+'_'+str(functions_name_file[0])+str(functions_name_file[1])+str(functions_name_file[2])+str(functions_name_file[3])+'_'+str(tipo)
            n_bins=20# bins per feature
        elif dats=='mixed':
            functions_order=[1,0,1,0]
            functions_name_file=[1,0,1,0]
            columns=['X1','X2','X3','X4','class']
            file_name=str(dats)+'_'+str(functions_name_file[0])+str(functions_name_file[1])+str(functions_name_file[2])+str(functions_name_file[3])+'_'+str(tipo)
            n_bins=10# bins per feature
        elif dats=='sea':
            functions_order=[3,2,1,0]
            functions_name_file=[3,2,1,0]
            columns=['X1','X2','X3','class']
            noise=0.2#0.0,0.2
            file_name=str(dats)+'_'+str(functions_name_file[0])+str(functions_name_file[1])+str(functions_name_file[2])+str(functions_name_file[3])+'_'+str(tipo)+'_noise_'+str(noise)
            n_bins=10# bins per feature
        elif dats=='stagger':
            # NOTE(review): no file_name is set for 'stagger' — the value
            # left over from the previous dataset is reused below; verify.
            functions_order=[2,1,0,2]
            functions_name_file=[2,1,0,2]
            columns=['X1','X2','X3','class']
            n_bins=10# bins per feature
        # elif dats=='noaa':
        #     columns=['X1','X2','X3','X4','X5','X6','X7','X8','class']
        #     n_bins=3#3#divisiones por feature
        # elif dats=='gmsc':
        #     columns=['X1','X2','X3','X4','X5','X6','X7','X8','X9','X10','class']
        #     n_bins=3#3#divisiones por feature
        # elif dats=='poker':
        #     columns=['X1','X2','X3','X4','X5','X6','X7','X8','X9','X10','class']
        #     n_bins=3#3#divisiones por feature

        # Known drift positions and drift width for the evaluation.
        # NOTE(review): lengt_concept is only set in the 'abrupto' branch
        # but is also used when scoring 'gradual' runs later — confirm.
        if tipo=='gradual':
            drift_positions=[9500,20000,30500]
            anch=1000
        elif tipo=='abrupto':
            drift_positions=[10000,20000,30000]
            lengt_concept=9500
            anch=1

        # if dats=='noaa':
        #     path='/home/txuslopez/Insync/<EMAIL>/Google Drive/Dropbox/jlopezlobo/PY/multiflow_txus/scikit-multiflow-master/src/skmultiflow/data/datasets/weather.csv'
        #     raw_data= pd.read_csv(path, sep=',',header=0)
        #
        #     x = raw_data.values
        #     min_max_scaler = preprocessing.MinMaxScaler()
        #     x_scaled = min_max_scaler.fit_transform(x)
        #     raw_data = pd.DataFrame(x_scaled)
        #
        # elif dats=='gmsc':
        #     path='/home/txuslopez/Insync/<EMAIL>/Google Drive/Dropbox/jlopezlobo/Data sets/Non stationary environments/GMSC/cs-training_Amazon_def.csv'
        #     raw_data = pd.read_csv(path, sep=',', header=0)
        #
        #     raw_data = raw_data.drop('Unnamed: 0', 1)#Quitamos la primera columna
        #     raw_data=raw_data.dropna(how='any')#Se quitan las filas con Nan
        #     raw_data=raw_data[0:20000]#Limitar datos a 20k samples
        #     raw_data.columns=['RevolvingUtilizationOfUnsecuredLines', 'age',
        #            'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio', 'MonthlyIncome',
        #            'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate',
        #            'NumberRealEstateLoansOrLines', 'NumberOfTime60-89DaysPastDueNotWorse',
        #            'NumberOfDependents', 'class']
        #
        #
        #     x = raw_data.values
        #     min_max_scaler = preprocessing.MinMaxScaler()
        #     x_scaled = min_max_scaler.fit_transform(x)
        #     raw_data = pd.DataFrame(x_scaled)
        #
        # elif dats=='poker':
        #     path='/home/txuslopez/Insync/<EMAIL>/Google Drive/Dropbox/jlopezlobo/Data sets/Non stationary environments/Poker_hand/norm.csv'
        #     raw_data = pd.read_csv(path, sep=',', header=None)
        #     raw_data=raw_data.iloc[np.random.permutation(len(raw_data))]
        #     raw_data=raw_data.iloc[:20000]
        #
        # else:
        path='//home//txuslopez//Insync//<EMAIL>//Google Drive//Dropbox//jlopezlobo//Publicaciones//ECML_2021//Data//F2//'
        # NOTE(review): header=True is not a valid pandas header argument
        # (use header=0) — confirm against the pandas version in use.
        raw_data= pd.read_csv(path +file_name+'.csv', sep=',',header=True)
        #print(path +file_name+'.csv')

        # Split the raw frame into features (XT) and labels (YT).
        if dats=='sine' or dats=='rt':
            caso=raw_data[raw_data.columns[0:3]]
            XT=caso.iloc[:,0:2]
            YT=caso.iloc[:,2]
        elif dats=='mixed':
            caso=raw_data[raw_data.columns[0:5]]
            XT=caso.iloc[:,0:4]
            YT=caso.iloc[:,4]
        elif dats=='sea':
            caso=raw_data[raw_data.columns[0:4]]
            XT=caso.iloc[:,0:3]
            YT=caso.iloc[:,3]
        elif dats=='stagger':
            caso=raw_data[raw_data.columns[0:4]]
            XT=caso.iloc[:,0:3]
            YT=caso.iloc[:,3]
        elif dats=='noaa':
            caso=raw_data[raw_data.columns[0:9]]
            XT=caso.iloc[:,0:8]
            YT=caso.iloc[:,8]
        elif dats=='gmsc':
            caso=raw_data[raw_data.columns[0:11]]
            XT=caso.iloc[:,0:10]
            YT=caso.iloc[:,10]
        elif dats=='poker':
            caso=raw_data[raw_data.columns[0:11]]
            XT=caso.iloc[:,0:10]
            YT=caso.iloc[:,10]

        caso.columns=columns
        columns=columns[:-1]# drop 'class' from here on
        n_feats=len(columns)

        #Data
        features=pd.DataFrame(XT)
        labels=pd.DataFrame(YT)
        features.columns=columns
        labels.columns=['class']

        n_samples=XT.shape[0]-preparatory_size

        ######################## CURIE ###################
        lst_dim=[n_bins]*n_feats
        curie=CA_VonNeumann_Classifier(bins=[],bins_margin=bins_margin,dimensions=lst_dim, cells=empties(lst_dim))
        limits_automata=list(np.zeros(1))
        #ca_names=['CURIE']
        # Per-cell timestamp of the last mutation (for drift detection).
        mutants_time=empty_mutant(curie.dimensions)

        ######################## LEARNERS ###################
        learners_ref=[HoeffdingTree(),KNN(),NaiveBayes()]

        ######################## DETECTORS ###################
        detectores_ref=[DDM(),EDDM(),ADWIN(),PageHinkley(),curie]

        n_pasos=len(datasets)*len(tipos)*len(learners_ref)*len(detectores_ref)

        SCORES_LER=[]
        TIMES_LER=[]
        RAMS_LER=[]
        DETECTIONS_LER=[]

        for ler in range(len(learners_ref)):

            learner=deepcopy(learners_ref[ler])

            SCORES_DET=[]
            TIMES_DET=[]
            RAMS_DET=[]
            DETECTIONS_DET=[]

            for det in range(len(detectores_ref)):

                scores_ler=[]
                time_ler=0
                ram_ler=0
                f_ler=1
                detections=[]
                detector=deepcopy(detectores_ref[det])

                for s in range(features.shape[0]):

                    sample=np.array(features.iloc[s,:]).reshape(1, -1)
                    lab=np.array(labels.iloc[s,:])

                    # NOTE(review): s == preparatory_size-1 matches no
                    # branch below, so no score is appended for that step.
                    if s<preparatory_size-1:
                        scores_ler.append(np.nan)
                        # time_ler.append(np.nan)
                        # ram_ler.append(np.nan)
                    elif s==preparatory_size:
                        # print ('PREPARATORY PROCESS ...')
                        X_init=features.iloc[0:preparatory_size,:]
                        y_init=labels.iloc[0:preparatory_size,:]

                        #Hyperparameter tuning for learners
                        tuned_learner,tuned_params=hyperparametertuning_classifiers(learner,X_init,y_init,sliding_window_size)
                        learner=deepcopy(tuned_learner)

                        start_time = timer()#time.clock()
                        start_ram = psutil.virtual_memory().used#measured in bytes
                        learner.fit(X_init.as_matrix(), y_init.as_matrix().ravel())

                        #CURIE
                        if detector.__class__.__name__=='CA_VonNeumann_Classifier':
                            detector,lim_automat=detector.fit(X_init.as_matrix(), y_init.as_matrix().ravel())

                        process_time=timer()-start_time
                        process_ram=psutil.virtual_memory().used-start_ram
                        if process_ram<0:
                            process_ram=0

                        scores_ler.append(np.nan)
                        time_ler+=process_time
                        ram_ler+=process_ram
                    elif s>preparatory_size:
                        # print ('TEST-THEN-TRAIN PROCESS ...')

                        #Testing
                        start_time = timer()#time.clock()
                        start_ram = psutil.virtual_memory().used#measured in bytes
                        pred=learner.predict(sample)
                        process_time=timer()-start_time
                        process_ram=psutil.virtual_memory().used-start_ram
                        if process_ram<0:
                            process_ram=0
                        time_ler+=process_time
                        ram_ler+=process_ram

                        #Scoring
                        if str(scores_ler[-1])=='nan':
                            # First real score after the warm-up NaNs.
                            if pred==lab:
                                scores_ler.append(1.0)
                            else:
                                scores_ler.append(0.0)
                        else:
                            preqAcc=prequential_acc(pred,lab,scores_ler,s,f_ler)
                            scores_ler.append(preqAcc)

                        #Training
                        start_time = timer()#time.clock()
                        start_ram = psutil.virtual_memory().used#measured in bytes
                        learner.partial_fit(sample,lab)
                        process_time=timer()-start_time
                        process_ram=psutil.virtual_memory().used-start_ram
                        if process_ram<0:
                            process_ram=0
                        time_ler+=process_time
                        ram_ler+=process_ram

                        ############
                        #DETECTION
                        ############
                        change=False
                        start_time = timer()#time.clock()
                        start_ram = psutil.virtual_memory().used#measured in bytes

                        if detector.__class__.__name__=='CA_VonNeumann_Classifier':
                            #Train
                            detector,lim_automat,muta,indxs=detector.partial_fit(sample,lab,s,lim_automat)

                            if muta:
                                # Record the mutation timestamp at the
                                # mutated cell (index depth = n features).
                                if dats=='sine' or dats=='rt':
                                    mutants_time[indxs[0]][indxs[1]][0]=s
                                elif dats=='mixed':
                                    mutants_time[indxs[0]][indxs[1]][indxs[2]][indxs[3]][0]=s
                                elif dats=='sea' or dats=='stagger':
                                    mutants_time[indxs[0]][indxs[1]][indxs[2]][0]=s

                                # Look for a drift: count recent mutations
                                # in the von Neumann neighbourhood.
                                vecinos_mutantes_drift=get_neighbourhood(mutants_time, indxs, radius)
                                num_mutantes_drift=0
                                ms=[]
                                for v in range(len(vecinos_mutantes_drift)):
                                    if vecinos_mutantes_drift[v][0]>s-mutation_period and vecinos_mutantes_drift[v][0]<=s:
                                        num_mutantes_drift+=1
                                        ms.append(vecinos_mutantes_drift[v][0])

                                # Drift confirmed: rebuild CURIE on the
                                # latest sliding window.
                                if num_mutantes_drift>=num_mutantneighs_fordetection:
                                    change=True
                                    # Adaptation
                                    mutants_time=empty_mutant(detector.dimensions)
                                    X_init=features.iloc[s-preparatory_size:s,:]
                                    y_init=labels.iloc[s-preparatory_size:s,:]
                                    detector=deepcopy(curie)
                                    detector,lim_automat=detector.fit(X_init.as_matrix(), y_init.as_matrix().ravel())
                        else:
                            # Classic detectors consume the 0/1 error signal.
                            if pred==lab:
                                detector.add_element(1)
                            else:
                                detector.add_element(0)
                            if detector.detected_change():
                                change=True

                        if change:
                            ############
                            #ADAPTATION
                            ############
                            f_ler=s
                            detections.append(s)
                            # Reset the detector and retrain the learner on
                            # the latest sliding window.
                            detector=deepcopy(detectores_ref[det])
                            X_init=features.iloc[s-preparatory_size:s,:]
                            y_init=labels.iloc[s-preparatory_size:s,:]
                            learner=deepcopy(learners_ref[ler])
                            learner.set_params(**tuned_params)
                            learner.fit(X_init.as_matrix(), y_init.as_matrix().ravel())

                        process_time=timer()-start_time
                        process_ram=psutil.virtual_memory().used-start_ram
                        if process_ram<0:
                            process_ram=0
                        time_ler+=process_time
                        ram_ler+=process_ram

                SCORES_DET.append(scores_ler)
                TIMES_DET.append(time_ler)
                RAMS_DET.append(ram_ler)
                DETECTIONS_DET.append(detections)

            SCORES_LER.append(SCORES_DET)
            TIMES_LER.append(TIMES_DET)
            RAMS_LER.append(RAMS_DET)
            DETECTIONS_LER.append(DETECTIONS_DET)

        TIPO_SCORES.append(SCORES_LER)
        TIPO_TIMES.append(TIMES_LER)
        TIPO_RAMS.append(RAMS_LER)
        TIPO_DETECTIONS.append(DETECTIONS_LER)

    DAT_SCORES.append(TIPO_SCORES)
    DAT_TIMES.append(TIPO_TIMES)
    DAT_RAMS.append(TIPO_RAMS)
    DAT_DETECTIONS.append(TIPO_DETECTIONS)
######################## SAVING ########################
# Persist the accumulated results.
# NOTE(review): `dats` here is the leftover loop variable (last dataset),
# so all results are written under the last dataset's name — verify intent.
output = open(path_saving_results+'DAT_SCORES_'+dats+'.pkl', 'wb')
pickle.dump(DAT_SCORES, output)
output.close()

output = open(path_saving_results+'DAT_TIMES_'+dats+'.pkl', 'wb')
pickle.dump(DAT_TIMES, output)
output.close()

output = open(path_saving_results+'DAT_RAMS_'+dats+'.pkl', 'wb')
pickle.dump(DAT_RAMS, output)
output.close()

output = open(path_saving_results+'DAT_DETECTIONS_'+dats+'.pkl', 'wb')
pickle.dump(DAT_DETECTIONS, output)
output.close()
######################## RESUMEN ########################
# Print a per-combination summary of the saved results.
for ds in range(len(datasets)):

    print('######## DATASET: ',datasets[ds])

    ######################## LOADING RESULTS AND METRICS ########################
    # NOTE(review): file names use `dats` (last loop value), not
    # datasets[ds] — every iteration reloads the same pickles; verify.
    fil = open(path_saving_results+'DAT_SCORES_'+dats+'.pkl','rb')
    DAT_SCORES = pickle.load(fil)
    fil.close()

    fil = open(path_saving_results+'DAT_TIMES_'+dats+'.pkl','rb')
    DAT_TIMES = pickle.load(fil)
    fil.close()

    fil = open(path_saving_results+'DAT_RAMS_'+dats+'.pkl','rb')
    DAT_RAMS = pickle.load(fil)
    fil.close()

    fil = open(path_saving_results+'DAT_DETECTIONS_'+dats+'.pkl','rb')
    DAT_DETECTIONS = pickle.load(fil)
    fil.close()

    dat_score=DAT_SCORES[ds]
    dat_time=DAT_TIMES[ds]
    dat_ram=DAT_RAMS[ds]
    dat_detections=DAT_DETECTIONS[ds]

    for tip in range(len(tipos)):

        print('###### TIPO: ',tipos[tip])

        tipo_score=dat_score[tip]
        tipo_times=dat_time[tip]
        tipo_rams=dat_ram[tip]
        tipo_detections=dat_detections[tip]

        for l in range(len(learners_ref)):

            print('#### LEARNER: ',learners_ref[l].__class__.__name__)

            scores_ler=tipo_score[l]
            times_ler=tipo_times[l]
            rams_ler=tipo_rams[l]
            detections_ler=tipo_detections[l]

            for d in range(len(detectores_ref)):

                print('## DETECTOR: ',detectores_ref[d].__class__.__name__)

                scores_det=scores_ler[d]
                times_det=times_ler[d]
                rams_det=rams_ler[d]
                detections_det=detections_ler[d]

                print('')
                print('-MEAN PREQ.ACC: ',np.nanmean(scores_det))
                print('-TIME: ',np.nanmean(times_det))
                print('-RAM: ',np.nanmean(rams_det))
                print('-RAM-Hour: ',(np.nanmean(rams_det)/1073741824)*(np.nanmean(times_det)/360))#bytes/secs to gigas/hours)
                print('-DETECTIONS: ',detections_det)
                print('')
with open(path_saving_results+'temp.csv', mode='w') as results_synths:
results_synths_writer = csv.writer(results_synths, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
results_synths_writer.writerow(['Dataset','Type','Learner','Detector','pACC','RAM-Hours','TP','FP','TN','FN','UD','Precision','Recall','MCC'])
# try:
friedman_DDM_preqacc=[]
friedman_DDM_ramhours=[]
friedman_DDM_ud=[]
friedman_DDM_mcc=[]
friedman_EDDM_preqacc=[]
friedman_EDDM_ramhours=[]
friedman_EDDM_ud=[]
friedman_EDDM_mcc=[]
friedman_ADWIN_preqacc=[]
friedman_ADWIN_ramhours=[]
friedman_ADWIN_ud=[]
friedman_ADWIN_mcc=[]
friedman_PH_preqacc=[]
friedman_PH_ramhours=[]
friedman_PH_ud=[]
friedman_PH_mcc=[]
friedman_CURIE_preqacc=[]
friedman_CURIE_ramhours=[]
friedman_CURIE_ud=[]
friedman_CURIE_mcc=[]
# Walk the nested results structure (dataset -> drift type -> learner ->
# detector) and emit one CSV row per combination.
for ds in range(len(datasets)):
    dat_score=DAT_SCORES[ds]
    dat_time=DAT_TIMES[ds]
    dat_ram=DAT_RAMS[ds]
    dat_detections=DAT_DETECTIONS[ds]
    for tip in range(len(tipos)):
        tipo_score=dat_score[tip]
        tipo_times=dat_time[tip]
        tipo_rams=dat_ram[tip]
        tipo_detections=dat_detections[tip]
        for l in range(len(learners_ref)):
            scores_ler=tipo_score[l]
            times_ler=tipo_times[l]
            rams_ler=tipo_rams[l]
            detections_ler=tipo_detections[l]
            for det in range(len(detectores_ref)):
                scores_det=scores_ler[det]
                times_det=times_ler[det]
                rams_det=rams_ler[det]
                detections_det=detections_ler[det]
                # Synthetic streams ('abrupto'/'gradual') have known drift
                # positions, so detections can be scored as TP/FP/TN/FN.
                if tipos[tip]=='abrupto' or tipos[tip]=='gradual':
                    # A detection counts as a TP only when it fires within
                    # detection_margin*lengt_concept of the true drift position;
                    # gradual drifts get a wider margin than abrupt ones.
                    if tipos[tip]=='abrupto':
                        detection_margin=0.02
                    elif tipos[tip]=='gradual':
                        detection_margin=0.1
                    lear_tp=0
                    lear_fp=0
                    lear_tn=0
                    lear_fn=0
                    lear_mcc=0
                    lear_udd=0
                    cont_udd=0  # number of TPs contributing to the delay average
                    for d in detections_det:
                        #Checking BEFORE drift 1
                        if d<drift_positions[0]:
                            lear_fp+=1
                        #Checking drift 1
                        elif d>drift_positions[0] and d<drift_positions[1] and d-drift_positions[0]<=detection_margin*lengt_concept:
                            lear_tp+=1
                            lear_udd+=(d-drift_positions[0])
                            cont_udd+=1
                        elif d>drift_positions[0] and d<drift_positions[1] and d-drift_positions[0]>detection_margin*lengt_concept:
                            lear_fp+=1
                        #Checking drift 2
                        elif d>drift_positions[1] and d<drift_positions[2] and d-drift_positions[1]<=detection_margin*lengt_concept:
                            lear_tp+=1
                            lear_udd+=(d-drift_positions[1])
                            cont_udd+=1
                        elif d>drift_positions[1] and d<drift_positions[2] and d-drift_positions[1]>detection_margin*lengt_concept:
                            lear_fp+=1
                        #Checking drift 3
                        elif d>drift_positions[2] and d-drift_positions[2]<=detection_margin*lengt_concept:
                            lear_tp+=1
                            lear_udd+=(d-drift_positions[2])
                            cont_udd+=1
                        elif d>drift_positions[2] and d-drift_positions[2]>detection_margin*lengt_concept:
                            lear_fp+=1
                    # TN: samples that produced no detection; FN: missed drifts
                    # (clamped at 0 in case more TPs were counted than drifts).
                    lear_tn=n_samples-len(detections_det)
                    lear_fn=len(drift_positions)-lear_tp
                    if lear_fn<0:
                        lear_fn=0
                    # Mean detection delay (UD); infinite when no drift was caught.
                    if cont_udd>0:
                        lear_udd=np.round(lear_udd/cont_udd,2)
                    else:
                        lear_udd=np.inf
                    # Precision / recall / MCC with explicit zero-denominator guards.
                    if (lear_tp+lear_fp)==0:
                        lear_precision=0.0
                    else:
                        lear_precision=lear_tp/(lear_tp+lear_fp)
                    if (lear_tp+lear_fn)==0:
                        lear_recall=0.0
                    else:
                        lear_recall=lear_tp/(lear_tp+lear_fn)
                    if np.sqrt((lear_tp+lear_fp)*(lear_tp+lear_fn)*(lear_tn+lear_fp)*(lear_tn+lear_fn))==0:
                        lear_mcc=0.0
                    else:
                        lear_mcc=((lear_tp*lear_tn)-(lear_fp*lear_fn))/np.sqrt((lear_tp+lear_fp)*(lear_tp+lear_fn)*(lear_tn+lear_fp)*(lear_tn+lear_fn))
                    lear_ram_hours=(rams_det/1073741824)*(times_det/360)#bytes/secs to gigas/hours
                    results_synths_writer.writerow([datasets[ds],tipos[tip],learners_ref[l].__class__.__name__,detectores_ref[det].__class__.__name__,np.round(np.nanmean(scores_det),2),np.round(lear_ram_hours,6),lear_tp,lear_fp,lear_tn,lear_fn,np.round(lear_udd,2),np.round(lear_precision,2),np.round(lear_recall,2),np.round(lear_mcc,2)])
                    ###### FRIEDMAN
                    # Accumulate the per-detector metrics consumed by the
                    # Friedman significance tests after this loop.
                    if detectores_ref[det].__class__.__name__=='DDM':
                        friedman_DDM_preqacc.append(np.round(np.nanmean(scores_det),2))
                        friedman_DDM_ramhours.append(np.round(lear_ram_hours,6))
                        friedman_DDM_ud.append(np.round(lear_udd,2))
                        friedman_DDM_mcc.append(np.round(lear_mcc,2))
                    elif detectores_ref[det].__class__.__name__=='EDDM':
                        friedman_EDDM_preqacc.append(np.round(np.nanmean(scores_det),2))
                        friedman_EDDM_ramhours.append(np.round(lear_ram_hours,6))
                        friedman_EDDM_ud.append(np.round(lear_udd,2))
                        friedman_EDDM_mcc.append(np.round(lear_mcc,2))
                    elif detectores_ref[det].__class__.__name__=='ADWIN':
                        friedman_ADWIN_preqacc.append(np.round(np.nanmean(scores_det),2))
                        friedman_ADWIN_ramhours.append(np.round(lear_ram_hours,6))
                        friedman_ADWIN_ud.append(np.round(lear_udd,2))
                        friedman_ADWIN_mcc.append(np.round(lear_mcc,2))
                    elif detectores_ref[det].__class__.__name__=='PageHinkley':
                        friedman_PH_preqacc.append(np.round(np.nanmean(scores_det),2))
                        friedman_PH_ramhours.append(np.round(lear_ram_hours,6))
                        friedman_PH_ud.append(np.round(lear_udd,2))
                        friedman_PH_mcc.append(np.round(lear_mcc,2))
                    elif detectores_ref[det].__class__.__name__=='CA_VonNeumann_Classifier':
                        friedman_CURIE_preqacc.append(np.round(np.nanmean(scores_det),2))
                        friedman_CURIE_ramhours.append(np.round(lear_ram_hours,6))
                        friedman_CURIE_ud.append(np.round(lear_udd,2))
                        friedman_CURIE_mcc.append(np.round(lear_mcc,2))
                # 'real' streams: no ground-truth drift positions, so the
                # detection-quality columns are written as np.inf placeholders.
                elif tipos[tip]=='real':
                    lear_ram_hours=(rams_det/1073741824)*(times_det/360)#bytes/secs to gigas/hours
                    results_synths_writer.writerow([datasets[ds],tipos[tip],learners_ref[l].__class__.__name__,detectores_ref[det].__class__.__name__,np.round(np.nanmean(scores_det),2),np.round(lear_ram_hours,6),np.inf,np.inf,np.inf,np.inf,np.inf,np.inf,np.inf,np.inf])
# except Exception as e:
#     print('En ',ds,'_',tip,'_',l,'_',det)
#     print (e.__doc__)
#     print (e.message)
######################## FRIEDMAN TESTS ########################
# Friedman omnibus test per metric: is at least one of the five detectors
# ranked significantly differently from the others across the datasets?
from scipy.stats import friedmanchisquare
alpha = 0.05

#FOR PREQ.ACC
stat_preqacc, p_preqacc = friedmanchisquare(
    friedman_DDM_preqacc, friedman_EDDM_preqacc, friedman_ADWIN_preqacc,
    friedman_PH_preqacc, friedman_CURIE_preqacc)
print('---- PREQ. ACC ----')
print('Statistics={0:.3f}, p={1:.3f}'.format(stat_preqacc, p_preqacc))
if p_preqacc <= alpha:
    print('Different distributions (reject H0)')
else:
    print('Same distributions (fail to reject H0)')
print('')

#FOR RAM-HOURS
stat_ramhours, p_ramhours = friedmanchisquare(
    friedman_DDM_ramhours, friedman_EDDM_ramhours, friedman_ADWIN_ramhours,
    friedman_PH_ramhours, friedman_CURIE_ramhours)
print('---- RAM HOURS ----')
print('Statistics={0:.3f}, p={1:.3f}'.format(stat_ramhours, p_ramhours))
if p_ramhours <= alpha:
    print('Different distributions (reject H0)')
else:
    print('Same distributions (fail to reject H0)')
print('')

#FOR UD
stat_ud, p_ud = friedmanchisquare(
    friedman_DDM_ud, friedman_EDDM_ud, friedman_ADWIN_ud,
    friedman_PH_ud, friedman_CURIE_ud)
print('---- UD ----')
print('Statistics={0:.3f}, p={1:.3f}'.format(stat_ud, p_ud))
if p_ud <= alpha:
    print('Different distributions (reject H0)')
else:
    print('Same distributions (fail to reject H0)')
print('')

#FOR MCC
stat_mcc, p_mcc = friedmanchisquare(
    friedman_DDM_mcc, friedman_EDDM_mcc, friedman_ADWIN_mcc,
    friedman_PH_mcc, friedman_CURIE_mcc)
print('---- MCC ----')
print('Statistics={0:.3f}, p={1:.3f}'.format(stat_mcc, p_mcc))
if p_mcc <= alpha:
    print('Different distributions (reject H0)')
else:
    print('Same distributions (fail to reject H0)')
print('')
######################## NEMENYI TESTS AND GRAPHICS ########################
# Post-hoc Nemenyi analysis: draw critical-difference (CD) diagrams of the
# five detectors' average ranks over the 20 datasets, one diagram per metric.
# The average ranks are hard-coded below rather than computed here.
import Orange
names = ["DDM", "EDDM", "ADWIN", "PH","CURIE" ]
print('---------PREQ.ACC:')
avranks_preqacc = [2.72,4.00,2.18,3.24,2.81]#Taken from the res.xlsx spreadsheet
cd_preqacc = Orange.evaluation.compute_CD(avranks_preqacc, 20) #tested on 20 datasets
Orange.evaluation.graph_ranks(avranks_preqacc, names, cd=cd_preqacc, width=6, textspace=1.5)
plt.show()
print('CD=',cd_preqacc)
print('')
print('---------RAM-HOURS:')
avranks_ramhours = [3.31,2.56,3.00,2.32,3.82]#Taken from the res.xlsx spreadsheet
cd_ramhours = Orange.evaluation.compute_CD(avranks_ramhours, 20) #tested on 20 datasets
Orange.evaluation.graph_ranks(avranks_ramhours, names, cd=cd_ramhours, width=6, textspace=1.5)
plt.show()
print('CD=',cd_ramhours)
print('')
print('---------UD:')
avranks_ud = [3.81,2.85,2.40,3.54,1.90]#Taken from the res.xlsx spreadsheet
cd_ud = Orange.evaluation.compute_CD(avranks_ud, 20) #tested on 20 datasets
Orange.evaluation.graph_ranks(avranks_ud, names, cd=cd_ud, width=6, textspace=1.5)
plt.show()
print('CD=',cd_ud)
print('')
print('---------MCC:')
avranks_mcc = [3.93,3.22,2.53,3.56,1.76]#Taken from the res.xlsx spreadsheet
cd_mcc = Orange.evaluation.compute_CD(avranks_mcc, 20) #tested on 20 datasets
Orange.evaluation.graph_ranks(avranks_mcc, names, cd=cd_mcc, width=6, textspace=1.5)
# NOTE(review): unlike the sections above, CD is printed before plt.show()
# here and there is no trailing print('') -- looks unintentional, but the
# original ordering is preserved as-is.
print('CD=',cd_mcc)
plt.show()
| [
"sklearn.model_selection.GridSearchCV",
"numpy.sqrt",
"pandas.read_csv",
"sklearn.neighbors.KNeighborsClassifier",
"psutil.virtual_memory",
"scipy.stats.friedmanchisquare",
"numpy.array",
"numpy.nanmean",
"numpy.rot90",
"copy.deepcopy",
"skmultiflow.drift_detection.page_hinkley.PageHinkley",
"... | [((1422, 1449), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1428, 1449), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1480), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (1456, 1480), True, 'import matplotlib.pyplot as plt\n'), ((1481, 1519), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 12}"], {}), "({'font.size': 12})\n", (1500, 1519), True, 'import matplotlib.pyplot as plt\n'), ((16350, 16381), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (16371, 16381), False, 'import warnings\n'), ((45703, 45838), 'scipy.stats.friedmanchisquare', 'friedmanchisquare', (['friedman_DDM_preqacc', 'friedman_EDDM_preqacc', 'friedman_ADWIN_preqacc', 'friedman_PH_preqacc', 'friedman_CURIE_preqacc'], {}), '(friedman_DDM_preqacc, friedman_EDDM_preqacc,\n friedman_ADWIN_preqacc, friedman_PH_preqacc, friedman_CURIE_preqacc)\n', (45720, 45838), False, 'from scipy.stats import friedmanchisquare\n'), ((46097, 46237), 'scipy.stats.friedmanchisquare', 'friedmanchisquare', (['friedman_DDM_ramhours', 'friedman_EDDM_ramhours', 'friedman_ADWIN_ramhours', 'friedman_PH_ramhours', 'friedman_CURIE_ramhours'], {}), '(friedman_DDM_ramhours, friedman_EDDM_ramhours,\n friedman_ADWIN_ramhours, friedman_PH_ramhours, friedman_CURIE_ramhours)\n', (46114, 46237), False, 'from scipy.stats import friedmanchisquare\n'), ((46480, 46590), 'scipy.stats.friedmanchisquare', 'friedmanchisquare', (['friedman_DDM_ud', 'friedman_EDDM_ud', 'friedman_ADWIN_ud', 'friedman_PH_ud', 'friedman_CURIE_ud'], {}), '(friedman_DDM_ud, friedman_EDDM_ud, friedman_ADWIN_ud,\n friedman_PH_ud, friedman_CURIE_ud)\n', (46497, 46590), False, 'from scipy.stats import friedmanchisquare\n'), ((46809, 46924), 'scipy.stats.friedmanchisquare', 'friedmanchisquare', (['friedman_DDM_mcc', 'friedman_EDDM_mcc', 'friedman_ADWIN_mcc', 
'friedman_PH_mcc', 'friedman_CURIE_mcc'], {}), '(friedman_DDM_mcc, friedman_EDDM_mcc, friedman_ADWIN_mcc,\n friedman_PH_mcc, friedman_CURIE_mcc)\n', (46826, 46924), False, 'from scipy.stats import friedmanchisquare\n'), ((47383, 47432), 'Orange.evaluation.compute_CD', 'Orange.evaluation.compute_CD', (['avranks_preqacc', '(20)'], {}), '(avranks_preqacc, 20)\n', (47411, 47432), False, 'import Orange\n'), ((47456, 47553), 'Orange.evaluation.graph_ranks', 'Orange.evaluation.graph_ranks', (['avranks_preqacc', 'names'], {'cd': 'cd_preqacc', 'width': '(6)', 'textspace': '(1.5)'}), '(avranks_preqacc, names, cd=cd_preqacc, width=\n 6, textspace=1.5)\n', (47485, 47553), False, 'import Orange\n'), ((47549, 47559), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (47557, 47559), True, 'import matplotlib.pyplot as plt\n'), ((47711, 47761), 'Orange.evaluation.compute_CD', 'Orange.evaluation.compute_CD', (['avranks_ramhours', '(20)'], {}), '(avranks_ramhours, 20)\n', (47739, 47761), False, 'import Orange\n'), ((47785, 47883), 'Orange.evaluation.graph_ranks', 'Orange.evaluation.graph_ranks', (['avranks_ramhours', 'names'], {'cd': 'cd_ramhours', 'width': '(6)', 'textspace': '(1.5)'}), '(avranks_ramhours, names, cd=cd_ramhours,\n width=6, textspace=1.5)\n', (47814, 47883), False, 'import Orange\n'), ((47880, 47890), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (47888, 47890), True, 'import matplotlib.pyplot as plt\n'), ((48024, 48068), 'Orange.evaluation.compute_CD', 'Orange.evaluation.compute_CD', (['avranks_ud', '(20)'], {}), '(avranks_ud, 20)\n', (48052, 48068), False, 'import Orange\n'), ((48092, 48178), 'Orange.evaluation.graph_ranks', 'Orange.evaluation.graph_ranks', (['avranks_ud', 'names'], {'cd': 'cd_ud', 'width': '(6)', 'textspace': '(1.5)'}), '(avranks_ud, names, cd=cd_ud, width=6,\n textspace=1.5)\n', (48121, 48178), False, 'import Orange\n'), ((48175, 48185), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (48183, 48185), True, 'import 
matplotlib.pyplot as plt\n'), ((48316, 48361), 'Orange.evaluation.compute_CD', 'Orange.evaluation.compute_CD', (['avranks_mcc', '(20)'], {}), '(avranks_mcc, 20)\n', (48344, 48361), False, 'import Orange\n'), ((48385, 48473), 'Orange.evaluation.graph_ranks', 'Orange.evaluation.graph_ranks', (['avranks_mcc', 'names'], {'cd': 'cd_mcc', 'width': '(6)', 'textspace': '(1.5)'}), '(avranks_mcc, names, cd=cd_mcc, width=6,\n textspace=1.5)\n', (48414, 48473), False, 'import Orange\n'), ((48490, 48500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (48498, 48500), True, 'import matplotlib.pyplot as plt\n'), ((2022, 2040), 'numpy.flip', 'np.flip', (['b'], {'axis': '(0)'}), '(b, axis=0)\n', (2029, 2040), True, 'import numpy as np\n'), ((2261, 2279), 'numpy.flip', 'np.flip', (['b'], {'axis': '(0)'}), '(b, axis=0)\n', (2268, 2279), True, 'import numpy as np\n'), ((3750, 3778), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(30, 15)'}), '(figsize=(30, 15))\n', (3760, 3778), True, 'import matplotlib.pyplot as plt\n'), ((3844, 3864), 'pandas.DataFrame', 'pd.DataFrame', (['buch_X'], {}), '(buch_X)\n', (3856, 3864), True, 'import pandas as pd\n'), ((3911, 3931), 'pandas.DataFrame', 'pd.DataFrame', (['buch_y'], {}), '(buch_y)\n', (3923, 3931), True, 'import pandas as pd\n'), ((3980, 4021), 'pandas.concat', 'pd.concat', (['[buch_pd_X, buch_pd_y]'], {'axis': '(1)'}), '([buch_pd_X, buch_pd_y], axis=1)\n', (3989, 4021), True, 'import pandas as pd\n'), ((6276, 6286), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6284, 6286), True, 'import matplotlib.pyplot as plt\n'), ((8428, 8472), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(size_X, size_Y)'}), '(1, 1, figsize=(size_X, size_Y))\n', (8440, 8472), True, 'import matplotlib.pyplot as plt\n'), ((8479, 8511), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'size': 'font_size'}), '(title, size=font_size)\n', (8488, 8511), True, 'import matplotlib.pyplot as plt\n'), ((8616, 
8634), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (8624, 8634), True, 'import matplotlib.pyplot as plt\n'), ((8965, 8975), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8973, 8975), True, 'import matplotlib.pyplot as plt\n'), ((33898, 33929), 'pickle.dump', 'pickle.dump', (['DAT_SCORES', 'output'], {}), '(DAT_SCORES, output)\n', (33909, 33929), False, 'import pickle\n'), ((34028, 34058), 'pickle.dump', 'pickle.dump', (['DAT_TIMES', 'output'], {}), '(DAT_TIMES, output)\n', (34039, 34058), False, 'import pickle\n'), ((34156, 34185), 'pickle.dump', 'pickle.dump', (['DAT_RAMS', 'output'], {}), '(DAT_RAMS, output)\n', (34167, 34185), False, 'import pickle\n'), ((34289, 34324), 'pickle.dump', 'pickle.dump', (['DAT_DETECTIONS', 'output'], {}), '(DAT_DETECTIONS, output)\n', (34300, 34324), False, 'import pickle\n'), ((34766, 34782), 'pickle.load', 'pickle.load', (['fil'], {}), '(fil)\n', (34777, 34782), False, 'import pickle\n'), ((34886, 34902), 'pickle.load', 'pickle.load', (['fil'], {}), '(fil)\n', (34897, 34902), False, 'import pickle\n'), ((35004, 35020), 'pickle.load', 'pickle.load', (['fil'], {}), '(fil)\n', (35015, 35020), False, 'import pickle\n'), ((35134, 35150), 'pickle.load', 'pickle.load', (['fil'], {}), '(fil)\n', (35145, 35150), False, 'import pickle\n'), ((36884, 36972), 'csv.writer', 'csv.writer', (['results_synths'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(results_synths, delimiter=\',\', quotechar=\'"\', quoting=csv.\n QUOTE_MINIMAL)\n', (36894, 36972), False, 'import csv\n'), ((2092, 2107), 'copy.deepcopy', 'deepcopy', (['empty'], {}), '(empty)\n', (2100, 2107), False, 'from copy import deepcopy\n'), ((2330, 2345), 'copy.deepcopy', 'deepcopy', (['empty'], {}), '(empty)\n', (2338, 2345), False, 'from copy import deepcopy\n'), ((3545, 3561), 'numpy.rot90', 'np.rot90', (['img', '(1)'], {}), '(img, 1)\n', (3553, 3561), True, 'import numpy as np\n'), ((3576, 
3612), 'numpy.array', 'np.array', (['rotated_img'], {'dtype': '"""uint8"""'}), "(rotated_img, dtype='uint8')\n", (3584, 3612), True, 'import numpy as np\n'), ((5025, 5049), 'numpy.flip', 'np.flip', (['mutants_t[0]', '(0)'], {}), '(mutants_t[0], 0)\n', (5032, 5049), True, 'import numpy as np\n'), ((5075, 5105), 'numpy.rot90', 'np.rot90', (['flipped_mutants_t', '(2)'], {}), '(flipped_mutants_t, 2)\n', (5083, 5105), True, 'import numpy as np\n'), ((12152, 12157), 'skmultiflow.lazy.knn.KNN', 'KNN', ([], {}), '()\n', (12155, 12157), False, 'from skmultiflow.lazy.knn import KNN\n'), ((21303, 21363), 'pandas.read_csv', 'pd.read_csv', (["(path + file_name + '.csv')"], {'sep': '""","""', 'header': '(True)'}), "(path + file_name + '.csv', sep=',', header=True)\n", (21314, 21363), True, 'import pandas as pd\n'), ((22668, 22684), 'pandas.DataFrame', 'pd.DataFrame', (['XT'], {}), '(XT)\n', (22680, 22684), True, 'import pandas as pd\n'), ((22700, 22716), 'pandas.DataFrame', 'pd.DataFrame', (['YT'], {}), '(YT)\n', (22712, 22716), True, 'import pandas as pd\n'), ((2200, 2215), 'numpy.array', 'np.array', (['empty'], {}), '(empty)\n', (2208, 2215), True, 'import numpy as np\n'), ((2438, 2453), 'numpy.array', 'np.array', (['empty'], {}), '(empty)\n', (2446, 2453), True, 'import numpy as np\n'), ((5308, 5332), 'numpy.flip', 'np.flip', (['mutants_t[0]', '(0)'], {}), '(mutants_t[0], 0)\n', (5315, 5332), True, 'import numpy as np\n'), ((5358, 5388), 'numpy.rot90', 'np.rot90', (['flipped_mutants_t', '(2)'], {}), '(flipped_mutants_t, 2)\n', (5366, 5388), True, 'import numpy as np\n'), ((5877, 5901), 'numpy.flip', 'np.flip', (['mutants_t[1]', '(0)'], {}), '(mutants_t[1], 0)\n', (5884, 5901), True, 'import numpy as np\n'), ((5927, 5957), 'numpy.rot90', 'np.rot90', (['flipped_mutants_t', '(2)'], {}), '(flipped_mutants_t, 2)\n', (5935, 5957), True, 'import numpy as np\n'), ((12359, 12387), 'numpy.array', 'np.array', (['[25, 75, 150, 300]'], {}), '([25, 75, 150, 300])\n', (12367, 12387), True, 
'import numpy as np\n'), ((12413, 12439), 'numpy.linspace', 'np.linspace', (['(0.001)', '(1.0)', '(5)'], {}), '(0.001, 1.0, 5)\n', (12424, 12439), True, 'import numpy as np\n'), ((12469, 12495), 'numpy.linspace', 'np.linspace', (['(1e-09)', '(0.1)', '(5)'], {}), '(1e-09, 0.1, 5)\n', (12480, 12495), True, 'import numpy as np\n'), ((12946, 13016), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'learn', 'scoring': 'scor', 'cv': 'cv', 'param_grid': 'HT_grid'}), '(estimator=learn, scoring=scor, cv=cv, param_grid=HT_grid)\n', (12958, 13016), False, 'from sklearn.model_selection import RandomizedSearchCV, GridSearchCV\n'), ((23104, 23115), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (23112, 23115), True, 'import numpy as np\n'), ((23290, 23305), 'skmultiflow.trees.hoeffding_tree.HoeffdingTree', 'HoeffdingTree', ([], {}), '()\n', (23303, 23305), False, 'from skmultiflow.trees.hoeffding_tree import HoeffdingTree\n'), ((23306, 23311), 'skmultiflow.lazy.knn.KNN', 'KNN', ([], {}), '()\n', (23309, 23311), False, 'from skmultiflow.lazy.knn import KNN\n'), ((23312, 23324), 'skmultiflow.bayes.NaiveBayes', 'NaiveBayes', ([], {}), '()\n', (23322, 23324), False, 'from skmultiflow.bayes import NaiveBayes\n'), ((23413, 23418), 'skmultiflow.drift_detection.ddm.DDM', 'DDM', ([], {}), '()\n', (23416, 23418), False, 'from skmultiflow.drift_detection.ddm import DDM\n'), ((23419, 23425), 'skmultiflow.drift_detection.eddm.EDDM', 'EDDM', ([], {}), '()\n', (23423, 23425), False, 'from skmultiflow.drift_detection.eddm import EDDM\n'), ((23426, 23433), 'skmultiflow.drift_detection.adwin.ADWIN', 'ADWIN', ([], {}), '()\n', (23431, 23433), False, 'from skmultiflow.drift_detection.adwin import ADWIN\n'), ((23434, 23447), 'skmultiflow.drift_detection.page_hinkley.PageHinkley', 'PageHinkley', ([], {}), '()\n', (23445, 23447), False, 'from skmultiflow.drift_detection.page_hinkley import PageHinkley\n'), ((23728, 23755), 'copy.deepcopy', 'deepcopy', 
(['learners_ref[ler]'], {}), '(learners_ref[ler])\n', (23736, 23755), False, 'from copy import deepcopy\n'), ((11553, 11575), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (11573, 11575), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((13470, 13482), 'skmultiflow.bayes.NaiveBayes', 'NaiveBayes', ([], {}), '()\n', (13480, 13482), False, 'from skmultiflow.bayes import NaiveBayes\n'), ((24117, 24146), 'copy.deepcopy', 'deepcopy', (['detectores_ref[det]'], {}), '(detectores_ref[det])\n', (24125, 24146), False, 'from copy import deepcopy\n'), ((24359, 24386), 'numpy.array', 'np.array', (['labels.iloc[s, :]'], {}), '(labels.iloc[s, :])\n', (24367, 24386), True, 'import numpy as np\n'), ((36377, 36399), 'numpy.nanmean', 'np.nanmean', (['scores_det'], {}), '(scores_det)\n', (36387, 36399), True, 'import numpy as np\n'), ((36433, 36454), 'numpy.nanmean', 'np.nanmean', (['times_det'], {}), '(times_det)\n', (36443, 36454), True, 'import numpy as np\n'), ((36487, 36507), 'numpy.nanmean', 'np.nanmean', (['rams_det'], {}), '(rams_det)\n', (36497, 36507), True, 'import numpy as np\n'), ((24291, 24320), 'numpy.array', 'np.array', (['features.iloc[s, :]'], {}), '(features.iloc[s, :])\n', (24299, 24320), True, 'import numpy as np\n'), ((25172, 25195), 'copy.deepcopy', 'deepcopy', (['tuned_learner'], {}), '(tuned_learner)\n', (25180, 25195), False, 'from copy import deepcopy\n'), ((25274, 25281), 'timeit.default_timer', 'timer', ([], {}), '()\n', (25279, 25281), True, 'from timeit import default_timer as timer\n'), ((36546, 36566), 'numpy.nanmean', 'np.nanmean', (['rams_det'], {}), '(rams_det)\n', (36556, 36566), True, 'import numpy as np\n'), ((36580, 36601), 'numpy.nanmean', 'np.nanmean', (['times_det'], {}), '(times_det)\n', (36590, 36601), True, 'import numpy as np\n'), ((41247, 41279), 'numpy.round', 'np.round', (['(lear_udd / cont_udd)', '(2)'], {}), '(lear_udd / cont_udd, 2)\n', (41255, 41279), True, 'import numpy as 
np\n'), ((41938, 42037), 'numpy.sqrt', 'np.sqrt', (['((lear_tp + lear_fp) * (lear_tp + lear_fn) * (lear_tn + lear_fp) * (lear_tn +\n lear_fn))'], {}), '((lear_tp + lear_fp) * (lear_tp + lear_fn) * (lear_tn + lear_fp) * (\n lear_tn + lear_fn))\n', (41945, 42037), True, 'import numpy as np\n'), ((25363, 25386), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (25384, 25386), False, 'import psutil\n'), ((25876, 25883), 'timeit.default_timer', 'timer', ([], {}), '()\n', (25881, 25883), True, 'from timeit import default_timer as timer\n'), ((26535, 26542), 'timeit.default_timer', 'timer', ([], {}), '()\n', (26540, 26542), True, 'from timeit import default_timer as timer\n'), ((27670, 27677), 'timeit.default_timer', 'timer', ([], {}), '()\n', (27675, 27677), True, 'from timeit import default_timer as timer\n'), ((28388, 28395), 'timeit.default_timer', 'timer', ([], {}), '()\n', (28393, 28395), True, 'from timeit import default_timer as timer\n'), ((42202, 42301), 'numpy.sqrt', 'np.sqrt', (['((lear_tp + lear_fp) * (lear_tp + lear_fn) * (lear_tn + lear_fp) * (lear_tn +\n lear_fn))'], {}), '((lear_tp + lear_fp) * (lear_tp + lear_fn) * (lear_tn + lear_fp) * (\n lear_tn + lear_fn))\n', (42209, 42301), True, 'import numpy as np\n'), ((42608, 42635), 'numpy.round', 'np.round', (['lear_ram_hours', '(6)'], {}), '(lear_ram_hours, 6)\n', (42616, 42635), True, 'import numpy as np\n'), ((42667, 42688), 'numpy.round', 'np.round', (['lear_udd', '(2)'], {}), '(lear_udd, 2)\n', (42675, 42688), True, 'import numpy as np\n'), ((42688, 42715), 'numpy.round', 'np.round', (['lear_precision', '(2)'], {}), '(lear_precision, 2)\n', (42696, 42715), True, 'import numpy as np\n'), ((42715, 42739), 'numpy.round', 'np.round', (['lear_recall', '(2)'], {}), '(lear_recall, 2)\n', (42723, 42739), True, 'import numpy as np\n'), ((42739, 42760), 'numpy.round', 'np.round', (['lear_mcc', '(2)'], {}), '(lear_mcc, 2)\n', (42747, 42760), True, 'import numpy as np\n'), ((43050, 43077), 
'numpy.round', 'np.round', (['lear_ram_hours', '(6)'], {}), '(lear_ram_hours, 6)\n', (43058, 43077), True, 'import numpy as np\n'), ((43129, 43150), 'numpy.round', 'np.round', (['lear_udd', '(2)'], {}), '(lear_udd, 2)\n', (43137, 43150), True, 'import numpy as np\n'), ((43203, 43224), 'numpy.round', 'np.round', (['lear_mcc', '(2)'], {}), '(lear_mcc, 2)\n', (43211, 43224), True, 'import numpy as np\n'), ((25931, 25954), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (25952, 25954), False, 'import psutil\n'), ((26624, 26647), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (26645, 26647), False, 'import psutil\n'), ((26791, 26798), 'timeit.default_timer', 'timer', ([], {}), '()\n', (26796, 26798), True, 'from timeit import default_timer as timer\n'), ((27759, 27782), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (27780, 27782), False, 'import psutil\n'), ((27901, 27908), 'timeit.default_timer', 'timer', ([], {}), '()\n', (27906, 27908), True, 'from timeit import default_timer as timer\n'), ((28477, 28500), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (28498, 28500), False, 'import psutil\n'), ((32273, 32302), 'copy.deepcopy', 'deepcopy', (['detectores_ref[det]'], {}), '(detectores_ref[det])\n', (32281, 32302), False, 'from copy import deepcopy\n'), ((32497, 32524), 'copy.deepcopy', 'deepcopy', (['learners_ref[ler]'], {}), '(learners_ref[ler])\n', (32505, 32524), False, 'from copy import deepcopy\n'), ((32770, 32777), 'timeit.default_timer', 'timer', ([], {}), '()\n', (32775, 32777), True, 'from timeit import default_timer as timer\n'), ((42582, 42604), 'numpy.nanmean', 'np.nanmean', (['scores_det'], {}), '(scores_det)\n', (42592, 42604), True, 'import numpy as np\n'), ((42966, 42988), 'numpy.nanmean', 'np.nanmean', (['scores_det'], {}), '(scores_det)\n', (42976, 42988), True, 'import numpy as np\n'), ((43453, 43480), 'numpy.round', 'np.round', (['lear_ram_hours', '(6)'], {}), 
'(lear_ram_hours, 6)\n', (43461, 43480), True, 'import numpy as np\n'), ((43533, 43554), 'numpy.round', 'np.round', (['lear_udd', '(2)'], {}), '(lear_udd, 2)\n', (43541, 43554), True, 'import numpy as np\n'), ((43608, 43629), 'numpy.round', 'np.round', (['lear_mcc', '(2)'], {}), '(lear_mcc, 2)\n', (43616, 43629), True, 'import numpy as np\n'), ((45264, 45291), 'numpy.round', 'np.round', (['lear_ram_hours', '(6)'], {}), '(lear_ram_hours, 6)\n', (45272, 45291), True, 'import numpy as np\n'), ((26846, 26869), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (26867, 26869), False, 'import psutil\n'), ((27956, 27979), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (27977, 27979), False, 'import psutil\n'), ((32825, 32848), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (32846, 32848), False, 'import psutil\n'), ((43368, 43390), 'numpy.nanmean', 'np.nanmean', (['scores_det'], {}), '(scores_det)\n', (43378, 43390), True, 'import numpy as np\n'), ((43861, 43888), 'numpy.round', 'np.round', (['lear_ram_hours', '(6)'], {}), '(lear_ram_hours, 6)\n', (43869, 43888), True, 'import numpy as np\n'), ((43942, 43963), 'numpy.round', 'np.round', (['lear_udd', '(2)'], {}), '(lear_udd, 2)\n', (43950, 43963), True, 'import numpy as np\n'), ((44018, 44039), 'numpy.round', 'np.round', (['lear_mcc', '(2)'], {}), '(lear_mcc, 2)\n', (44026, 44039), True, 'import numpy as np\n'), ((45238, 45260), 'numpy.nanmean', 'np.nanmean', (['scores_det'], {}), '(scores_det)\n', (45248, 45260), True, 'import numpy as np\n'), ((31266, 31281), 'copy.deepcopy', 'deepcopy', (['curie'], {}), '(curie)\n', (31274, 31281), False, 'from copy import deepcopy\n'), ((43775, 43797), 'numpy.nanmean', 'np.nanmean', (['scores_det'], {}), '(scores_det)\n', (43785, 43797), True, 'import numpy as np\n'), ((44271, 44298), 'numpy.round', 'np.round', (['lear_ram_hours', '(6)'], {}), '(lear_ram_hours, 6)\n', (44279, 44298), True, 'import numpy as np\n'), ((44349, 
44370), 'numpy.round', 'np.round', (['lear_udd', '(2)'], {}), '(lear_udd, 2)\n', (44357, 44370), True, 'import numpy as np\n'), ((44422, 44443), 'numpy.round', 'np.round', (['lear_mcc', '(2)'], {}), '(lear_mcc, 2)\n', (44430, 44443), True, 'import numpy as np\n'), ((44188, 44210), 'numpy.nanmean', 'np.nanmean', (['scores_det'], {}), '(scores_det)\n', (44198, 44210), True, 'import numpy as np\n'), ((44694, 44721), 'numpy.round', 'np.round', (['lear_ram_hours', '(6)'], {}), '(lear_ram_hours, 6)\n', (44702, 44721), True, 'import numpy as np\n'), ((44775, 44796), 'numpy.round', 'np.round', (['lear_udd', '(2)'], {}), '(lear_udd, 2)\n', (44783, 44796), True, 'import numpy as np\n'), ((44851, 44872), 'numpy.round', 'np.round', (['lear_mcc', '(2)'], {}), '(lear_mcc, 2)\n', (44859, 44872), True, 'import numpy as np\n'), ((44608, 44630), 'numpy.nanmean', 'np.nanmean', (['scores_det'], {}), '(scores_det)\n', (44618, 44630), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#python 3 compatibility
from __future__ import print_function
import rasterio
from scipy.io import netcdf
import numpy as np
import subprocess
import sys
from gdal import GDALGrid
from gmt import GMTGrid
def getCommandOutput(cmd):
    """
    Internal method for calling external command.
    @param cmd: String command ('ls -l', etc.)
    @return: Three-element tuple (success, stdout, stderr): success is a
    boolean indicating whether the command exited with status 0; stdout and
    stderr are the raw bytes captured from the process pipes.
    """
    # NOTE(review): shell=True runs cmd through the shell -- only pass trusted,
    # hard-coded command strings (as the callers in this script do).
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE
                            )
    stdout, stderr = proc.communicate()
    # Collapse the integer exit status into a boolean success flag
    # (replaces the verbose if/else True/False assignment).
    retcode = proc.returncode == 0
    return (retcode, stdout, stderr)
if __name__ == '__main__':
    # Round-trip test: write a small grid from Python, convert it back and
    # forth through the GMT and GDAL command-line tools, and verify the data
    # survives every conversion unchanged.
    #make a data set
    data = np.arange(0,16).reshape(4,4).astype(np.int32)
    # 4x4 grid with 1-degree cells spanning (0.5, 0.5) to (3.5, 3.5).
    geodict = {'xmin':0.5,'xmax':3.5,'ymin':0.5,'ymax':3.5,'xdim':1.0,'ydim':1.0,'nrows':4,'ncols':4}
    gmtgrid = GMTGrid(data,geodict)
    #save that data set to a grid
    gmtgrid.save('gmt_from_python.grd')
    #use gmt to get the value at 1.5,1.5 (should be 9)
    f = open('track.xy','wt')
    f.write('1.5 1.5\n')
    f.close()
    # -nn: presumably nearest-neighbor sampling so the raw cell value is
    # returned -- verify against the GMT grdtrack documentation.
    cmd = 'gmt grdtrack -nn track.xy -Ggmt_from_python.grd'
    res,stdout,stderr = getCommandOutput(cmd)
    print(stdout)
    #now create an XY file from our grid
    f = open('from_python.xyz','wt')
    for i in range(0,geodict['nrows']):
        for j in range(0,geodict['ncols']):
            lat,lon = gmtgrid.getLatLon(i,j)
            value = gmtgrid.getValue(lat,lon)
            # One "lon lat value" line per cell, as expected by gmt xyz2grd.
            f.write('%.1f %.1f %i\n' % (lon,lat,value))
    f.close()
    #now create a grid file from our XY file
    cmd = 'gmt xyz2grd -R0.5/3.5/0.5/3.5 -I1.0/1.0 from_python.xyz -Gfrom_gmt.grd'
    res,stdout,stderr = getCommandOutput(cmd)
    #now read in this grid using GMTGrid
    gmtgrid2 = GMTGrid.load('from_gmt.grd')
    np.testing.assert_almost_equal(data,gmtgrid2.getData())
    #now use gdal to convert that GMT grid to ESRI format
    cmd = 'gdal_translate from_gmt.grd from_gmt.bil -of EHdr'
    res,stdout,stderr = getCommandOutput(cmd)
    #now use our GDAL reader to get that grid data
    gdalgrid = GDALGrid.load('from_gmt.bil')
    np.testing.assert_almost_equal(data,gdalgrid.getData())
    #now use gdal to convert that ESRI grid back to netcdf
    cmd = 'gdal_translate from_gmt.bil from_gdal.grd -of GMT'
    res,stdout,stderr = getCommandOutput(cmd)
    #again use gmt to get the value at 1.5,1.5 (should be 9)
    cmd = 'gmt grdtrack -nn track.xy -Gfrom_gdal.grd'
    res,stdout,stderr = getCommandOutput(cmd)
    print(stdout)
    #now use our GMT reader to load that grid and compare to original
    gmtgrid3 = GMTGrid.load('from_gdal.grd')
    np.testing.assert_almost_equal(data,gmtgrid3.getData())
| [
"subprocess.Popen",
"gmt.GMTGrid",
"gdal.GDALGrid.load",
"gmt.GMTGrid.load",
"numpy.arange"
] | [((520, 606), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess\n .PIPE)\n', (536, 606), False, 'import subprocess\n'), ((1119, 1141), 'gmt.GMTGrid', 'GMTGrid', (['data', 'geodict'], {}), '(data, geodict)\n', (1126, 1141), False, 'from gmt import GMTGrid\n'), ((2026, 2054), 'gmt.GMTGrid.load', 'GMTGrid.load', (['"""from_gmt.grd"""'], {}), "('from_gmt.grd')\n", (2038, 2054), False, 'from gmt import GMTGrid\n'), ((2349, 2378), 'gdal.GDALGrid.load', 'GDALGrid.load', (['"""from_gmt.bil"""'], {}), "('from_gmt.bil')\n", (2362, 2378), False, 'from gdal import GDALGrid\n'), ((2873, 2902), 'gmt.GMTGrid.load', 'GMTGrid.load', (['"""from_gdal.grd"""'], {}), "('from_gdal.grd')\n", (2885, 2902), False, 'from gmt import GMTGrid\n'), ((957, 973), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {}), '(0, 16)\n', (966, 973), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def gaussian_func(sigma, x):
    """Zero-mean Gaussian probability density with standard deviation
    ``sigma``, evaluated (elementwise) at ``x``."""
    variance = sigma ** 2
    normalizer = 1 / np.sqrt(2 * np.pi * variance)
    return normalizer * np.exp(-(x ** 2) / (2 * variance))
def gaussian_random_generator(sigma=5, numbers=100000):
    """Draw Gaussian samples via the Box-Muller transform and plot them.

    Parameters
    ----------
    sigma : float
        Standard deviation of the target zero-mean Gaussian.
    numbers : int
        Number of (rho, theta) pairs to draw; 2*numbers samples result
        because both the cos and sin projections of each pair are kept.

    Side effects: shows a histogram overlaid with the analytic density and
    saves it to images/q4_<sigma>_<numbers>.png (the directory is created
    if missing).
    """
    import os
    uniform_random_numbers = np.random.rand(numbers, 2)
    # Box-Muller: rho is Rayleigh-distributed, theta uniform on [0, 2*pi).
    # Using 1 - U keeps the log argument in (0, 1], so log(0) cannot occur.
    rho = sigma * np.sqrt(-2 * np.log(1 - uniform_random_numbers[:, 0]))
    theta = 2 * np.pi * uniform_random_numbers[:, 1]
    # Both projections of each (rho, theta) pair are kept as samples.
    gaussian_random = rho * np.array([np.cos(theta), np.sin(theta)])
    gaussian_random_numbers = gaussian_random.flatten()
    min_value = np.min(gaussian_random_numbers)
    max_value = np.max(gaussian_random_numbers)
    # Analytic density curve for visual comparison with the histogram.
    x = np.linspace(min_value, max_value + 1, 1000)
    y = gaussian_func(sigma, x)
    # Unit-wide bins centered on the integers, normalized to a density.
    plt.hist(gaussian_random_numbers, bins=np.arange(min_value - 0.5, max_value + 1, 1), density=True)
    plt.plot(x, y, linewidth=3)
    plt.xlabel(r'numbers')
    plt.ylabel(r'density')
    # Fix: plt.savefig raises FileNotFoundError when the target directory is
    # missing, so ensure "images/" exists before writing the figure.
    if not os.path.isdir("images"):
        os.makedirs("images")
    plt.savefig("images/q4_" + str(sigma) + "_" + str(numbers) + '.png')
    plt.show()
# Run the demo with the default parameters (sigma=5, 100000 draws).
gaussian_random_generator()
| [
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"numpy.max",
"numpy.exp",
"numpy.linspace",
"numpy.cos",
"numpy.min",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((259, 285), 'numpy.random.rand', 'np.random.rand', (['numbers', '(2)'], {}), '(numbers, 2)\n', (273, 285), True, 'import numpy as np\n'), ((555, 586), 'numpy.min', 'np.min', (['gaussian_random_numbers'], {}), '(gaussian_random_numbers)\n', (561, 586), True, 'import numpy as np\n'), ((603, 634), 'numpy.max', 'np.max', (['gaussian_random_numbers'], {}), '(gaussian_random_numbers)\n', (609, 634), True, 'import numpy as np\n'), ((643, 686), 'numpy.linspace', 'np.linspace', (['min_value', '(max_value + 1)', '(1000)'], {}), '(min_value, max_value + 1, 1000)\n', (654, 686), True, 'import numpy as np\n'), ((826, 853), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'linewidth': '(3)'}), '(x, y, linewidth=3)\n', (834, 853), True, 'import matplotlib.pyplot as plt\n'), ((858, 879), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""numbers"""'], {}), "('numbers')\n", (868, 879), True, 'import matplotlib.pyplot as plt\n'), ((885, 906), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""density"""'], {}), "('density')\n", (895, 906), True, 'import matplotlib.pyplot as plt\n'), ((985, 995), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (993, 995), True, 'import matplotlib.pyplot as plt\n'), ((133, 167), 'numpy.exp', 'np.exp', (['(-x ** 2 / (2 * sigma ** 2))'], {}), '(-x ** 2 / (2 * sigma ** 2))\n', (139, 167), True, 'import numpy as np\n'), ((97, 128), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma ** 2)'], {}), '(2 * np.pi * sigma ** 2)\n', (104, 128), True, 'import numpy as np\n'), ((762, 806), 'numpy.arange', 'np.arange', (['(min_value - 0.5)', '(max_value + 1)', '(1)'], {}), '(min_value - 0.5, max_value + 1, 1)\n', (771, 806), True, 'import numpy as np\n'), ((317, 357), 'numpy.log', 'np.log', (['(1 - uniform_random_numbers[:, 0])'], {}), '(1 - uniform_random_numbers[:, 0])\n', (323, 357), True, 'import numpy as np\n'), ((451, 464), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (457, 464), True, 'import numpy as np\n'), ((466, 479), 'numpy.sin', 'np.sin', 
(['theta'], {}), '(theta)\n', (472, 479), True, 'import numpy as np\n')] |
import time
import edgeiq
import cv2
import numpy as np
import os
"""
Instance segmenataiom application used to count unique instances of bottles.
Instance Segmenataiom is currently not part of the alwaysai API's or Model Catalog.
This application demostartes how to implement instance segmenataiom using the
alwaysai platform.
"""
def main():
    """Run a Mask R-CNN instance-segmentation loop that counts bottles.

    Loads COCO labels and a frozen Mask R-CNN graph via OpenCV's DNN module
    (CUDA backend), reads frames from the default webcam through alwaysAI's
    WebcamVideoStream, overlays a coloured mask per detected bottle, and
    streams annotated frames until the user requests exit.
    """
    print("get labels")
    # One COCO class name per line.
    labelsPath = "instances_models/object_detection_classes_coco.txt"
    LABELS = open(labelsPath).read().strip().split("\n")
    print("create colors")
    # Fixed seed so class colours are reproducible across runs.
    np.random.seed(42)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
                               dtype="uint8")
    # set paths to the Mask R-CNN model and configuration
    weightsPath = "instances_models/frozen_inference_graph.pb"
    configPath = "instances_models/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt"
    print("load mask rcnn model and set CUDA backend")
    net = cv2.dnn.readNetFromTensorflow(weightsPath, configPath)
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    print("mask rcnn loaded sucessfully")
    fps = edgeiq.FPS()
    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            # loop detection
            while True:
                frame = video_stream.read()
                blob = cv2.dnn.blobFromImage(frame, swapRB=True, crop=False)
                net.setInput(blob)
                # Single forward pass yields boxes and their mask logits.
                (boxes, masks) = net.forward(["detection_out_final",
                    "detection_masks"])
                # loop over the number of detected objects
                counter = 0
                for i in range(0, boxes.shape[2]):
                    # extract the class ID of the detection along with the
                    # confidence (i.e., probability) associated with the
                    # prediction
                    classID = int(boxes[0, 0, i, 1])
                    confidence = boxes[0, 0, i, 2]
                    if confidence > 0.5 and LABELS[classID] == "bottle":
                        # scale the bounding box coordinates back relative to the
                        # size of the frame and then compute the width and the
                        # height of the bounding box
                        (H, W) = frame.shape[:2]
                        box = boxes[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")
                        boxW = endX - startX
                        boxH = endY - startY
                        # extract the pixel-wise segmentation for the object,
                        # resize the mask such that it's the same dimensions of
                        # the bounding box, and then finally threshold to create
                        # a *binary* mask
                        mask = masks[i, classID]
                        mask = cv2.resize(mask, (boxW, boxH),
                            interpolation=cv2.INTER_CUBIC)
                        mask = (mask > 0.3)
                        # extract the ROI of the image
                        roi = frame[startY:endY, startX:endX][mask]
                        # grab the color used to visualize this particular class,
                        # then create a transparent overlay by blending the color
                        # with the ROI
                        counter += 1
                        # NOTE(review): classID + counter can exceed len(COLORS)
                        # when many bottles are detected — confirm bounds.
                        color = COLORS[classID + counter]
                        blended = ((0.7 * color) + (0.4 * roi)).astype("uint8")
                        # store the blended ROI in the original frame
                        frame[startY:endY, startX:endX][mask] = blended
                        # draw the bounding box of the instance on the frame
                        # counter += 1
                        color = [int(c) for c in color]
                        cv2.rectangle(frame, (startX, startY), (endX, endY),
                            color, 2)
                        text = "{}: number {} ".format(LABELS[classID], counter)
                        cv2.putText(frame, text, (startX, startY - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                    else:
                        text = "Instance Segmenataiom"
                # NOTE(review): `text` is unbound if boxes.shape[2] == 0, which
                # would raise NameError here — confirm the network always
                # returns at least one detection slot.
                streamer.send_data(frame, text)
                fps.update()
                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
if __name__ == "__main__":
    main()
| [
"cv2.dnn.blobFromImage",
"cv2.rectangle",
"edgeiq.WebcamVideoStream",
"edgeiq.Streamer",
"cv2.dnn.readNetFromTensorflow",
"time.sleep",
"cv2.putText",
"numpy.array",
"numpy.random.seed",
"cv2.resize",
"edgeiq.FPS"
] | [((531, 549), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (545, 549), True, 'import numpy as np\n'), ((901, 955), 'cv2.dnn.readNetFromTensorflow', 'cv2.dnn.readNetFromTensorflow', (['weightsPath', 'configPath'], {}), '(weightsPath, configPath)\n', (930, 955), False, 'import cv2\n'), ((1117, 1129), 'edgeiq.FPS', 'edgeiq.FPS', ([], {}), '()\n', (1127, 1129), False, 'import edgeiq\n'), ((1153, 1184), 'edgeiq.WebcamVideoStream', 'edgeiq.WebcamVideoStream', ([], {'cam': '(0)'}), '(cam=0)\n', (1177, 1184), False, 'import edgeiq\n'), ((1220, 1237), 'edgeiq.Streamer', 'edgeiq.Streamer', ([], {}), '()\n', (1235, 1237), False, 'import edgeiq\n'), ((1301, 1316), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (1311, 1316), False, 'import time\n'), ((1463, 1516), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame'], {'swapRB': '(True)', 'crop': '(False)'}), '(frame, swapRB=True, crop=False)\n', (1484, 1516), False, 'import cv2\n'), ((3023, 3084), 'cv2.resize', 'cv2.resize', (['mask', '(boxW, boxH)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(mask, (boxW, boxH), interpolation=cv2.INTER_CUBIC)\n', (3033, 3084), False, 'import cv2\n'), ((3978, 4040), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(startX, startY)', '(endX, endY)', 'color', '(2)'], {}), '(frame, (startX, startY), (endX, endY), color, 2)\n', (3991, 4040), False, 'import cv2\n'), ((4174, 4266), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(startX, startY - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'color', '(2)'], {}), '(frame, text, (startX, startY - 5), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, color, 2)\n', (4185, 4266), False, 'import cv2\n'), ((2476, 2498), 'numpy.array', 'np.array', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (2484, 2498), True, 'import numpy as np\n')] |
# imports
import numpy as np
import matplotlib.pyplot as plt
""" Implementation of the Heaviside step Function
Defined as the integral of the dirac delta function."""
def _unit_step(n):
return 0 if n < 0 else 1
# Vectorize the scalar step function so it applies elementwise to arrays.
# (np.vectorize is a convenience wrapper — it still loops in Python.)
unit_step = np.vectorize(_unit_step)
# define input vector: integer samples n = -10 .. 10
n = np.arange(-10, 11, 1)
u = unit_step(n)
# graph the unit step function three ways: line, stem, and stair plots
plt.figure()
plt.plot(n, u)
plt.xlim(-12, 12)
plt.ylim(-1, 2)
plt.figure()
plt.stem(n, u)
plt.xlim(-12, 12)
plt.ylim(-1, 2)
plt.figure()
plt.step(n, u)
plt.xlim(-12, 12)
plt.ylim(-1, 2)
plt.show()
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.stem",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.vectorize",
"matplotlib.pyplot.step",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((281, 305), 'numpy.vectorize', 'np.vectorize', (['_unit_step'], {}), '(_unit_step)\n', (293, 305), True, 'import numpy as np\n'), ((333, 354), 'numpy.arange', 'np.arange', (['(-10)', '(11)', '(1)'], {}), '(-10, 11, 1)\n', (342, 354), True, 'import numpy as np\n'), ((405, 417), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (415, 417), True, 'import matplotlib.pyplot as plt\n'), ((418, 432), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'u'], {}), '(n, u)\n', (426, 432), True, 'import matplotlib.pyplot as plt\n'), ((433, 450), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-12)', '(12)'], {}), '(-12, 12)\n', (441, 450), True, 'import matplotlib.pyplot as plt\n'), ((451, 466), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(2)'], {}), '(-1, 2)\n', (459, 466), True, 'import matplotlib.pyplot as plt\n'), ((468, 480), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (478, 480), True, 'import matplotlib.pyplot as plt\n'), ((481, 495), 'matplotlib.pyplot.stem', 'plt.stem', (['n', 'u'], {}), '(n, u)\n', (489, 495), True, 'import matplotlib.pyplot as plt\n'), ((496, 513), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-12)', '(12)'], {}), '(-12, 12)\n', (504, 513), True, 'import matplotlib.pyplot as plt\n'), ((514, 529), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(2)'], {}), '(-1, 2)\n', (522, 529), True, 'import matplotlib.pyplot as plt\n'), ((531, 543), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (541, 543), True, 'import matplotlib.pyplot as plt\n'), ((544, 558), 'matplotlib.pyplot.step', 'plt.step', (['n', 'u'], {}), '(n, u)\n', (552, 558), True, 'import matplotlib.pyplot as plt\n'), ((559, 576), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-12)', '(12)'], {}), '(-12, 12)\n', (567, 576), True, 'import matplotlib.pyplot as plt\n'), ((577, 592), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(2)'], {}), '(-1, 2)\n', (585, 592), True, 'import matplotlib.pyplot as plt\n'), ((594, 604), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (602, 604), True, 'import matplotlib.pyplot as plt\n')] |
import os
import random
import numpy as np
class EA_Util:
def __init__(self, gen_size, pop_size=30, eval_func=None, max_gen=50, early_stop=0):
self.gen_size = gen_size
self.pop_size = pop_size
self.max_gen = max_gen
self.early_stop = early_stop
if eval_func == None:
raise Exception("Undefined Evaluation Function")
else:
self.eval_func = eval_func
self._init_pop()
def _init_pop(self):
population = []
for _ in range(self.pop_size):
individual = [1] * self.gen_size
p = np.random.uniform(size=self.gen_size)
for x in range(self.gen_size):
individual[x] = 1 if p[x] <= 0.9 else 0
population.append(individual)
self.population = population
self.fitness = [-1] * self.pop_size
def _mutation(self, individual):
new_chrom = individual.copy()
t = random.sample(range(self.gen_size), 5)
for s in t:
new_chrom[s] = 1 - new_chrom[s]
return new_chrom
def _crossover(self, a, b):
x = a.copy()
for i in range(self.gen_size):
p = random.random()
if p > 0.5:
x[i] = b[i]
return x
def _eval_pop(self):
for x in range(self.pop_size):
fit = self.eval_func(self.population[x])
self.fitness[x] = fit
def _reproduct(self):
best = []
fitsort = np.argsort(self.fitness)
for x in fitsort[-5:]:
best.append(x)
for i in range(self.pop_size//2):
if i in best:
continue
self.population[i] = self._mutation(self.population[random.choice(best)])
for i in range(self.pop_size//2, self.pop_size):
if i in best:
continue
t = random.sample(best, 2)
self.population[i] = self._crossover(self.population[t[0]], self.population[t[1]])
    def evolution(self):
        """Run the EA for max_gen generations and return the index of the
        best individual in the final population.

        Note: early_stop is never consulted here; the loop always runs the
        full max_gen generations.
        """
        # Evaluate the freshly initialised population before any breeding.
        self._eval_pop()
        print('Init Pop')
        best_fit = round(max(self.fitness), 4)
        temp_fit = self.fitness.copy()
        for i in range(self.pop_size):
            temp_fit[i] = round(temp_fit[i], 4)
        print('    Best Fitness: %.4f' % (best_fit))
        print('    Pop Fitness:', temp_fit)
        for gen in range(1, self.max_gen+1):
            print('%d evolution' % (gen))
            # Breed a new generation, then re-score it.
            self._reproduct()
            self._eval_pop()
            best_fit = round(max(self.fitness), 4)
            temp_fit = self.fitness.copy()
            for i in range(self.pop_size):
                temp_fit[i] = round(temp_fit[i], 4)
            print('    Best Fitness: %.4f' % (best_fit))
            print('    Pop Fitness:', temp_fit)
        index = self.fitness.index(max(self.fitness))
        return index | [
"random.sample",
"random.choice",
"numpy.argsort",
"numpy.random.uniform",
"random.random"
] | [((1524, 1548), 'numpy.argsort', 'np.argsort', (['self.fitness'], {}), '(self.fitness)\n', (1534, 1548), True, 'import numpy as np\n'), ((627, 664), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.gen_size'}), '(size=self.gen_size)\n', (644, 664), True, 'import numpy as np\n'), ((1216, 1231), 'random.random', 'random.random', ([], {}), '()\n', (1229, 1231), False, 'import random\n'), ((1910, 1932), 'random.sample', 'random.sample', (['best', '(2)'], {}), '(best, 2)\n', (1923, 1932), False, 'import random\n'), ((1764, 1783), 'random.choice', 'random.choice', (['best'], {}), '(best)\n', (1777, 1783), False, 'import random\n')] |
import numpy
from amuse.test import amusetest
from amuse.units import units, nbody_system
from amuse.ic.brokenimf import *
# Instead of random, use evenly distributed numbers, just for testing
default_options = dict(random=False)
class TestMultiplePartIMF(amusetest.TestCase):
    # Exercises MultiplePartIMF and the convenience mass-distribution
    # factories from amuse.ic.brokenimf against known analytic values.
    def test1(self):
        # Single-segment (Salpeter) IMF with default boundaries/alphas.
        print("Test MultiplePartIMF with default mass_boundaries and alphas, i.e. Salpeter")
        instance = MultiplePartIMF(mass_max=100.0 | units.MSun)
        self.assertEqual(instance.mass_boundaries, [0.1, 100.0] | units.MSun)
        self.assertEqual(instance.alphas, [-2.35])
        self.assertEqual(instance.number_of_bins, 1)
        self.assertEqual(instance.fraction_per_bin, [1.0])
        self.assertEqual(instance.cumulative_fractions, [0.0, 1.0])
        self.assertAlmostEqual(instance.mass([0.0]), 0.1 | units.MSun)
        self.assertAlmostEqual(instance.mass([1.0]), 100.0 | units.MSun)
        self.assertAlmostEqual(instance.mass_mean(), 0.351 | units.MSun, 3)
    def test2(self):
        # Two-segment IMF with explicit boundaries and slopes.
        print("Test MultiplePartIMF with mass_boundaries and alphas")
        instance = MultiplePartIMF(mass_boundaries = [1.0, 10.0, 100.0] | units.MSun,
                                   alphas = [1.3, -3.3], **default_options)
        self.assertEqual(instance.mass_boundaries, [1.0, 10.0, 100.0] | units.MSun)
        self.assertEqual(instance.alphas, [1.3, -3.3])
        self.assertEqual(instance.number_of_bins, 2)
        self.assertAlmostEqual(instance.fraction_per_bin, numpy.array([0.5, 0.5]))
        self.assertEqual(instance.cumulative_fractions, [0.0, 0.5, 1.0])
        self.assertAlmostEqual(instance.mass([0.0]), 1.0 | units.MSun)
        self.assertAlmostEqual(instance.mass([0.5]), 10.0 | units.MSun)
        self.assertAlmostEqual(instance.mass([1.0]), 100.0 | units.MSun)
        self.assertAlmostEqual(instance.mass_mean(), 11.9457684987 | units.MSun)
        self.assertAlmostEqual(instance.mass_mean(), instance.next_mass(10000).mean(), 2)
    def test3(self):
        # Factory function, default (Salpeter) configuration.
        print("Test new_broken_power_law_mass_distribution with default mass_boundaries and alphas, i.e. Salpeter")
        masses = new_broken_power_law_mass_distribution(10000, mass_max=100.0 | units.MSun, **default_options)
        self.assertTrue((masses >= 0.1 | units.MSun).all())
        self.assertTrue((masses <= 100.0 | units.MSun).all())
        self.assertAlmostEqual(min(masses), 0.1 | units.MSun)
        self.assertAlmostEqual(max(masses), 100.0 | units.MSun)
        mass_mean = MultiplePartIMF(mass_boundaries=[0.1, 100.0]|units.MSun,
                                    alphas=[-2.35]).mass_mean()
        self.assertAlmostEqual(mass_mean, 0.35136877959 | units.MSun)
        self.assertAlmostRelativeEqual(masses.mean(), 0.351 | units.MSun, 1)
    def test4(self):
        # Factory function with explicit two-segment configuration.
        print("Test new_broken_power_law_mass_distribution with mass_boundaries and alphas")
        masses = new_broken_power_law_mass_distribution(10000,
                                                        mass_boundaries = [1.0, 10.0, 100.0] | units.MSun,
                                                        alphas = [1.3, -3.3], **default_options)
        self.assertTrue((masses >= 1.0 | units.MSun).all())
        self.assertTrue((masses <= 100.0 | units.MSun).all())
        self.assertAlmostEqual(min(masses), 1.0 | units.MSun)
        self.assertAlmostEqual(max(masses), 100.0 | units.MSun)
        mass_mean = MultiplePartIMF(mass_boundaries=[1.0, 10.0, 100.0]|units.MSun,
                                    alphas=[1.3, -3.3]).mass_mean()
        self.assertAlmostEqual(mass_mean, 11.9457684987 | units.MSun)
        self.assertAlmostRelativeEqual(masses.mean(), 11.9457684987 | units.MSun, 1)
    def test5(self):
        # Scalo (1986) six-segment IMF.
        print("Test new_scalo_mass_distribution")
        masses = new_scalo_mass_distribution(10000, **default_options)
        self.assertTrue((masses >= 0.1 | units.MSun).all())
        self.assertTrue((masses <= 125.0 | units.MSun).all())
        self.assertAlmostEqual(min(masses), 0.1 | units.MSun)
        self.assertAlmostEqual(max(masses), 125.0 | units.MSun)
        mass_mean = MultiplePartIMF(mass_boundaries=[0.10, 0.18, 0.42, 0.62, 1.18, 3.5, 125.0]|units.MSun,
                                    alphas=[1.6, -1.01, -2.75, -2.08, -3.5, -2.63]).mass_mean()
        self.assertAlmostEqual(mass_mean, 0.487756751788 | units.MSun)
        self.assertAlmostRelativeEqual(masses.mean(), 0.487756751788 | units.MSun, 1)
    def test6(self):
        # Miller & Scalo four-segment IMF.
        print("Test new_miller_scalo_mass_distribution")
        masses = new_miller_scalo_mass_distribution(10000, **default_options)
        self.assertTrue((masses >= 0.1 | units.MSun).all())
        self.assertTrue((masses <= 125.0 | units.MSun).all())
        self.assertAlmostEqual(min(masses), 0.1 | units.MSun)
        self.assertAlmostEqual(max(masses), 125.0 | units.MSun)
        mass_mean = MultiplePartIMF(mass_boundaries=[0.1, 1.0, 2.0, 10.0, 125.0]|units.MSun,
                                    alphas=[-1.25, -2.0, -2.3, -3.3]).mass_mean()
        self.assertAlmostEqual(mass_mean, 0.885783055149 | units.MSun)
        self.assertAlmostRelativeEqual(masses.mean(), 0.885783055149 | units.MSun, 1)
    def test7(self):
        # Kroupa three-segment IMF; roundoff margin for the upper bound.
        print("Test new_kroupa_mass_distribution")
        masses = new_kroupa_mass_distribution(10000, **default_options)
        self.assertTrue((masses >= 0.01 | units.MSun).all())
        roundoff = 1.0 + 1.0e-12
        self.assertTrue((masses <= (100.0 * roundoff) | units.MSun).all())
        self.assertAlmostEqual(min(masses), 0.01 | units.MSun)
        self.assertAlmostEqual(max(masses), 100.0 | units.MSun)
        mass_mean = MultiplePartIMF(mass_boundaries=[0.01, 0.08, 0.5, 100.0]|units.MSun,
                                    alphas=[-0.3, -1.3, -2.3]).mass_mean()
        self.assertAlmostEqual(mass_mean, 0.376175542639 | units.MSun)
        self.assertAlmostRelativeEqual(masses.mean(), 0.376175542639 | units.MSun, 1)
    def test8(self):
        # Slopes of exactly -1 and -2 hit the analytic special cases.
        print("Test with problematic alphas (new_salpeter_mass_distribution would give zero division errors)")
        masses = new_broken_power_law_mass_distribution(10000,
                                                        mass_boundaries = [1.0, 10.0, 100.0] | units.MSun,
                                                        alphas = [-1, -2], **default_options)
        self.assertTrue((masses >= 1.0 | units.MSun).all())
        roundoff = 1.0 + 1.0e-12
        self.assertTrue((masses <= (100.0 * roundoff) | units.MSun).all())
        self.assertAlmostEqual(min(masses), 1.0 | units.MSun)
        self.assertAlmostEqual(max(masses), 100.0 | units.MSun)
        mass_mean = MultiplePartIMF(mass_boundaries=[1.0, 10.0, 100.0] | units.MSun,
                                    alphas=[-1, -2]).mass_mean()
        self.assertAlmostEqual(mass_mean, 10.0 | units.MSun)
        self.assertAlmostRelativeEqual(masses.mean(), 10.0 | units.MSun, 1)
        masses = new_broken_power_law_mass_distribution(101,
                                                        mass_boundaries = [1.0, 100.0] | units.MSun,
                                                        alphas = [-1], **default_options)
        self.assertAlmostEqual(masses.median(), 10.0 | units.MSun)
| [
"numpy.array"
] | [((1487, 1510), 'numpy.array', 'numpy.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (1498, 1510), False, 'import numpy\n')] |
#!/usr/bin/python
from typing import Dict, Union, Tuple, List
import numpy as np
from ..parameters import POI
from ..fitutils.api_check import is_valid_loss, is_valid_fitresult, is_valid_minimizer
from ..fitutils.api_check import is_valid_data, is_valid_pdf
from ..fitutils.utils import pll
"""
Module defining the base class for the calculators for statistical tests based on the likelyhood ratio.
Any calculator can be a subclass of `BaseCalculator`. Currently implemented:
* `AsymptoticCalculator`: calculator using the asymptotic formulaed of the likehood ratio.
Acronyms used in the code:
* nll = negative log-likehood, the likehood being the `loss` attribute of a calculator;
* obs = observed, i.e. measured on provided data.
"""
class BaseCalculator(object):
    def __init__(self, input, minimizer):
        """Base class for calculator.
            Args:
                input : loss or fit result
                minimizer : minimizer to use to find the minimum of the loss function
            Example with `zfit`:
                >>> import zfit
                >>> from zfit.core.loss import UnbinnedNLL
                >>> from zfit.minimize import MinuitMinimizer
                >>> obs = zfit.Space('x', limits=(0.1, 2.0))
                >>> data = zfit.data.Data.from_numpy(obs=obs, array=np.random.normal(1.2, 0.1, 10000))
                >>> mean = zfit.Parameter("mu", 1.2)
                >>> sigma = zfit.Parameter("sigma", 0.1)
                >>> model = zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma)
                >>> loss = UnbinnedNLL(model=[model], data=[data], fit_range=[obs])
                >>> calc = BaseCalculator(input=loss, minimizer=MinuitMinimizer())
        """
        # Accept either a fit result (loss + best fit already known) or a
        # bare loss (best fit computed lazily via the `bestfit` property).
        if is_valid_fitresult(input):
            self._loss = input.loss
            self._bestfit = input
        elif is_valid_loss(input):
            self._loss = input
            self._bestfit = None
        else:
            raise ValueError("{} is not a valid loss funtion or fit result!".format(input))
        if not is_valid_minimizer(minimizer):
            raise ValueError("{} is not a valid minimizer !".format(minimizer))
        self._minimizer = minimizer
        self.minimizer.verbosity = 0
        # cache of the observed nll values
        self._obs_nll = {}
    @property
    def loss(self):
        """
        Returns the loss / likelihood function used in the calculator.
        """
        return self._loss
    @property
    def minimizer(self):
        """
        Returns the minimizer used in the calculator.
        """
        return self._minimizer
    @property
    def bestfit(self):
        """
        Returns the best fit values of the model parameters.
        """
        if getattr(self, "_bestfit", None):
            return self._bestfit
        else:
            print("Get fit best values!")
            # Temporarily raise verbosity so the one-off initial fit is
            # visible, then cache the minimum for subsequent accesses.
            self.minimizer.verbosity = 5
            mininum = self.minimizer.minimize(loss=self.loss)
            self.minimizer.verbosity = 0
            self._bestfit = mininum
            return self._bestfit
    @bestfit.setter
    def bestfit(self, value):
        """
        Set the best fit values of the model parameters.
            Args:
                value: fit result
        """
        if not is_valid_fitresult(value):
            raise ValueError()
        self._bestfit = value
    @property
    def model(self):
        """
        Returns the model used in the calculator.
        """
        return self.loss.model
    @property
    def data(self):
        """
        Returns the data used in the calculator.
        """
        return self.loss.data
    @property
    def constraints(self):
        """
        Returns the constraints on the loss / likelihood function used in the calculator.
        """
        return self.loss.constraints
    def lossbuilder(self, model, data, weights=None):
        """ Method to build a new loss function.
            Args:
                model (List): The model or models to evaluate the data on
                data (List): Data to use
                weights (optional, List): the data weights
            Example with `zfit`:
                >>> data = zfit.data.Data.from_numpy(obs=obs, array=np.random.normal(1.2, 0.1, 10000))
                >>> mean = zfit.Parameter("mu", 1.2)
                >>> sigma = zfit.Parameter("sigma", 0.1)
                >>> model = zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma)
                >>> loss = calc.lossbuilder(model, data)
            Returns:
                 Loss function
        """
        assert all(is_valid_pdf(m) for m in model)
        assert all(is_valid_data(d) for d in data)
        # The replacement model/data/weights must mirror the originals
        # component by component.
        msg = "{0} must have the same number of components as {1}"
        if len(data) != len(self.data):
            raise ValueError(msg.format("data", "`self.data"))
        if len(model) != len(self.model):
            raise ValueError(msg.format("model", "`self.model"))
        if weights is not None and len(weights) != len(self.data):
            raise ValueError(msg.format("weights", "`self.data`"))
        fit_range = self.loss.fit_range
        if weights is not None:
            for d, w in zip(data, weights):
                d.set_weights(w)
        loss = type(self.loss)(model=model, data=data, fit_range=fit_range)
        loss.add_constraints(self.constraints)
        return loss
    def obs_nll(self, pois: List[POI]) -> np.ndarray:
        """ Compute observed negative log-likelihood values for given parameters of interest.
            Args:
                pois (List[`hypotests.POI`]): parameters of interest
            Returns:
                `numpy.ndarray`: observed nll values
            Example with `zfit`:
                >>> mean = zfit.Parameter("mu", 1.2)
                >>> poi = POI(mean, [1.1, 1.2, 1.0])
                >>> nll = calc.obs_nll([poi])
        """
        self.check_pois(pois)
        # Cartesian product of the POI value grids, one row per point;
        # results are memoised in self._obs_nll keyed by the value tuple.
        grid = np.array([g.ravel() for g in np.meshgrid(*pois)]).T
        ret = np.empty(len(grid))
        for i, g in enumerate(grid):
            k = tuple(g)
            if k not in self._obs_nll.keys():
                nll = pll(minimizer=self.minimizer, loss=self.loss, pois=g)
                self._obs_nll[k] = nll
            ret[i] = self._obs_nll[k]
        return ret
    def qobs(self, poinull: List[POI], onesided=True, onesideddiscovery=False, qtilde=False):
        r""" Compute observed values of the $$\Delta$$ log-likelihood test statistic.
            Args:
                poinull (List[`hypotests.POI`]): parameters of interest for the null hypothesis
                qtilde (bool, optional): if `True` use the $$\tilde{q}$$ test statistics else (default) use
                    the $$q$$ test statistic
                onesided (bool, optional): if `True` (default) computes onesided pvalues
                onesideddiscovery (bool, optional): if `True` (default) computes onesided pvalues for a discovery
                    test
            Returns:
                `numpy.ndarray`: observed values of q
            Example with `zfit`:
                >>> mean = zfit.Parameter("mu", 1.2)
                >>> poi = POI(mean, [1.1, 1.2, 1.0])
                >>> q = calc.qobs([poi])
        """
        print("Compute qobs for the null hypothesis!")
        self.check_pois(poinull)
        params = [p.parameter for p in poinull]
        bestfit = [self.bestfit.params[p]["value"] for p in params]
        bestfitpoi = []
        for param, bf in zip(params, bestfit):
            # With qtilde the alternative is pinned at 0 instead of the
            # measured best-fit value.
            if qtilde and len(poinull) == 1:
                bestfitpoi.append(POI(param, 0))
            else:
                bestfitpoi.append(POI(param, bf))
                if len(poinull) == 1:
                    self._obs_nll[POI(param, bf)] = self.bestfit.fmin
        nll_poinull_obs = self.obs_nll(poinull)
        nll_bestfitpoi_obs = self.obs_nll(bestfitpoi)
        qobs = self.q(nll1=nll_poinull_obs, nll2=nll_bestfitpoi_obs, poi1=poinull, poi2=bestfitpoi,
                      onesided=onesided, onesideddiscovery=onesideddiscovery)
        return qobs
    def pvalue(self, poinull: List[POI], poialt: Union[List[POI], None] = None, qtilde=False, onesided=True,
               onesideddiscovery=False) -> Tuple[np.ndarray, np.ndarray]:
        r"""Computes pvalues for the null and alternative hypothesis.
            Args:
                poinull (List[`hypotests.POI`]): parameters of interest for the null hypothesis
                poialt (List[`hypotests.POI`], optional): parameters of interest for the alternative hypothesis
                qtilde (bool, optional): if `True` use the $$\tilde{q}$$ test statistics else (default) use
                    the $$q$$ test statistic
                onesided (bool, optional): if `True` (default) computes onesided pvalues
                onesideddiscovery (bool, optional): if `True` (default) computes onesided pvalues for a discovery
                    test
            Returns:
                Tuple(`numpy.ndarray`, `numpy.ndarray`): pnull, palt
            Example with `zfit`:
                >>> mean = zfit.Parameter("mu", 1.2)
                >>> poinull = POI(mean, [1.1, 1.2, 1.0])
                >>> poialt = POI(mean, 1.2)
                >>> pvalues = calc.pvalue([poinull], [poialt])
        """
        self.check_pois(poinull)
        if poialt:
            self.check_pois(poialt)
            self.check_pois_compatibility(poinull, poialt)
        return self._pvalue_(poinull=poinull, poialt=poialt, qtilde=qtilde, onesided=onesided,
                             onesideddiscovery=onesideddiscovery)
    def _pvalue_(self, poinull, poialt, qtilde, onesided, onesideddiscovery):
        """
        To be overwritten in `BaseCalculator` subclasses.
        """
        raise NotImplementedError
    def expected_pvalue(self, poinull: List[POI], poialt: List[POI], nsigma, CLs=False, qtilde=False,
                        onesided=True, onesideddiscovery=False) -> Dict[int, np.ndarray]:
        r"""Computes the expected pvalues and error bands for different values of $$\sigma$$ (0=expected/median)
            Args:
                poinull (List[`hypotests.POI`]): parameters of interest for the null hypothesis
                poialt (List[`hypotests.POI`], optional): parameters of interest for the alternative hypothesis
                nsigma (`numpy.array`): array of values of $$\sigma$$ to compute the expected pvalue
                CLs (bool, optional): if `True` computes pvalues as $$p_{cls}=p_{null}/p_{alt}=p_{clsb}/p_{clb}$$
                    else as $$p_{clsb} = p_{null}$$
                qtilde (bool, optional): if `True` use the $$\tilde{q}$$ test statistics else (default) use
                    the $$q$$ test statistic
                onesided (bool, optional): if `True` (default) computes onesided pvalues
                onesideddiscovery (bool, optional): if `True` (default) computes onesided pvalues for a discovery
            Returns:
                `numpy.ndarray`: array of expected pvalues for each $$\sigma$$ value
            Example with `zfit`:
                >>> mean = zfit.Parameter("mu", 1.2)
                >>> poinull = POI(mean, [1.1, 1.2, 1.0])
                >>> poialt = POI(mean, 1.2)
                >>> nll = calc.expected_pvalue([poinull], [poialt])
        """
        self.check_pois(poinull)
        if poialt:
            self.check_pois(poialt)
            self.check_pois_compatibility(poinull, poialt)
        return self._expected_pvalue_(poinull=poinull, poialt=poialt, nsigma=nsigma, CLs=CLs, qtilde=qtilde,
                                      onesided=onesided, onesideddiscovery=onesideddiscovery)
    def _expected_pvalue_(self, poinull, poialt, nsigma, CLs, qtilde, onesided, onesideddiscovery):
        """
        To be overwritten in `BaseCalculator` subclasses.
        """
        raise NotImplementedError
    def expected_poi(self, poinull: List[POI], poialt: List[POI], nsigma, alpha=0.05, CLs=False,
                     onesided=True, onesideddiscovery=False):
        r"""Computes the expected parameter of interest values such that the expected p_values == $$\alpha$$
        for different values of $$\sigma$$ (0=expected/median)
            Args:
                poinull (List[`hypotests.POI`]): parameters of interest for the null hypothesis
                poialt (List[`hypotests.POI`], optional): parameters of interest for the alternative hypothesis
                nsigma (`numpy.array`): array of values of $$\sigma$$ to compute the expected pvalue
                alpha (float, default=0.05): significance level
                CLs (bool, optional): if `True` uses pvalues as $$p_{cls}=p_{null}/p_{alt}=p_{clsb}/p_{clb}$$
                    else as $$p_{clsb} = p_{null}$$
                onesided (bool, optional): if `True` (default) computes onesided pvalues
                onesideddiscovery (bool, optional): if `True` (default) computes onesided pvalues for a discovery
            Returns:
                `numpy.ndarray`: array of expected POI values for each $$\sigma$$ value
            Example with `zfit`:
                >>> mean = zfit.Parameter("mu", 1.2)
                >>> poinull = POI(mean, [1.1, 1.2, 1.0])
                >>> poialt = POI(mean, 1.2)
                >>> nll = calc.expected_poi([poinull], [poialt])
        """
        self.check_pois(poinull)
        if poialt:
            self.check_pois(poialt)
            self.check_pois_compatibility(poinull, poialt)
        return self._expected_poi_(poinull=poinull, poialt=poialt, nsigma=nsigma, alpha=alpha, CLs=CLs,
                                   onesided=onesided, onesideddiscovery=onesideddiscovery)
    def _expected_poi_(self, poinull, poialt, nsigma, alpha, CLs, onesided, onesideddiscovery):
        """
        To be overwritten in `BaseCalculator` subclasses.
        """
        raise NotImplementedError
    @staticmethod
    def check_pois(pois):
        """
        Check if the parameters of interest are all `skstats.parameters.POI` instances.
        """
        msg = "A list of POIs is required."
        if not isinstance(pois, (list, tuple)):
            raise ValueError(msg)
        if not all(isinstance(p, POI) for p in pois):
            raise ValueError(msg)
        if len(pois) > 1:
            msg = "Tests with more that one parameter of interest are not yet implemented."
            raise NotImplementedError(msg)
    @staticmethod
    def check_pois_compatibility(poi1, poi2):
        """
        Check compatibility between two lists of `skstats.parameters.POI` instances.
        """
        if len(poi1) != len(poi2):
            msg = "Lists of parameters of interest should have the same length, poi1={0}, poi2={1}"
            raise ValueError(msg.format(poi1, poi2))
        names1 = sorted([p.name for p in poi1])
        names2 = sorted([p.name for p in poi2])
        if names1 != names2:
            msg = "The variables used in the lists of parameters of interest should have the same names,"
            msg += " poi1={0}, poi2={1}"
            raise ValueError(msg.format(poi1, poi2))
    def q(self, nll1: np.ndarray, nll2: np.ndarray, poi1: List[POI], poi2: List[POI],
          onesided=True, onesideddiscovery=False) -> np.ndarray:
        """ Compute value of the test statistic q defined as the difference between negative log-likelihood
            values $$q = nll1 - nll2$$
            Args:
                nll1 (`numpy.ndarray`): array of nll values #1, evaluated with poi1
                nll2 (`numpy.ndarray`): array of nll values #2, evaluated with poi2
                poi1 ((List[`hypotests.POI`])): list of POI's #1
                poi2 ((List[`hypotests.POI`])): list of POI's #2
                onesided (bool, optional, default=True)
                onesideddiscovery (bool, optional, default=True)
            Returns:
                `np.ndarray`: array of q values
        """
        self.check_pois(poi1)
        self.check_pois(poi2)
        self.check_pois_compatibility(poi1, poi2)
        assert len(poi1[0]) == len(nll1)
        assert len(poi2[0]) == len(nll2)
        poi1 = poi1[0].value
        poi2 = poi2[0].value
        q = 2*(nll1 - nll2)
        # Drop entries where the minimisation failed (NaN / infinite nll).
        filter_non_nan = ~(np.isnan(q) | np.isinf(q))
        q = q[filter_non_nan]
        if isinstance(poi2, np.ndarray):
            poi2 = poi2[filter_non_nan]
        zeros = np.zeros(q.shape)
        # One-sided variants clip q to 0 on the "wrong side" of the best fit.
        if onesideddiscovery:
            condition = (poi2 < poi1) | (q < 0)
            q = np.where(condition, zeros, q)
        elif onesided:
            condition = (poi2 > poi1) | (q < 0)
            q = np.where(condition, zeros, q)
        else:
            q = q
        return q
| [
"numpy.where",
"numpy.zeros",
"numpy.isnan",
"numpy.meshgrid",
"numpy.isinf"
] | [((16237, 16254), 'numpy.zeros', 'np.zeros', (['q.shape'], {}), '(q.shape)\n', (16245, 16254), True, 'import numpy as np\n'), ((16350, 16379), 'numpy.where', 'np.where', (['condition', 'zeros', 'q'], {}), '(condition, zeros, q)\n', (16358, 16379), True, 'import numpy as np\n'), ((16083, 16094), 'numpy.isnan', 'np.isnan', (['q'], {}), '(q)\n', (16091, 16094), True, 'import numpy as np\n'), ((16097, 16108), 'numpy.isinf', 'np.isinf', (['q'], {}), '(q)\n', (16105, 16108), True, 'import numpy as np\n'), ((16467, 16496), 'numpy.where', 'np.where', (['condition', 'zeros', 'q'], {}), '(condition, zeros, q)\n', (16475, 16496), True, 'import numpy as np\n'), ((5980, 5998), 'numpy.meshgrid', 'np.meshgrid', (['*pois'], {}), '(*pois)\n', (5991, 5998), True, 'import numpy as np\n')] |
import numpy
from scipy.optimize import differential_evolution
def optim_matrix(A, B):
    """Search for the 2x2 matrix Z minimising ||Z @ A.points.T - B.points.T||.

    Uses differential evolution over the 4 flattened matrix entries; returns
    the scipy OptimizeResult (Z is `result.x` reshaped to (2, 2)).
    """
    source = A.points.T
    target = B.points.T
    bounds = [(-999999.0, 999999.0)] * 4

    def objective(flat):
        candidate = numpy.array(flat)
        candidate.shape = (2, 2)
        return numpy.linalg.norm(numpy.dot(candidate, source) - target)

    return differential_evolution(objective, bounds)
def linear_transform_prediction(A, B, reference):
    """Fit a 2x2 linear map from A's points to B's and apply it to `reference`.

    Returns (predicted_points, solution) on success; prints a diagnostic and
    returns None when the optimisation fails. Note: `solution.x` is reshaped
    to (2, 2) in place, matching the original behaviour.
    """
    solution = optim_matrix(A, B)
    if not solution.success:
        print(f"Linear transform for {A.name} to {B.name} could not be found.")
        print(solution.message)
        return
    Z = solution.x
    Z.shape = (2, 2)
    guess_points = Z @ reference.points.T
    return guess_points.T, solution
def loglinear_transform_prediction(A, B, reference):
    """Predict `reference`'s points under a 2x2 map fitted in log space.

    Fits Z so that Z @ A.log().points.T approximates B.log().points.T,
    applies Z to the reference's log-points, and exponentiates back.

    Returns the predicted points on success. NOTE(review): unlike
    linear_transform_prediction, the result is not transposed and the
    solution object is not returned — confirm callers expect this shape.
    On optimizer failure prints a diagnostic and returns None.
    """
    solution = optim_matrix(A.log(), B.log())
    if solution.success:
        Z = solution.x
        Z.shape = (2, 2)
        guess_points = numpy.dot(Z, reference.log().points.T)
        return numpy.exp(guess_points)
    else:
        print(f"Linear transform for {A.name} to {B.name} could not be found.")
        # Consistency fix: linear_transform_prediction prints the optimizer
        # message on failure; this variant previously swallowed it.
        print(solution.message)
| [
"scipy.optimize.differential_evolution",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
] | [((317, 350), 'scipy.optimize.differential_evolution', 'differential_evolution', (['f', 'bounds'], {}), '(f, bounds)\n', (339, 350), False, 'from scipy.optimize import differential_evolution\n'), ((197, 211), 'numpy.array', 'numpy.array', (['p'], {}), '(p)\n', (208, 211), False, 'import numpy\n'), ((249, 264), 'numpy.dot', 'numpy.dot', (['Z', 'X'], {}), '(Z, X)\n', (258, 264), False, 'import numpy\n'), ((280, 304), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(y - Y)'], {}), '(y - Y)\n', (297, 304), False, 'import numpy\n'), ((971, 994), 'numpy.exp', 'numpy.exp', (['guess_points'], {}), '(guess_points)\n', (980, 994), False, 'import numpy\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from absl.testing import absltest
from absl.testing import parameterized
from jax import tree_util
import jax.numpy as jnp
import tree_math as tm
from tree_math._src import test_util
import numpy as np
# pylint: disable=g-complex-comprehension
class VectorTest(test_util.TestCase):
  """Tests for tree_math.Vector construction, arithmetic, and VectorMixin."""

  def test_vector(self):
    """Construction from a pytree: size/shape/dtype metadata, repr, and
    round-tripping through tree_util flatten/map."""
    tree = {"a": 0, "b": jnp.array([1, 2], dtype=jnp.int32)}
    vector = tm.Vector(tree)
    self.assertEqual(vector.size, 3)
    self.assertLen(vector, 3)
    self.assertEqual(vector.shape, (3,))
    self.assertEqual(vector.ndim, 1)
    self.assertEqual(vector.dtype, jnp.int32)
    self.assertEqual(repr(tm.Vector({"a": 1})),
                     "tree_math.Vector({'a': 1})")
    self.assertTreeEqual(tree_util.tree_leaves(tree),
                         tree_util.tree_leaves(vector), check_dtypes=True)
    # A Vector is itself a pytree: mapping identity over it preserves it.
    vector2 = tree_util.tree_map(lambda x: x, vector)
    self.assertTreeEqual(vector, vector2, check_dtypes=True)
  @parameterized.named_parameters(*(
      {"testcase_name": op.__name__, "op": op}
      for op in [operator.pos, operator.neg, abs, operator.invert]
  ))
  def test_unary_math(self, op):
    """Unary operators apply leaf-wise, matching tree_util.tree_map."""
    tree = {"a": 1, "b": -jnp.array([2, 3])}
    expected = tm.Vector(tree_util.tree_map(op, tree))
    actual = op(tm.Vector(tree))
    self.assertTreeEqual(actual, expected, check_dtypes=True)
  def test_arithmetic_with_scalar(self):
    """Scalars broadcast against every leaf; non-scalar arrays are rejected."""
    vector = tm.Vector({"a": 0, "b": jnp.array([1, 2])})
    expected = tm.Vector({"a": 1, "b": jnp.array([2, 3])})
    self.assertTreeEqual(vector + 1, expected, check_dtypes=True)
    self.assertTreeEqual(1 + vector, expected, check_dtypes=True)
    with self.assertRaisesRegex(
        TypeError, "non-tree_math.VectorMixin argument is not a scalar",
    ):
      vector + jnp.ones((3,))  # pylint: disable=expression-not-assigned
  @parameterized.named_parameters(*(
      {"testcase_name": op.__name__, "op": op}
      for op in [
          operator.add,
          operator.sub,
          operator.mul,
          operator.truediv,
          operator.floordiv,
          operator.mod,
      ]
  ))
  def test_binary_arithmetic(self, op):
    """Binary operators combine two Vectors leaf-by-leaf."""
    rng = np.random.default_rng(0)
    tree1 = {"a": rng.standard_normal(dtype=np.float32),
             "b": rng.standard_normal((2, 3), dtype=np.float32)}
    tree2 = {"a": rng.standard_normal(dtype=np.float32),
             "b": rng.standard_normal((2, 3), dtype=np.float32)}
    expected = tm.Vector(tree_util.tree_map(op, tree1, tree2))
    actual = op(tm.Vector(tree1), tm.Vector(tree2))
    self.assertTreeEqual(actual, expected, check_dtypes=True)
  def test_pow(self):
    """Vector ** Vector exponentiates leaf-wise."""
    expected = tm.Vector({"a": 2 ** 3})
    actual = tm.Vector({"a": 2}) ** tm.Vector({"a": 3})
    self.assertTreeEqual(actual, expected, check_dtypes=True)
  def test_divmod(self):
    """divmod works with a Vector on either side of a scalar."""
    x, y = divmod(jnp.arange(5), 2)
    expected = tm.Vector({"a": x}), tm.Vector({"a": y})
    actual = divmod(tm.Vector({"a": jnp.arange(5)}), 2)
    self.assertTreeEqual(actual, expected, check_dtypes=True)
    x, y = divmod(5, jnp.arange(5))
    expected = tm.Vector({"a": x}), tm.Vector({"a": y})
    actual = divmod(5, tm.Vector({"a": jnp.arange(5)}))
    self.assertTreeEqual(actual, expected, check_dtypes=True)
  def test_matmul_scalars(self):
    """@ between scalar-leaved Vectors reduces to a scalar product."""
    actual = tm.Vector(1.0) @ tm.Vector(2.0)
    expected = 2.0
    self.assertAllClose(actual, expected)
  def test_matmul(self):
    """@ computes the inner product over all (raveled) leaves; both operands
    must be VectorMixin instances."""
    rng = np.random.default_rng(0)
    tree1 = {"a": rng.standard_normal(dtype=np.float32),
             "b": rng.standard_normal((2, 3), dtype=np.float32)}
    tree2 = {"a": rng.standard_normal(dtype=np.float32),
             "b": rng.standard_normal((2, 3), dtype=np.float32)}
    expected = tree1["a"] * tree2["a"] + tree1["b"].ravel() @ tree2["b"].ravel()
    vector1 = tm.Vector(tree1)
    vector2 = tm.Vector(tree2)
    actual = vector1 @ vector2
    self.assertAllClose(actual, expected)
    actual = vector1.dot(vector2)
    self.assertAllClose(actual, expected)
    with self.assertRaisesRegex(
        TypeError, "matmul arguments must both be tree_math.VectorMixin objects",
    ):
      vector1 @ jnp.ones((7,))  # pylint: disable=expression-not-assigned
  # TODO(shoyer): test comparisons and bitwise ops
  def test_conj(self):
    """Complex conjugation applies leaf-wise."""
    vector = tm.Vector({"a": jnp.array([1, 1j])})
    actual = vector.conj()
    expected = tm.Vector({"a": jnp.array([1, -1j])})
    self.assertTreeEqual(actual, expected, check_dtypes=True)
  def test_real_imag(self):
    """.real and .imag split complex leaves into their components."""
    vector = tm.Vector({"a": jnp.array([1, 1j])})
    real_part = tm.Vector({"a": jnp.array([1.0, 0.0])})
    imag_part = tm.Vector({"a": jnp.array([0.0, 1.0])})
    self.assertTreeEqual(vector.real, real_part, check_dtypes=True)
    self.assertTreeEqual(vector.imag, imag_part, check_dtypes=True)
  def test_sum_mean_min_max(self):
    """Reductions aggregate over every element of every leaf."""
    vector = tm.Vector({"a": 1, "b": jnp.array([2, 3, 4])})
    self.assertTreeEqual(vector.sum(), 10, check_dtypes=False)
    self.assertTreeEqual(vector.min(), 1, check_dtypes=False)
    self.assertTreeEqual(vector.max(), 4, check_dtypes=False)
  def test_custom_class(self):
    """VectorMixin grants vector arithmetic to a user-registered pytree class."""
    @tree_util.register_pytree_node_class
    class CustomVector(tm.VectorMixin):
      def __init__(self, a: int, b: float):
        self.a = a
        self.b = b
      def tree_flatten(self):
        return (self.a, self.b), None
      @classmethod
      def tree_unflatten(cls, _, args):
        return cls(*args)
    v1 = CustomVector(1, 2.0)
    v2 = v1 + 3
    self.assertTreeEqual(v2, CustomVector(4, 5.0), check_dtypes=True)
    v3 = v2 + v1
    self.assertTreeEqual(v3, CustomVector(5, 7.0), check_dtypes=True)
if __name__ == "__main__":
  # Delegate to absl's runner: parses flags and runs all TestCase classes.
  absltest.main()
| [
"numpy.random.default_rng",
"jax.numpy.arange",
"absl.testing.absltest.main",
"absl.testing.parameterized.named_parameters",
"jax.numpy.array",
"jax.tree_util.tree_map",
"tree_math.Vector",
"jax.tree_util.tree_leaves",
"jax.numpy.ones"
] | [((1532, 1672), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["*({'testcase_name': op.__name__, 'op': op} for op in [operator.pos,\n operator.neg, abs, operator.invert])"], {}), "(*({'testcase_name': op.__name__, 'op': op} for\n op in [operator.pos, operator.neg, abs, operator.invert]))\n", (1562, 1672), False, 'from absl.testing import parameterized\n'), ((2393, 2580), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["*({'testcase_name': op.__name__, 'op': op} for op in [operator.add,\n operator.sub, operator.mul, operator.truediv, operator.floordiv,\n operator.mod])"], {}), "(*({'testcase_name': op.__name__, 'op': op} for\n op in [operator.add, operator.sub, operator.mul, operator.truediv,\n operator.floordiv, operator.mod]))\n", (2423, 2580), False, 'from absl.testing import parameterized\n'), ((6200, 6215), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (6213, 6215), False, 'from absl.testing import absltest\n'), ((978, 993), 'tree_math.Vector', 'tm.Vector', (['tree'], {}), '(tree)\n', (987, 993), True, 'import tree_math as tm\n'), ((1427, 1466), 'jax.tree_util.tree_map', 'tree_util.tree_map', (['(lambda x: x)', 'vector'], {}), '(lambda x: x, vector)\n', (1445, 1466), False, 'from jax import tree_util\n'), ((2708, 2732), 'numpy.random.default_rng', 'np.random.default_rng', (['(0)'], {}), '(0)\n', (2729, 2732), True, 'import numpy as np\n'), ((3192, 3216), 'tree_math.Vector', 'tm.Vector', (["{'a': 2 ** 3}"], {}), "({'a': 2 ** 3})\n", (3201, 3216), True, 'import tree_math as tm\n'), ((3958, 3982), 'numpy.random.default_rng', 'np.random.default_rng', (['(0)'], {}), '(0)\n', (3979, 3982), True, 'import numpy as np\n'), ((4324, 4340), 'tree_math.Vector', 'tm.Vector', (['tree1'], {}), '(tree1)\n', (4333, 4340), True, 'import tree_math as tm\n'), ((4355, 4371), 'tree_math.Vector', 'tm.Vector', (['tree2'], {}), '(tree2)\n', (4364, 4371), True, 'import tree_math as tm\n'), 
((929, 963), 'jax.numpy.array', 'jnp.array', (['[1, 2]'], {'dtype': 'jnp.int32'}), '([1, 2], dtype=jnp.int32)\n', (938, 963), True, 'import jax.numpy as jnp\n'), ((1309, 1336), 'jax.tree_util.tree_leaves', 'tree_util.tree_leaves', (['tree'], {}), '(tree)\n', (1330, 1336), False, 'from jax import tree_util\n'), ((1363, 1392), 'jax.tree_util.tree_leaves', 'tree_util.tree_leaves', (['vector'], {}), '(vector)\n', (1384, 1392), False, 'from jax import tree_util\n'), ((1788, 1816), 'jax.tree_util.tree_map', 'tree_util.tree_map', (['op', 'tree'], {}), '(op, tree)\n', (1806, 1816), False, 'from jax import tree_util\n'), ((1834, 1849), 'tree_math.Vector', 'tm.Vector', (['tree'], {}), '(tree)\n', (1843, 1849), True, 'import tree_math as tm\n'), ((3002, 3038), 'jax.tree_util.tree_map', 'tree_util.tree_map', (['op', 'tree1', 'tree2'], {}), '(op, tree1, tree2)\n', (3020, 3038), False, 'from jax import tree_util\n'), ((3056, 3072), 'tree_math.Vector', 'tm.Vector', (['tree1'], {}), '(tree1)\n', (3065, 3072), True, 'import tree_math as tm\n'), ((3074, 3090), 'tree_math.Vector', 'tm.Vector', (['tree2'], {}), '(tree2)\n', (3083, 3090), True, 'import tree_math as tm\n'), ((3230, 3249), 'tree_math.Vector', 'tm.Vector', (["{'a': 2}"], {}), "({'a': 2})\n", (3239, 3249), True, 'import tree_math as tm\n'), ((3253, 3272), 'tree_math.Vector', 'tm.Vector', (["{'a': 3}"], {}), "({'a': 3})\n", (3262, 3272), True, 'import tree_math as tm\n'), ((3379, 3392), 'jax.numpy.arange', 'jnp.arange', (['(5)'], {}), '(5)\n', (3389, 3392), True, 'import jax.numpy as jnp\n'), ((3412, 3431), 'tree_math.Vector', 'tm.Vector', (["{'a': x}"], {}), "({'a': x})\n", (3421, 3431), True, 'import tree_math as tm\n'), ((3433, 3452), 'tree_math.Vector', 'tm.Vector', (["{'a': y}"], {}), "({'a': y})\n", (3442, 3452), True, 'import tree_math as tm\n'), ((3593, 3606), 'jax.numpy.arange', 'jnp.arange', (['(5)'], {}), '(5)\n', (3603, 3606), True, 'import jax.numpy as jnp\n'), ((3623, 3642), 'tree_math.Vector', 'tm.Vector', 
(["{'a': x}"], {}), "({'a': x})\n", (3632, 3642), True, 'import tree_math as tm\n'), ((3644, 3663), 'tree_math.Vector', 'tm.Vector', (["{'a': y}"], {}), "({'a': y})\n", (3653, 3663), True, 'import tree_math as tm\n'), ((3829, 3843), 'tree_math.Vector', 'tm.Vector', (['(1.0)'], {}), '(1.0)\n', (3838, 3843), True, 'import tree_math as tm\n'), ((3846, 3860), 'tree_math.Vector', 'tm.Vector', (['(2.0)'], {}), '(2.0)\n', (3855, 3860), True, 'import tree_math as tm\n'), ((1211, 1230), 'tree_math.Vector', 'tm.Vector', (["{'a': 1}"], {}), "({'a': 1})\n", (1220, 1230), True, 'import tree_math as tm\n'), ((1744, 1761), 'jax.numpy.array', 'jnp.array', (['[2, 3]'], {}), '([2, 3])\n', (1753, 1761), True, 'import jax.numpy as jnp\n'), ((1992, 2009), 'jax.numpy.array', 'jnp.array', (['[1, 2]'], {}), '([1, 2])\n', (2001, 2009), True, 'import jax.numpy as jnp\n'), ((2051, 2068), 'jax.numpy.array', 'jnp.array', (['[2, 3]'], {}), '([2, 3])\n', (2060, 2068), True, 'import jax.numpy as jnp\n'), ((2331, 2345), 'jax.numpy.ones', 'jnp.ones', (['(3,)'], {}), '((3,))\n', (2339, 2345), True, 'import jax.numpy as jnp\n'), ((4662, 4676), 'jax.numpy.ones', 'jnp.ones', (['(7,)'], {}), '((7,))\n', (4670, 4676), True, 'import jax.numpy as jnp\n'), ((4825, 4845), 'jax.numpy.array', 'jnp.array', (['[1, 1.0j]'], {}), '([1, 1.0j])\n', (4834, 4845), True, 'import jax.numpy as jnp\n'), ((4904, 4925), 'jax.numpy.array', 'jnp.array', (['[1, -1.0j]'], {}), '([1, -1.0j])\n', (4913, 4925), True, 'import jax.numpy as jnp\n'), ((5046, 5066), 'jax.numpy.array', 'jnp.array', (['[1, 1.0j]'], {}), '([1, 1.0j])\n', (5055, 5066), True, 'import jax.numpy as jnp\n'), ((5099, 5120), 'jax.numpy.array', 'jnp.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (5108, 5120), True, 'import jax.numpy as jnp\n'), ((5155, 5176), 'jax.numpy.array', 'jnp.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (5164, 5176), True, 'import jax.numpy as jnp\n'), ((5388, 5408), 'jax.numpy.array', 'jnp.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', 
(5397, 5408), True, 'import jax.numpy as jnp\n'), ((3489, 3502), 'jax.numpy.arange', 'jnp.arange', (['(5)'], {}), '(5)\n', (3499, 3502), True, 'import jax.numpy as jnp\n'), ((3703, 3716), 'jax.numpy.arange', 'jnp.arange', (['(5)'], {}), '(5)\n', (3713, 3716), True, 'import jax.numpy as jnp\n')] |
import numpy as np
import matplotlib
# matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from typing import *
import pandas as pd
import seaborn as sns
import math
sns.set()
class Accuracy(object):
    """Abstract interface: certified accuracy as a function of radius."""

    def at_radii(self, radii: np.ndarray):
        """Return the certified accuracy at each radius in ``radii``."""
        raise NotImplementedError()
class ApproximateAccuracy(Accuracy):
    """Certified accuracy estimated directly from a certification TSV file."""

    def __init__(self, data_file_path: str):
        self.data_file_path = data_file_path

    def at_radii(self, radii: np.ndarray) -> np.ndarray:
        """Evaluate at_radius for every entry of ``radii``."""
        records = pd.read_csv(self.data_file_path, delimiter="\t")
        return np.array([self.at_radius(records, r) for r in radii])

    def at_radius(self, df: pd.DataFrame, radius: float):
        """Fraction of examples both correct and certified at >= ``radius``."""
        return (df["correct"] & (df["radius"] >= radius)).mean()
class HighProbAccuracy(Accuracy):
    """Certified accuracy with a finite-sample high-probability correction.

    The empirical accuracy is reduced by ``alpha`` plus concentration
    terms controlled by the failure probability ``rho``.
    """

    def __init__(self, data_file_path: str, alpha: float, rho: float):
        self.data_file_path = data_file_path
        self.alpha = alpha
        self.rho = rho

    def at_radii(self, radii: np.ndarray) -> np.ndarray:
        """Evaluate the lower-bound accuracy at every radius in ``radii``."""
        records = pd.read_csv(self.data_file_path, delimiter="\t")
        return np.array([self.at_radius(records, r) for r in radii])

    def at_radius(self, df: pd.DataFrame, radius: float):
        """Empirical accuracy at ``radius`` minus the correction terms."""
        empirical = (df["correct"] & (df["radius"] >= radius)).mean()
        num_examples = len(df)
        log_term = math.log(1 / self.rho)
        variance_term = math.sqrt(
            self.alpha * (1 - self.alpha) * log_term / num_examples
        )
        return empirical - self.alpha - variance_term - log_term / (3 * num_examples)
class Line(object):
    """Bundles an Accuracy curve with its plotting metadata.

    plot_fmt is a matplotlib format string; scale_x rescales the x axis.
    """

    def __init__(
        self, quantity: Accuracy, legend: str, plot_fmt: str = "", scale_x: float = 1
    ):
        self.quantity, self.legend = quantity, legend
        self.plot_fmt, self.scale_x = plot_fmt, scale_x
def plot_certified_accuracy(
    outfile: str,
    title: str,
    max_radius: float,
    lines: List[Line],
    radius_step: float = 0.01,
) -> None:
    """Plot certified-accuracy-vs-radius curves and save <outfile>.pdf/.png.

    NOTE(review): the PDF is saved *before* the title is applied, so only
    the PNG carries the title — confirm this asymmetry is intentional.
    """
    # Dense radius grid from 0 up to and including max_radius.
    radii = np.arange(0, max_radius + radius_step, radius_step)
    plt.figure()
    for line in lines:
        # scale_x lets a curve be drawn in rescaled radius units.
        plt.plot(radii * line.scale_x, line.quantity.at_radii(radii), line.plot_fmt)
    plt.ylim((0, 1))
    plt.xlim((0, max_radius))
    plt.tick_params(labelsize=14)
    plt.xlabel("radius", fontsize=16)
    plt.ylabel("certified accuracy", fontsize=16)
    plt.legend([method.legend for method in lines], loc="upper right", fontsize=16)
    plt.savefig(outfile + ".pdf")
    plt.tight_layout()
    plt.title(title, fontsize=20)
    plt.tight_layout()
    plt.savefig(outfile + ".png", dpi=300)
    plt.close()
def smallplot_certified_accuracy(
    outfile: str,
    title: str,
    max_radius: float,
    methods: List[Line],
    radius_step: float = 0.01,
    xticks=0.5,
) -> None:
    """Compact variant of plot_certified_accuracy; saves only <outfile>.pdf.

    ``title`` is accepted but never drawn in this variant.
    ``xticks`` sets the spacing of the x-axis major ticks.
    """
    radii = np.arange(0, max_radius + radius_step, radius_step)
    plt.figure()
    for method in methods:
        plt.plot(radii, method.quantity.at_radii(radii), method.plot_fmt)
    plt.ylim((0, 1))
    plt.xlim((0, max_radius))
    plt.xlabel("radius", fontsize=22)
    plt.ylabel("certified accuracy", fontsize=22)
    plt.tick_params(labelsize=20)
    # Fixed tick spacing keeps small multiples visually comparable.
    plt.gca().xaxis.set_major_locator(plt.MultipleLocator(xticks))
    plt.legend([method.legend for method in methods], loc="upper right", fontsize=20)
    plt.tight_layout()
    plt.savefig(outfile + ".pdf")
    plt.close()
def latex_table_certified_accuracy(
    outfile: str,
    radius_start: float,
    radius_stop: float,
    radius_step: float,
    methods: List[Line],
):
    """Write a LaTeX table of certified accuracies over a radius grid.

    One row per method; the best accuracy in each radius column is
    wrapped in \\textbf.

    :param outfile: path of the LaTeX fragment to (over)write.
    :param radius_start: first radius (inclusive).
    :param radius_stop: last radius (inclusive, given exact steps).
    :param radius_step: grid spacing.
    :param methods: Line objects whose ``quantity`` supplies at_radii().
    """
    radii = np.arange(radius_start, radius_stop + radius_step, radius_step)
    accuracies = np.zeros((len(methods), len(radii)))
    for i, method in enumerate(methods):
        accuracies[i, :] = method.quantity.at_radii(radii)
    # Context manager guarantees the handle is closed even if a write
    # fails (the original leaked it on error).
    with open(outfile, "w") as f:
        for radius in radii:
            f.write("& $r = {:.3}$".format(radius))
        f.write("\\\\\n")
        # Escaped backslash: same output as before, no invalid "\m" escape.
        f.write("\\midrule\n")
        for i, method in enumerate(methods):
            f.write(method.legend)
            for j, radius in enumerate(radii):
                # Bold the column-wise best method.
                if i == accuracies[:, j].argmax():
                    txt = r" & \textbf{" + "{:.2f}".format(accuracies[i, j]) + "}"
                else:
                    txt = " & {:.2f}".format(accuracies[i, j])
                f.write(txt)
            f.write("\\\\\n")
def markdown_table_certified_accuracy(
    outfile: str,
    radius_start: float,
    radius_stop: float,
    radius_step: float,
    methods: List[Line],
):
    """Write a Markdown table of certified accuracies over a radius grid.

    One row per method; the best accuracy in each radius column is
    marked with a bold asterisk.

    :param outfile: path of the Markdown file to (over)write.
    :param radius_start: first radius (inclusive).
    :param radius_stop: last radius (inclusive, given exact steps).
    :param radius_step: grid spacing.
    :param methods: Line objects whose ``quantity`` supplies at_radii().
    """
    radii = np.arange(radius_start, radius_stop + radius_step, radius_step)
    accuracies = np.zeros((len(methods), len(radii)))
    for i, method in enumerate(methods):
        accuracies[i, :] = method.quantity.at_radii(radii)
    # Context manager guarantees the handle is closed even if a write
    # fails (the original leaked it on error).
    with open(outfile, "w") as f:
        f.write("| | ")
        for radius in radii:
            f.write("r = {:.3} |".format(radius))
        f.write("\n")
        f.write("| --- | ")
        for i in range(len(radii)):
            f.write(" --- |")
        f.write("\n")
        for i, method in enumerate(methods):
            f.write("<b> {} </b>| ".format(method.legend))
            for j, radius in enumerate(radii):
                # Star the column-wise best method.
                if i == accuracies[:, j].argmax():
                    txt = "{:.2f}<b>*</b> |".format(accuracies[i, j])
                else:
                    txt = "{:.2f} |".format(accuracies[i, j])
                f.write(txt)
            f.write("\n")
if __name__ == "__main__":
latex_table_certified_accuracy(
"analysis/latex/vary_noise_cifar10",
0.25,
1.5,
0.25,
[
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.12/test/sigma_0.12"
),
"$\sigma = 0.12$",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.25/test/sigma_0.25"
),
"$\sigma = 0.25$",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.50/test/sigma_0.50"
),
"$\sigma = 0.50$",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_1.00/test/sigma_1.00"
),
"$\sigma = 1.00$",
),
],
)
markdown_table_certified_accuracy(
"analysis/markdown/vary_noise_cifar10",
0.25,
1.5,
0.25,
[
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.12/test/sigma_0.12"
),
"σ = 0.12",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.25/test/sigma_0.25"
),
"σ = 0.25",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.50/test/sigma_0.50"
),
"σ = 0.50",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_1.00/test/sigma_1.00"
),
"σ = 1.00",
),
],
)
latex_table_certified_accuracy(
"analysis/latex/vary_noise_imagenet",
0.5,
3.0,
0.5,
[
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_0.25/test/sigma_0.25"
),
"$\sigma = 0.25$",
),
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_0.50/test/sigma_0.50"
),
"$\sigma = 0.50$",
),
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_1.00/test/sigma_1.00"
),
"$\sigma = 1.00$",
),
],
)
markdown_table_certified_accuracy(
"analysis/markdown/vary_noise_imagenet",
0.5,
3.0,
0.5,
[
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_0.25/test/sigma_0.25"
),
"σ = 0.25",
),
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_0.50/test/sigma_0.50"
),
"σ = 0.50",
),
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_1.00/test/sigma_1.00"
),
"σ = 1.00",
),
],
)
plot_certified_accuracy(
"analysis/plots/vary_noise_cifar10",
"CIFAR-10, vary $\sigma$",
1.5,
[
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.12/test/sigma_0.12"
),
"$\sigma = 0.12$",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.25/test/sigma_0.25"
),
"$\sigma = 0.25$",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.50/test/sigma_0.50"
),
"$\sigma = 0.50$",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_1.00/test/sigma_1.00"
),
"$\sigma = 1.00$",
),
],
)
plot_certified_accuracy(
"analysis/plots/vary_train_noise_cifar_050",
"CIFAR-10, vary train noise, $\sigma=0.5$",
1.5,
[
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.25/test/sigma_0.50"
),
"train $\sigma = 0.25$",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_0.50/test/sigma_0.50"
),
"train $\sigma = 0.50$",
),
Line(
ApproximateAccuracy(
"data/certify/cifar10/resnet110/noise_1.00/test/sigma_0.50"
),
"train $\sigma = 1.00$",
),
],
)
plot_certified_accuracy(
"analysis/plots/vary_train_noise_imagenet_050",
"ImageNet, vary train noise, $\sigma=0.5$",
1.5,
[
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_0.25/test/sigma_0.50"
),
"train $\sigma = 0.25$",
),
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_0.50/test/sigma_0.50"
),
"train $\sigma = 0.50$",
),
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_1.00/test/sigma_0.50"
),
"train $\sigma = 1.00$",
),
],
)
plot_certified_accuracy(
"analysis/plots/vary_noise_imagenet",
"ImageNet, vary $\sigma$",
4,
[
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_0.25/test/sigma_0.25"
),
"$\sigma = 0.25$",
),
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_0.50/test/sigma_0.50"
),
"$\sigma = 0.50$",
),
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_1.00/test/sigma_1.00"
),
"$\sigma = 1.00$",
),
],
)
plot_certified_accuracy(
"analysis/plots/high_prob",
"Approximate vs. High-Probability",
2.0,
[
Line(
ApproximateAccuracy(
"data/certify/imagenet/resnet50/noise_0.50/test/sigma_0.50"
),
"Approximate",
),
Line(
HighProbAccuracy(
"data/certify/imagenet/resnet50/noise_0.50/test/sigma_0.50",
0.001,
0.001,
),
"High-Prob",
),
],
)
| [
"seaborn.set",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.gca",
"math.log",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figu... | [((172, 181), 'seaborn.set', 'sns.set', ([], {}), '()\n', (179, 181), True, 'import seaborn as sns\n'), ((1961, 2012), 'numpy.arange', 'np.arange', (['(0)', '(max_radius + radius_step)', 'radius_step'], {}), '(0, max_radius + radius_step, radius_step)\n', (1970, 2012), True, 'import numpy as np\n'), ((2017, 2029), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2027, 2029), True, 'import matplotlib.pyplot as plt\n'), ((2143, 2159), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (2151, 2159), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2189), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, max_radius)'], {}), '((0, max_radius))\n', (2172, 2189), True, 'import matplotlib.pyplot as plt\n'), ((2194, 2223), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(14)'}), '(labelsize=14)\n', (2209, 2223), True, 'import matplotlib.pyplot as plt\n'), ((2228, 2261), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""radius"""'], {'fontsize': '(16)'}), "('radius', fontsize=16)\n", (2238, 2261), True, 'import matplotlib.pyplot as plt\n'), ((2266, 2311), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""certified accuracy"""'], {'fontsize': '(16)'}), "('certified accuracy', fontsize=16)\n", (2276, 2311), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2395), 'matplotlib.pyplot.legend', 'plt.legend', (['[method.legend for method in lines]'], {'loc': '"""upper right"""', 'fontsize': '(16)'}), "([method.legend for method in lines], loc='upper right', fontsize=16)\n", (2326, 2395), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2429), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outfile + '.pdf')"], {}), "(outfile + '.pdf')\n", (2411, 2429), True, 'import matplotlib.pyplot as plt\n'), ((2434, 2452), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2450, 2452), True, 'import matplotlib.pyplot as plt\n'), ((2457, 2486), 'matplotlib.pyplot.title', 'plt.title', 
(['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (2466, 2486), True, 'import matplotlib.pyplot as plt\n'), ((2491, 2509), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2507, 2509), True, 'import matplotlib.pyplot as plt\n'), ((2514, 2552), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outfile + '.png')"], {'dpi': '(300)'}), "(outfile + '.png', dpi=300)\n", (2525, 2552), True, 'import matplotlib.pyplot as plt\n'), ((2557, 2568), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2566, 2568), True, 'import matplotlib.pyplot as plt\n'), ((2757, 2808), 'numpy.arange', 'np.arange', (['(0)', '(max_radius + radius_step)', 'radius_step'], {}), '(0, max_radius + radius_step, radius_step)\n', (2766, 2808), True, 'import numpy as np\n'), ((2813, 2825), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2823, 2825), True, 'import matplotlib.pyplot as plt\n'), ((2932, 2948), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (2940, 2948), True, 'import matplotlib.pyplot as plt\n'), ((2953, 2978), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, max_radius)'], {}), '((0, max_radius))\n', (2961, 2978), True, 'import matplotlib.pyplot as plt\n'), ((2983, 3016), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""radius"""'], {'fontsize': '(22)'}), "('radius', fontsize=22)\n", (2993, 3016), True, 'import matplotlib.pyplot as plt\n'), ((3021, 3066), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""certified accuracy"""'], {'fontsize': '(22)'}), "('certified accuracy', fontsize=22)\n", (3031, 3066), True, 'import matplotlib.pyplot as plt\n'), ((3071, 3100), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(20)'}), '(labelsize=20)\n', (3086, 3100), True, 'import matplotlib.pyplot as plt\n'), ((3172, 3257), 'matplotlib.pyplot.legend', 'plt.legend', (['[method.legend for method in methods]'], {'loc': '"""upper right"""', 'fontsize': '(20)'}), "([method.legend for method in methods], loc='upper 
right',\n fontsize=20)\n", (3182, 3257), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3276), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3274, 3276), True, 'import matplotlib.pyplot as plt\n'), ((3281, 3310), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outfile + '.pdf')"], {}), "(outfile + '.pdf')\n", (3292, 3310), True, 'import matplotlib.pyplot as plt\n'), ((3315, 3326), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3324, 3326), True, 'import matplotlib.pyplot as plt\n'), ((3496, 3559), 'numpy.arange', 'np.arange', (['radius_start', '(radius_stop + radius_step)', 'radius_step'], {}), '(radius_start, radius_stop + radius_step, radius_step)\n', (3505, 3559), True, 'import numpy as np\n'), ((4421, 4484), 'numpy.arange', 'np.arange', (['radius_start', '(radius_stop + radius_step)', 'radius_step'], {}), '(radius_start, radius_stop + radius_step, radius_step)\n', (4430, 4484), True, 'import numpy as np\n'), ((487, 535), 'pandas.read_csv', 'pd.read_csv', (['self.data_file_path'], {'delimiter': '"""\t"""'}), "(self.data_file_path, delimiter='\\t')\n", (498, 535), True, 'import pandas as pd\n'), ((1007, 1055), 'pandas.read_csv', 'pd.read_csv', (['self.data_file_path'], {'delimiter': '"""\t"""'}), "(self.data_file_path, delimiter='\\t')\n", (1018, 1055), True, 'import pandas as pd\n'), ((3139, 3166), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['xticks'], {}), '(xticks)\n', (3158, 3166), True, 'import matplotlib.pyplot as plt\n'), ((1483, 1505), 'math.log', 'math.log', (['(1 / self.rho)'], {}), '(1 / self.rho)\n', (1491, 1505), False, 'import math\n'), ((3105, 3114), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3112, 3114), True, 'import matplotlib.pyplot as plt\n'), ((1417, 1439), 'math.log', 'math.log', (['(1 / self.rho)'], {}), '(1 / self.rho)\n', (1425, 1439), False, 'import math\n')] |
from numpy import random
from matplotlib import pyplot
# Fixed seed so repeated runs produce the same histogram.
random.seed(12345)
# One million draws from a normal distribution with mean 30, std dev 5.
sequence = random.normal(size=1000000, loc=30, scale=5)
# 20-bin histogram of the samples; show() blocks until the window closes.
pyplot.hist(sequence, bins=20)
pyplot.show()
| [
"numpy.random.normal",
"matplotlib.pyplot.hist",
"numpy.random.seed",
"matplotlib.pyplot.show"
] | [((56, 74), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (67, 74), False, 'from numpy import random\n'), ((86, 130), 'numpy.random.normal', 'random.normal', ([], {'size': '(1000000)', 'loc': '(30)', 'scale': '(5)'}), '(size=1000000, loc=30, scale=5)\n', (99, 130), False, 'from numpy import random\n'), ((132, 162), 'matplotlib.pyplot.hist', 'pyplot.hist', (['sequence'], {'bins': '(20)'}), '(sequence, bins=20)\n', (143, 162), False, 'from matplotlib import pyplot\n'), ((163, 176), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (174, 176), False, 'from matplotlib import pyplot\n')] |
from collections import defaultdict
from math import *
from itertools import product
from logbook import Logger
import cv2
import numpy as np
import networkx as nx
import math
from tqdm import tqdm
# from palettable.cartocolors.qualitative import Pastel_10 as COLORS
from suppose.common import timing
from suppose.camera import load_calibration
import pandas as pd
log = Logger("pose3d")
def undistort_points(pts, calibration):
    """Undistort 2D points using a camera calibration dict.

    Accepts points of any shape whose last axis is (x, y) and returns an
    array of the same shape. An empty array is returned unchanged.
    """
    if pts.size == 0:
        return pts
    cam_mtx = calibration["cameraMatrix"]
    dist_coeffs = calibration["distortionCoefficients"]
    shape = pts.shape
    # cv2.undistortPoints expects a contiguous (N, 1, 2) array.
    flattened = np.ascontiguousarray(pts).reshape(-1, 1, 2)
    # Passing P=cam_mtx maps the result back into pixel coordinates.
    corrected = cv2.undistortPoints(flattened, cam_mtx, dist_coeffs, P=cam_mtx)
    return corrected.reshape(shape)
def undistort_2d_poses(poses, camera_calibration):
    """Undistort the (x, y) channels of an (n_poses, n_joints, 3) array.

    The third channel holds keypoint scores and is passed through
    untouched. Empty input is returned as-is.
    """
    if poses.size == 0:
        return poses
    coords = poses[:, :, :2]
    scores = poses[:, :, 2]
    corrected = undistort_points(coords, camera_calibration)
    # Reattach the score channel alongside the corrected coordinates.
    return np.dstack((corrected, scores))
def triangulate(p1, p2, projection_matrix1, projection_matrix2):
    """Triangulate matched 2D point sets from two views into 3D points.

    Returns an (n, 3) array; an empty (0, 3) array if either input is empty.
    """
    if p1.size == 0 or p2.size == 0:
        return np.zeros((0, 3))
    homogeneous = cv2.triangulatePoints(
        projection_matrix1, projection_matrix2, p1.T, p2.T
    )
    # Convert from homogeneous (x, y, z, w) back to Euclidean coordinates.
    points = cv2.convertPointsFromHomogeneous(homogeneous.T)
    return points.reshape(-1, 3)
def project_3d_to_2d(pts3d, camera_calibration):
    """Project 3D points into a camera's distorted 2D pixel space.

    Returns an (n, 2) array (squeezed); an empty (0, 2) array for empty
    input.
    """
    if pts3d.size == 0:
        return np.zeros((0, 2))
    # cv2.projectPoints returns (image_points, jacobian); keep the points.
    projected, _ = cv2.projectPoints(
        pts3d,
        camera_calibration["rotationVector"],
        camera_calibration["translationVector"],
        camera_calibration["cameraMatrix"],
        camera_calibration["distortionCoefficients"],
    )
    return projected.squeeze()
def rmse(p1, p2):
    """Root-mean-square error between two point sets.

    Computed as the Frobenius norm of the difference divided by the
    square root of the number of rows in p1.
    """
    n_rows = p1.shape[0]
    diff = p1 - p2
    return np.linalg.norm(diff) / np.sqrt(n_rows)
def get_likely_matches(g, max_error):
    """Keep only mutually-best pose matches between cameras.

    For each node, retains at most one outgoing edge per target camera
    (the lowest-weight one below max_error), then keeps only edges that
    were selected in both directions.

    :param g: graph whose nodes are (camera, pose_index) tuples and whose
        edges carry a 'weight' (reprojection error, possibly NaN).
    :param max_error: maximum acceptable reprojection error.
    :return: undirected graph of reciprocal best matches.
    """
    gg = nx.DiGraph()
    for node in g.nodes:
        best_edges = {}  # target_camera -> best_edge
        for edge in g.edges(node, data=True):
            _, tgt, data = edge
            weight = data["weight"]
            # reject when no reprojection found or beyond max error threshold
            if np.isnan(weight) or weight > max_error:
                continue
            # tgt[0] is camera the paired keypoints is located in
            # alternatively, access data['pose']['camera2']
            if tgt[0] in best_edges:
                _, _, data2 = best_edges[tgt[0]]
                # compare against np.nan should return false
                if weight < data2["weight"]:
                    best_edges[tgt[0]] = edge
            else:
                best_edges[tgt[0]] = edge
        gg.add_edges_from(best_edges.values())
    # reciprocal=True keeps an undirected edge only when both directed
    # edges survived the per-camera best-edge selection above.
    return gg.to_undirected(reciprocal=True)
def get_best_matches(graph_likely, min_edges=1):
    """Pick the single lowest-weight edge from each connected component.

    :param graph_likely: undirected graph of likely matches (edges carry
        a 'weight' attribute).
    :param min_edges: components with fewer edges than this are skipped.
    :return: graph containing one best edge per qualifying component.
    """
    graph_best = nx.Graph()
    best_edges = []
    # nx.connected_component_subgraphs was removed in networkx 2.4; build
    # each component's subgraph explicitly (works on old and new versions).
    for component in nx.connected_components(graph_likely):
        subgraph = graph_likely.subgraph(component)
        if subgraph.number_of_edges() >= min_edges:
            # min() is equivalent to sorted(...)[0] but avoids a full sort.
            best_edge = min(subgraph.edges(data=True), key=lambda e: e[2]["weight"])
            best_edges.append(best_edge)
    graph_best.add_edges_from(best_edges)
    return graph_best
@timing
def reconstruct3d(file, camera_calibration, output, debug_output):
    """Triangulate multi-view 2D poses into 3D poses, frame by frame.

    file: path to a pickled DataFrame, one row per frame, one column per
        camera (each cell exposes `.poses`).
    camera_calibration: iterable of "name,calibration_file" strings.
    output: basename for the emitted `.json` / `.pickle.xz` files.
    debug_output: when truthy, keep per-pair reprojection diagnostics.
    """
    log.info("3D Reconstruction from poses in multiple views")
    log.info("file: {}".format(file))
    log.info("calibration: {}".format(camera_calibration))
    cameras = defaultdict(dict)
    for c in camera_calibration:
        name, camera_file = c.split(",")
        cameras[name]["calibration"] = load_calibration(camera_file)
        # cameras[name]['projection'] = get_projection_matrix(cameras[name]['calibration'])
        # NOTE(review): the assignment above is commented out, yet
        # cameras[name]["projection"] is read inside the frame loop below,
        # which as written raises KeyError -- confirm intended.
    df = pd.read_pickle(file)
    camera_names = sorted(list(cameras.keys()))
    all_poses3d = []
    pbar = tqdm(total=len(df))
    for frame_number, (index, row) in enumerate(df.iterrows()):
        # One graph per frame: nodes are (camera_idx, pose_idx) pairs,
        # edges are candidate cross-camera matches weighted by
        # reprojection error.
        graph = nx.Graph()
        for camera1_idx, camera_name1 in enumerate(camera_names):
            camera_calibration1 = cameras[camera_name1]["calibration"]
            poses1 = row[camera_name1].poses
            pts1 = undistort_2d_poses(poses1, camera_calibration1)
            projection_matrix1 = cameras[camera_name1]["projection"]
            for camera2_idx, camera_name2 in enumerate(camera_names):
                if camera_name1 == camera_name2:
                    continue
                camera_calibration2 = cameras[camera_name2]["calibration"]
                poses2 = row[camera_name2].poses
                pts2 = undistort_2d_poses(poses2, camera_calibration2)
                projection_matrix2 = cameras[camera_name2]["projection"]
                # Exhaustively pair every pose in camera1 with every pose
                # in camera2.
                for (idx1, (p1, p1_orig)), (idx2, (p2, p2_orig)) in product(
                    enumerate(zip(pts1, poses1)), enumerate(zip(pts2, poses2))
                ):
                    pts_present = np.logical_and(
                        p1[:, 2], p2[:, 2]
                    ) # keypoints exist in both p1 and p2 if score != 0
                    pp1 = p1[pts_present][:, :2] # drop score dimension
                    pp2 = p2[pts_present][:, :2]
                    p1_orig_shared = p1_orig[pts_present][:, :2]
                    p2_orig_shared = p2_orig[pts_present][:, :2]
                    pts3d = triangulate(pp1, pp2, projection_matrix1, projection_matrix2)
                    # Reproject the 3D estimate back into both views to
                    # score the match.
                    pp1_reprojected = project_3d_to_2d(pts3d, camera_calibration1)
                    pp2_reprojected = project_3d_to_2d(pts3d, camera_calibration2)
                    pp1_rmse = rmse(p1_orig_shared, pp1_reprojected)
                    pp2_rmse = rmse(p2_orig_shared, pp2_reprojected)
                    if np.isnan(pp1_rmse) or np.isnan(pp2_rmse):
                        # should just continue the loop and ignore this edge
                        reprojection_error = np.nan
                    else:
                        # Pessimistic score: the worse of the two views.
                        reprojection_error = max(pp1_rmse, pp2_rmse)
                    # should just reject 3d poses errors > threshold
                    # Re-expand to the full keypoint count, NaN for
                    # keypoints not seen in both views.
                    keypoints_3d = np.full([len(pts_present)] + list(pts3d.shape[1:]), np.nan)
                    keypoints_3d[pts_present] = pts3d
                    if debug_output:
                        pose3d = {
                            "keypoints_3d": keypoints_3d,
                            "reprojection_error": reprojection_error,
                            "camera1": camera1_idx,
                            "camera2": camera2_idx,
                            "pose1": idx1,
                            "pose2": idx2,
                            "pp1_reprojected": pp1_reprojected,
                            "pp2_reprojected": pp2_reprojected,
                            "pp1": pp1,
                            "pp2": pp2,
                            "pts1": pts1,
                            "pts2": pts2,
                            "p1": p1,
                            "p2": p2,
                        }
                    else:
                        pose3d = {
                            "keypoints_3d": keypoints_3d,
                            "reprojection_error": reprojection_error,
                            "camera1": camera1_idx,
                            "camera2": camera2_idx,
                            "pose1": idx1,
                            "pose2": idx2,
                        }
                    # NOTE(review): -math.log of a NaN error yields NaN
                    # weight_inverse; get_likely_matches filters on
                    # "weight" so NaN edges are dropped later.
                    graph.add_edge(
                        (camera1_idx, idx1),
                        (camera2_idx, idx2),
                        pose=pose3d,
                        weight_inverse=-math.log(reprojection_error),
                        weight=reprojection_error,
                    )
        # 15 is the max reprojection error threshold (pixels, presumably
        # -- TODO confirm units).
        graph_likely = get_likely_matches(graph, 15)
        graph_best = get_best_matches(graph_likely, min_edges=1)
        poses3d = []
        for src, tgt, data in graph_best.edges(data=True):
            poses3d.append(data["pose"])
        all_poses3d.append(poses3d)
        pbar.update(1)
    pbar.close()
    log.info("Creating dataframe for serialization")
    if debug_output:
        all_poses = {
            idx: {
                "poses": np.array([p["keypoints_3d"] for p in poses]),
                "debug": [{k: p[k] for k in p.keys() if k != "keypoints_3d"} for p in poses],
            }
            for idx, poses in enumerate(all_poses3d)
        }
    else:
        all_poses = {
            idx: {"poses": np.array([p["keypoints_3d"] for p in poses])}
            for idx, poses in enumerate(all_poses3d)
        }
    df = pd.DataFrame.from_dict(all_poses, orient="index")
    log.info("Writing output to ")
    df.poses.to_json("{}.json".format(output))
    df.to_pickle("{}.pickle.xz".format(output), compression="xz")
| [
"numpy.sqrt",
"networkx.connected_component_subgraphs",
"cv2.projectPoints",
"numpy.ascontiguousarray",
"cv2.triangulatePoints",
"numpy.array",
"math.log",
"numpy.linalg.norm",
"pandas.read_pickle",
"logbook.Logger",
"networkx.DiGraph",
"cv2.convertPointsFromHomogeneous",
"pandas.DataFrame.f... | [((376, 392), 'logbook.Logger', 'Logger', (['"""pose3d"""'], {}), "('pose3d')\n", (382, 392), False, 'from logbook import Logger\n'), ((703, 790), 'cv2.undistortPoints', 'cv2.undistortPoints', (['pts2', 'camera_matrix', 'distortion_coefficients'], {'P': 'camera_matrix'}), '(pts2, camera_matrix, distortion_coefficients, P=\n camera_matrix)\n', (722, 790), False, 'import cv2\n'), ((1119, 1159), 'numpy.dstack', 'np.dstack', (['(poses1_undistorted, scores1)'], {}), '((poses1_undistorted, scores1))\n', (1128, 1159), True, 'import numpy as np\n'), ((1342, 1415), 'cv2.triangulatePoints', 'cv2.triangulatePoints', (['projection_matrix1', 'projection_matrix2', 'p1.T', 'p2.T'], {}), '(projection_matrix1, projection_matrix2, p1.T, p2.T)\n', (1363, 1415), False, 'import cv2\n'), ((1436, 1497), 'cv2.convertPointsFromHomogeneous', 'cv2.convertPointsFromHomogeneous', (['object_points_homogeneous.T'], {}), '(object_points_homogeneous.T)\n', (1468, 1497), False, 'import cv2\n'), ((2063, 2075), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (2073, 2075), True, 'import networkx as nx\n'), ((3013, 3023), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3021, 3023), True, 'import networkx as nx\n'), ((3064, 3110), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['graph_likely'], {}), '(graph_likely)\n', (3096, 3110), True, 'import networkx as nx\n'), ((3617, 3634), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3628, 3634), False, 'from collections import defaultdict\n'), ((3881, 3901), 'pandas.read_pickle', 'pd.read_pickle', (['file'], {}), '(file)\n', (3895, 3901), True, 'import pandas as pd\n'), ((8675, 8724), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['all_poses'], {'orient': '"""index"""'}), "(all_poses, orient='index')\n", (8697, 8724), True, 'import pandas as pd\n'), ((1293, 1309), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (1301, 1309), True, 'import numpy as 
np\n'), ((1662, 1678), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (1670, 1678), True, 'import numpy as np\n'), ((1967, 1990), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1 - p2)'], {}), '(p1 - p2)\n', (1981, 1990), True, 'import numpy as np\n'), ((1993, 2013), 'numpy.sqrt', 'np.sqrt', (['p1.shape[0]'], {}), '(p1.shape[0])\n', (2000, 2013), True, 'import numpy as np\n'), ((3748, 3777), 'suppose.camera.load_calibration', 'load_calibration', (['camera_file'], {}), '(camera_file)\n', (3764, 3777), False, 'from suppose.camera import load_calibration\n'), ((4082, 4092), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4090, 4092), True, 'import networkx as nx\n'), ((634, 659), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['pts'], {}), '(pts)\n', (654, 659), True, 'import numpy as np\n'), ((1690, 1884), 'cv2.projectPoints', 'cv2.projectPoints', (['pts3d', "camera_calibration['rotationVector']", "camera_calibration['translationVector']", "camera_calibration['cameraMatrix']", "camera_calibration['distortionCoefficients']"], {}), "(pts3d, camera_calibration['rotationVector'],\n camera_calibration['translationVector'], camera_calibration[\n 'cameraMatrix'], camera_calibration['distortionCoefficients'])\n", (1707, 1884), False, 'import cv2\n'), ((2363, 2379), 'numpy.isnan', 'np.isnan', (['weight'], {}), '(weight)\n', (2371, 2379), True, 'import numpy as np\n'), ((8280, 8324), 'numpy.array', 'np.array', (["[p['keypoints_3d'] for p in poses]"], {}), "([p['keypoints_3d'] for p in poses])\n", (8288, 8324), True, 'import numpy as np\n'), ((8556, 8600), 'numpy.array', 'np.array', (["[p['keypoints_3d'] for p in poses]"], {}), "([p['keypoints_3d'] for p in poses])\n", (8564, 8600), True, 'import numpy as np\n'), ((5036, 5070), 'numpy.logical_and', 'np.logical_and', (['p1[:, 2]', 'p2[:, 2]'], {}), '(p1[:, 2], p2[:, 2])\n', (5050, 5070), True, 'import numpy as np\n'), ((5837, 5855), 'numpy.isnan', 'np.isnan', (['pp1_rmse'], {}), '(pp1_rmse)\n', (5845, 5855), True, 
'import numpy as np\n'), ((5859, 5877), 'numpy.isnan', 'np.isnan', (['pp2_rmse'], {}), '(pp2_rmse)\n', (5867, 5877), True, 'import numpy as np\n'), ((7719, 7747), 'math.log', 'math.log', (['reprojection_error'], {}), '(reprojection_error)\n', (7727, 7747), False, 'import math\n')] |
from abc import ABC, abstractmethod
from collections import OrderedDict
import numpy as np
import pandas as pd
from .mask import mask_module
from .modules import MaskedModule
from .utils import get_params
import tempfile, pathlib
import torch
class Pruning(ABC):
    """Base class for Pruning operations
    """
    def __init__(self, model, inputs=None, outputs=None, **pruning_params):
        """Construct Pruning class
        Passed params are set as attributes for convenience and
        saved internally for __repr__
        Arguments:
            model {torch.nn.Module} -- Model for which to compute masks
            inputs {torch.nn.Tensor} -- Sample inputs to estimate activation &| gradients
            outputs {torch.nn.Tensor} -- Sample outputs to estimate activation &| gradients
        Keyword Arguments:
            **pruning_params {dict} -- [description]
        """
        self.model = model
        self.inputs = inputs
        self.outputs = outputs
        # Remember kwarg names so __repr__ can echo them back.
        self.pruning_params = list(pruning_params.keys())
        for k, v in pruning_params.items():
            setattr(self, k, v)
        # A scheduler passed as a *class* is instantiated here (stateful);
        # a plain scheduler function is instead called in update_context().
        if hasattr(self, "scheduler") and isinstance(self.scheduler, type):
            self.scheduler_gen = self.scheduler(self, **self.scheduler_args)
    @abstractmethod
    def model_masks(self, prunable=None):
        """Compute masks for a given model
        """
        # TODO Also accept a dataloader
        pass
        # return masks
    def update_context(self, step):
        """Query the scheduler for the current sparsity and re-init the strategy."""
        # Update prunable parameters after backward pass
        if hasattr(self, "scheduler_gen"):
            # from generator class (stateful)
            sparsity, next_waiting_steps = self.scheduler_gen.next(step)
        elif hasattr(self, "scheduler"):
            # from generator fn (stateless)
            sparsity, next_waiting_steps = self.scheduler(self, step=step, **self.scheduler_args)
        else:
            raise AttributeError("Scheduler fn/obj is required to determine pruning step and amount")
        # compression = 1/density, e.g. sparsity 0.9 -> 10x compression.
        # NOTE(review): divides by zero when sparsity == 1.0 -- presumably
        # schedulers never emit exactly 1.0; confirm.
        self.compression = 1/(1-sparsity)
        assert self.compression >= 1, "Unacceptable compression rate"
        # self.init is expected to be provided by the concrete strategy subclass.
        self.init(self.compression)
        return next_waiting_steps
    def apply(self, step):
        """Advance the schedule, recompute masks and apply them to the model in place."""
        next_waiting_steps = self.update_context(step)
        masks = self.model_masks()
        mask_module(self.model, masks)
        return next_waiting_steps
    @abstractmethod
    def can_prune(self, module):
        """Return True if this strategy knows how to prune `module`."""
        pass
    def prunable_modules(self):
        """List all submodules of the model that can_prune() accepts."""
        prunable = [module for module in self.model.modules() if self.can_prune(module)]
        return prunable
    def prunable_keys(self):
        """Return state_dict keys ("<name>.weight"/"<name>.bias") of prunable modules."""
        prunables = self.prunable_modules()
        prunable_keys = []
        for name, module in self.model.named_modules():
            if module in prunables:
                # Assuring prunable layer always have weight and bias
                prunable_keys.append(name+".weight")
                prunable_keys.append(name+".bias")
        return prunable_keys
    def capture_params(self, steps, only_prunable=True):
        """Snapshot (a subset of) the model's parameters to a temp file.

        The TemporaryDirectory handle is kept on self so the snapshot
        lives as long as this object does.
        """
        self._handle = tempfile.TemporaryDirectory()
        tmp_path = pathlib.Path(self._handle.name)
        tmp_path.mkdir(exist_ok=True, parents=True)
        self.params_snapshot_path = tmp_path / f"{self.model.__class__.__name__}.{steps}"
        params = self.model.state_dict()
        if only_prunable:
            params = dict(filter(lambda kv: kv[0] in self.prunable_keys(), params.items()))
        torch.save({"model_state_dict":params}, self.params_snapshot_path)
    # def weight_diff_norm(self):
    #     assert hasattr(self, "weights_path"), "Should be loaded with a pretrained model in advance"
    #
    #     weights = torch.load(self.weights_path)["model_state_dict"]
    #     if list(weights.keys())[0].startswith('module.'):
    #         weights = {k[len("module."):]: v for k, v in weights.items()}
    #     self.load_state_dict(weights, strict=False)
    #     for k,v in weights.items():
    #         delta =
    def reset_params(self):
        """Restore parameters from the snapshot taken by capture_params()."""
        assert hasattr(self, "params_snapshot_path"), "No saved model path (by self.captured_weights)"
        weights = torch.load(self.params_snapshot_path)["model_state_dict"]
        # Strip DataParallel's "module." prefix if present.
        if list(weights.keys())[0].startswith('module.'):
            weights = {k[len("module."):]: v for k, v in weights.items()}
        self.model.load_state_dict(weights, strict=False)
    def __repr__(self):
        s = f"{self.__class__.__name__}("
        for k in self.pruning_params:
            s += f"{k}={repr(getattr(self, k))}, "
        s = s[:-2] + ')'
        return s
    def __str__(self):
        return repr(self)
    def module_params(self, module):
        """Return the parameters of a single module (see utils.get_params)."""
        return get_params(module)
    def params(self, only_prunable=True, native=False):
        """Map modules to their parameters, optionally restricted to prunable ones."""
        if only_prunable:
            # NOTE(review): self.prunable is not set anywhere in this class;
            # presumably a subclass caches prunable_modules() there -- confirm.
            return {module: get_params(module, native=native) for module in self.prunable}
        else:
            return {module: get_params(module, native=native) for module in self.model.modules()}
    def summary(self):
        """Build a per-parameter DataFrame with compression ratio, size and prunability."""
        rows = []
        for name, module in self.model.named_modules():
            for pname, param in module.named_parameters(recurse=False):
                if isinstance(module, MaskedModule):
                    # 1 / mean(mask): fraction of surviving weights inverted.
                    compression = 1/getattr(module, pname+'_mask').detach().cpu().numpy().mean()
                else:
                    compression = 1
                shape = param.detach().cpu().numpy().shape
                rows.append([name, pname, compression, np.prod(shape), shape, self.can_prune(module)])
        columns = ['module', 'param', 'comp', 'size', 'shape', 'prunable']
        return pd.DataFrame(rows, columns=columns)
class LayerPruning(Pruning):
    """Pruning base class for strategies that work one layer at a time."""

    @abstractmethod
    def layer_masks(self, module):
        """Return masks for a single module, or None when it cannot be masked.

        Subclasses implement the per-layer strategy here instead of
        overriding model_masks() for the whole model at once.
        """
        pass
        # return masks

    def model_masks(self, prunable=None):
        """Apply the layerwise strategy to every prunable module.

        Straightforward implementation for strategies that prune each
        module independently of the others.
        """
        if prunable is None:
            prunable = self.prunable_modules()
        masks = OrderedDict()
        for module in prunable:
            module_masks = self.layer_masks(module)
            if module_masks is not None:
                masks[module] = module_masks
        return masks
| [
"tempfile.TemporaryDirectory",
"collections.OrderedDict",
"numpy.prod",
"pathlib.Path",
"torch.load",
"torch.save",
"pandas.DataFrame"
] | [((3099, 3128), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3126, 3128), False, 'import tempfile, pathlib\n'), ((3148, 3179), 'pathlib.Path', 'pathlib.Path', (['self._handle.name'], {}), '(self._handle.name)\n', (3160, 3179), False, 'import tempfile, pathlib\n'), ((3490, 3557), 'torch.save', 'torch.save', (["{'model_state_dict': params}", 'self.params_snapshot_path'], {}), "({'model_state_dict': params}, self.params_snapshot_path)\n", (3500, 3557), False, 'import torch\n'), ((5652, 5687), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': 'columns'}), '(rows, columns=columns)\n', (5664, 5687), True, 'import pandas as pd\n'), ((6280, 6293), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6291, 6293), False, 'from collections import OrderedDict\n'), ((4168, 4205), 'torch.load', 'torch.load', (['self.params_snapshot_path'], {}), '(self.params_snapshot_path)\n', (4178, 4205), False, 'import torch\n'), ((5514, 5528), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (5521, 5528), True, 'import numpy as np\n')] |
"""
__Author__ : <NAME>
__desc__ : file for training an NCC model based on the data which has been genereated
"""
import tensorflow as tf
import json
import os
from collections import Counter
import random
import numpy as np
import pickle
class NCCTrain(object):
    """Trainer for a Neural Causation Coefficient (NCC) model (TF1 graph API).

    Builds an embedding network over (x, y) sample pairs, averages the
    per-sample embeddings, and classifies the causal direction with a
    sigmoid output.
    """
    def __init__(self,fileName,trainSplitRatio=0.7,saveDir="./model",sizeEmbLayer=100,sizeClassfLayer=100,dropOutRatio=0.25,iterVal=25,batchSize=256,activation=tf.nn.relu,batchNorm=True,optimizer =tf.train.RMSPropOptimizer,intLrRate=0.0001):
        """
        fileName : file to be processed
        trainSplitRatio : ration of train :test data to consider for training v/s testing
        saveDir : dir to save log and actual model
        sizeEmbLayer : no of neuron in each emb layer
        sizeclassFlayr : no of neuron in classification layer
        dropOutRatio : amount of dropout to consider type List
        iterVal : no of iteration to run(epoch)
        batchSize : size of each batch
        activation : activation layer to use
        batchNorm : using batch normalization or not
        optimizer : optimizer for the network
        intLrRate : int learning rate of the trainig process
        """
        self.fileName = fileName
        self.ratio = trainSplitRatio
        self.sizeEmbLayer = sizeEmbLayer
        self.sizeClassfLayer = sizeClassfLayer
        self.dropout = dropOutRatio
        self.maxIter = iterVal
        self.saveDir = saveDir
        self.batchSize = batchSize
        self.activation = activation
        self.optimizer = optimizer
        self.batchNorm = batchNorm
        self.ilr = intLrRate
        ############# path for model and summary #############
        self.modelAdd = os.path.join(self.saveDir,"model")
        self.summaryAdd = os.path.join(self.saveDir,"summary")
        if os.path.isdir(self.modelAdd):
            pass
        else:
            os.mkdir(self.modelAdd)
        if os.path.isdir(self.summaryAdd):
            pass
        else:
            os.mkdir(self.summaryAdd)
        self.modelAdd = self.modelAdd+"/NCCModelEncoder"
        self.summaryAdd = self.summaryAdd+"/summaryWriter"
    def buildNetwork(self):
        """Build the TF1 graph: placeholders, 2 embedding layers, mean pooling,
        2 classification layers, sigmoid output, loss, train op and summaries.
        """
        ## defining the network Of NCC
        self.xVal = tf.placeholder(tf.float32,shape=[None,None,1], name="xVal")
        self.yVal = tf.placeholder(tf.float32,shape=[None,None,1], name="yVal")
        self.NCCLabel = tf.placeholder(tf.float32,shape=[None,1], name="NCCLabel")
        self.keepProb = tf.placeholder(tf.float32, name="keepProb")
        ## for summary
        self.avgTrainLoss = tf.placeholder(tf.float32, name="avgTrainLoss")
        self.avgTestLoss = tf.placeholder(tf.float32, name="avgTestLoss")
        self.isTrain = tf.placeholder(tf.bool, name="isTrain")
        self.concateVal = tf.concat([self.xVal,self.yVal],2,name="concatedValue")#concatenating the values
        with tf.name_scope("embededLayer-1") as scope: ## using embeded layer
            ## refer http://ruishu.io/2016/12/27/batchnorm/
            self.embLayer1Dense = tf.layers.dense(self.concateVal,self.sizeEmbLayer, name = "embDense1")
            self.embLayer1Norm = tf.layers.batch_normalization(self.embLayer1Dense, training=self.isTrain, name ="batchnorm1" )
            self.embLayer1Relu = tf.nn.relu(self.embLayer1Norm)
            self.emblayer1 = tf.nn.dropout(self.embLayer1Relu , self.keepProb,name = "embdroput1")
        with tf.name_scope("embededLayer-2") as scope: ## using embeded layer
            ## refer http://ruishu.io/2016/12/27/batchnorm/
            self.embLayer2Dense = tf.layers.dense(self.emblayer1,self.sizeEmbLayer, name = "embDense2")
            self.embLayer2Norm = tf.layers.batch_normalization(self.embLayer2Dense, training=self.isTrain, name ="batchnorm2" )
            self.embLayer2Relu = tf.nn.relu(self.embLayer2Norm)
            self.emblayer2 = tf.nn.dropout(self.embLayer2Relu , self.keepProb,name = "embdroput2")
        # Mean over the sample axis: one fixed-size representation per pair.
        self.finalEmbLayer = tf.reduce_mean(self.emblayer2,axis=1, name="representation")## for getting the final rep
        with tf.name_scope("classLayer-1") as scope: ## using classification layer
            ## refer http://ruishu.io/2016/12/27/batchnorm/
            self.classLayer1Dense = tf.layers.dense(self.finalEmbLayer,self.sizeClassfLayer, name = "classfDens1")
            self.classLayer1Norm = tf.layers.batch_normalization(self.classLayer1Dense, training=self.isTrain, name ="classfbatchnorm1" )
            self.classLayer1Relu = tf.nn.relu(self.classLayer1Norm)
            self.classLayer1 = tf.nn.dropout(self.classLayer1Relu , self.keepProb,name = "classdroput1")
        with tf.name_scope("classLayer-2") as scope: ## using classification layer
            ## refer http://ruishu.io/2016/12/27/batchnorm/
            self.classLayer2Dense = tf.layers.dense(self.classLayer1,self.sizeClassfLayer, name = "classfDense2")
            self.classLayer2Norm = tf.layers.batch_normalization(self.classLayer2Dense, training=self.isTrain, name ="classfbatchnorm2" )
            self.classLayer2Relu = tf.nn.relu(self.classLayer2Norm)
            self.classLayer2 = tf.nn.dropout(self.classLayer2Relu , self.keepProb,name = "classdroput2")
        self.logits = tf.layers.dense(self.classLayer2,1,name = "logits")
        self.prob = tf.nn.sigmoid(self.logits)
        with tf.name_scope("loss") as scope : # defining the loss function
            # self.loss = tf.reduce_sum((self.NCCLabel*(1-self.prob) + (1-self.NCCLabel)*(self.prob))/2)
            self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,labels=self.NCCLabel))
        updateOps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        # Run batch-norm moving-average updates together with each train step.
        with tf.control_dependencies(updateOps): ## important for
            self.trainOp = self.optimizer(self.ilr).minimize(self.loss)
        ## merging the summary operation
        with tf.name_scope("summary"):
            tf.summary.scalar('loss_train',self.avgTrainLoss)
            tf.summary.histogram('histogram loss_train', self.avgTrainLoss)
            tf.summary.scalar('loss_test',self.avgTestLoss)
            tf.summary.histogram('histogram loss_test', self.avgTestLoss)
            self.summaryOp = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(self.summaryAdd, tf.get_default_graph())
    def saveModel(self,sess,itr):
        """Save the current session's variables to self.modelAdd."""
        ## function for saving the model
        saver=tf.train.Saver()
        saver.save(sess, self.modelAdd)
    def returnArray(Self,listObj):
        """Stack a list of sample dicts into (X, Y, Label) numpy batches.

        NOTE(review): the first parameter is spelled 'Self' (capital S)
        and is unused in the body; it still receives the instance when
        called as self.returnArray(...).
        """
        ## return numpy array of training sample and class label
        XList = []
        YList = []
        LabelList = []
        for obj in listObj:
            tempX =np.array(obj["trainX"])
            tempX = tempX[np.newaxis,...]
            XList.append(tempX)
            tempY = np.array(obj["trainY"])
            tempY = tempY[np.newaxis,...]
            YList.append(tempY)
            tempLabel = np.array([obj["label"]])
            tempLabel = tempLabel[np.newaxis,...]
            LabelList.append(tempLabel)
        ## format the data into np.float32 and single array
        X = np.concatenate(XList,axis=0)
        Y = np.concatenate(YList,axis=0)
        Label = np.concatenate(LabelList,axis=0)
        return X,Y,Label
    def Run(self):
        """Load the JSON-lines dataset, split train/test per sample size,
        and train the network, periodically evaluating and saving.
        """
        ## code for running the system
        self.buildNetwork()## building the network
        with open(self.fileName,"r") as fileNameReader:
            ## reading the file
            count = 0
            datasetLoaded = {}
            for line in fileNameReader:
                data = json.loads(line)
                ## <NOTE> loading full dataset in the memory. Not an optimal approach for bigger datasets. Find better approach
                if data["size"] not in datasetLoaded:
                    datasetLoaded[data["size"]] = [data]
                else:
                    datasetLoaded[data["size"]].append(data)
                count +=1
            print("loaded data : ",count)
        ## segmenting the data into two part based on split ratio
        trainingDataset = {}
        testDataset = {}
        for size in datasetLoaded:
            ### copying the data into two part
            random.shuffle(datasetLoaded[size])
            indexToConsider = int(np.floor(self.ratio*len(datasetLoaded[size])))
            trainDataPerSize = datasetLoaded[size][:indexToConsider]
            testDataPerSize = datasetLoaded[size][indexToConsider:]
            ## once data is loaded
            trainingDataset[size] = trainDataPerSize
            testDataset[size] = testDataPerSize
        ## once shuffleling is done start the training process
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            print("training Started...")
            testLossFinal = -1
            testAccFinal = -1
            for itr in range(self.maxIter):
                count = 0
                avgLossList = []
                for size in trainingDataset:
                    ### training with a fixed size of dataset needed for matrix manipulation
                    for idx in range(0,len(trainingDataset[size]),self.batchSize):
                        ## gettingthe training dataset
                        count+=1
                        trainData=trainingDataset[size][idx:idx+self.batchSize]
                        trainInputX,trainInputY,trainLabel = self.returnArray(trainData)
                        trainInputX = trainInputX[...,np.newaxis]
                        trainInputY = trainInputY[...,np.newaxis]
                        # keepProb = 1 - dropout ratio (keep probability).
                        loss,_, = sess.run([self.loss,self.trainOp],{self.xVal:trainInputX,self.yVal:trainInputY,self.NCCLabel:trainLabel,self.isTrain:True,self.keepProb:np.array([1-self.dropout])})
                        avgLossList.append(loss)
                        print("itr : %d count : %d trainLoss : %f, avgLossVal : %f, testLoss : %f testAcc: %f "%(itr,count,loss,np.mean(avgLossList),testLossFinal,testAccFinal))
                        # Evaluate on the held-out split every 100 batches.
                        if (count%100==0):
                            print ("calculating test error ...")
                            testLossList = []
                            accList = []
                            for testSize in testDataset:
                                testInputX,testInputY,testLabel = self.returnArray(testDataset[testSize])
                                testInputX = testInputX[...,np.newaxis]
                                testInputY = testInputY[...,np.newaxis]
                                testLoss,testProb = sess.run([self.loss,self.prob],{self.xVal:testInputX,self.yVal:testInputY,self.NCCLabel:testLabel,self.isTrain:False,self.keepProb:np.array([1.0])})
                                testLossList.append(np.mean(testLoss))
                                accList.append(self.calcCrossValAcc(testProb,testLabel))
                            testLossFinal = np.mean(testLossList)
                            testAccFinal = np.mean(accList)
                        else:
                            pass
                summary = sess.run(self.summaryOp,{self.avgTrainLoss:np.mean(avgLossList), self.avgTestLoss:testLossFinal})
                self.writer.add_summary(summary)
                # Decay the learning rate by 10x every 15 epochs (includes epoch 0).
                if (itr%15==0):
                    self.ilr*=0.1
                print ("saving model ..")
                self.saveModel(sess,itr)
    def calcCrossValAcc(self,predictionProb,actLabel):
        ## calculate average accuracy of the model (only for class Label -0 or label 1)
        """
        predictionProb : the probability for the prediction of each testing dataset
        actLabel : correct label of training dataset
        """
        # NOTE(review): raises ZeroDivisionError when no sample has
        # label 0 or 1 (count stays 0) -- confirm labels are always 0/1.
        count = 0
        correct =0
        for prob,label in zip(predictionProb,actLabel):
            if ( (label[0] ==0) or (label[0]==1) ):
                if prob[0] > (1-prob[0]) :
                    prediction = 1
                else:
                    prediction = 0
                count+=1
                if (prediction==label):
                    correct+=1
            else:
                continue
        return correct/float(count)
    def testModel(self,tubDataset):
        """Restore the saved model and report accuracy on a JSON-lines test file."""
        ## for testing the model
        with tf.Session() as sess:
            self.buildNetwork()
            saver=tf.train.Saver()
            saver.restore(sess, self.modelAdd)
            with open(tubDataset,"r") as tubDataReader:
                count =0
                correct = 0
                for line in tubDataReader:
                    data = json.loads(line)
                    testInputX,testInputY,testLabel = self.returnArray([data])
                    testInputX = testInputX[...,np.newaxis]
                    testInputY = testInputY[...,np.newaxis]
                    prob = sess.run([self.prob],{self.xVal:testInputX,self.yVal:testInputY,self.isTrain:False,self.keepProb:np.array([1.0])})
                    if prob[0][0] > (1-prob[0][0]) :
                        prediction = 1
                    else:
                        prediction = 0
                    count+=1
                    if prediction==testLabel[0][0] :
                        correct+=1
                    else:
                        print ("wrong Prediction : prob : %f label : %f"%(prob[0][0],testLabel[0][0]))
                print("count : ",count, "correct : ",correct)
    def predictOverResnet(self,NCCData):
        """Run the saved model over ResNet feature-vector pairs and pickle a
        class-name -> per-feature causal/anticausal prediction map.
        """
        ## for predicting model class label
        saveMapper = "FeatureResNetMapNCC.pickle"
        NCCProbMap = {}
        with tf.Session() as sess:
            self.buildNetwork()
            saver=tf.train.Saver()
            saver.restore(sess, self.modelAdd)
            with open(NCCData,"r+") as NCCDataReader,open(saveMapper,"wb") as saveMapWriter:
                for line in NCCDataReader:
                    data = json.loads(line)
                    idx = data["featureIdx"]
                    className = data["className"]
                    testInputX,testInputY,testLabel = self.returnArray([data])
                    testInputX = testInputX[...,np.newaxis]
                    testInputY = testInputY[...,np.newaxis]
                    prob = sess.run([self.prob],{self.xVal:testInputX,self.yVal:testInputY,self.isTrain:False,self.keepProb:np.array([1.0])})
                    # print(prob[0])
                    print(prob[0][0],idx,className)
                    # Probability > 0.5 is interpreted as anticausal direction.
                    if prob[0][0][0]>0.5:
                        NCCLbl = "anticasual"
                    else:
                        NCCLbl = "casual"
                    if className not in NCCProbMap:
                        NCCProbMap[className] = [{"idx":idx,"prob":prob[0][0],"NCC":NCCLbl}]
                    else:
                        NCCProbMap[className].append({"idx":idx,"prob":prob[0][0],"NCC":NCCLbl})
                pickle.dump(NCCProbMap,saveMapWriter, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
    # Build the trainer against the pre-generated causal-direction dataset.
    trainer = NCCTrain(fileName="./casual-data-gen-30K.json-original")
    # Uncomment to train from scratch:
    # trainer.Run()
    # Uncomment to evaluate on the Tuebingen cause-effect pairs:
    # trainer.testModel(tubDataset="./tubehengenDataFormat.json")
    # Predict causal direction over ResNet feature vectors with the saved model.
    trainer.predictOverResnet(NCCData="./resnetTraining/resnetModelFeatureVector.json")
| [
"numpy.array",
"tensorflow.control_dependencies",
"tensorflow.nn.dropout",
"tensorflow.reduce_mean",
"numpy.mean",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"os.path.isdir",
"os.mkdir",
"numpy.concatenate",
"tensorflow.layers.batch_normaliz... | [((1605, 1640), 'os.path.join', 'os.path.join', (['self.saveDir', '"""model"""'], {}), "(self.saveDir, 'model')\n", (1617, 1640), False, 'import os\n'), ((1661, 1698), 'os.path.join', 'os.path.join', (['self.saveDir', '"""summary"""'], {}), "(self.saveDir, 'summary')\n", (1673, 1698), False, 'import os\n'), ((1706, 1734), 'os.path.isdir', 'os.path.isdir', (['self.modelAdd'], {}), '(self.modelAdd)\n', (1719, 1734), False, 'import os\n'), ((1790, 1820), 'os.path.isdir', 'os.path.isdir', (['self.summaryAdd'], {}), '(self.summaryAdd)\n', (1803, 1820), False, 'import os\n'), ((2053, 2115), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, 1]', 'name': '"""xVal"""'}), "(tf.float32, shape=[None, None, 1], name='xVal')\n", (2067, 2115), True, 'import tensorflow as tf\n'), ((2128, 2190), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, 1]', 'name': '"""yVal"""'}), "(tf.float32, shape=[None, None, 1], name='yVal')\n", (2142, 2190), True, 'import tensorflow as tf\n'), ((2207, 2267), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]', 'name': '"""NCCLabel"""'}), "(tf.float32, shape=[None, 1], name='NCCLabel')\n", (2221, 2267), True, 'import tensorflow as tf\n'), ((2285, 2328), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keepProb"""'}), "(tf.float32, name='keepProb')\n", (2299, 2328), True, 'import tensorflow as tf\n'), ((2373, 2420), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""avgTrainLoss"""'}), "(tf.float32, name='avgTrainLoss')\n", (2387, 2420), True, 'import tensorflow as tf\n'), ((2443, 2489), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""avgTestLoss"""'}), "(tf.float32, name='avgTestLoss')\n", (2457, 2489), True, 'import tensorflow as tf\n'), ((2510, 2549), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""isTrain"""'}), 
"(tf.bool, name='isTrain')\n", (2524, 2549), True, 'import tensorflow as tf\n'), ((2571, 2629), 'tensorflow.concat', 'tf.concat', (['[self.xVal, self.yVal]', '(2)'], {'name': '"""concatedValue"""'}), "([self.xVal, self.yVal], 2, name='concatedValue')\n", (2580, 2629), True, 'import tensorflow as tf\n'), ((3683, 3744), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.emblayer2'], {'axis': '(1)', 'name': '"""representation"""'}), "(self.emblayer2, axis=1, name='representation')\n", (3697, 3744), True, 'import tensorflow as tf\n'), ((4861, 4912), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.classLayer2', '(1)'], {'name': '"""logits"""'}), "(self.classLayer2, 1, name='logits')\n", (4876, 4912), True, 'import tensorflow as tf\n'), ((4928, 4954), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['self.logits'], {}), '(self.logits)\n', (4941, 4954), True, 'import tensorflow as tf\n'), ((5254, 5296), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (5271, 5296), True, 'import tensorflow as tf\n'), ((5946, 5962), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5960, 5962), True, 'import tensorflow as tf\n'), ((6534, 6563), 'numpy.concatenate', 'np.concatenate', (['XList'], {'axis': '(0)'}), '(XList, axis=0)\n', (6548, 6563), True, 'import numpy as np\n'), ((6570, 6599), 'numpy.concatenate', 'np.concatenate', (['YList'], {'axis': '(0)'}), '(YList, axis=0)\n', (6584, 6599), True, 'import numpy as np\n'), ((6610, 6643), 'numpy.concatenate', 'np.concatenate', (['LabelList'], {'axis': '(0)'}), '(LabelList, axis=0)\n', (6624, 6643), True, 'import numpy as np\n'), ((1758, 1781), 'os.mkdir', 'os.mkdir', (['self.modelAdd'], {}), '(self.modelAdd)\n', (1766, 1781), False, 'import os\n'), ((1844, 1869), 'os.mkdir', 'os.mkdir', (['self.summaryAdd'], {}), '(self.summaryAdd)\n', (1852, 1869), False, 'import os\n'), ((2666, 2697), 'tensorflow.name_scope', 'tf.name_scope', (['"""embededLayer-1"""'], {}), 
"('embededLayer-1')\n", (2679, 2697), True, 'import tensorflow as tf\n'), ((2809, 2878), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.concateVal', 'self.sizeEmbLayer'], {'name': '"""embDense1"""'}), "(self.concateVal, self.sizeEmbLayer, name='embDense1')\n", (2824, 2878), True, 'import tensorflow as tf\n'), ((2906, 3002), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['self.embLayer1Dense'], {'training': 'self.isTrain', 'name': '"""batchnorm1"""'}), "(self.embLayer1Dense, training=self.isTrain,\n name='batchnorm1')\n", (2935, 3002), True, 'import tensorflow as tf\n'), ((3027, 3057), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.embLayer1Norm'], {}), '(self.embLayer1Norm)\n', (3037, 3057), True, 'import tensorflow as tf\n'), ((3080, 3147), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.embLayer1Relu', 'self.keepProb'], {'name': '"""embdroput1"""'}), "(self.embLayer1Relu, self.keepProb, name='embdroput1')\n", (3093, 3147), True, 'import tensorflow as tf\n'), ((3165, 3196), 'tensorflow.name_scope', 'tf.name_scope', (['"""embededLayer-2"""'], {}), "('embededLayer-2')\n", (3178, 3196), True, 'import tensorflow as tf\n'), ((3308, 3376), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.emblayer1', 'self.sizeEmbLayer'], {'name': '"""embDense2"""'}), "(self.emblayer1, self.sizeEmbLayer, name='embDense2')\n", (3323, 3376), True, 'import tensorflow as tf\n'), ((3403, 3499), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['self.embLayer2Dense'], {'training': 'self.isTrain', 'name': '"""batchnorm2"""'}), "(self.embLayer2Dense, training=self.isTrain,\n name='batchnorm2')\n", (3432, 3499), True, 'import tensorflow as tf\n'), ((3524, 3554), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.embLayer2Norm'], {}), '(self.embLayer2Norm)\n', (3534, 3554), True, 'import tensorflow as tf\n'), ((3577, 3644), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.embLayer2Relu', 'self.keepProb'], {'name': '"""embdroput2"""'}), 
"(self.embLayer2Relu, self.keepProb, name='embdroput2')\n", (3590, 3644), True, 'import tensorflow as tf\n'), ((3782, 3811), 'tensorflow.name_scope', 'tf.name_scope', (['"""classLayer-1"""'], {}), "('classLayer-1')\n", (3795, 3811), True, 'import tensorflow as tf\n'), ((3932, 4009), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.finalEmbLayer', 'self.sizeClassfLayer'], {'name': '"""classfDens1"""'}), "(self.finalEmbLayer, self.sizeClassfLayer, name='classfDens1')\n", (3947, 4009), True, 'import tensorflow as tf\n'), ((4038, 4142), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['self.classLayer1Dense'], {'training': 'self.isTrain', 'name': '"""classfbatchnorm1"""'}), "(self.classLayer1Dense, training=self.isTrain,\n name='classfbatchnorm1')\n", (4067, 4142), True, 'import tensorflow as tf\n'), ((4169, 4201), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.classLayer1Norm'], {}), '(self.classLayer1Norm)\n', (4179, 4201), True, 'import tensorflow as tf\n'), ((4226, 4297), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.classLayer1Relu', 'self.keepProb'], {'name': '"""classdroput1"""'}), "(self.classLayer1Relu, self.keepProb, name='classdroput1')\n", (4239, 4297), True, 'import tensorflow as tf\n'), ((4317, 4346), 'tensorflow.name_scope', 'tf.name_scope', (['"""classLayer-2"""'], {}), "('classLayer-2')\n", (4330, 4346), True, 'import tensorflow as tf\n'), ((4467, 4543), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.classLayer1', 'self.sizeClassfLayer'], {'name': '"""classfDense2"""'}), "(self.classLayer1, self.sizeClassfLayer, name='classfDense2')\n", (4482, 4543), True, 'import tensorflow as tf\n'), ((4572, 4676), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['self.classLayer2Dense'], {'training': 'self.isTrain', 'name': '"""classfbatchnorm2"""'}), "(self.classLayer2Dense, training=self.isTrain,\n name='classfbatchnorm2')\n", (4601, 4676), True, 'import tensorflow as tf\n'), ((4703, 4735), 
'tensorflow.nn.relu', 'tf.nn.relu', (['self.classLayer2Norm'], {}), '(self.classLayer2Norm)\n', (4713, 4735), True, 'import tensorflow as tf\n'), ((4760, 4831), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.classLayer2Relu', 'self.keepProb'], {'name': '"""classdroput2"""'}), "(self.classLayer2Relu, self.keepProb, name='classdroput2')\n", (4773, 4831), True, 'import tensorflow as tf\n'), ((4965, 4986), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (4978, 4986), True, 'import tensorflow as tf\n'), ((5305, 5339), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['updateOps'], {}), '(updateOps)\n', (5328, 5339), True, 'import tensorflow as tf\n'), ((5472, 5496), 'tensorflow.name_scope', 'tf.name_scope', (['"""summary"""'], {}), "('summary')\n", (5485, 5496), True, 'import tensorflow as tf\n'), ((5502, 5552), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss_train"""', 'self.avgTrainLoss'], {}), "('loss_train', self.avgTrainLoss)\n", (5519, 5552), True, 'import tensorflow as tf\n'), ((5556, 5619), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram loss_train"""', 'self.avgTrainLoss'], {}), "('histogram loss_train', self.avgTrainLoss)\n", (5576, 5619), True, 'import tensorflow as tf\n'), ((5626, 5674), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss_test"""', 'self.avgTestLoss'], {}), "('loss_test', self.avgTestLoss)\n", (5643, 5674), True, 'import tensorflow as tf\n'), ((5678, 5739), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram loss_test"""', 'self.avgTestLoss'], {}), "('histogram loss_test', self.avgTestLoss)\n", (5698, 5739), True, 'import tensorflow as tf\n'), ((5761, 5783), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5781, 5783), True, 'import tensorflow as tf\n'), ((5842, 5864), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (5862, 5864), True, 'import tensorflow as tf\n'), ((6174, 6197), 
'numpy.array', 'np.array', (["obj['trainX']"], {}), "(obj['trainX'])\n", (6182, 6197), True, 'import numpy as np\n'), ((6270, 6293), 'numpy.array', 'np.array', (["obj['trainY']"], {}), "(obj['trainY'])\n", (6278, 6293), True, 'import numpy as np\n'), ((6370, 6394), 'numpy.array', 'np.array', (["[obj['label']]"], {}), "([obj['label']])\n", (6378, 6394), True, 'import numpy as np\n'), ((7447, 7482), 'random.shuffle', 'random.shuffle', (['datasetLoaded[size]'], {}), '(datasetLoaded[size])\n', (7461, 7482), False, 'import random\n'), ((7864, 7876), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7874, 7876), True, 'import tensorflow as tf\n'), ((10676, 10688), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10686, 10688), True, 'import tensorflow as tf\n'), ((10732, 10748), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10746, 10748), True, 'import tensorflow as tf\n'), ((11711, 11723), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11721, 11723), True, 'import tensorflow as tf\n'), ((11767, 11783), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11781, 11783), True, 'import tensorflow as tf\n'), ((5155, 5241), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'self.logits', 'labels': 'self.NCCLabel'}), '(logits=self.logits, labels=self.\n NCCLabel)\n', (5194, 5241), True, 'import tensorflow as tf\n'), ((6930, 6946), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6940, 6946), False, 'import json\n'), ((7899, 7932), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7930, 7932), True, 'import tensorflow as tf\n'), ((12694, 12766), 'pickle.dump', 'pickle.dump', (['NCCProbMap', 'saveMapWriter'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(NCCProbMap, saveMapWriter, protocol=pickle.HIGHEST_PROTOCOL)\n', (12705, 12766), False, 'import pickle\n'), ((10919, 10935), 'json.loads', 'json.loads', (['line'], {}), 
'(line)\n', (10929, 10935), False, 'import json\n'), ((11955, 11971), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (11965, 11971), False, 'import json\n'), ((9779, 9799), 'numpy.mean', 'np.mean', (['avgLossList'], {}), '(avgLossList)\n', (9786, 9799), True, 'import numpy as np\n'), ((9623, 9644), 'numpy.mean', 'np.mean', (['testLossList'], {}), '(testLossList)\n', (9630, 9644), True, 'import numpy as np\n'), ((9668, 9684), 'numpy.mean', 'np.mean', (['accList'], {}), '(accList)\n', (9675, 9684), True, 'import numpy as np\n'), ((11203, 11218), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (11211, 11218), True, 'import numpy as np\n'), ((12306, 12321), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (12314, 12321), True, 'import numpy as np\n'), ((8722, 8750), 'numpy.array', 'np.array', (['[1 - self.dropout]'], {}), '([1 - self.dropout])\n', (8730, 8750), True, 'import numpy as np\n'), ((8911, 8931), 'numpy.mean', 'np.mean', (['avgLossList'], {}), '(avgLossList)\n', (8918, 8931), True, 'import numpy as np\n'), ((9514, 9531), 'numpy.mean', 'np.mean', (['testLoss'], {}), '(testLoss)\n', (9521, 9531), True, 'import numpy as np\n'), ((9467, 9482), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (9475, 9482), True, 'import numpy as np\n')] |
"""A module which implements the time frequency estimation.
Authors : <NAME> <<EMAIL>>
License : BSD 3-clause
Multitaper wavelet method
"""
import warnings
from math import sqrt
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from .utils import logger, verbose
from .dpss import dpss_windows
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
                  zero_mean=False):
    """Compute Wavelets for the given frequency range
    Parameters
    ----------
    sfreq : float
        Sampling Frequency.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
        Defaults to 7.
    time_bandwidth : float, (optional)
        Time x Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1).
        Default is 4.0, giving 3 good tapers.
    zero_mean : bool
        If True, subtract each wavelet's mean before normalization.
        Defaults to False.
    Returns
    -------
    Ws : list of array
        Wavelets time series
    """
    Ws = list()
    if time_bandwidth < 2.0:
        raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
    # Number of low-bias DPSS tapers available for this bandwidth product.
    n_taps = int(np.floor(time_bandwidth - 1))
    n_cycles = np.atleast_1d(n_cycles)
    if n_cycles.size != 1 and n_cycles.size != len(freqs):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    # Ws is indexed [taper][frequency]; one wavelet per (taper, frequency).
    for m in range(n_taps):
        Wm = list()
        for k, f in enumerate(freqs):
            if len(n_cycles) != 1:
                this_n_cycles = n_cycles[k]
            else:
                this_n_cycles = n_cycles[0]
            # Window length in seconds shrinks with increasing frequency.
            t_win = this_n_cycles / float(f)
            t = np.arange(0., t_win, 1.0 / sfreq)
            # Making sure wavelets are centered before tapering
            oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
            # Get dpss tapers (half-bandwidth = time_bandwidth / 2)
            tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
                                        n_taps)
            # Taper the complex oscillation with the m-th DPSS window.
            Wk = oscillation * tapers[m]
            if zero_mean:  # to make it zero mean
                real_offset = Wk.mean()
                Wk -= real_offset
            # Normalize: after this, ||Wk||_2 == sqrt(2).
            Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
            Wm.append(Wk)
        Ws.append(Wm)
    return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
    """Compute cwt with fft based convolutions

    Return a generator over signals.

    Parameters
    ----------
    X : array, shape (n_signals, n_times)
        The signals.
    Ws : list of array
        The wavelets (one per frequency).
    mode : 'same' | 'valid' | 'full'
        Convolution mode.

    Yields
    ------
    tfr : array of complex
        Time-frequency coefficients for one signal, one row per wavelet.
    """
    X = np.asarray(X)
    # Precompute wavelets for given frequency range to save time
    n_signals, n_times = X.shape
    n_freqs = len(Ws)
    Ws_max_size = max(W.size for W in Ws)
    size = n_times + Ws_max_size - 1
    # Always use 2**n-sized FFT
    fsize = 2 ** int(np.ceil(np.log2(size)))
    # precompute FFTs of Ws
    fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
    for i, W in enumerate(Ws):
        if len(W) > n_times:
            raise ValueError('Wavelet is too long for such a short signal. '
                             'Reduce the number of cycles.')
        fft_Ws[i] = fftn(W, [fsize])
    for k, x in enumerate(X):
        if mode == "full":
            tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
        elif mode == "same" or mode == "valid":
            tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
        fft_x = fftn(x, [fsize])
        for i, W in enumerate(Ws):
            # Inverse FFT of the product == linear convolution (truncated).
            ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
            if mode == "valid":
                sz = abs(W.size - n_times) + 1
                # Floor division: a float slice index raises TypeError on
                # Python 3 (the original used `/`).
                offset = (n_times - sz) // 2
                tfr[i, offset:(offset + sz)] = _centered(ret, sz)
            else:
                tfr[i, :] = _centered(ret, n_times)
        yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def _time_frequency(X, Ws, use_fft, decim):
    """Aux of time_frequency for parallel computing over channels

    Parameters
    ----------
    X : array, shape (n_epochs, n_times)
        Single-channel data, one epoch per row.
    Ws : list of array
        Wavelets for one taper (one wavelet per frequency).
    use_fft : bool
        If True use FFT-based convolution, else temporal convolution.
    decim : int
        Temporal decimation factor.

    Returns
    -------
    psd : array, shape (n_frequencies, n_times)
        Power averaged over epochs.
    plf : array, shape (n_frequencies, n_times)
        Phase-locking factor magnitude.
    """
    n_epochs, n_times = X.shape
    n_times = n_times // decim + bool(n_times % decim)
    n_frequencies = len(Ws)
    psd = np.zeros((n_frequencies, n_times))  # PSD
    # np.complex was removed in NumPy 1.24; the builtin `complex` is the
    # documented replacement and yields the same complex128 dtype.
    plf = np.zeros((n_frequencies, n_times), complex)  # phase lock
    mode = 'same'
    if use_fft:
        tfrs = _cwt_fft(X, Ws, mode)
    else:
        tfrs = _cwt_convolve(X, Ws, mode)
    for tfr in tfrs:
        tfr = tfr[:, ::decim]
        tfr_abs = np.abs(tfr)
        psd += tfr_abs ** 2
        plf += tfr / tfr_abs  # unit-magnitude phase vectors, summed
    psd /= n_epochs
    plf = np.abs(plf) / n_epochs
    return psd, plf
@verbose
def tfr_multitaper(data, sfreq, frequencies, time_bandwidth=4.0,
                   use_fft=True, n_cycles=7, decim=1,
                   zero_mean=True, verbose=None):
    """Compute time induced power and inter-trial coherence
    The time frequency decomposition is done with DPSS wavelets
    Parameters
    ----------
    data : np.ndarray, shape (n_epochs, n_channels, n_times)
        The input data.
    sfreq : float
        sampling Frequency
    frequencies : np.ndarray, shape (n_frequencies,)
        Array of frequencies of interest
    time_bandwidth : float
        Time x (Full) Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
    use_fft : bool
        Compute transform with fft based convolutions or temporal
        convolutions. Defaults to True.
    n_cycles : float | np.ndarray shape (n_frequencies,)
        Number of cycles. Fixed number or one per frequency. Defaults to 7.
    decim: int
        Temporal decimation factor. Defaults to 1.
    zero_mean : bool
        Make sure the wavelets are zero mean. Defaults to True.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    power : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Induced power. Squared amplitude of time-frequency coefficients.
    itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Phase locking value.
    times : np.ndarray, shape (n_times, )
        Time vector for convenience based on n_times, sfreq and decim
    """
    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
    logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
    n_frequencies = len(frequencies)
    logger.info('Multitaper time-frequency analysis for %d frequencies',
                n_frequencies)
    # Precompute wavelets for given frequency range to save time
    Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, zero_mean=zero_mean)
    n_taps = len(Ws)
    logger.info('Using %d tapers', n_taps)
    n_times_wavelets = Ws[0][0].shape[0]
    if n_times <= n_times_wavelets:
        warnings.warn("Time windows are as long or longer than the epoch. "
                      "Consider reducing n_cycles.")
    psd = np.zeros((n_channels, n_frequencies, n_times))
    itc = np.zeros((n_channels, n_frequencies, n_times))
    for m in range(n_taps):
        # Lazily evaluate one channel at a time to bound memory use.
        psd_itc = (_time_frequency(data[:, c, :], Ws[m], use_fft, decim)
                   for c in range(n_channels))
        for c, (psd_c, itc_c) in enumerate(psd_itc):
            psd[c, :, :] += psd_c
            itc[c, :, :] += itc_c
    # Average the taper estimates.
    psd /= n_taps
    itc /= n_taps
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    times = np.arange(n_times) / float(sfreq)
    return psd, itc, times
@verbose
def rescale(data, times, baseline, mode, verbose=None, copy=True):
    """Rescale i.e., baseline correcting data
    Parameters
    ----------
    data : array
        It can be of any shape. The only constraint is that the last
        dimension should be time.
    times : 1D array
        Time instants in seconds.
    baseline : tuple or list of length 2, or None
        The time interval to apply rescaling / baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal ot (None, None) all the time
        interval is used. If None, no correction is applied.
    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | 'zlogratio'
        Do baseline correction with ratio (power is divided by mean
        power during baseline) or zscore (power is divided by standard
        deviation of power during baseline after subtracting the mean,
        power = [power - mean(power_baseline)] / std(power_baseline)).
        logratio is the same an mean but in log-scale, zlogratio is the
        same as zscore but data is rendered in log-scale first.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    copy : bool
        Operate on a copy of the data, or in place.
    Returns
    -------
    data_scaled : array
        Array of same shape as data after rescaling.
    """
    if copy:
        data = data.copy()
    valid_modes = ('logratio', 'ratio', 'zscore', 'mean', 'percent',
                   'zlogratio')
    if mode not in valid_modes:
        raise Exception('mode should be any of : %s' % (valid_modes, ))
    if baseline is not None:
        logger.info("Applying baseline correction ... (mode: %s)" % mode)
        bmin, bmax = baseline
        if bmin is None:
            imin = 0
        else:
            # Index of the first sample at or after bmin.
            # NOTE(review): raises IndexError if bmin lies beyond times[-1]
            # -- confirm callers guarantee bmin <= times[-1].
            imin = int(np.where(times >= bmin)[0][0])
        if bmax is None:
            imax = len(times)
        else:
            # One past the last sample at or before bmax (exclusive slice end).
            imax = int(np.where(times <= bmax)[0][-1]) + 1
        # avoid potential "empty slice" warning
        if data.shape[-1] > 0:
            # Baseline mean over time; trailing None keeps it broadcastable.
            mean = np.mean(data[..., imin:imax], axis=-1)[..., None]
        else:
            mean = 0  # otherwise we get an ugly nan
        # Modes are mutually exclusive, so the two leading plain `if`
        # statements behave as if they were part of the elif chain below.
        if mode == 'mean':
            data -= mean
        if mode == 'logratio':
            data /= mean
            data = np.log10(data)  # a value of 1 means 10 times bigger
        if mode == 'ratio':
            data /= mean
        elif mode == 'zscore':
            std = np.std(data[..., imin:imax], axis=-1)[..., None]
            data -= mean
            data /= std
        elif mode == 'percent':
            data -= mean
            data /= mean
        elif mode == 'zlogratio':
            data /= mean
            data = np.log10(data)
            std = np.std(data[..., imin:imax], axis=-1)[..., None]
            data /= std
    else:
        logger.info("No baseline correction applied...")
    return data
def plot_tfr(tfr, times, freqs, ch_idx=0, vmin=None, vmax=None,
             x_label='Time (s)', y_label='Frequency (Hz)',
             colorbar=True, cmap='RdBu_r', title=None):
    """Display the time-frequency map of a single channel.

    Parameters
    ----------
    tfr : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Time-frequency data (power or itc) from tfr_multitaper.
    times : np.ndarray, shape (n_times,)
        Time axis corresponding to tfr.
    freqs : np.ndarray, shape (n_frequencies,)
        Frequency axis over which tfr was calculated.
    ch_idx : int, optional, default: 0
        Index of the channel to display.
    vmin : scalar, optional
        Lower color limit; defaults to ``tfr.min()``.
    vmax : scalar, optional
        Upper color limit; defaults to ``tfr.max()``.
    x_label : str or None, optional, default: 'Time (s)'
        Label for the time axis; pass None to omit.
    y_label : str or None, optional, default: 'Frequency (Hz)'
        Label for the frequency axis; pass None to omit.
    colorbar : bool, optional, default: True
        Whether to draw a colorbar.
    cmap : str, optional, default: 'RdBu_r'
        Name of the matplotlib colormap.
    title : str or None, optional
        Optional plot title.
    """
    import matplotlib.pyplot as plt
    lo = tfr.min() if vmin is None else vmin
    hi = tfr.max() if vmax is None else vmax
    bounds = (times[0], times[-1], freqs[0], freqs[-1])
    plt.imshow(tfr[ch_idx], extent=bounds, aspect="auto", origin="lower",
               vmin=lo, vmax=hi, cmap=cmap)
    if x_label is not None:
        plt.xlabel(x_label)
    if y_label is not None:
        plt.ylabel(y_label)
    if colorbar:
        plt.colorbar()
    if title:
        plt.title(title)
| [
"numpy.convolve",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"math.sqrt",
"scipy.fftpack.fftn",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.where",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"numpy.exp",
"numpy.empty",
"warnings.warn",
"numpy.abs",... | [((1280, 1303), 'numpy.atleast_1d', 'np.atleast_1d', (['n_cycles'], {}), '(n_cycles)\n', (1293, 1303), True, 'import numpy as np\n'), ((2512, 2531), 'numpy.asarray', 'np.asarray', (['newsize'], {}), '(newsize)\n', (2522, 2531), True, 'import numpy as np\n'), ((2547, 2566), 'numpy.array', 'np.array', (['arr.shape'], {}), '(arr.shape)\n', (2555, 2566), True, 'import numpy as np\n'), ((2881, 2894), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (2891, 2894), True, 'import numpy as np\n'), ((3215, 3262), 'numpy.empty', 'np.empty', (['(n_freqs, fsize)'], {'dtype': 'np.complex128'}), '((n_freqs, fsize), dtype=np.complex128)\n', (3223, 3262), True, 'import numpy as np\n'), ((4310, 4323), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (4320, 4323), True, 'import numpy as np\n'), ((5232, 5266), 'numpy.zeros', 'np.zeros', (['(n_frequencies, n_times)'], {}), '((n_frequencies, n_times))\n', (5240, 5266), True, 'import numpy as np\n'), ((5284, 5330), 'numpy.zeros', 'np.zeros', (['(n_frequencies, n_times)', 'np.complex'], {}), '((n_frequencies, n_times), np.complex)\n', (5292, 5330), True, 'import numpy as np\n'), ((8123, 8169), 'numpy.zeros', 'np.zeros', (['(n_channels, n_frequencies, n_times)'], {}), '((n_channels, n_frequencies, n_times))\n', (8131, 8169), True, 'import numpy as np\n'), ((8180, 8226), 'numpy.zeros', 'np.zeros', (['(n_channels, n_frequencies, n_times)'], {}), '((n_channels, n_frequencies, n_times))\n', (8188, 8226), True, 'import numpy as np\n'), ((13249, 13356), 'matplotlib.pyplot.imshow', 'plt.imshow', (['tfr[ch_idx]'], {'extent': 'extent', 'aspect': '"""auto"""', 'origin': '"""lower"""', 'vmin': 'vmin', 'vmax': 'vmax', 'cmap': 'cmap'}), "(tfr[ch_idx], extent=extent, aspect='auto', origin='lower', vmin=\n vmin, vmax=vmax, cmap=cmap)\n", (13259, 13356), True, 'import matplotlib.pyplot as plt\n'), ((1235, 1263), 'numpy.floor', 'np.floor', (['(time_bandwidth - 1)'], {}), '(time_bandwidth - 1)\n', (1243, 1263), True, 'import numpy 
as np\n'), ((3481, 3497), 'scipy.fftpack.fftn', 'fftn', (['W', '[fsize]'], {}), '(W, [fsize])\n', (3485, 3497), False, 'from scipy.fftpack import fftn, ifftn\n'), ((3755, 3771), 'scipy.fftpack.fftn', 'fftn', (['x', '[fsize]'], {}), '(x, [fsize])\n', (3759, 3771), False, 'from scipy.fftpack import fftn, ifftn\n'), ((4438, 4487), 'numpy.zeros', 'np.zeros', (['(n_freqs, n_times)'], {'dtype': 'np.complex128'}), '((n_freqs, n_times), dtype=np.complex128)\n', (4446, 4487), True, 'import numpy as np\n'), ((5539, 5550), 'numpy.abs', 'np.abs', (['tfr'], {}), '(tfr)\n', (5545, 5550), True, 'import numpy as np\n'), ((5638, 5649), 'numpy.abs', 'np.abs', (['plf'], {}), '(plf)\n', (5644, 5649), True, 'import numpy as np\n'), ((7992, 8097), 'warnings.warn', 'warnings.warn', (['"""Time windows are as long or longer than the epoch. Consider reducing n_cycles."""'], {}), "(\n 'Time windows are as long or longer than the epoch. Consider reducing n_cycles.'\n )\n", (8005, 8097), False, 'import warnings\n'), ((8545, 8563), 'numpy.arange', 'np.arange', (['n_times'], {}), '(n_times)\n', (8554, 8563), True, 'import numpy as np\n'), ((8566, 8581), 'numpy.float', 'np.float', (['sfreq'], {}), '(sfreq)\n', (8574, 8581), True, 'import numpy as np\n'), ((13403, 13422), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (13413, 13422), True, 'import matplotlib.pyplot as plt\n'), ((13459, 13478), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (13469, 13478), True, 'import matplotlib.pyplot as plt\n'), ((13504, 13518), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13516, 13518), True, 'import matplotlib.pyplot as plt\n'), ((13541, 13557), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (13550, 13557), True, 'import matplotlib.pyplot as plt\n'), ((1766, 1800), 'numpy.arange', 'np.arange', (['(0.0)', 't_win', '(1.0 / sfreq)'], {}), '(0.0, t_win, 1.0 / sfreq)\n', (1775, 1800), True, 'import numpy as np\n'), 
((1890, 1940), 'numpy.exp', 'np.exp', (['(2.0 * 1.0j * np.pi * f * (t - t_win / 2.0))'], {}), '(2.0 * 1.0j * np.pi * f * (t - t_win / 2.0))\n', (1896, 1940), True, 'import numpy as np\n'), ((3574, 3621), 'numpy.zeros', 'np.zeros', (['(n_freqs, fsize)'], {'dtype': 'np.complex128'}), '((n_freqs, fsize), dtype=np.complex128)\n', (3582, 3621), True, 'import numpy as np\n'), ((4541, 4569), 'numpy.convolve', 'np.convolve', (['x', 'W'], {'mode': 'mode'}), '(x, W, mode=mode)\n', (4552, 4569), True, 'import numpy as np\n'), ((11153, 11167), 'numpy.log10', 'np.log10', (['data'], {}), '(data)\n', (11161, 11167), True, 'import numpy as np\n'), ((2274, 2283), 'math.sqrt', 'sqrt', (['(0.5)'], {}), '(0.5)\n', (2278, 2283), False, 'from math import sqrt\n'), ((3157, 3170), 'numpy.log2', 'np.log2', (['size'], {}), '(size)\n', (3164, 3170), True, 'import numpy as np\n'), ((3688, 3737), 'numpy.zeros', 'np.zeros', (['(n_freqs, n_times)'], {'dtype': 'np.complex128'}), '((n_freqs, n_times), dtype=np.complex128)\n', (3696, 3737), True, 'import numpy as np\n'), ((3825, 3849), 'scipy.fftpack.ifftn', 'ifftn', (['(fft_x * fft_Ws[i])'], {}), '(fft_x * fft_Ws[i])\n', (3830, 3849), False, 'from scipy.fftpack import fftn, ifftn\n'), ((10909, 10947), 'numpy.mean', 'np.mean', (['data[..., imin:imax]'], {'axis': '(-1)'}), '(data[..., imin:imax], axis=-1)\n', (10916, 10947), True, 'import numpy as np\n'), ((11308, 11345), 'numpy.std', 'np.std', (['data[..., imin:imax]'], {'axis': '(-1)'}), '(data[..., imin:imax], axis=-1)\n', (11314, 11345), True, 'import numpy as np\n'), ((10651, 10674), 'numpy.where', 'np.where', (['(times >= bmin)'], {}), '(times >= bmin)\n', (10659, 10674), True, 'import numpy as np\n'), ((11566, 11580), 'numpy.log10', 'np.log10', (['data'], {}), '(data)\n', (11574, 11580), True, 'import numpy as np\n'), ((10774, 10797), 'numpy.where', 'np.where', (['(times <= bmax)'], {}), '(times <= bmax)\n', (10782, 10797), True, 'import numpy as np\n'), ((11599, 11636), 'numpy.std', 
'np.std', (['data[..., imin:imax]'], {'axis': '(-1)'}), '(data[..., imin:imax], axis=-1)\n', (11605, 11636), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
'''Test total energies for a small set of systems.'''
import eminus
from eminus import Atoms, read_xyz, SCF
from numpy.testing import assert_allclose
# Total energies calculated with PWDFT.jl for He, H2, LiH, CH4, and Ne with same parameters as below
# Order matches the test functions below: [He, H2, LiH, CH4, Ne].
# Presumably Hartree units -- TODO confirm against the PWDFT.jl reference run.
Etot_ref = [-2.54356557, -1.10228799, -0.76598438, -7.70803736, -29.88936935]
def calc_spin_paired(system, E_ref):
    '''Compare total energies for a test system with a reference value.'''
    # Reference geometries are shipped next to the installed package.
    path = f'{eminus.__path__[0]}/../tests/spin_paired'
    # Same cell size, cutoff and grid sampling as the reference calculation.
    cell = 16
    cutoff = 10
    grid = 48
    atom, X = read_xyz(f'{path}/{system}.xyz')
    E = SCF(Atoms(atom=atom, X=X, a=cell, ecut=cutoff, s=grid)).run()
    try:
        assert_allclose(E, E_ref)
    except AssertionError as err:
        print(f'Test for {system} failed.')
        raise SystemExit(err) from None
    print(f'Test for {system} passed.')
    return
def test_He():
    # Helium atom.
    calc_spin_paired('He', Etot_ref[0])
def test_H2():
    # Hydrogen molecule.
    calc_spin_paired('H2', Etot_ref[1])
def test_LiH():
    # Lithium hydride.
    calc_spin_paired('LiH', Etot_ref[2])
def test_CH4():
    # Methane.
    calc_spin_paired('CH4', Etot_ref[3])
def test_Ne():
    # Neon atom.
    calc_spin_paired('Ne', Etot_ref[4])
if __name__ == '__main__':
    # Allow running the whole suite as a plain script, without pytest.
    test_He()
    test_H2()
    test_LiH()
    test_CH4()
    test_Ne()
| [
"numpy.testing.assert_allclose",
"eminus.Atoms",
"eminus.read_xyz",
"eminus.SCF"
] | [((574, 606), 'eminus.read_xyz', 'read_xyz', (['f"""{path}/{system}.xyz"""'], {}), "(f'{path}/{system}.xyz')\n", (582, 606), False, 'from eminus import Atoms, read_xyz, SCF\n'), ((619, 661), 'eminus.Atoms', 'Atoms', ([], {'atom': 'atom', 'X': 'X', 'a': 'a', 'ecut': 'ecut', 's': 's'}), '(atom=atom, X=X, a=a, ecut=ecut, s=s)\n', (624, 661), False, 'from eminus import Atoms, read_xyz, SCF\n'), ((705, 730), 'numpy.testing.assert_allclose', 'assert_allclose', (['E', 'E_ref'], {}), '(E, E_ref)\n', (720, 730), False, 'from numpy.testing import assert_allclose\n'), ((670, 680), 'eminus.SCF', 'SCF', (['atoms'], {}), '(atoms)\n', (673, 680), False, 'from eminus import Atoms, read_xyz, SCF\n')] |
import numpy as np
import pytest
from probnum.diffeq.perturbedsolvers import _perturbation_functions
# NOTE(review): `random_state` is never used below -- the tests pass
# ``random_state=1`` directly to the perturbation functions; confirm whether
# this module-level RNG can be removed.
random_state = np.random.mtrand.RandomState(seed=1)
@pytest.fixture
def step():
    # Nominal step size h used by all tests.
    return 0.2
@pytest.fixture
def solver_order():
    # Convergence order q of the underlying ODE solver.
    return 4
@pytest.fixture
def noise_scale():
    # Scale of the perturbation noise.
    return 1
@pytest.fixture
def num_samples():
    # Number of perturbed steps drawn per test.
    return 100
@pytest.mark.parametrize(
    "perturb_fct",
    [
        _perturbation_functions.perturb_uniform,
        _perturbation_functions.perturb_lognormal,
    ],
)
def test_mean(perturb_fct, step, solver_order, noise_scale, num_samples):
    # The perturbed steps should be unbiased: their empirical mean must be
    # close to the nominal step size.
    suggested_steps = perturb_fct(
        step, solver_order, noise_scale, random_state=1, size=num_samples
    )
    mean_suggested_step = np.sum(suggested_steps) / num_samples
    np.testing.assert_allclose(mean_suggested_step, step, atol=1e-4, rtol=1e-4)
@pytest.mark.parametrize(
    "perturb_fct",
    [
        _perturbation_functions.perturb_uniform,
        _perturbation_functions.perturb_lognormal,
    ],
)
def test_var(perturb_fct, step, solver_order, noise_scale, num_samples):
    # The empirical variance of the perturbed steps should match the
    # theoretical h**(2q+1) scaling.
    expected_var = step ** (2 * solver_order + 1)
    suggested_steps = perturb_fct(
        step, solver_order, noise_scale, random_state=1, size=num_samples
    )
    # Sum the squared deviations into a single sample-variance estimate.
    # The original omitted np.sum, producing a per-sample array that was
    # compared elementwise against the scalar expectation.
    var = np.sum((suggested_steps - step) ** 2) / num_samples
    np.testing.assert_allclose(expected_var, var, atol=1e-4, rtol=1e-4)
| [
"pytest.mark.parametrize",
"numpy.testing.assert_allclose",
"numpy.random.mtrand.RandomState",
"numpy.sum"
] | [((118, 154), 'numpy.random.mtrand.RandomState', 'np.random.mtrand.RandomState', ([], {'seed': '(1)'}), '(seed=1)\n', (146, 154), True, 'import numpy as np\n'), ((356, 485), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""perturb_fct"""', '[_perturbation_functions.perturb_uniform, _perturbation_functions.\n perturb_lognormal]'], {}), "('perturb_fct', [_perturbation_functions.\n perturb_uniform, _perturbation_functions.perturb_lognormal])\n", (379, 485), False, 'import pytest\n'), ((851, 980), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""perturb_fct"""', '[_perturbation_functions.perturb_uniform, _perturbation_functions.\n perturb_lognormal]'], {}), "('perturb_fct', [_perturbation_functions.\n perturb_uniform, _perturbation_functions.perturb_lognormal])\n", (874, 980), False, 'import pytest\n'), ((772, 851), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean_suggested_step', 'step'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(mean_suggested_step, step, atol=0.0001, rtol=0.0001)\n', (798, 851), True, 'import numpy as np\n'), ((1308, 1379), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected_var', 'var'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(expected_var, var, atol=0.0001, rtol=0.0001)\n', (1334, 1379), True, 'import numpy as np\n'), ((730, 753), 'numpy.sum', 'np.sum', (['suggested_steps'], {}), '(suggested_steps)\n', (736, 753), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from rl import online_learners as ol
from rl.online_learners import base_algorithms as balg
def get_learner(optimizer, policy, scheduler, max_kl=None):
    """Build the online learner selected by ``optimizer`` for a policy.

    'adam' yields a basic first-order optimizer; the remaining choices
    ('natgrad', 'rnatgrad', anything containing 'trpo') wrap a
    second-order update rule in a Fisher-metric optimizer.
    """
    x0 = policy.variable
    if optimizer == 'adam':
        return ol.BasicOnlineOptimizer(balg.Adam(x0, scheduler))
    if optimizer == 'natgrad':
        update_rule = balg.AdaptiveSecondOrderUpdate(x0, scheduler)
    elif optimizer == 'rnatgrad':
        update_rule = balg.RobustAdaptiveSecondOrderUpdate(x0, scheduler,
                                                           max_dist=max_kl)
    elif 'trpo' in optimizer:
        update_rule = balg.TrustRegionSecondOrderUpdate(x0, scheduler)
    else:
        raise NotImplementedError
    return ol.FisherOnlineOptimizer(update_rule, policy=policy)
# Different schemes for sampling the switch time step in RIRO
def natural_t(horizon, gamma):
    """Sample the switch time step following the problem's own weighting.

    For a finite horizon the step is drawn from the normalized gamma**t
    weights; for an infinite horizon it is drawn from a geometric
    distribution. Returns the sampled step and the importance weight
    prob / p, where prob is the problem weighting of the step and p its
    sampling probability.
    """
    if horizon < float('Inf'):
        p0 = gamma**np.arange(horizon)
        sump0 = np.sum(p0)
        p0 = p0/sump0
        ind = np.random.multinomial(1, p0)
        t_switch = np.where(ind == 1)[0][0]
        # NOTE(review): this reads the probability of step t_switch - 1 (and
        # wraps to the last entry when t_switch == 0); confirm whether
        # p0[t_switch] was intended.
        p = p0[t_switch-1]
    else:
        gamma = min(gamma, 0.999999)
        # np.random.geometric without `size` returns a scalar; the original
        # indexed it with [0], which raises a TypeError.
        t_switch = np.random.geometric(p=1 - gamma)
        p = gamma**t_switch*(1-gamma)
    prob, scale = compute_prob_and_scale(t_switch, horizon, gamma)
    return t_switch, prob/p
def cyclic_t(rate, horizon, gamma):
    """Pick the switch step by cycling deterministically over the horizon."""
    # Persistent call counter kept on the function object itself.
    if getattr(cyclic_t, '_itr', None) is None:
        cyclic_t._itr = 0
    assert horizon < float('Inf')
    step_index = int(rate * cyclic_t._itr) % horizon
    t_switch = step_index + 1  # time steps are counted from 1
    p = 1. / horizon  # treated as a uniform sampling probability
    cyclic_t._itr = cyclic_t._itr + 1
    prob, scale = compute_prob_and_scale(t_switch, horizon, gamma)
    return t_switch, prob / p
def geometric_t(mean, horizon, gamma):
    """Sample the switch step from a geometric distribution with given mean."""
    success = 1 / mean
    t_switch = np.random.geometric(success)  # starts from 1
    if t_switch > horizon - 1:
        # Clip to the horizon; all the tail mass is lumped onto the last step.
        t_switch = horizon - 1
        p = (1 - success) ** t_switch  # tail probability
    else:
        p = (1 - success) ** (t_switch - 1) * success
    prob, scale = compute_prob_and_scale(t_switch, horizon, gamma)
    return t_switch, prob / p
def compute_prob_and_scale(t, horizon, gamma):
    r""" Treat the weighting in a problem as probability. Compute the
    probability for a time step and the sum of the weights.
    For the objective below,
        \sum_{t=0}^{T-1} \gamma^t c_t
    where T is finite and \gamma in [0,1], or T is infinite and gamma<1.
    It computes
        scale = \sum_{t=0}^{T-1} \gamma^t
        prob = \gamma^t / scale
    """
    assert t <= horizon - 1
    if horizon < float('Inf'):
        p0 = gamma**np.arange(horizon)
        sump0 = np.sum(p0)
        prob = p0[t]/sump0
    else:
        # Geometric series sum for the infinite horizon.
        sump0 = 1/(1-gamma)
        # Fixed: the original referenced the undefined name `t_switch`
        # here, raising a NameError on the infinite-horizon path.
        prob = gamma**t*(1-gamma)
    return prob, sump0
| [
"rl.online_learners.base_algorithms.Adam",
"numpy.random.geometric",
"numpy.where",
"rl.online_learners.base_algorithms.RobustAdaptiveSecondOrderUpdate",
"numpy.random.multinomial",
"numpy.sum",
"rl.online_learners.base_algorithms.AdaptiveSecondOrderUpdate",
"rl.online_learners.base_algorithms.TrustRe... | [((2057, 2082), 'numpy.random.geometric', 'np.random.geometric', (['prob'], {}), '(prob)\n', (2076, 2082), True, 'import numpy as np\n'), ((1259, 1269), 'numpy.sum', 'np.sum', (['p0'], {}), '(p0)\n', (1265, 1269), True, 'import numpy as np\n'), ((1306, 1334), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'p0'], {}), '(1, p0)\n', (1327, 1334), True, 'import numpy as np\n'), ((2904, 2914), 'numpy.sum', 'np.sum', (['p0'], {}), '(p0)\n', (2910, 2914), True, 'import numpy as np\n'), ((403, 427), 'rl.online_learners.base_algorithms.Adam', 'balg.Adam', (['x0', 'scheduler'], {}), '(x0, scheduler)\n', (412, 427), True, 'from rl.online_learners import base_algorithms as balg\n'), ((1224, 1242), 'numpy.arange', 'np.arange', (['horizon'], {}), '(horizon)\n', (1233, 1242), True, 'import numpy as np\n'), ((1469, 1501), 'numpy.random.geometric', 'np.random.geometric', ([], {'p': '(1 - gamma)'}), '(p=1 - gamma)\n', (1488, 1501), True, 'import numpy as np\n'), ((2869, 2887), 'numpy.arange', 'np.arange', (['horizon'], {}), '(horizon)\n', (2878, 2887), True, 'import numpy as np\n'), ((521, 566), 'rl.online_learners.base_algorithms.AdaptiveSecondOrderUpdate', 'balg.AdaptiveSecondOrderUpdate', (['x0', 'scheduler'], {}), '(x0, scheduler)\n', (551, 566), True, 'from rl.online_learners import base_algorithms as balg\n'), ((1353, 1371), 'numpy.where', 'np.where', (['(ind == 1)'], {}), '(ind == 1)\n', (1361, 1371), True, 'import numpy as np\n'), ((696, 764), 'rl.online_learners.base_algorithms.RobustAdaptiveSecondOrderUpdate', 'balg.RobustAdaptiveSecondOrderUpdate', (['x0', 'scheduler'], {'max_dist': 'max_kl'}), '(x0, scheduler, max_dist=max_kl)\n', (732, 764), True, 'from rl.online_learners import base_algorithms as balg\n'), ((892, 940), 'rl.online_learners.base_algorithms.TrustRegionSecondOrderUpdate', 'balg.TrustRegionSecondOrderUpdate', (['x0', 'scheduler'], {}), '(x0, scheduler)\n', (925, 940), True, 'from 
rl.online_learners import base_algorithms as balg\n')] |
import os
import numpy as np
import pandas as pd
import h5py
from bmtk.utils.sonata.utils import add_hdf5_magic, add_hdf5_version
def create_single_pop_h5():
    """Convert the legacy spike file into the single-population SONATA layout.

    Reads gids/timestamps from ``spike_files/spikes.old.h5`` and rewrites them
    under ``/spikes/v1`` (sorted by time) in ``spike_files/spikes.one_pop.h5``.
    """
    # BUG FIX: the read-only handle was opened without a context manager and
    # never closed. Materialize the datasets with [()] before the file closes.
    with h5py.File('spike_files/spikes.old.h5', 'r') as h5_file_old:
        node_ids = h5_file_old['/spikes/gids'][()]
        timestamps = h5_file_old['/spikes/timestamps'][()]

    with h5py.File('spike_files/spikes.one_pop.h5', 'w') as h5:
        add_hdf5_magic(h5)    # SONATA magic attribute
        add_hdf5_version(h5)  # SONATA version attribute
        core_grp = h5.create_group('/spikes/v1')
        core_grp.attrs['sorting'] = 'by_time'
        ts_ds = core_grp.create_dataset('timestamps', data=timestamps, dtype=np.float64)
        ts_ds.attrs['units'] = 'milliseconds'
        core_grp.create_dataset('node_ids', data=node_ids, dtype=np.uint64)
def create_multipop_csv(dir_path='/local1/workspace/bmtk/docs/examples/NWB_files'):
    """Build a space-separated CSV of lgn+tw spikes from the NWB files.

    Args:
        dir_path: directory containing ``lgn_spikes.nwb`` and ``tw_spikes.nwb``.
    """
    pop_frames = []
    for pop_name, nwb_fname, n_nodes in [('lgn', 'lgn_spikes.nwb', 4000),
                                         ('tw', 'tw_spikes.nwb', 2000)]:
        # BUG FIX: both NWB handles were opened and never closed; use a
        # context manager so each file is released after its spikes are read.
        with h5py.File(os.path.join(dir_path, nwb_fname), 'r') as pop_h5:
            spike_train_grp = pop_h5['/processing/trial_0/spike_train']
            for node_id in range(n_nodes):
                pop_frames.append(pd.DataFrame({
                    'timestamps': spike_train_grp[str(node_id)]['data'][()],
                    'population': pop_name,
                    'node_ids': np.uint64(node_id)
                }))

    # Single concat instead of the per-node DataFrame.append loop, which was
    # quadratic and has been removed in pandas 2.0. Row order is unchanged:
    # all lgn nodes (0..3999) followed by all tw nodes (0..1999).
    full_df = pd.concat(pop_frames, ignore_index=True)
    full_df = full_df[['timestamps', 'population', 'node_ids']]
    full_df.to_csv('spike_files/spikes.multipop.csv', sep=' ', index=False)
def create_multipop_h5():
    """Write the multi-population spike CSV into a two-group SONATA HDF5 file.

    Each population ('lgn', 'tw') becomes a ``/spikes/<pop>`` group with
    by-id-sorted timestamps (milliseconds) and node_ids datasets.
    """
    all_spikes = pd.read_csv('spike_files/spikes.multipop.csv', sep=' ')
    with h5py.File('spike_files/spikes.multipop.h5', 'w') as h5:
        add_hdf5_magic(h5)
        add_hdf5_version(h5)
        for pop in ('lgn', 'tw'):
            pop_df = all_spikes[all_spikes['population'] == pop]
            grp = h5.create_group('/spikes/{}'.format(pop))
            grp.attrs['sorting'] = 'by_id'
            ts = grp.create_dataset('timestamps', data=pop_df['timestamps'],
                                    dtype=np.float64)
            ts.attrs['units'] = 'milliseconds'
            grp.create_dataset('node_ids', data=pop_df['node_ids'],
                               dtype=np.uint64)
def create_nwb():
    """Write the one-population CSV spikes into a legacy NWB-style HDF5 file.

    Node ids 0..13 each get a ``/processing/trial_0/spike_train/<id>/data``
    dataset of spike times in milliseconds.
    """
    spikes_df = pd.read_csv('spike_files/spikes.one_pop.csv', sep=' ')
    with h5py.File('spike_files/spikes.onepop.v1.0.nwb', 'w') as h5:
        spikes_grp = h5.create_group('/processing/trial_0/spike_train')
        for node_id in range(14):
            node_mask = spikes_df['node_ids'] == node_id
            node_times = spikes_df[node_mask]['timestamps'].values
            data_ds = spikes_grp.create_dataset('{}/data'.format(node_id),
                                                data=node_times,
                                                dtype=np.float64)
            data_ds.attrs['dimension'] = 'time'
            data_ds.attrs['unit'] = 'millisecond'
if __name__ == '__main__':
    # Only the NWB fixture is regenerated by default; uncomment the calls
    # below to rebuild the multi-population CSV/H5 fixtures as well.
    # create_multipop_csv()
    # create_multipop_h5()
    create_nwb()
| [
"pandas.Series",
"pandas.read_csv",
"bmtk.utils.sonata.utils.add_hdf5_version",
"bmtk.utils.sonata.utils.add_hdf5_magic",
"os.path.join",
"h5py.File",
"numpy.uint64"
] | [((179, 222), 'h5py.File', 'h5py.File', (['"""spike_files/spikes.old.h5"""', '"""r"""'], {}), "('spike_files/spikes.old.h5', 'r')\n", (188, 222), False, 'import h5py\n'), ((1797, 1852), 'pandas.read_csv', 'pd.read_csv', (['"""spike_files/spikes.multipop.csv"""'], {'sep': '""" """'}), "('spike_files/spikes.multipop.csv', sep=' ')\n", (1808, 1852), True, 'import pandas as pd\n'), ((2802, 2856), 'pandas.read_csv', 'pd.read_csv', (['"""spike_files/spikes.one_pop.csv"""'], {'sep': '""" """'}), "('spike_files/spikes.one_pop.csv', sep=' ')\n", (2813, 2856), True, 'import pandas as pd\n'), ((327, 374), 'h5py.File', 'h5py.File', (['"""spike_files/spikes.one_pop.h5"""', '"""w"""'], {}), "('spike_files/spikes.one_pop.h5', 'w')\n", (336, 374), False, 'import h5py\n'), ((390, 408), 'bmtk.utils.sonata.utils.add_hdf5_magic', 'add_hdf5_magic', (['h5'], {}), '(h5)\n', (404, 408), False, 'from bmtk.utils.sonata.utils import add_hdf5_magic, add_hdf5_version\n'), ((417, 437), 'bmtk.utils.sonata.utils.add_hdf5_version', 'add_hdf5_version', (['h5'], {}), '(h5)\n', (433, 437), False, 'from bmtk.utils.sonata.utils import add_hdf5_magic, add_hdf5_version\n'), ((863, 903), 'os.path.join', 'os.path.join', (['dir_path', '"""lgn_spikes.nwb"""'], {}), "(dir_path, 'lgn_spikes.nwb')\n", (875, 903), False, 'import os\n'), ((932, 971), 'os.path.join', 'os.path.join', (['dir_path', '"""tw_spikes.nwb"""'], {}), "(dir_path, 'tw_spikes.nwb')\n", (944, 971), False, 'import os\n'), ((1988, 2036), 'h5py.File', 'h5py.File', (['"""spike_files/spikes.multipop.h5"""', '"""w"""'], {}), "('spike_files/spikes.multipop.h5', 'w')\n", (1997, 2036), False, 'import h5py\n'), ((2052, 2070), 'bmtk.utils.sonata.utils.add_hdf5_magic', 'add_hdf5_magic', (['h5'], {}), '(h5)\n', (2066, 2070), False, 'from bmtk.utils.sonata.utils import add_hdf5_magic, add_hdf5_version\n'), ((2079, 2099), 'bmtk.utils.sonata.utils.add_hdf5_version', 'add_hdf5_version', (['h5'], {}), '(h5)\n', (2095, 2099), False, 'from 
bmtk.utils.sonata.utils import add_hdf5_magic, add_hdf5_version\n'), ((2866, 2918), 'h5py.File', 'h5py.File', (['"""spike_files/spikes.onepop.v1.0.nwb"""', '"""w"""'], {}), "('spike_files/spikes.onepop.v1.0.nwb', 'w')\n", (2875, 2918), False, 'import h5py\n'), ((1030, 1057), 'pandas.Series', 'pd.Series', ([], {'dtype': 'np.float64'}), '(dtype=np.float64)\n', (1039, 1057), True, 'import pandas as pd\n'), ((1081, 1108), 'pandas.Series', 'pd.Series', ([], {'dtype': 'np.string_'}), '(dtype=np.string_)\n', (1090, 1108), True, 'import pandas as pd\n'), ((1130, 1156), 'pandas.Series', 'pd.Series', ([], {'dtype': 'np.uint64'}), '(dtype=np.uint64)\n', (1139, 1156), True, 'import pandas as pd\n'), ((1532, 1550), 'numpy.uint64', 'np.uint64', (['node_id'], {}), '(node_id)\n', (1541, 1550), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN Agent with time input."""
import collections
import functools
from typing import Tuple
from dopamine.jax import losses
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
from flax import linen as nn
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import optax
import tensorflow as tf
from aux_tasks.auxiliary_mc import gammas_monte_carlo_replay_buffer as monte_carlo_rb
from aux_tasks.auxiliary_mc import networks as aux_mc_networks
# Output container shared by the networks below: per-action Q-values plus the
# auxiliary Monte-Carlo prediction head.
AuxiliaryPredictionDQNNetworkType = collections.namedtuple(
    'dqn_network_with_random_rewards', ['q_values', 'aux_prediction'])
@gin.configurable
class DQNNetworkWithAuxiliaryPredictions(nn.Module):
  """Nature-DQN convnet with an additional auxiliary-prediction head.

  Attributes:
    num_actions: int, number of actions the agent can take at any state.
    num_predictions: int, number of auxiliary predictions.
    inputs_preprocessed: bool, whether inputs are already preprocessed.
  """
  num_actions: int
  num_predictions: int
  inputs_preprocessed: bool = False

  @nn.compact
  def __call__(self, x):
    kernel_init = nn.initializers.xavier_uniform()
    if not self.inputs_preprocessed:
      x = networks.preprocess_atari_inputs(x)
    # Convolutional torso: (features, kernel, stride) triples.
    for n_features, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
      x = nn.Conv(
          features=n_features,
          kernel_size=(kernel, kernel),
          strides=(stride, stride),
          kernel_init=kernel_init)(x)
      x = nn.relu(x)
    flat = x.reshape(-1)  # flatten
    # Q-value head.
    hidden = nn.relu(nn.Dense(features=512, kernel_init=kernel_init)(flat))
    q_values = nn.Dense(features=self.num_actions, kernel_init=kernel_init)(hidden)
    # Auxiliary MC-prediction head, branching off the flattened features.
    aux = nn.relu(nn.Dense(features=512, kernel_init=kernel_init)(flat))
    aux = nn.Dense(features=self.num_predictions, kernel_init=kernel_init)(aux)
    return AuxiliaryPredictionDQNNetworkType(q_values, aux)
@gin.configurable
class ImpalaEncoderWithAuxiliaryPredictions(nn.Module):
  """Impala-encoder network producing Q-values plus auxiliary predictions."""
  num_actions: int
  num_predictions: int
  inputs_preprocessed: bool = False
  stack_sizes: Tuple[int, Ellipsis] = (16, 32, 32)
  num_blocks: int = 2

  def setup(self):
    # Shared Impala convolutional torso.
    self.encoder = aux_mc_networks.ImpalaEncoder()

  @nn.compact
  def __call__(self, x, key=None):
    kernel_init = nn.initializers.xavier_uniform()
    if not self.inputs_preprocessed:
      x = networks.preprocess_atari_inputs(x)
    flat = self.encoder(x).reshape(-1)  # flatten encoder output
    # Q-value head.
    hidden = nn.relu(nn.Dense(features=512, kernel_init=kernel_init)(flat))
    q_values = nn.Dense(features=self.num_actions, kernel_init=kernel_init)(hidden)
    # Auxiliary MC-prediction head, branching off the flattened features.
    aux = nn.relu(nn.Dense(features=512, kernel_init=kernel_init)(flat))
    aux = nn.Dense(features=self.num_predictions, kernel_init=kernel_init)(aux)
    return AuxiliaryPredictionDQNNetworkType(q_values, aux)
@gin.configurable
class RandomRewardNetwork(nn.Module):
  """Generates random rewards using a noisy network.

  Attributes:
    num_actions: int, number of actions the agent can take at any state.
    num_rewards: int, number of random rewards to generate.
    inputs_preprocessed: bool, whether inputs are already preprocessed.
  """
  num_actions: int
  num_rewards: int
  inputs_preprocessed: bool = False

  @nn.compact
  def __call__(self, x, rng_key):
    initializer = nn.initializers.xavier_uniform()
    if not self.inputs_preprocessed:
      x = networks.preprocess_atari_inputs(x)
    hidden_sizes = [32, 64, 64]
    kernel_sizes = [8, 4, 3]
    stride_sizes = [4, 2, 1]
    for hidden_size, kernel_size, stride_size in zip(hidden_sizes, kernel_sizes,
                                                     stride_sizes):
      x = nn.Conv(
          features=hidden_size,
          kernel_size=(kernel_size, kernel_size),
          strides=(stride_size, stride_size),
          kernel_init=initializer)(x)
      x = nn.relu(x)
    features = x.reshape(-1)  # flatten
    # A fixed rng_key makes the "random" rewards reproducible across calls.
    net = networks.NoisyNetwork(rng_key=rng_key, eval_mode=False)
    rewards = net(features, self.num_rewards)
    # BUG FIX: the original computed `jax.nn.sigmoid(features)` into an unused
    # local with a comment claiming rewards were clipped to [-1, 1]; the result
    # was discarded, so rewards were always returned unclipped. The dead
    # statement is removed; behavior (unclipped rewards) is unchanged.
    # NOTE(review): if clipping was intended, it should be applied to
    # `rewards`, not `features` — confirm with the original authors.
    return rewards
# CLARITY FIX: `('network_def')` was a parenthesized string, not a tuple; it
# only worked because jax.jit accepts a bare string. Make the tuple explicit.
@functools.partial(jax.jit, static_argnames=('network_def',))
def get_rewards(network_def, params, state, rng_key):
  """Jitted forward pass of `network_def` for a single state.

  Args:
    network_def: network definition with an `apply` method; must be hashable
      since it is a static argument under jit.
    params: network parameters.
    state: input state/observation.
    rng_key: rng key forwarded to the network's apply function.

  Returns:
    The network output for `state`.
  """
  return network_def.apply(params, state, rng_key=rng_key)
@functools.partial(
    jax.jit,
    static_argnames=('network_def', 'optimizer', 'cumulative_gamma',
                     'loss_type'))
# NOTE(review): 'loss_type' appears in static_argnames but is not a parameter
# of `train` — looks like a leftover from an earlier signature; confirm.
def train(network_def,
          online_params,
          target_params,
          optimizer,
          optimizer_state,
          states,
          auxiliary_mc_returns,
          actions,
          next_states,
          rewards,
          terminals,
          cumulative_gamma,
          auxloss_weight=0.0):
  """Runs a single gradient step on the combined DQN + auxiliary-MC loss.

  The total loss is `(1 - auxloss_weight) * td_loss + auxloss_weight *
  aux_loss`, where td_loss is the standard Q-learning MSE against the target
  network and aux_loss is the MSE of the auxiliary head against the
  Monte-Carlo return targets.

  Args:
    network_def: network definition (static under jit).
    online_params: parameters of the online network.
    target_params: parameters of the target network.
    optimizer: optax optimizer (static under jit).
    optimizer_state: current optax optimizer state.
    states: batch of stacked observations.
    auxiliary_mc_returns: Monte-Carlo return targets for the auxiliary head.
    actions: batch of actions taken.
    next_states: batch of successor observations.
    rewards: batch of rewards.
    terminals: batch of terminal flags.
    cumulative_gamma: discount factor (static under jit).
    auxloss_weight: float, weight of the auxiliary loss term.

  Returns:
    Tuple of (optimizer_state, online_params, loss, td_loss, aux_loss).
  """
  def loss_fn(params, target, auxiliary_target):
    def q_online(state):
      return network_def.apply(params, state)

    # Forward pass over the batch; the network returns (q_values, aux_pred).
    model_output = jax.vmap(q_online)(states)
    q_values = jnp.squeeze(model_output.q_values)
    # Q-value of the action actually taken in each transition.
    replay_chosen_q = jax.vmap(lambda x, y: x[y])(q_values, actions)
    td_loss = jnp.mean(jax.vmap(losses.mse_loss)(target, replay_chosen_q))

    # Auxiliary task loss.
    auxiliary_predictions = jnp.squeeze(model_output.aux_prediction)
    aux_loss = jnp.mean(jax.vmap(losses.mse_loss)(
        auxiliary_predictions, auxiliary_target))
    # Convex combination of the two losses.
    loss = ((1. - auxloss_weight) * td_loss +
            auxloss_weight * aux_loss)
    return loss, (td_loss, aux_loss)

  def q_target(state):
    return network_def.apply(target_params, state)

  # Standard one-step bootstrapped TD target from the target network.
  target = dqn_agent.target_q(q_target, next_states, rewards, terminals,
                              cumulative_gamma)
  grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
  (loss, component_losses), grad = grad_fn(online_params, target,
                                           auxiliary_mc_returns)
  td_loss, aux_loss = component_losses
  updates, optimizer_state = optimizer.update(grad, optimizer_state,
                                              params=online_params)
  online_params = optax.apply_updates(online_params, updates)
  return optimizer_state, online_params, loss, td_loss, aux_loss
@gin.configurable
class DiscountedJaxDQNAgentWithAuxiliaryMC(dqn_agent.JaxDQNAgent):
  """JaxDQNAgent variant with auxiliary Monte-Carlo return prediction.

  Uses a Monte-Carlo replay buffer that stores discounted returns for a
  range of gammas, and trains the network's auxiliary head against them
  alongside the regular TD loss.
  """

  def __init__(self,
               num_actions,
               network=DQNNetworkWithAuxiliaryPredictions,
               num_rewards=2,
               auxloss_weight=0.0,
               summary_writer=None,
               preprocess_fn=None,
               seed=None):
    """Initializes the agent and constructs the components of its graph.

    Args:
      num_actions: int, number of actions the agent can take at any state.
      network: Jax network to use for training.
      num_rewards: int, number of auxiliary predictions (one per gamma).
      auxloss_weight: float, weight for the auxiliary loss term.
      summary_writer: Tensorflow summary writer for logging summaries.
      preprocess_fn: preprocessing function applied to observations.
      seed: int, agent seed.
    """
    # The auxiliary head size is fixed here so the base class can build the
    # network with only num_actions.
    network = functools.partial(network, num_predictions=num_rewards)
    self.num_rewards = num_rewards
    self._auxloss_weight = auxloss_weight
    super().__init__(
        num_actions, network=network, summary_writer=summary_writer, seed=seed,
        preprocess_fn=preprocess_fn)
    # Create network for random reward generation.

  def _build_replay_buffer(self):
    """Creates the Monte-Carlo replay buffer used by the agent.

    The buffer stores, per transition, discounted Monte-Carlo returns for
    `num_rewards` gammas evenly spaced in [0.1, 0.999].
    """
    return monte_carlo_rb.OutOfGraphReplayBufferdiscountedWithMC(
        observation_shape=self.observation_shape,
        stack_size=self.stack_size,
        update_horizon=self.update_horizon,
        gamma=self.gamma,
        observation_dtype=self.observation_dtype,
        list_of_discounts=onp.linspace(0.1, 0.999, self.num_rewards))
    # Pass a copy of `extra_storage_types` to avoid updating it when
    # updating `extra_monte_carlo_storage_types`.
    # extra_monte_carlo_storage_types=extra_storage_types[:],
    # reverse_fill=True)

  def _train_step(self):
    """Runs a single training step."""
    # Run a train op at the rate of self.update_period if enough training steps
    # have been run. This matches the Nature DQN behaviour.
    if self._replay.add_count > self.min_replay_history:
      if self.training_steps % self.update_period == 0:
        self._sample_from_replay_buffer()
        states = self.preprocess_fn(self.replay_elements['state'])
        next_states = self.preprocess_fn(self.replay_elements['next_state'])
        # Jitted update; returns the new optimizer state/params plus the
        # aggregate, TD, and auxiliary loss components for logging.
        self.optimizer_state, self.online_params, loss, td_loss, auxloss = train(
            self.network_def,
            self.online_params,
            self.target_network_params,
            self.optimizer,
            self.optimizer_state,
            states,
            # List of monte carlo returns for all gamma.
            self.replay_elements['monte_carlo_gamma'],
            self.replay_elements['action'],
            next_states,
            self.replay_elements['reward'],
            self.replay_elements['terminal'],
            self.cumulative_gamma,
            self._auxloss_weight)
        if (self.summary_writer is not None and
            self.training_steps > 0 and
            self.training_steps % self.summary_writing_frequency == 0):
          with self.summary_writer.as_default():
            tf.summary.scalar('Losses/Aggregate', loss, step=self.training_steps)
            tf.summary.scalar(
                'Losses/Auxiliary',
                auxloss,
                step=self.training_steps)
            tf.summary.scalar('Losses/TD', td_loss, step=self.training_steps)
          self.summary_writer.flush()
      if self.training_steps % self.target_update_period == 0:
        self._sync_weights()

    self.training_steps += 1
| [
"jax.vmap",
"collections.namedtuple",
"jax.nn.sigmoid",
"flax.linen.initializers.xavier_uniform",
"flax.linen.Dense",
"dopamine.jax.networks.preprocess_atari_inputs",
"flax.linen.Conv",
"optax.apply_updates",
"dopamine.jax.agents.dqn.dqn_agent.target_q",
"functools.partial",
"numpy.linspace",
... | [((1134, 1227), 'collections.namedtuple', 'collections.namedtuple', (['"""dqn_network_with_random_rewards"""', "['q_values', 'aux_prediction']"], {}), "('dqn_network_with_random_rewards', ['q_values',\n 'aux_prediction'])\n", (1156, 1227), False, 'import collections\n'), ((5571, 5628), 'functools.partial', 'functools.partial', (['jax.jit'], {'static_argnames': '"""network_def"""'}), "(jax.jit, static_argnames='network_def')\n", (5588, 5628), False, 'import functools\n'), ((5747, 5856), 'functools.partial', 'functools.partial', (['jax.jit'], {'static_argnames': "('network_def', 'optimizer', 'cumulative_gamma', 'loss_type')"}), "(jax.jit, static_argnames=('network_def', 'optimizer',\n 'cumulative_gamma', 'loss_type'))\n", (5764, 5856), False, 'import functools\n'), ((6994, 7073), 'dopamine.jax.agents.dqn.dqn_agent.target_q', 'dqn_agent.target_q', (['q_target', 'next_states', 'rewards', 'terminals', 'cumulative_gamma'], {}), '(q_target, next_states, rewards, terminals, cumulative_gamma)\n', (7012, 7073), False, 'from dopamine.jax.agents.dqn import dqn_agent\n'), ((7116, 7157), 'jax.value_and_grad', 'jax.value_and_grad', (['loss_fn'], {'has_aux': '(True)'}), '(loss_fn, has_aux=True)\n', (7134, 7157), False, 'import jax\n'), ((7483, 7526), 'optax.apply_updates', 'optax.apply_updates', (['online_params', 'updates'], {}), '(online_params, updates)\n', (7502, 7526), False, 'import optax\n'), ((1784, 1816), 'flax.linen.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (1814, 1816), True, 'from flax import linen as nn\n'), ((2462, 2472), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (2469, 2472), True, 'from flax import linen as nn\n'), ((2703, 2726), 'flax.linen.relu', 'nn.relu', (['auxiliary_pred'], {}), '(auxiliary_pred)\n', (2710, 2726), True, 'from flax import linen as nn\n'), ((3256, 3287), 'aux_tasks.auxiliary_mc.networks.ImpalaEncoder', 'aux_mc_networks.ImpalaEncoder', ([], {}), '()\n', (3285, 3287), True, 'from 
aux_tasks.auxiliary_mc import networks as aux_mc_networks\n'), ((3418, 3450), 'flax.linen.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (3448, 3450), True, 'from flax import linen as nn\n'), ((3685, 3695), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (3692, 3695), True, 'from flax import linen as nn\n'), ((3926, 3949), 'flax.linen.relu', 'nn.relu', (['auxiliary_pred'], {}), '(auxiliary_pred)\n', (3933, 3949), True, 'from flax import linen as nn\n'), ((4670, 4702), 'flax.linen.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (4700, 4702), True, 'from flax import linen as nn\n'), ((5333, 5388), 'dopamine.jax.networks.NoisyNetwork', 'networks.NoisyNetwork', ([], {'rng_key': 'rng_key', 'eval_mode': '(False)'}), '(rng_key=rng_key, eval_mode=False)\n', (5354, 5388), False, 'from dopamine.jax import networks\n'), ((5491, 5515), 'jax.nn.sigmoid', 'jax.nn.sigmoid', (['features'], {}), '(features)\n', (5505, 5515), False, 'import jax\n'), ((6408, 6442), 'jax.numpy.squeeze', 'jnp.squeeze', (['model_output.q_values'], {}), '(model_output.q_values)\n', (6419, 6442), True, 'import jax.numpy as jnp\n'), ((6643, 6683), 'jax.numpy.squeeze', 'jnp.squeeze', (['model_output.aux_prediction'], {}), '(model_output.aux_prediction)\n', (6654, 6683), True, 'import jax.numpy as jnp\n'), ((8529, 8584), 'functools.partial', 'functools.partial', (['network'], {'num_predictions': 'num_rewards'}), '(network, num_predictions=num_rewards)\n', (8546, 8584), False, 'import functools\n'), ((1864, 1899), 'dopamine.jax.networks.preprocess_atari_inputs', 'networks.preprocess_atari_inputs', (['x'], {}), '(x)\n', (1896, 1899), False, 'from dopamine.jax import networks\n'), ((2335, 2345), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (2342, 2345), True, 'from flax import linen as nn\n'), ((2396, 2443), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(512)', 'kernel_init': 'initializer'}), '(features=512, 
kernel_init=initializer)\n', (2404, 2443), True, 'from flax import linen as nn\n'), ((2488, 2548), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.num_actions', 'kernel_init': 'initializer'}), '(features=self.num_actions, kernel_init=initializer)\n', (2496, 2548), True, 'from flax import linen as nn\n'), ((2624, 2671), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(512)', 'kernel_init': 'initializer'}), '(features=512, kernel_init=initializer)\n', (2632, 2671), True, 'from flax import linen as nn\n'), ((2748, 2812), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.num_predictions', 'kernel_init': 'initializer'}), '(features=self.num_predictions, kernel_init=initializer)\n', (2756, 2812), True, 'from flax import linen as nn\n'), ((3498, 3533), 'dopamine.jax.networks.preprocess_atari_inputs', 'networks.preprocess_atari_inputs', (['x'], {}), '(x)\n', (3530, 3533), False, 'from dopamine.jax import networks\n'), ((3610, 3657), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(512)', 'kernel_init': 'initializer'}), '(features=512, kernel_init=initializer)\n', (3618, 3657), True, 'from flax import linen as nn\n'), ((3711, 3771), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.num_actions', 'kernel_init': 'initializer'}), '(features=self.num_actions, kernel_init=initializer)\n', (3719, 3771), True, 'from flax import linen as nn\n'), ((3847, 3894), 'flax.linen.Dense', 'nn.Dense', ([], {'features': '(512)', 'kernel_init': 'initializer'}), '(features=512, kernel_init=initializer)\n', (3855, 3894), True, 'from flax import linen as nn\n'), ((3971, 4035), 'flax.linen.Dense', 'nn.Dense', ([], {'features': 'self.num_predictions', 'kernel_init': 'initializer'}), '(features=self.num_predictions, kernel_init=initializer)\n', (3979, 4035), True, 'from flax import linen as nn\n'), ((4750, 4785), 'dopamine.jax.networks.preprocess_atari_inputs', 'networks.preprocess_atari_inputs', (['x'], {}), '(x)\n', (4782, 4785), False, 'from dopamine.jax import 
networks\n'), ((5221, 5231), 'flax.linen.relu', 'nn.relu', (['x'], {}), '(x)\n', (5228, 5231), True, 'from flax import linen as nn\n'), ((6366, 6384), 'jax.vmap', 'jax.vmap', (['q_online'], {}), '(q_online)\n', (6374, 6384), False, 'import jax\n'), ((6465, 6492), 'jax.vmap', 'jax.vmap', (['(lambda x, y: x[y])'], {}), '(lambda x, y: x[y])\n', (6473, 6492), False, 'import jax\n'), ((2150, 2284), 'flax.linen.Conv', 'nn.Conv', ([], {'features': 'hidden_size', 'kernel_size': '(kernel_size, kernel_size)', 'strides': '(stride_size, stride_size)', 'kernel_init': 'initializer'}), '(features=hidden_size, kernel_size=(kernel_size, kernel_size),\n strides=(stride_size, stride_size), kernel_init=initializer)\n', (2157, 2284), True, 'from flax import linen as nn\n'), ((5036, 5170), 'flax.linen.Conv', 'nn.Conv', ([], {'features': 'hidden_size', 'kernel_size': '(kernel_size, kernel_size)', 'strides': '(stride_size, stride_size)', 'kernel_init': 'initializer'}), '(features=hidden_size, kernel_size=(kernel_size, kernel_size),\n strides=(stride_size, stride_size), kernel_init=initializer)\n', (5043, 5170), True, 'from flax import linen as nn\n'), ((6535, 6560), 'jax.vmap', 'jax.vmap', (['losses.mse_loss'], {}), '(losses.mse_loss)\n', (6543, 6560), False, 'import jax\n'), ((6708, 6733), 'jax.vmap', 'jax.vmap', (['losses.mse_loss'], {}), '(losses.mse_loss)\n', (6716, 6733), False, 'import jax\n'), ((9251, 9293), 'numpy.linspace', 'onp.linspace', (['(0.1)', '(0.999)', 'self.num_rewards'], {}), '(0.1, 0.999, self.num_rewards)\n', (9263, 9293), True, 'import numpy as onp\n'), ((10833, 10902), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Losses/Aggregate"""', 'loss'], {'step': 'self.training_steps'}), "('Losses/Aggregate', loss, step=self.training_steps)\n", (10850, 10902), True, 'import tensorflow as tf\n'), ((10915, 10987), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Losses/Auxiliary"""', 'auxloss'], {'step': 'self.training_steps'}), "('Losses/Auxiliary', auxloss, 
step=self.training_steps)\n", (10932, 10987), True, 'import tensorflow as tf\n'), ((11049, 11114), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Losses/TD"""', 'td_loss'], {'step': 'self.training_steps'}), "('Losses/TD', td_loss, step=self.training_steps)\n", (11066, 11114), True, 'import tensorflow as tf\n')] |
import numpy as np
from scipy.optimize import minimize_scalar
import statsmodels.regression.linear_model as lm
from astropy.visualization import PercentileInterval
class InputError(Exception):
    """Raised when a required parameter is missing or invalid.

    Attributes:
        expression: name of the routine that raised the error.
        message: explanation of what was wrong.
    """

    def __init__(self, expression, message):
        # BUG FIX: the original never called Exception.__init__, so str(e)
        # was empty and tracebacks carried no information.
        super().__init__(message)
        self.expression = expression
        self.message = message
def fit_model(data, model, sigma, fit_method='chisq', masking=None,
              mask_only=False, **kwargs):
    """Find the model strength that best scales *model* onto *data*.

    Parameters
    ----------
    data, model, sigma : array-like
        Measured values, modeled values, and 1-sigma uncertainties.
    fit_method : str
        One of 'chisq' (reduced chi-squared), 'difference' (sum of absolute
        residuals), or 'wls' (weighted least squares).
    masking : str or None
        Semicolon-separated directives: 'middleXX' (fit the middle XX
        percentile of data), 'minaltXX' (requires kwargs['altitude']),
        'minsnrXX', or 'siglimitXX' (applied after an initial fit).
    mask_only : bool
        If True, return only the mask: (None, None, mask).
    **kwargs
        'altitude' is required by the 'minalt' directive.

    Returns
    -------
    (strength, goodness, mask)
        Best-fit model strength, goodness-of-fit value, and the boolean
        mask of points used in the fit.
    """
    def chisq(x):
        # Reduced chi-squared of data vs. x*model over the masked points.
        return np.sum((data[mask] - x*model[mask])**2 /
                      sigma[mask]**2)/(sum(mask)-1)

    def difference(x):
        # Sum of absolute residuals of data vs. x*model.
        return np.sum(np.abs(data[mask] - x*model[mask]))

    mask = np.array([True for _ in data])
    sigmalimit = None
    if masking is not None:
        for masktype in masking.split(';'):
            masktype = masktype.strip().lower()
            if masktype.startswith('middle'):
                perinterval = float(masktype[6:])
                # Estimate model strength (source rate) by fitting middle %
                interval = PercentileInterval(perinterval)
                lim = interval.get_limits(data)
                mask = (mask &
                        (data >= lim[0]) &
                        (data <= lim[1]))
            elif (masktype.startswith('minalt')) and ('altitude' in kwargs):
                minalt = float(masktype[6:])
                mask = mask & (kwargs['altitude'] >= minalt)
            elif masktype.startswith('minalt'):
                raise InputError('mathMB.fit_model', 'Altitude not supplied.')
            elif masktype.startswith('minsnr'):
                minSNR = float(masktype[6:])
                snr = data/sigma
                mask = mask & (snr > minSNR)
            elif masktype.startswith('siglimit'):
                # Deferred: applied after an initial fit below.
                sigmalimit = masktype
            else:
                # CONSISTENCY FIX: was 'MESSENGERdata.fit_model'; every other
                # raise in this function names 'mathMB.fit_model'.
                raise InputError('mathMB.fit_model',
                                 f'masking = {masktype} not defined.')

    if mask_only:
        return None, None, mask

    # IDIOM FIX: explicit dispatch table instead of eval(fit_method.lower()).
    fitfunctions = {'chisq': chisq, 'difference': difference}
    available_fitfunctions = ['chisq', 'difference', 'wls']
    # IDIOM FIX: was `np.any(mask) == False`.
    if not np.any(mask):
        # No data points are included - just do a simple fit for show
        mask_ = mask.copy()
        mask[:] = True
        model_strength = minimize_scalar(difference)
        mask = mask_
        return model_strength.x, model_strength.fun, mask
    elif fit_method.lower() in available_fitfunctions:
        if fit_method == 'wls':
            # Weighted least squares fit
            wls_model = lm.WLS(model[mask], data[mask], 1./sigma[mask]**2)
            result = wls_model.fit()
            if sigmalimit is not None:
                siglimit = float(sigmalimit[8:])
                diff = (data - model/result.params[0])/sigma
                # NOTE(review): `diff` is already normalized by sigma, so
                # comparing against siglimit*sigma (not siglimit) looks
                # suspicious — preserved as-is; confirm intent.
                mask = mask & (diff < siglimit*sigma)
                wls_model = lm.WLS(model[mask], data[mask], 1./sigma[mask]**2)
                result = wls_model.fit()
            return 1./result.params[0], result.rsquared, mask
        else:
            objective = fitfunctions[fit_method.lower()]
            model_strength = minimize_scalar(objective)
            if sigmalimit is not None:
                siglimit = float(sigmalimit[8:])
                diff = (data - model_strength.x*model)/sigma
                # NOTE(review): same siglimit*sigma question as above.
                mask = mask & (diff < siglimit*sigma)
                model_strength = minimize_scalar(objective)
            return model_strength.x, model_strength.fun, mask
    else:
        raise InputError('mathMB.fit_model',
                         f'fit_method = {fit_method} not defined.')
| [
"numpy.abs",
"statsmodels.regression.linear_model.WLS",
"numpy.any",
"numpy.array",
"numpy.sum",
"scipy.optimize.minimize_scalar",
"astropy.visualization.PercentileInterval"
] | [((707, 739), 'numpy.array', 'np.array', (['[(True) for _ in data]'], {}), '([(True) for _ in data])\n', (715, 739), True, 'import numpy as np\n'), ((512, 574), 'numpy.sum', 'np.sum', (['((data[mask] - x * model[mask]) ** 2 / sigma[mask] ** 2)'], {}), '((data[mask] - x * model[mask]) ** 2 / sigma[mask] ** 2)\n', (518, 574), True, 'import numpy as np\n'), ((655, 691), 'numpy.abs', 'np.abs', (['(data[mask] - x * model[mask])'], {}), '(data[mask] - x * model[mask])\n', (661, 691), True, 'import numpy as np\n'), ((2156, 2168), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (2162, 2168), True, 'import numpy as np\n'), ((2341, 2368), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['difference'], {}), '(difference)\n', (2356, 2368), False, 'from scipy.optimize import minimize_scalar\n'), ((1079, 1110), 'astropy.visualization.PercentileInterval', 'PercentileInterval', (['perinterval'], {}), '(perinterval)\n', (1097, 1110), False, 'from astropy.visualization import PercentileInterval\n'), ((2624, 2679), 'statsmodels.regression.linear_model.WLS', 'lm.WLS', (['model[mask]', 'data[mask]', '(1.0 / sigma[mask] ** 2)'], {}), '(model[mask], data[mask], 1.0 / sigma[mask] ** 2)\n', (2630, 2679), True, 'import statsmodels.regression.linear_model as lm\n'), ((2984, 3039), 'statsmodels.regression.linear_model.WLS', 'lm.WLS', (['model[mask]', 'data[mask]', '(1.0 / sigma[mask] ** 2)'], {}), '(model[mask], data[mask], 1.0 / sigma[mask] ** 2)\n', (2990, 3039), True, 'import statsmodels.regression.linear_model as lm\n')] |
import socket
import sys
from ledapy.deconvolution import sdeconv_analysis
from numpy import array as npa
# import cvxEDA as cvx
import numpy as np
import neurokit2 as nk
# Create a TCP/IP socket that serves EDA (electrodermal activity) processing:
# clients send "_"-separated float samples; the server replies with the tonic
# component of the processed signal in the same format.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Bind the socket to the port
server_address = ('localhost', 8052)
print('starting up on %s port %s' % server_address)
sock.bind(server_address)

# Listen for incoming connections
sock.listen(1)

while True:
    # Wait for a connection
    print('waiting for a connection')
    connection, client_address = sock.accept()
    try:
        # BUG FIX: the original f-string was f'connection from {0}', which
        # printed the literal 0 instead of the client address.
        print(f'connection from {client_address}')
        while True:
            data = connection.recv(1024).decode("utf-8")
            # BUG FIX: recv returns '' forever once the client disconnects;
            # the original looped on that busily. Break out instead.
            if not data:
                break
            # Parse the "_"-separated float samples sent by the client;
            # stop at the first empty field (trailing separator).
            data_float_list = []
            for data_str in data.split("_"):
                if data_str == "":
                    break
                data_float_list.append(float(data_str))
            data_np = npa(data_float_list)
            print(data_np)
            # Process the raw EDA signal (1 kHz sampling assumed).
            signals, info = nk.eda_process(data_np, sampling_rate=1000)
            # Serialize the tonic component back as "_"-separated values
            # (join avoids the quadratic += / trailing-strip of the original).
            signals_data = "_".join(str(s) for s in signals['EDA_Tonic'])
            connection.sendall(signals_data.encode("utf-8"))
    finally:
        # Clean up the connection
        connection.close()
"numpy.array",
"neurokit2.eda_process",
"socket.socket"
] | [((204, 253), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (217, 253), False, 'import socket\n'), ((1158, 1178), 'numpy.array', 'npa', (['data_float_list'], {}), '(data_float_list)\n', (1161, 1178), True, 'from numpy import array as npa\n'), ((1272, 1315), 'neurokit2.eda_process', 'nk.eda_process', (['data_np'], {'sampling_rate': '(1000)'}), '(data_np, sampling_rate=1000)\n', (1286, 1315), True, 'import neurokit2 as nk\n')] |
import os
import shutil
import readdy
import tempfile
import unittest
import numpy as np
class TestTopologyReactionCount(unittest.TestCase):
    """Checks that per-step topology reaction counts recorded via callback
    agree with the counts read back from the trajectory file, and that
    particle numbers are consistent with the number of spatial reactions."""

    @classmethod
    def setUpClass(cls) -> None:
        # Scratch directory for trajectory output files (one per kernel).
        cls.dir = tempfile.mkdtemp("test-topology-reaction-count")

    @classmethod
    def tearDownClass(cls) -> None:
        shutil.rmtree(cls.dir, ignore_errors=True)

    def _test_kernel(self, kernel):
        """Run the simulation on the given kernel and validate the counts.

        Sets up a system with one spatial reaction ("attach": an A particle
        joins a T1 topology, consuming the A) and two structural reactions
        that flip all particle types B<->C and the topology type T1<->T2.
        """
        system = readdy.ReactionDiffusionSystem(box_size=[20, 20, 20])
        system.topologies.add_type("T1")
        system.topologies.add_type("T2")
        system.add_species("A")
        system.add_topology_species("B")
        system.topologies.configure_harmonic_bond("B", "B", 1., .1)
        system.add_topology_species("C")
        system.topologies.configure_harmonic_bond("C", "C", 1., .1)

        # Each "attach" consumes one free A particle.
        system.topologies.add_spatial_reaction("attach: T1(B) + (A) -> T1(B--B)", rate=1e-1, radius=.5)

        def flip1(topology):
            # Structural reaction: all particles B -> C, topology T1 -> T2.
            recipe = readdy.StructuralReactionRecipe(topology)
            for v in topology.graph.vertices:
                recipe.change_particle_type(v, "C")
            recipe.change_topology_type("T2")
            return recipe

        def flip2(topology):
            # Inverse structural reaction: C -> B, T2 -> T1.
            recipe = readdy.StructuralReactionRecipe(topology)
            for v in topology.graph.vertices:
                recipe.change_particle_type(v, "B")
            recipe.change_topology_type("T1")
            return recipe

        system.topologies.add_structural_reaction("flip_types_1", "T1", flip1, lambda x: 5e-2)
        system.topologies.add_structural_reaction("flip_types_2", "T2", flip2, lambda x: 5e-2)

        sim = system.simulation(kernel=kernel)
        sim.output_file = os.path.join(self.dir, "out_{}.h5".format(kernel))

        # Counts captured live via callback, to compare against the file.
        collected_counts = []

        def callback(results):
            nonlocal collected_counts
            collected_counts.append(results)

        sim.observe.reaction_counts(1, callback=callback)
        sim.observe.number_of_particles(1, types=["A", "B", "C"])
        # 1000 free A particles plus 10 single-particle T1 topologies (B).
        sim.add_particles("A", np.random.normal(scale=1, size=(1000, 3)))
        for _ in range(10):
            sim.add_topology("T1", "B", np.random.normal(size=(1, 3)))
        sim.run(1000, timestep=1, show_summary=False)

        traj = readdy.Trajectory(sim.output_file)
        times, n_particles = traj.read_observable_number_of_particles()
        times2, counts = traj.read_observable_reaction_counts()
        # Both observables were recorded with the same stride.
        np.testing.assert_array_equal(times, times2)
        # No non-topology reactions were defined.
        assert not counts["reactions"]
        spatials = counts["spatial_topology_reactions"]
        n_spatial = 0
        cA_prev = None
        for t, (cA, cB, cC), cc in zip(times, n_particles, collected_counts):
            # A count can only decrease (A is only ever consumed by "attach").
            assert cA_prev is None or cA <= cA_prev
            # Total particle number is conserved: 1000 A + 10 topology particles.
            np.testing.assert_equal(cA + cB + cC, 1010)
            cc_normal = cc[0]
            assert not cc_normal
            cc_spatial = cc[1]
            cc_structural = cc[2]
            # Each attach event consumes exactly one A.
            n_spatial += spatials["attach"][t]
            assert cA == 1000 - n_spatial, f"Got {cA} A particles, expected {1000 - n_spatial}, at time t {t}"
            # Callback-captured counts must match the counts read from file.
            for sp in cc_spatial.keys():
                recorded = spatials[sp][t]
                assert cc_spatial[sp] == recorded, f"Got {cc_spatial[sp]} != {recorded} (t={t})"
            for st in cc_structural.keys():
                recorded = counts["structural_topology_reactions"][st][t]
                assert cc_structural[st] == recorded, f"Got {cc_structural[st]} != {recorded} (t={t})"
            cA_prev = cA

    def test_scpu(self):
        self._test_kernel("SingleCPU")

    def test_cpu(self):
        self._test_kernel("CPU")
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.random.normal",
"numpy.testing.assert_equal",
"readdy.Trajectory",
"readdy.StructuralReactionRecipe",
"tempfile.mkdtemp",
"shutil.rmtree",
"unittest.main",
"readdy.ReactionDiffusionSystem",
"numpy.testing.assert_array_equal"
] | [((3697, 3712), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3710, 3712), False, 'import unittest\n'), ((213, 261), 'tempfile.mkdtemp', 'tempfile.mkdtemp', (['"""test-topology-reaction-count"""'], {}), "('test-topology-reaction-count')\n", (229, 261), False, 'import tempfile\n'), ((324, 366), 'shutil.rmtree', 'shutil.rmtree', (['cls.dir'], {'ignore_errors': '(True)'}), '(cls.dir, ignore_errors=True)\n', (337, 366), False, 'import shutil\n'), ((422, 475), 'readdy.ReactionDiffusionSystem', 'readdy.ReactionDiffusionSystem', ([], {'box_size': '[20, 20, 20]'}), '(box_size=[20, 20, 20])\n', (452, 475), False, 'import readdy\n'), ((2271, 2305), 'readdy.Trajectory', 'readdy.Trajectory', (['sim.output_file'], {}), '(sim.output_file)\n', (2288, 2305), False, 'import readdy\n'), ((2452, 2496), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['times', 'times2'], {}), '(times, times2)\n', (2481, 2496), True, 'import numpy as np\n'), ((963, 1004), 'readdy.StructuralReactionRecipe', 'readdy.StructuralReactionRecipe', (['topology'], {}), '(topology)\n', (994, 1004), False, 'import readdy\n'), ((1226, 1267), 'readdy.StructuralReactionRecipe', 'readdy.StructuralReactionRecipe', (['topology'], {}), '(topology)\n', (1257, 1267), False, 'import readdy\n'), ((2057, 2098), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(1)', 'size': '(1000, 3)'}), '(scale=1, size=(1000, 3))\n', (2073, 2098), True, 'import numpy as np\n'), ((2782, 2825), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(cA + cB + cC)', '(1010)'], {}), '(cA + cB + cC, 1010)\n', (2805, 2825), True, 'import numpy as np\n'), ((2169, 2198), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 3)'}), '(size=(1, 3))\n', (2185, 2198), True, 'import numpy as np\n')] |
import json
import os
import time
from abc import ABC
import numpy as np
import torch
import torchvision
from modules.trainer.regularization import weight_clipping
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from tqdm import tqdm
import utils
from modules.evaluator import Evaluation
from utils import makedir_if_not_exist
class TrainerBase(ABC):
    """
    Basic abstract trainer for adversarial (Gen/Dis) models.

    Subclasses must provide the required entries of ``params`` and override
    ``update_parameters_generator`` / ``update_parameters_discriminator``.
    """
    # Class-level defaults kept for backward compatibility (e.g. code reading
    # TrainerBase.global_step). Every instance gets its own copies of the
    # mutable containers in __init__ -- see the BUGFIX note there.
    list_loss = []
    fixed_samples = {}
    tensorboard_scalar = {}
    tensorboard_histogram = {}
    global_step = 0
    evaluation = None
    # initial Tensor Type (CUDA variants when a GPU is available)
    FloatTensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    LongTensor = torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor
    Eval_net = None
    one = FloatTensor([1])
    mone = one * -1
    epoch = 0
    def __init__(self, params, network, pre_train=None):
        """
        :param params: configuration dict; shallow-copied so top-level keys
            added here don't leak back to the caller
        :param network: module exposing ``Generator`` and ``Discriminator``
        :param pre_train: optional directory of a saved model to restore
        """
        # BUGFIX: the mutable containers declared at class level were shared
        # by every trainer instance; rebind fresh per-instance copies so two
        # trainers never accumulate losses/scalars into the same buffers.
        self.list_loss = []
        self.fixed_samples = {}
        self.tensorboard_scalar = {}
        self.tensorboard_histogram = {}
        self.global_step = 0
        self.epoch = 0
        # Init id
        self.date_train = time.asctime()
        self.params = params.copy()
        # Init Network and DataParallel ..............................................................................
        self.models = MultipleModel(
            Dis=network.Discriminator(self.params),
            Gen=network.Generator(self.params)
        )
        if pre_train:
            self.networks_load(pre_train, eval=False, check_result=False)
        # Set Parallel Train
        if torch.cuda.is_available() and self.params["n_gpu"] > 1:
            self.models.set_parallel(device_ids=range(self.params["n_gpu"]))
        # Set network eval
        self.Eval_net = "Gen"
        # Init number iter each network
        self.n_critic = self.params["train"]["n_critic"]
        # Init dir
        self.init_dir()
        # Init Datasets ................................................................................
        if self.params["dataloader"]["type"] is not None:
            self.dataset = Datasets(is_train=True, **self.params)
        # Init writer to tensorboard, and add configuration text
        self.writer = SummaryWriter(self.params["dir"])
        self.writer.add_text("parameters", json.dumps(self.params))
        # Init eval
        if "eval" in self.params.keys():
            if self.params["evaluator"]["type"] is not None:
                self.evaluation = Evaluation(self.writer, **self.params)
    def init_dir(self):
        """
        Initialize the whole experiment directory tree and store the paths
        back into ``self.params``.
        """
        # reset root_dir = root_dir/experiments; default to ".." when absent
        try:
            self.params["root_dir"]
        except KeyError:
            self.params["root_dir"] = ".."
        self.params["root_dir"] = os.path.join(self.params["root_dir"], "experiments")
        utils.print_wline("Initiate all directories", "=")
        makedir_if_not_exist(self.params["root_dir"])
        # set dataloader dir = root_dir/experiments/dataloader
        self.params["dataloader"]["dir"] = os.path.join(self.params["root_dir"], "dataloader")
        makedir_if_not_exist(self.params["dataloader"]["dir"])
        # set train dir = root_dir/experiments/train/dataloader/train_mode/model/experiment
        experiment_dir = "train/{datasets}/{train_mode}/{network}/{experiment}".format(
            datasets=self.params['dataloader']['type'],
            train_mode=self.__class__.__name__,
            network=self.params["network"]["name"],
            experiment=self.params["experiment_name"]
        )
        # set experiment dir and logs; if it exists, append an increasing
        # numeric suffix instead of overwriting an older run
        _dir = os.path.join(self.params["root_dir"], experiment_dir)
        self.params["dir"] = _dir
        _copy = 1
        while os.path.exists(self.params["dir"]):
            self.params["dir"] = "{dir}_{cp}".format(dir=_dir, cp=_copy)
            _copy += 1
        makedir_if_not_exist(self.params["dir"])
        # set save models dir = .../experiment/saved_model
        self.params["train"]["saved_model_dir"] = os.path.join(self.params["dir"], "saved_model")
        makedir_if_not_exist(self.params["train"]["saved_model_dir"])
        _print = [" - Root : {}/...".format(os.path.abspath(self.params["root_dir"])),
                  " - Datasets : {}".format(os.path.abspath(self.params["dataloader"]["dir"])),
                  " - Session : {}".format(os.path.abspath(self.params["dir"])),
                  " - Save Model : {}".format(os.path.abspath(self.params["train"]["saved_model_dir"]))]
        print('\n'.join(_print))
    def forward(self, **kwargs):
        """Generate detached samples from the evaluation network (no grad)."""
        with torch.no_grad():
            samples = self.models[self.Eval_net](z_input=kwargs["z_input"]).detach()
        return samples
    # Training ##############################################################################
    def pre_train(self):
        """
        For adding some process before training session
        :return:
        """
        pass
    def post_train(self):
        """
        For adding some process after training session
        :return:
        """
        pass
    def train(self, epochs, batch_size, **kwargs):
        """
        Main training loop.

        :param epochs: total number of epochs to run
        :param batch_size: per-GPU batch size (scaled by n_gpu below)
        :param kwargs: forwarded to ``torch.utils.data.DataLoader``
        :return:
        """
        self.global_step = 0
        self.models.cuda()
        if "eval" in self.params.keys():
            if self.Eval_net is None and self.params["evaluator"]["type"] is not None:
                raise ValueError('Please define your eval network in self.Eval_net')
        # save network architecture on tensorboard
        self.tensorboard_add_networks()
        utils.print_wline("Script for Check log".format(epochs), line="=")
        print("tensorboard --logdir {} --bind_all".format(os.path.abspath(self.params["dir"])))
        utils.print_wline("Start Train with {} Epoch".format(epochs), line="=")
        # Scale the batch over all GPUs for DataParallel
        if torch.cuda.is_available() and self.params["n_gpu"] > 1:
            batch_size = batch_size * self.params["n_gpu"]
        self.pre_train()
        data_loader = DataLoader(self.dataset.data, batch_size=batch_size, **kwargs)
        for self.epoch in range(self.epoch + 1, epochs + 1):
            self.models.train()
            for idx, (x, y) in enumerate(
                    tqdm(data_loader, desc="Epoch {epoch}/{epochs}".format(epoch=self.epoch, epochs=epochs))):
                # Drop the last, possibly partial, batch
                if idx == data_loader.dataset.__len__() // batch_size:
                    break
                x = x.type(self.FloatTensor)
                y = y.type(self.FloatTensor)
                # Update parameters
                self.update_parameters(idx, x, y)
                self.global_step += 1
            self.tensorboard_update(global_step=self.epoch, pos_tag="Epoch_")
            # check trained data
            if self.params["train"]["check_generated_data"]:
                self.check_generated_data(self.fixed_samples, global_step=self.epoch, tag="result")
            # periodic checkpoint (final epoch is saved after the loop)
            if self.epoch % self.params["train"]["save_period"] == 0 and self.epoch != epochs:
                self.networks_save()
            if "eval" in self.params.keys():
                self.evaluate()
        self.post_train()
        self.networks_save()
        utils.print_wline("Train with {} Epoch Done !".format(self.epoch + 1), line="=")
    def update_parameters(self, idx, x, y):
        """
        One optimization step: always update the discriminator, update the
        generator every ``n_critic`` steps (WGAN-style schedule).

        :param idx: batch index within the epoch
        :param x: input batch
        :param y: target batch
        :return:
        """
        d_loss = []
        g_loss = []
        # Warm-up / periodic phases train the critic harder
        if self.epoch < 25 or self.epoch % 500 == 0:
            self.n_critic = 20
        else:
            self.n_critic = 5
        z = self.FloatTensor(np.random.normal(0, 1, (x.shape[0], self.params["network"]["z_dim"])))
        self.models.zero_grad("Dis")
        d_loss.append(self.update_parameters_discriminator(z, x, y).item())
        self.models.step("Dis")
        # Clipping D (weight clipping as in the original WGAN)
        if self.params["regularization"]["clipping"]:
            weight_clipping(self.models["Dis"], self.params["regularization"]["clipping"])
        self.models.zero_grad("Gen")
        if idx % self.n_critic == 0:
            g_loss.append(self.update_parameters_generator(z, x, y).item())
            self.models.step("Gen")
        _scalar = {
            "Gen_loss": np.mean(g_loss),
            "Dis_loss": np.mean(d_loss)
        }
        # Accumulate per-step losses; flushed to tensorboard once per epoch
        for name, value in _scalar.items():
            if name not in self.tensorboard_scalar:
                self.tensorboard_scalar[name] = [value]
            else:
                self.tensorboard_scalar[name].append(value)
        # self.tensorboard_update(self.global_step, pos_tag="Step_")
    def update_parameters_generator(self, z, x, y):
        """
        Must be overridden by subclasses: one generator update.
        :param z: latent noise batch
        :param x: input batch
        :param y: target batch
        :return: generator loss tensor
        """
        raise ValueError('Please put update generator')
    def update_parameters_discriminator(self, z, x, y):
        """
        Must be overridden by subclasses: one discriminator update.
        :param z: latent noise batch
        :param x: input batch
        :param y: target batch
        :return: discriminator loss tensor
        """
        raise ValueError('Please put update discriminator')
    # Evaluation ##############################################################################
    def check_generated_data(self, samples, global_step=0, tag="result"):
        """
        this function used for check the result of generator network and save it to tensorboard
        :param global_step:
        :param samples(dict): samples of input network
        :param tag: save the output to tensorboard log wit tag
        :return:
        """
        self.models.eval()
        images = self.forward(**samples)
        self.tensorboard_add_images(images, global_step, tag)
    def evaluate(self):
        """Run the configured evaluator on the evaluation network."""
        self.evaluation.run(self.models[self.Eval_net], self.global_step)
    # Tensorboard #############################################################################
    def tensorboard_update(self, global_step=0, pos_tag=""):
        """
        Flush accumulated scalars/histograms to the tensorboard log and
        clear the accumulators.
        :return:
        """
        if len(self.tensorboard_scalar.keys()) != 0:
            for key, value in self.tensorboard_scalar.items():
                # A list of per-step values is reduced to its mean
                if isinstance(value, list):
                    value = np.mean(value, 0)
                self.writer.add_scalar(tag=pos_tag + key, scalar_value=value, global_step=global_step,
                                       walltime=time.time())
        if len(self.tensorboard_histogram.keys()) != 0:
            for key, value in self.tensorboard_histogram.items():
                value = np.array(value)
                if len(value.shape) > 1:
                    value = np.mean(value, 0)
                self.writer.add_histogram(tag=pos_tag + key, values=value, global_step=global_step,
                                          walltime=time.time())
        # Clean Dictionary
        self.tensorboard_scalar.clear()
        self.tensorboard_histogram.clear()
    def tensorboard_add_images(self, images, global_step, tag):
        """Write a normalized image grid to the tensorboard log."""
        # make_grid needs host tensors; no-op when already on CPU
        if torch.cuda.is_available():
            images = images.cpu()
        grid = torchvision.utils.make_grid(images, normalize=True)
        self.writer.add_image(tag, grid, global_step, walltime=time.time())
    def tensorboard_add_networks(self):
        """Log each model's architecture and parameter count as text."""
        for name, model in self.models.items():
            num_params = 0
            for param in model.parameters():
                num_params += param.numel()
            self.writer.add_text("Network {name}-{num_params}params".format(name=name, num_params=num_params),
                                 str(model))
    # Networks Utils ##########################################################################
    def networks_save(self, output_ext="pkl"):
        """
        save model to root_dir/type/name/saved_model
        :param output_ext:
        :return:
        """
        more_info = {
            "dataset": self.params['dataloader']['type'],
            "train_mode": self.__class__.__name__
        }
        self.models.save(path=self.params["train"]["saved_model_dir"], global_step=None, more_info=more_info,
                         output_ext=output_ext)
        print('model saved! [epoch:{epoch}]'.format(epoch=self.epoch))
    def networks_load(self, dir, eval=False, check_result=False):
        """
        Restore a previously saved model.
        :param dir: path of the saved model directory
        :param eval: run the evaluator after loading
        :param check_result: log generated samples after loading
        :return:
        """
        utils.print_wline("Load Network", line="=")
        self.models.load(dir)
        self.epoch = self.models.global_step
        if eval:
            self.evaluate()
        if check_result:
            self.check_generated_data(self.fixed_samples, tag="Pre-trained_Model_Result")
| [
"utils.makedir_if_not_exist",
"os.path.exists",
"numpy.random.normal",
"time.asctime",
"modules.evaluator.Evaluation",
"tensorboardX.SummaryWriter",
"numpy.mean",
"json.dumps",
"os.path.join",
"modules.trainer.regularization.weight_clipping",
"torch.no_grad",
"numpy.array",
"utils.print_wlin... | [((709, 734), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (732, 734), False, 'import torch\n'), ((800, 825), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (823, 825), False, 'import torch\n'), ((1033, 1047), 'time.asctime', 'time.asctime', ([], {}), '()\n', (1045, 1047), False, 'import time\n'), ((2141, 2174), 'tensorboardX.SummaryWriter', 'SummaryWriter', (["self.params['dir']"], {}), "(self.params['dir'])\n", (2154, 2174), False, 'from tensorboardX import SummaryWriter\n'), ((2732, 2784), 'os.path.join', 'os.path.join', (["self.params['root_dir']", '"""experiments"""'], {}), "(self.params['root_dir'], 'experiments')\n", (2744, 2784), False, 'import os\n'), ((2793, 2843), 'utils.print_wline', 'utils.print_wline', (['"""Initiate all directories"""', '"""="""'], {}), "('Initiate all directories', '=')\n", (2810, 2843), False, 'import utils\n'), ((2852, 2897), 'utils.makedir_if_not_exist', 'makedir_if_not_exist', (["self.params['root_dir']"], {}), "(self.params['root_dir'])\n", (2872, 2897), False, 'from utils import makedir_if_not_exist\n'), ((3005, 3056), 'os.path.join', 'os.path.join', (["self.params['root_dir']", '"""dataloader"""'], {}), "(self.params['root_dir'], 'dataloader')\n", (3017, 3056), False, 'import os\n'), ((3065, 3119), 'utils.makedir_if_not_exist', 'makedir_if_not_exist', (["self.params['dataloader']['dir']"], {}), "(self.params['dataloader']['dir'])\n", (3085, 3119), False, 'from utils import makedir_if_not_exist\n'), ((3605, 3658), 'os.path.join', 'os.path.join', (["self.params['root_dir']", 'experiment_dir'], {}), "(self.params['root_dir'], experiment_dir)\n", (3617, 3658), False, 'import os\n'), ((3725, 3759), 'os.path.exists', 'os.path.exists', (["self.params['dir']"], {}), "(self.params['dir'])\n", (3739, 3759), False, 'import os\n'), ((3866, 3906), 'utils.makedir_if_not_exist', 'makedir_if_not_exist', (["self.params['dir']"], {}), "(self.params['dir'])\n", (3886, 
3906), False, 'from utils import makedir_if_not_exist\n'), ((4062, 4109), 'os.path.join', 'os.path.join', (["self.params['dir']", '"""saved_model"""'], {}), "(self.params['dir'], 'saved_model')\n", (4074, 4109), False, 'import os\n'), ((4118, 4179), 'utils.makedir_if_not_exist', 'makedir_if_not_exist', (["self.params['train']['saved_model_dir']"], {}), "(self.params['train']['saved_model_dir'])\n", (4138, 4179), False, 'from utils import makedir_if_not_exist\n'), ((6095, 6157), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset.data'], {'batch_size': 'batch_size'}), '(self.dataset.data, batch_size=batch_size, **kwargs)\n', (6105, 6157), False, 'from torch.utils.data import DataLoader\n'), ((11228, 11253), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11251, 11253), False, 'import torch\n'), ((11347, 11398), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['images'], {'normalize': '(True)'}), '(images, normalize=True)\n', (11374, 11398), False, 'import torchvision\n'), ((12622, 12665), 'utils.print_wline', 'utils.print_wline', (['"""Load Network"""'], {'line': '"""="""'}), "('Load Network', line='=')\n", (12639, 12665), False, 'import utils\n'), ((1489, 1514), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1512, 1514), False, 'import torch\n'), ((2218, 2241), 'json.dumps', 'json.dumps', (['self.params'], {}), '(self.params)\n', (2228, 2241), False, 'import json\n'), ((4642, 4657), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4655, 4657), False, 'import torch\n'), ((5931, 5956), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5954, 5956), False, 'import torch\n'), ((7795, 7864), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', "(x.shape[0], self.params['network']['z_dim'])"], {}), "(0, 1, (x.shape[0], self.params['network']['z_dim']))\n", (7811, 7864), True, 'import numpy as np\n'), ((8099, 8177), 'modules.trainer.regularization.weight_clipping', 
'weight_clipping', (["self.models['Dis']", "self.params['regularization']['clipping']"], {}), "(self.models['Dis'], self.params['regularization']['clipping'])\n", (8114, 8177), False, 'from modules.trainer.regularization import weight_clipping\n'), ((2400, 2438), 'modules.evaluator.Evaluation', 'Evaluation', (['self.writer'], {}), '(self.writer, **self.params)\n', (2410, 2438), False, 'from modules.evaluator import Evaluation\n'), ((4231, 4271), 'os.path.abspath', 'os.path.abspath', (["self.params['root_dir']"], {}), "(self.params['root_dir'])\n", (4246, 4271), False, 'import os\n'), ((4320, 4369), 'os.path.abspath', 'os.path.abspath', (["self.params['dataloader']['dir']"], {}), "(self.params['dataloader']['dir'])\n", (4335, 4369), False, 'import os\n'), ((4418, 4453), 'os.path.abspath', 'os.path.abspath', (["self.params['dir']"], {}), "(self.params['dir'])\n", (4433, 4453), False, 'import os\n'), ((4502, 4558), 'os.path.abspath', 'os.path.abspath', (["self.params['train']['saved_model_dir']"], {}), "(self.params['train']['saved_model_dir'])\n", (4517, 4558), False, 'import os\n'), ((5773, 5808), 'os.path.abspath', 'os.path.abspath', (["self.params['dir']"], {}), "(self.params['dir'])\n", (5788, 5808), False, 'import os\n'), ((8418, 8433), 'numpy.mean', 'np.mean', (['g_loss'], {}), '(g_loss)\n', (8425, 8433), True, 'import numpy as np\n'), ((8463, 8478), 'numpy.mean', 'np.mean', (['d_loss'], {}), '(d_loss)\n', (8470, 8478), True, 'import numpy as np\n'), ((10774, 10789), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (10782, 10789), True, 'import numpy as np\n'), ((11462, 11473), 'time.time', 'time.time', ([], {}), '()\n', (11471, 11473), False, 'import time\n'), ((10445, 10462), 'numpy.mean', 'np.mean', (['value', '(0)'], {}), '(value, 0)\n', (10452, 10462), True, 'import numpy as np\n'), ((10859, 10876), 'numpy.mean', 'np.mean', (['value', '(0)'], {}), '(value, 0)\n', (10866, 10876), True, 'import numpy as np\n'), ((10614, 10625), 'time.time', 
'time.time', ([], {}), '()\n', (10623, 10625), False, 'import time\n'), ((11028, 11039), 'time.time', 'time.time', ([], {}), '()\n', (11037, 11039), False, 'import time\n')] |
# Copyright 2020 NXP.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the NXP Semiconductors nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import signal
import os
import time
import copy
import csv
import pickle
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore, QtWidgets
import numpy as np
import drv_ftdi
PROGRAM_VERSION = 'PMT v2.1'
COPYRIGHT_INFO = 'Copyright 2020 NXP'
# Per-rail curve colors for the plots (cycled when there are more rails than colors).
COLORS = ["#8B7825", "#842D2C", "#5E3450", "#00253D", "#205632", "#4E2B1B", "#6C561A", "#8A2533", "#5A2C5D", "#005474",
          "#4C762A", "#463626", "#6E4A1C", "#802247", "#2E1B45", "#00454E", "#554E24", "#2D2926", "#BE4C00", "#691F42",
          "#543074", "#244A57", "#817800", "#99AA00", "#73371B", "#572831", "#0A282E", "#004C40", "#B39900", "#83322E",
          "#632D4F", "#1A4086", "#005544"]
# Colors reserved for power-group curves.
GROUPS_COLORS = ["#4FA383", "#007A4D", "#95A7C8", "#385C9B"]
# Runtime display flags shared across the GUI.
FLAGS = {'display_all': False}
# Default main-window geometry in pixels.
WINDOW_WIDTH = 1280
WINDOW_HEIGHT = 768
# Global pyqtgraph theme: white background, black foreground.
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
class SplashScreen():
    """Modal splash screen shown at startup; blocks for ~2.5 s then closes itself."""
    def __init__(self, parent=None):
        super().__init__()
        # A QApplication must exist before any widget is created.
        app = QtGui.QApplication([])
        self.windows = QtGui.QDialog()
        # Strip the close button so the splash cannot be dismissed early.
        self.windows.setWindowFlags(self.windows.windowFlags() | QtCore.Qt.CustomizeWindowHint)
        self.windows.setWindowFlags(self.windows.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint)
        self.windows.setFixedSize(600, 380)
        self.windows.setStyleSheet("background-color: white;")
        #self.title = "Loading"
        self.text = QtGui.QLabel('Power Measurement Tool (PMT)')
        self.text.setAlignment(QtCore.Qt.AlignCenter)
        self.text.setStyleSheet("background-color: grey;"
                                "color: black;"
                                "font: bold 32px;"
                                )
        self.text1 = QtGui.QLabel(PROGRAM_VERSION + ' - ' + COPYRIGHT_INFO)
        self.text1.setAlignment(QtCore.Qt.AlignCenter)
        self.text1.setStyleSheet("color: black;"
                                 "font: bold 24px;"
                                 )
        # Center logo image.
        self.pic = QtGui.QLabel(self.windows)
        self.pic.setAlignment(QtCore.Qt.AlignCenter)
        self.pic.setPixmap(QtGui.QPixmap("docs/images/nxp.png"))
        self.timer = pg.QtCore.QTimer()
        self.timer.setSingleShot(True)
        self.layout = QtGui.QVBoxLayout()
        self.windows.setLayout(self.layout)
        #self.windows.setWindowTitle(self.title)
        self.layout.addWidget(self.text)
        self.layout.addWidget(self.pic)
        self.layout.addWidget(self.text1)
        self.windows.show()
        # Quit the temporary event loop after 2.5 s; exec_() blocks until then.
        self.timer.singleShot(2500, app.quit)
        app.exec_()
class Worker(QtCore.QObject):
    """Qt worker object intended to run the board acquisition loop in a thread."""
    def __init__(self, board):
        super().__init__()
        self.board = board
    def do_work(self):
        """Run the (blocking) data-collection loop of the board."""
        self.board.get_data()
    def pause_thread(self):
        """Ask the FTDI capture driver to pause data collection."""
        drv_ftdi.FLAG_PAUSE_CAPTURE = True
    def resume_thread(self):
        """Ask the FTDI capture driver to resume data collection."""
        drv_ftdi.FLAG_PAUSE_CAPTURE = False
class ZoomDataWin(QtGui.QDialog):
    """extern window displaying data in zoom region"""
    def __init__(self, parent=None):
        super(ZoomDataWin, self).__init__(parent)
        self.parent = parent
        self.header_title = ["Rail", "P_avg", "P_min", "P_max", "V_avg", "V_min", "V_max", "I_avg", "I_min", "I_max"]
        self.data_table = QtGui.QTableWidget(0, len(self.header_title))
        self.data_table.setHorizontalHeaderLabels(self.header_title)
        # One row per displayed rail...
        for rail in self.parent.b.rails_to_display:
            rowposition = self.data_table.rowCount()
            self.data_table.insertRow(rowposition)
            self.data_table.setItem(rowposition, 0, QtGui.QTableWidgetItem(rail['name']))
        # ...followed by one highlighted row per power group.
        for group in self.parent.b.power_groups:
            rowposition = self.data_table.rowCount()
            self.data_table.insertRow(rowposition)
            self.data_table.setItem(rowposition, 0, QtGui.QTableWidgetItem(group['name']))
            self.data_table.item(rowposition, 0).setBackground(QtGui.QColor(169, 169, 0, 169))
        self.w_data_lay = QtGui.QVBoxLayout()
        self.rail_control = QtGui.QLabel("Time :")
        self.w_data_lay.addWidget(self.rail_control)
        self.w_data_lay.addWidget(self.data_table)
        self.setLayout(self.w_data_lay)
    def update_data(self, minx, maxx):
        """updates data window

        :param minx: left edge of the zoom region, in seconds
        :param maxx: right edge of the zoom region, in seconds
        """
        if self.isVisible():
            self.rail_control.setText("Time : " + str(maxx - minx) + " sec")
            i = 0
            for d_rail in self.parent.b.rails_to_display:
                # Find the capture buffer matching this displayed rail.
                rail = next((item for item in self.parent.rail_buf if item['railnumber'] == d_rail['name']), None)
                # [1:] skips the initial placeholder sample.
                voltage = rail['voltage'][1:]
                current = rail['current'][1:]
                # Column 0 is the timestamp; power = voltage * current per sample.
                power = np.empty_like(voltage)
                power[:, 0] = voltage[:, 0]
                power[:, 1] = voltage[:, 1] * current[:, 1]
                min_t_p = power[:, 0].searchsorted(minx)
                max_t_p = power[:, 0].searchsorted(maxx)
                # searchsorted == 0 means minx precedes the capture; the region
                # is also skipped when it contains no sample (min == max).
                if min_t_p and min_t_p != max_t_p:
                    p_avg = power[min_t_p:max_t_p, 1].mean()
                    p_min = power[min_t_p:max_t_p, 1].min()
                    p_max = power[min_t_p:max_t_p, 1].max()
                    self.data_table.setItem(i, 1, QtGui.QTableWidgetItem(str(p_avg)))
                    self.data_table.setItem(i, 2, QtGui.QTableWidgetItem(str(p_min)))
                    self.data_table.setItem(i, 3, QtGui.QTableWidgetItem(str(p_max)))
                min_t_v = voltage[:, 0].searchsorted(minx)
                max_t_v = voltage[:, 0].searchsorted(maxx)
                if min_t_v and min_t_v != max_t_v:
                    v_avg = voltage[min_t_v:max_t_v, 1].mean()
                    v_min = voltage[min_t_v:max_t_v, 1].min()
                    v_max = voltage[min_t_v:max_t_v, 1].max()
                    self.data_table.setItem(i, 4, QtGui.QTableWidgetItem(str(v_avg)))
                    self.data_table.setItem(i, 5, QtGui.QTableWidgetItem(str(v_min)))
                    self.data_table.setItem(i, 6, QtGui.QTableWidgetItem(str(v_max)))
                min_t_c = current[:, 0].searchsorted(minx)
                max_t_c = current[:, 0].searchsorted(maxx)
                if min_t_c and min_t_c != max_t_c:
                    c_avg = current[min_t_c:max_t_c, 1].mean()
                    c_min = current[min_t_c:max_t_c, 1].min()
                    c_max = current[min_t_c:max_t_c, 1].max()
                    self.data_table.setItem(i, 7, QtGui.QTableWidgetItem(str(c_avg)))
                    self.data_table.setItem(i, 8, QtGui.QTableWidgetItem(str(c_min)))
                    self.data_table.setItem(i, 9, QtGui.QTableWidgetItem(str(c_max)))
                i += 1
            # Group rows start right after the rail rows (row index i + j).
            for j, group in enumerate(self.parent.groups_buf):
                time_group = group['power'][:, 0]
                power = group['power'][:, 1]
                min_t_gp = time_group.searchsorted(minx)
                max_t_gp = time_group.searchsorted(maxx)
                if min_t_gp and min_t_gp != max_t_gp:
                    gp_avg = power[min_t_gp:max_t_gp].mean()
                    gp_min = power[min_t_gp:max_t_gp].min()
                    gp_max = power[min_t_gp:max_t_gp].max()
                    self.data_table.setItem(i + j, 1, QtGui.QTableWidgetItem(str(gp_avg)))
                    self.data_table.setItem(i + j, 2, QtGui.QTableWidgetItem(str(gp_min)))
                    self.data_table.setItem(i + j, 3, QtGui.QTableWidgetItem(str(gp_max)))
class MPDataWin(QtGui.QDialog):
    """External window displaying the sample values under the mouse pointer."""
    def __init__(self, parent=None):
        super(MPDataWin, self).__init__(parent)
        self.parent = parent
        self.header_title = ["Rail", "Power (mW)", "Voltage (V)", "Current (mA)"]
        self.data_table = QtGui.QTableWidget(0, len(self.header_title))
        self.data_table.setHorizontalHeaderLabels(self.header_title)
        # One row per displayed rail...
        for rail in self.parent.b.rails_to_display:
            rowposition = self.data_table.rowCount()
            self.data_table.insertRow(rowposition)
            self.data_table.setItem(rowposition, 0, QtGui.QTableWidgetItem(rail['name']))
        # ...followed by one highlighted row per power group.
        for group in self.parent.b.power_groups:
            rowposition = self.data_table.rowCount()
            self.data_table.insertRow(rowposition)
            self.data_table.setItem(rowposition, 0, QtGui.QTableWidgetItem(group['name']))
            self.data_table.item(rowposition, 0).setBackground(QtGui.QColor(169, 169, 0, 169))
        self.w_data_lay = QtGui.QVBoxLayout()
        self.rail_control = QtGui.QLabel("Time :")
        self.w_data_lay.addWidget(self.rail_control)
        self.w_data_lay.addWidget(self.data_table)
        self.setLayout(self.w_data_lay)
    def update_data(self, time_coord):
        """Refresh the table with the values at ``time_coord`` (in seconds)."""
        if self.isVisible():
            self.rail_control.setText("Time : " + str(time_coord) + " sec")
            i = 0
            for d_rail in self.parent.b.rails_to_display:
                # Find the capture buffer matching this displayed rail.
                rail = next((item for item in self.parent.rail_buf if item['railnumber'] == d_rail['name']), None)
                # [1:] skips the initial placeholder sample.
                voltage = rail['voltage'][1:]
                current = rail['current'][1:]
                power = np.empty_like(voltage)
                power[:, 0] = voltage[:, 0]
                power[:, 1] = voltage[:, 1] * current[:, 1]
                x_coord_p = power[:, 0].searchsorted(time_coord)
                x_coord_v = voltage[:, 0].searchsorted(time_coord)
                x_coord_c = current[:, 0].searchsorted(time_coord)
                # searchsorted == 0 means the pointer is left of the first sample.
                if x_coord_p:
                    mp_power = power[x_coord_p - 1, 1]
                    self.data_table.setItem(i, 1, QtGui.QTableWidgetItem(str(mp_power)))
                if x_coord_v:
                    mp_voltage = voltage[x_coord_v - 1, 1]
                    self.data_table.setItem(i, 2, QtGui.QTableWidgetItem(str(mp_voltage)))
                if x_coord_c:
                    mp_current = current[x_coord_c - 1, 1]
                    self.data_table.setItem(i, 3, QtGui.QTableWidgetItem(str(mp_current)))
                i += 1
            for j, group in enumerate(self.parent.groups_buf):
                time_group = group['power'][1:, 0]
                power = group['power'][1:, 1]
                x_coord_gp = time_group.searchsorted(time_coord)
                if x_coord_gp:
                    # BUGFIX: index with this group's own search result x_coord_gp;
                    # the previous code reused x_coord_p, which is stale from the
                    # rails loop and undefined when no rails are displayed.
                    mp_gpower = power[x_coord_gp - 1]
                    # BUGFIX: group rows start right after the rail rows, at row
                    # i + j (as in ZoomDataWin / GlobalDataWin); the previous
                    # i + j + 1 skipped a row and overflowed the table for the
                    # last group.
                    self.data_table.setItem(i + j, 1, QtGui.QTableWidgetItem(str(mp_gpower)))
    def closeEvent(self, event):
        """function called when window is quit by clicking the red cross"""
        self.parent.proxy1.disconnect()
        self.parent.proxy2.disconnect()
class GlobalDataWin(QtGui.QDialog):
    """External window summarizing min/avg/max statistics over the whole capture."""
    def __init__(self, parent=None):
        super(GlobalDataWin, self).__init__(parent)
        self.parent = parent
        self.setWindowTitle('Global Data Window')
        self.header_title = ["Rail", "P_avg", "P_min", "P_max", "V_avg", "V_min", "V_max", "I_avg", "I_min", "I_max"]
        self.data_table = QtGui.QTableWidget(0, len(self.header_title))
        self.data_table.setHorizontalHeaderLabels(self.header_title)
        # One row per displayed rail, then one highlighted row per power group.
        for rail in self.parent.b.rails_to_display:
            row = self.data_table.rowCount()
            self.data_table.insertRow(row)
            self.data_table.setItem(row, 0, QtGui.QTableWidgetItem(rail['name']))
        for group in self.parent.b.power_groups:
            row = self.data_table.rowCount()
            self.data_table.insertRow(row)
            self.data_table.setItem(row, 0, QtGui.QTableWidgetItem(group['name']))
            self.data_table.item(row, 0).setBackground(QtGui.QColor(169, 169, 0, 169))
        self.w_data_lay = QtGui.QVBoxLayout()
        self.w_data_lay.addWidget(self.data_table)
        self.setLayout(self.w_data_lay)
    def update_data(self):
        """Recompute and display global statistics (only while visible)."""
        if not self.isVisible():
            return
        row = 0
        for d_rail in self.parent.b.rails_to_display:
            # Locate the capture buffer belonging to this displayed rail.
            rail = next((item for item in self.parent.rail_buf if item['railnumber'] == d_rail['name']), None)
            # Column 1 holds the measured value; [1:] skips the placeholder sample.
            voltage = rail['voltage'][1:, 1]
            current = rail['current'][1:, 1]
            power = voltage * current
            stats = (power.mean(), power.min(), power.max(),
                     voltage.mean(), voltage.min(), voltage.max(),
                     current.mean(), current.min(), current.max())
            for col, value in enumerate(stats, start=1):
                self.data_table.setItem(row, col, QtGui.QTableWidgetItem(str(value)))
            row += 1
        # Group rows follow directly after the rail rows.
        for offset, group in enumerate(self.parent.groups_buf):
            power = group['power'][:, 1]
            gp_stats = (power.mean(), power.min(), power.max())
            for col, value in enumerate(gp_stats, start=1):
                self.data_table.setItem(row + offset, col, QtGui.QTableWidgetItem(str(value)))
class GUI(QtWidgets.QMainWindow):
    def __init__(self, board, args, parent=None):
        """Create the main window.

        board -- low-level board driver object (rails, groups, capture API)
        args  -- parsed command-line args; args.load selects offline mode
        """
        super(GUI, self).__init__(parent)
        self.b = board
        self.args = args
        # Local snapshots of measured data (filled by global_update / file load).
        self.rail_buf = []
        self.groups_buf = []
        # Parallel per-rail / per-group widget lists, indexed like
        # b.rails_to_display and b.power_groups respectively.
        self.list_rails_p = []
        self.list_groups_p = []
        self.list_rails_v = []
        self.list_rails_c = []
        self.list_rails_label = []
        self.list_groups_label = []
        self.list_menu = []
        self.list_menu_g = []
        self.list_switch_res = []
        self.list_color_rails = []
        self.list_color_groups = []
        self.list_right_lay_n = []
        self.list_right_lay_group_n = []
        self.list_right_lay_p = []
        self.list_right_lay_group_p = []
        self.list_right_lay_v = []
        self.list_right_lay_c = []
        # Capture state machine: 'start' / 'stop' / 'pause' / 'reinit'.
        self.state = 'start'
        self.central_widget = QtGui.QWidget()
        self.timer = pg.QtCore.QTimer()
        self.menu_bar = self.menuBar()
        self.winmenu = QtGui.QMenu()
        self.status_bar = self.statusBar()
        self.spacer = QtGui.QWidget()
        self.wid_rail_scrollbar = QtGui.QWidget()
        self.rail_scrollbar = QtWidgets.QScrollArea()
        self.global_lay = QtGui.QHBoxLayout()
        self.button_lay = QtGui.QGridLayout()
        self.group_lay = QtGui.QGridLayout()
        self.left_lay = QtGui.QVBoxLayout()
        self.plot_lay = QtGui.QGridLayout()
        self.right_lay = QtGui.QVBoxLayout()
        self.right_lay_group = QtGui.QGridLayout()
        self.right_lay_rail = QtGui.QGridLayout()
        # Main plot and zoom plot, each with a secondary ViewBox for the
        # voltage curves (right-hand axis).
        self.global_graph = pg.PlotWidget(title='Main Window')
        self.global_graph_vb = pg.ViewBox()
        self.global_graph_pi = self.global_graph.plotItem
        self.zoom_graph = pg.PlotWidget(title='Zoom Area')
        self.zoom_graph_vb = pg.ViewBox()
        self.zoom_graph_pi = self.zoom_graph.plotItem
        self.zoom_region = pg.LinearRegionItem()
        # Shaded regions marking stop/resume intervals on the main plot.
        self.stop_region = []
        self.stop = 0
        self.resume = 0
        self.resize(WINDOW_WIDTH, WINDOW_HEIGHT)
        # Background thread running the data-acquisition worker.
        self.thread_data = QtCore.QThread(parent=self)
        self.worker = Worker(self.b)
        signal.signal(signal.SIGINT, self.sigint_handler)
        self.start_setup()
def sigint_handler(self, *args):
        """displays a message box if the GUI is exit with CTRL+C command"""
        # Confirm before quitting; the default (pre-selected) button is 'No'.
        if QtGui.QMessageBox.question(None, '', "Are you sure you want to quit?",
                                  QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
                                  QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
            # Unblock the worker first so it can observe the stop flag, then
            # wait for the acquisition thread to finish before quitting Qt.
            self.worker.resume_thread()
            drv_ftdi.FLAG_UI_STOP = True
            self.thread_data.quit()
            self.thread_data.wait()
            QtGui.QApplication.quit()
def closeEvent(self, event):
        """function called when app is quit by clicking the red cross"""
        # Same teardown order as sigint_handler: resume the worker so it can
        # see FLAG_UI_STOP, stop and join the acquisition thread, then quit.
        self.worker.resume_thread()
        drv_ftdi.FLAG_UI_STOP = True
        self.thread_data.quit()
        self.thread_data.wait()
        QtGui.QApplication.quit()
def mousemoved_zoom_graph(self, evt):
        """Map the zoom-graph mouse position to data coordinates and push the
        x (time) value to the mouse-pointer data window."""
        scene_pos = evt[0]
        view_point = self.zoom_graph.getPlotItem().getViewBox().mapSceneToView(scene_pos)
        self.mouse_pointer_window.update_data(view_point.x())
def mousemove_global_graph(self, evt):
        """Map the global-graph mouse position to data coordinates and push the
        x (time) value to the mouse-pointer data window."""
        scene_pos = evt[0]
        view_point = self.global_graph.getPlotItem().getViewBox().mapSceneToView(scene_pos)
        self.mouse_pointer_window.update_data(view_point.x())
def global_update(self):
        """Snapshot the shared measurement buffer, then refresh groups and plots.

        Called periodically by the Qt timer while recording.
        """
        drv_ftdi.DATA_LOCK.acquire()
        try:
            # Deep copy under the lock so the acquisition thread cannot mutate
            # the buffer mid-read.  try/finally guarantees the lock is released
            # even if deepcopy raises (the original leaked the lock on error,
            # which would deadlock the acquisition thread).
            self.rail_buf = copy.deepcopy(self.b.data_buf)
        finally:
            drv_ftdi.DATA_LOCK.release()
        self.get_power_group()
        self.traces_update()
def get_power_group(self):
        """retrieves power groups data"""
        # Rebuild groups_buf from scratch: one entry per configured power
        # group, each holding a (time, power) array summed over its rails.
        self.groups_buf = []
        for i, group in enumerate(self.b.power_groups):
            self.groups_buf.append({'group_name': group['name'], 'power': np.zeros([1, 2], dtype=np.float16)})
            power_group = np.zeros([1, 2], dtype=np.float16)
            for rail_group in group['rails']:
                rail = next((item for item in self.rail_buf if item['railnumber'] == rail_group), None)
                # Per-rail power = V * I, keeping the rail's timestamps in col 0.
                # Row 0 of the rail buffers is a placeholder and is skipped.
                power_rail = np.empty_like(rail['voltage'][1:])
                power_rail[:, 0] = rail['voltage'][1:, 0]
                power_rail[:, 1] = (rail['voltage'][1:, 1] * rail['current'][1:, 1])
                # Rails can differ by a couple of samples; force a common shape
                # before summing.  NOTE(review): ndarray.resize truncates /
                # zero-fills in C order -- confirm acceptable for mismatches
                # of up to 2 rows.
                if power_group.shape[0] > power_rail.shape[0]:
                    power_group.resize(power_rail.shape)
                elif power_rail.shape[0] - power_group.shape[0] <= 2:
                    power_rail.resize(power_group.shape)
                power_group = power_group + power_rail
                # Timestamps were summed too; restore them from the last rail.
                power_group[:, 0] = power_rail[:, 0]
            self.groups_buf[i]['power'] = power_group
def traces_update(self):
        """updates global / zoom plot and updates values"""
        # Full redraw: clear both plots and their secondary (voltage) viewboxes.
        self.zoom_graph.clear()
        self.zoom_graph_vb.clear()
        self.global_graph.clear()
        self.global_graph_vb.clear()
        # Block signals so the programmatic region/range changes below do not
        # re-trigger update_zoom_region / update_zoom_graph recursively.
        self.zoom_graph.blockSignals(True)
        self.zoom_region.blockSignals(True)
        self.global_graph.blockSignals(True)
        for i, d_rail in enumerate(self.b.rails_to_display):
            rail = next((item for item in self.rail_buf if item['railnumber'] == d_rail['name']), None)
            # Row 0 of the buffers is a placeholder; plot rows 1..end.
            voltage = rail['voltage'][1:]
            current = rail['current'][1:]
            power = np.empty_like(voltage)
            power[:, 0] = voltage[:, 0]
            power[:, 1] = voltage[:, 1] * current[:, 1]
            # Only draw curves whose checkbox is ticked and that have >2 samples.
            if self.list_rails_p[i].isChecked():
                if power.shape[0] > 2:
                    self.global_graph.plot(power, pen=COLORS[i])
                    self.zoom_graph.plot(power, pen=COLORS[i])
            if self.list_rails_v[i].isChecked():
                if voltage.shape[0] > 2:
                    # Voltage goes on the secondary right-axis viewbox,
                    # drawn with a dash-dot-dot pen to stand out.
                    self.global_graph_vb.addItem(pg.PlotCurveItem(voltage[:, 0], voltage[:, 1],
                                                 pen=pg.mkPen(COLORS[i], width=2,
                                                              style=QtCore.Qt.DashDotDotLine)))
                    self.zoom_graph_vb.addItem(pg.PlotCurveItem(voltage[:, 0], voltage[:, 1],
                                               pen=pg.mkPen(COLORS[i], width=2,
                                                            style=QtCore.Qt.DashDotDotLine)))
            if self.list_rails_c[i].isChecked():
                if current.shape[0] > 2:
                    self.global_graph.plot(current, pen=pg.mkPen(COLORS[i], style=QtCore.Qt.DotLine))
                    self.zoom_graph.plot(current, pen=pg.mkPen(COLORS[i], style=QtCore.Qt.DotLine))
        for j, group in enumerate(self.groups_buf):
            if self.list_groups_p[j].isChecked():
                if group['power'].shape[0] > 2:
                    self.global_graph.plot(group['power'], pen=pg.mkPen(GROUPS_COLORS[j], width=3))
                    self.zoom_graph.plot(group['power'], pen=pg.mkPen(GROUPS_COLORS[j], width=3))
        # While recording, keep the zoom window glued to the last 2 seconds.
        if self.timer.isActive():
            time_len = self.rail_buf[0]['voltage'][-1, 0]
            if time_len < 2:
                minx = 0
                maxx = time_len
            else:
                minx = time_len - 2
                maxx = time_len
            self.zoom_region.setRegion((minx, maxx))
            self.zoom_graph.setXRange(minx, maxx, padding=0)
        self.global_graph.enableAutoRange('x')
        # Re-add items removed by clear(): the zoom region selector and the
        # shaded stop/resume intervals.
        self.global_graph.addItem(self.zoom_region, ignoreBounds=True)
        for reg in self.stop_region:
            self.global_graph.addItem(reg, ignoreBounds=True)
        self.zoom_graph.blockSignals(False)
        self.zoom_region.blockSignals(False)
        self.global_graph.blockSignals(False)
        self.global_data_window.update_data()
        self.update_zoom_data()
        self.update_right_lay_data()
def update_right_lay_data(self):
        """updates average values of plotted rails and groups"""
        # Group averages are always shown.
        for j, group in enumerate(self.groups_buf):
            p_avg = group['power'][:, 1].mean()
            self.list_right_lay_group_p[j].setText(str("%.2f" % p_avg))
        for i, d_rail in enumerate(self.b.rails_to_display):
            # Only recompute for rails that have at least one curve displayed;
            # others are greyed out and keep their stale text.
            if self.list_rails_p[i].isChecked() or self.list_rails_v[i].isChecked() or self.list_rails_c[i].isChecked():
                rail = next((item for item in self.rail_buf if item['railnumber'] == d_rail['name']), None)
                # Skip the initial placeholder row of the buffers.
                voltage = rail['voltage'][1:, 1]
                current = rail['current'][1:, 1]
                power = voltage * current
                p_avg = power.mean()
                v_avg = voltage.mean()
                c_avg = current.mean()
                self.list_right_lay_p[i].setText(str("%.2f" % p_avg))
                self.list_right_lay_v[i].setText(str("%.2f" % v_avg))
                self.list_right_lay_c[i].setText(str("%.2f" % c_avg))
                self.list_right_lay_n[i].setStyleSheet('color: black')
                self.list_right_lay_p[i].setStyleSheet('color: black')
                self.list_right_lay_v[i].setStyleSheet('color: black')
                self.list_right_lay_c[i].setStyleSheet('color: black')
            else:
                self.list_right_lay_n[i].setStyleSheet('color: grey')
                self.list_right_lay_p[i].setStyleSheet('color: grey')
                self.list_right_lay_v[i].setStyleSheet('color: grey')
                self.list_right_lay_c[i].setStyleSheet('color: grey')
def update_zoom_view(self):
        """updates zoom view"""
        # Keep the secondary (voltage) viewbox geometrically and x-axis
        # synchronized with the main zoom plot after a resize.
        self.zoom_graph_vb.setGeometry(self.zoom_graph_pi.vb.sceneBoundingRect())
        self.zoom_graph_vb.linkedViewChanged(self.zoom_graph_pi.vb, self.zoom_graph_vb.XAxis)
def update_global_view(self):
        """updates global view"""
        # Same as update_zoom_view, for the main plot's secondary viewbox.
        self.global_graph_vb.setGeometry(self.global_graph_pi.vb.sceneBoundingRect())
        self.global_graph_vb.linkedViewChanged(self.global_graph_pi.vb, self.global_graph_vb.XAxis)
def update_zoom_region(self, window, viewrange):
        """updates zoom region if user moove zoom graph"""
        # Block the region's own signal so setRegion does not bounce back
        # into update_zoom_graph (would cause infinite recursion).
        self.zoom_region.blockSignals(True)
        rgn = viewrange[0]  # x-range [min, max] of the zoom graph
        self.zoom_region.setRegion(rgn)
        self.update_zoom_data()
        self.zoom_region.blockSignals(False)
def update_zoom_graph(self):
        """updates zoom graph if user moove zoom region"""
        # Block both emitters to avoid ping-ponging with update_zoom_region.
        self.zoom_graph.blockSignals(True)
        self.zoom_region.blockSignals(True)
        self.zoom_region.setZValue(10)
        minx, maxx = self.zoom_region.getRegion()
        self.zoom_graph.setXRange(minx, maxx, padding=0)
        self.update_zoom_data()
        self.zoom_region.blockSignals(False)
        self.zoom_graph.blockSignals(False)
def update_zoom_data(self):
        """Push the current zoom-region bounds to the zoom data window."""
        region = self.zoom_region.getRegion()
        self.zoom_data_window.update_data(region[0], region[1])
def g_power_changed(self):
        """Redraw all traces when a group power checkbox toggles, unless a
        bulk show/hide operation is in progress."""
        if FLAGS['display_all']:
            return
        self.traces_update()
def power_changed(self):
        """Redraw all traces when a rail power checkbox toggles, unless a
        bulk show/hide operation is in progress."""
        if FLAGS['display_all']:
            return
        self.traces_update()
def voltage_changed(self):
        """Redraw all traces when a rail voltage checkbox toggles, unless a
        bulk show/hide operation is in progress."""
        if FLAGS['display_all']:
            return
        self.traces_update()
def current_changed(self):
        """Redraw all traces when a rail current checkbox toggles, unless a
        bulk show/hide operation is in progress."""
        if FLAGS['display_all']:
            return
        self.traces_update()
def switch_res_changed(self, index):
        """Ask the board to switch the sense resistor of rail *index* and,
        if the switch is both allowed and successful, flip the H/L label."""
        done, autorised = self.b.switch_res(self.b.rails_to_display[index], index)
        if autorised and done:
            label = self.list_switch_res[index]
            label.setText("H" if label.text() == "L" else "L")
def hide_all_power(self):
        """Propagate the header 'P' checkbox state to every rail's power
        checkbox, suppressing per-checkbox redraws, then redraw once."""
        FLAGS['display_all'] = True
        target_state = self.label_p.isChecked()
        for _, checkbox in zip(self.b.rails_to_display, self.list_rails_p):
            checkbox.setChecked(target_state)
        FLAGS['display_all'] = False
        self.traces_update()
def hide_all_voltage(self):
        """Propagate the header 'V' checkbox state to every rail's voltage
        checkbox, suppressing per-checkbox redraws, then redraw once."""
        FLAGS['display_all'] = True
        target_state = self.label_v.isChecked()
        for _, checkbox in zip(self.b.rails_to_display, self.list_rails_v):
            checkbox.setChecked(target_state)
        FLAGS['display_all'] = False
        self.traces_update()
def hide_all_current(self):
        """Propagate the header 'I' checkbox state to every rail's current
        checkbox, suppressing per-checkbox redraws, then redraw once."""
        FLAGS['display_all'] = True
        target_state = self.label_c.isChecked()
        for _, checkbox in zip(self.b.rails_to_display, self.list_rails_c):
            checkbox.setChecked(target_state)
        FLAGS['display_all'] = False
        self.traces_update()
def change_color(self, index):
        """Record the newly picked color for rail *index* and redraw."""
        picked = self.list_color_rails[index].color().name()
        COLORS[index] = picked
        self.traces_update()
def change_color_g(self, index):
        """Record the newly picked color for group *index* and redraw."""
        picked = self.list_color_groups[index].color().name()
        GROUPS_COLORS[index] = picked
        self.traces_update()
def save_pmt(self):
        """saves the capture as binary file with specified name

        Prompts for a path, then pickles rail_buf followed by groups_buf
        (protocol -1, i.e. highest available) into a .pmt file.
        """
        name = QtGui.QFileDialog.getSaveFileName(caption='Save captured data as binary file',
                                           filter="PMT captures .pmt (*.pmt)")
        if name[0]:
            # Normalize the user-chosen extension to .pmt.
            filename = os.path.splitext(name[0])[0]
            filename += '.pmt'
            print('Saving to binary file ' + str(filename))
            # with-statement closes the file even if pickle.dump raises
            # (original code leaked the handle on error).
            with open(filename, 'wb') as file_out:
                pickle.dump(self.rail_buf, file_out, -1)
                pickle.dump(self.groups_buf, file_out, -1)
            print('Done.')
def save_csv(self):
        """saves the capture as csv file with specified name"""
        headers = []
        data = []  # list of 1-D columns, stacked at the end
        name = QtGui.QFileDialog.getSaveFileName(caption='Save captured data as csv file', filter='csv')
        if name[0]:
            # NOTE(review): splitext returns a (root, ext) tuple, so this
            # 'if filename:' is always true when name[0] is non-empty.
            filename = os.path.splitext(name[0])
            if filename:
                type_data = ['voltage', 'current', 'power']
                type_data_unit = [' (V)', ' (mA)', ' (mW)']
                # Truncate every column to the shortest rail's length
                # (taken from the last displayed rail's buffer).
                array_size = self.rail_buf[-1]['voltage'].shape[0]
                # First column: timestamps (placeholder row 0 skipped).
                data.append(self.rail_buf[0]['voltage'][1:array_size, 0])
                headers.append('Time (ms)')
                for d_rail in self.b.rails_to_display:
                    rail = next((item for item in self.rail_buf if item['railnumber'] == d_rail['name']), None)
                    # Three columns per rail: voltage, current, power (=V*I).
                    for j in range(3):
                        headers.append(str(d_rail['name'] + " " + type_data[j] + type_data_unit[j]))
                        if j != 2:
                            data.append(rail[type_data[j]][1:array_size, 1])
                        else:
                            data.append(rail['current'][1:array_size, 1] * rail['voltage'][1:array_size, 1])
                if self.b.power_groups:
                    # One summed power column per group; same pad/trim logic
                    # as get_power_group.
                    power_group = np.zeros([1, 2], dtype=np.float16)
                    for group in self.b.power_groups:
                        headers.append(group['name'] + ' power (mW)')
                        for rail_group in group['rails']:
                            rail = next((item for item in self.rail_buf if item['railnumber'] == rail_group),
                                        None)
                            power_rail = np.empty_like(rail['voltage'][1:])
                            power_rail[:, 0] = rail['voltage'][1:, 0]
                            power_rail[:, 1] = (rail['voltage'][1:, 1] * rail['current'][1:, 1])
                            if power_group.shape[0] > power_rail.shape[0]:
                                power_group.resize(power_rail.shape)
                            elif power_rail.shape[0] - power_group.shape[0] <= 2:
                                power_rail.resize(power_group.shape)
                            power_group = power_group + power_rail
                        data.append(power_group[:, 1])
                np.savetxt(filename[0] + ".csv", np.column_stack(data), delimiter=",", header=','.join(headers), fmt='%1.4f', comments='')
                print("Saved data in file " + filename[0] + ".csv")
def save_png(self):
        """saves the capture as png picture with specified name"""
        name = QtGui.QFileDialog.getSaveFileName(caption='Capture plot picture to (.png) file', filter='png')
        if name[0]:
            # Normalize the extension to .png.
            filename = os.path.splitext(name[0])[0]
            filename += '.png'
            # Presumably gives the file dialog time to close before the
            # screen grab -- TODO confirm the delay is still needed.
            time.sleep(1)
            screen = QtGui.QApplication.primaryScreen()
            screenshot = screen.grabWindow(self.winId())
            screenshot.save(filename, 'png')
            print("Saved image to: ", filename)
def display_about(self):
        """Show the 'About PMT' message box with version and license info."""
        about_box = QtWidgets.QMessageBox()
        about_box.setIcon(QtWidgets.QMessageBox.Information)
        about_box.setWindowTitle("About")
        about_text = ('Power Measurement Tool ' + PROGRAM_VERSION + '\n' + COPYRIGHT_INFO
                      + '\nLicense: BSD-3-Clause\nContact info: Create issue in PMT Github')
        about_box.setText(about_text)
        about_box.exec()
def start_record(self):
        """starts the timer if the user clicks on start button"""
        # 'pause' is handled by pause_record; starting from pause just
        # re-checks the button (else branch below).
        if self.state != 'pause':
            if self.state == 'reinit':
                # Fresh capture after a trash/redo: drop old stop markers
                # and restart the time origin.
                self.stop_region.clear()
                drv_ftdi.T_START = time.time()
            self.status_bar.showMessage("Recording")
            self.worker.resume_thread()
            self.timer.start(1000)  # refresh plots every second
            self.pause_but.setChecked(False)
            self.start_but.setChecked(True)
            self.stop_but.setChecked(False)
            self.redo_but.setChecked(False)
            if self.state == 'stop':
                # Resuming after a stop: shade the stopped interval on the
                # main plot (red, semi-transparent, not draggable).
                self.resume = time.time() - drv_ftdi.T_START
                region = pg.LinearRegionItem(brush=QtGui.QBrush(QtGui.QColor(255, 0, 0, 50)), movable=False)
                self.zoom_region.setZValue(10)
                region.setRegion((self.stop, self.resume))
                self.stop_region.append(region)
            self.state = 'start'
        else:
            self.start_but.setChecked(True)
def stop_record(self):
        """stops the capture and data collection (low-level) if the user clicks on stop button"""
        if self.state != 'stop':
            self.status_bar.showMessage("Stop Recording")
            # Pause the acquisition worker and the UI refresh timer;
            # remember when we stopped so start_record can shade the gap.
            self.worker.pause_thread()
            self.pause_but.setChecked(False)
            self.start_but.setChecked(False)
            self.stop_but.setChecked(True)
            self.redo_but.setChecked(False)
            self.timer.stop()
            self.state = 'stop'
            self.stop = time.time() - drv_ftdi.T_START
        else:
            self.stop_but.setChecked(True)
def pause_record(self):
        """stops the capture refresh if the user clicks on pause button"""
        # Pause only freezes the UI refresh timer -- the low-level capture
        # keeps running (unlike stop_record).  Ignored while stopped.
        if self.state != 'stop':
            if self.state != 'pause':
                self.status_bar.showMessage("Pause Recording")
                self.pause_but.setChecked(True)
                self.start_but.setChecked(True)
                self.stop_but.setChecked(False)
                self.redo_but.setChecked(False)
                self.timer.stop()
                self.state = 'pause'
            else:
                # Second click on pause: resume the refresh.
                self.status_bar.showMessage("Recording")
                self.timer.start(1000)
                self.state = 'start'
        else:
            self.pause_but.setChecked(False)
def redo_record(self):
        """re initialization of the shared variable containing measured values"""
        self.stop_record()
        # Reset every rail buffer to a single placeholder row, under the
        # acquisition lock so the worker cannot write concurrently.
        drv_ftdi.DATA_LOCK.acquire()
        for rail in self.b.data_buf:
            rail['current'] = np.empty([1, 2], dtype=np.float16)
            rail['voltage'] = np.empty([1, 2], dtype=np.float16)
        drv_ftdi.DATA_LOCK.release()
        self.zoom_graph.clear()
        self.zoom_graph_vb.clear()
        self.global_graph.clear()
        self.global_graph_vb.clear()
        # 'reinit' makes the next start_record reset T_START and markers.
        self.state = 'reinit'
def sh_global_data_window(self):
        """Toggle the global data window, refreshing its table when it
        becomes visible."""
        was_visible = self.global_data_window.isVisible()
        self.global_data_window.setVisible(not was_visible)
        if self.global_data_window.isVisible():
            self.global_data_window.update_data()
def sh_zoom_data_window(self):
        """Toggle the zoom data window, refreshing its contents when it
        becomes visible."""
        was_visible = self.zoom_data_window.isVisible()
        self.zoom_data_window.setVisible(not was_visible)
        if self.zoom_data_window.isVisible():
            self.update_zoom_data()
def sh_mouse_pointer_data_window(self):
        """shows / hides mouse pointer data window if user clicks in Windows menu bar item"""
        current_state = self.mouse_pointer_window.isVisible()
        self.mouse_pointer_window.setVisible(not current_state)
        if self.mouse_pointer_window.isVisible():
            # Only track mouse moves while the window is shown; rateLimit
            # caps updates at 20 Hz to keep the UI responsive.
            self.proxy1 = pg.SignalProxy(self.zoom_graph.scene().sigMouseMoved, rateLimit=20,
                                          slot=self.mousemoved_zoom_graph)
            self.proxy2 = pg.SignalProxy(self.global_graph.scene().sigMouseMoved, rateLimit=20,
                                          slot=self.mousemove_global_graph)
        else:
            self.proxy1.disconnect()
            self.proxy2.disconnect()
def board_reset(self):
        """calls low level function for resetting board"""
        # Placeholder -- the corresponding menu entries are commented out
        # in start_setup.
        print('Not implemented yet')
def board_onoff(self):
        """calls low level function for suspend / resume board"""
        # Placeholder -- the corresponding menu entries are commented out
        # in start_setup.
        print("Not implemented yet")
def hardware_filter(self):
        """Toggle the PAC hardware averaging filter on the board."""
        self.b.pac_hw_filter()
def pac_bipolar(self):
        """Switch the PAC measurement range to bipolar mode on the board."""
        self.b.pac_set_bipolar()
def start_setup(self):
        """setup of the application"""
        self.setCentralWidget(self.central_widget)
        self.centralWidget().setLayout(self.global_lay)
        # --- Offline mode: rebuild rail/group buffers from a saved file. ---
        if self.args.load:
            self.setWindowTitle('Power Measurement Tool Offline')
            print('Reading %s file...' % self.args.load)
            if self.args.load.split('.')[1] == 'csv':
                with open(self.args.load, mode='r') as csv_file:
                    csv_reader = csv.reader(csv_file, delimiter=',')
                    self.b.rails_to_display = []
                    self.b.power_groups = []
                    first_run = True
                    for row in csv_reader:
                        if first_run:
                            # Header row: every rail contributes 3 columns
                            # sharing the same name prefix; 'GROUP' columns
                            # become power groups.
                            last_el = ''
                            for r in row[1:]:
                                if r.split(' ')[0] == last_el:
                                    pass
                                elif not 'GROUP' in r.split(' ')[0]:
                                    self.b.rails_to_display.append({'name': r.split(' ')[0]})
                                    self.rail_buf.append(
                                        {'railnumber': r.split(' ')[0], 'current': np.empty([1, 2], dtype=np.float16),
                                         'voltage': np.empty([1, 2], dtype=np.float16)})
                                    last_el = r.split(' ')[0]
                                else:
                                    self.b.power_groups.append({'name': r.split(' ')[0]})
                                    self.groups_buf.append(
                                        {'group_name': r.split(' ')[0], 'power': np.zeros([1, 2], dtype=np.float16)})
                                    last_el = r.split(' ')[0]
                            first_run = False
                        else:
                            # Data rows: grow each rail buffer by one row
                            # (copy old data into a larger array, then fill
                            # the new last row).  Columns per rail: V, I,
                            # P -- power (row[i+2]) is recomputed, not read.
                            ind = 0
                            for i in range(1, len(self.rail_buf) * 3, 3):
                                tmp_cur = self.rail_buf[ind]['current']
                                tmp_volt = self.rail_buf[ind]['voltage']
                                self.rail_buf[ind]['current'] = np.empty([(self.rail_buf[ind]['current'].shape[0] + 1), 2],
                                                                         dtype=np.float16)
                                self.rail_buf[ind]['voltage'] = np.empty([(self.rail_buf[ind]['voltage'].shape[0] + 1), 2],
                                                                         dtype=np.float16)
                                self.rail_buf[ind]['current'][:tmp_cur.shape[0]] = tmp_cur
                                self.rail_buf[ind]['voltage'][:tmp_cur.shape[0]] = tmp_volt
                                self.rail_buf[ind]['current'][tmp_cur.shape[0]:, 0] = row[0]
                                self.rail_buf[ind]['voltage'][tmp_cur.shape[0]:, 0] = row[0]
                                self.rail_buf[ind]['voltage'][tmp_cur.shape[0]:, 1] = row[i]
                                self.rail_buf[ind]['current'][tmp_cur.shape[0]:, 1] = row[i+1]
                                ind += 1
                            # Group power columns follow the rail columns.
                            ind_g = 0
                            for i in range(1, len(self.groups_buf) + 1):
                                tmp_power = self.groups_buf[ind_g]['power']
                                self.groups_buf[ind_g]['power'] = np.empty(
                                    [(self.groups_buf[ind_g]['power'].shape[0] + 1), 2], dtype=np.float16)
                                self.groups_buf[ind_g]['power'][:tmp_power.shape[0]] = tmp_power
                                self.groups_buf[ind_g]['power'][tmp_power.shape[0]:, 0] = row[0]
                                self.groups_buf[ind_g]['power'][tmp_power.shape[0]:, 1] = row[(ind * 3) + i]
                                ind_g += 1
            elif self.args.load.split('.')[1] == 'pmt':
                # .pmt is two consecutive pickles: rail_buf then groups_buf.
                # Old files may lack the second pickle (EOFError -> no groups).
                with open(self.args.load, mode='rb') as pkl_file:
                    self.rail_buf = pickle.load(pkl_file)
                    self.b.rails_to_display = []
                    for rail in self.rail_buf:
                        self.b.rails_to_display.append({'name': rail['railnumber']})
                    try:
                        self.groups_buf = pickle.load(pkl_file)
                        self.b.power_groups = []
                        for group in self.groups_buf:
                            self.b.power_groups.append({'name': group['group_name']})
                    except EOFError:
                        self.b.power_groups = []
            else:
                print('Please enter valid file to load')
        if not self.args.load:
            self.setWindowTitle('Power Measurement Tool Live Capture')
        # --- Menu bar: File / Settings / Windows / Help. ---
        self.menu_bar.setNativeMenuBar(False)
        self.filemenu = self.menu_bar.addMenu('File')
        # NOTE(review): these assignments rebind self.save_pmt/save_csv/
        # save_png from bound methods to QAction objects after the actions
        # are wired -- the methods are no longer reachable via self afterwards.
        self.save_pmt = self.filemenu.addAction('Save capture as .pmt', self.save_pmt)
        self.save_csv = self.filemenu.addAction('Save capture as .csv', self.save_csv)
        self.save_png = self.filemenu.addAction('Save capture as .png', self.save_png)
        self.exit = self.filemenu.addAction('Exit')
        # NOTE(review): triggered passes its 'checked' bool to closeEvent's
        # 'event' parameter -- confirm closeEvent tolerates a non-QEvent.
        self.exit.triggered.connect(self.closeEvent)
        self.settingmenu = self.menu_bar.addMenu('Settings')
        self.settingmenu.setToolTipsVisible(True)
        if not self.args.load:
            # PAC hardware options only make sense with a live board.
            self.en_hw_filter = QtWidgets.QAction("Enable PAC hardware filter", self.settingmenu, checkable=True)
            self.en_hw_filter.setToolTip("Use the PAC's rolling average of eight most recent measurements")
            self.en_bipolar = QtWidgets.QAction("Enable PAC bipolar values", self.settingmenu, checkable=True)
            self.en_bipolar.setToolTip("Switch from 0mV -- +100mV range to -100mV -- +100mV")
            self.en_hw_filter.setChecked(False)
            self.en_bipolar.setChecked(True)
            self.en_hw_filter.triggered.connect(self.hardware_filter)
            self.en_bipolar.triggered.connect(self.pac_bipolar)
            self.settingmenu.addAction(self.en_hw_filter)
            self.settingmenu.addAction(self.en_bipolar)
        self.winmenu = self.menu_bar.addMenu('Windows')
        self.winmenu.addAction("Show / hide Global data window", self.sh_global_data_window)
        self.winmenu.addAction("Show / hide Zoom data window", self.sh_zoom_data_window)
        self.winmenu.addAction("Show / hide Mouse Pointer data window",self.sh_mouse_pointer_data_window)
        self.helpmenu = self.menu_bar.addMenu('Help')
        self.about = self.helpmenu.addAction('About PMT', self.display_about)
        if not self.args.load:
            self.status_bar.showMessage("Recording")
        #self.board_controlmenu = self.menu_bar.addMenu('Board Control')
        #self.board_controlmenu.addAction("Reset board", self.board_reset)
        #self.board_controlmenu.addAction("On / Off board", self.board_onoff)
        # --- Toolbar: record / stop / pause / trash buttons. ---
        self.tool_bar = self.addToolBar("tt")
        self.start_but = QtWidgets.QPushButton("")
        self.start_but.setIcon(QtGui.QIcon('docs/images/record.png'))
        self.start_but.setToolTip('Start capture')
        self.stop_but = QtWidgets.QPushButton("")
        self.stop_but.setIcon(QtGui.QIcon('docs/images/stop.png'))
        self.stop_but.setToolTip('Stop capturing data')
        self.pause_but = QtWidgets.QPushButton("")
        self.pause_but.setIcon(QtGui.QIcon('docs/images/pause.png'))
        self.pause_but.setToolTip('Stop monitor refresh')
        self.redo_but = QtWidgets.QPushButton("")
        self.redo_but.setIcon(QtGui.QIcon('docs/images/trash.png'))
        self.redo_but.setToolTip('Re-init capture')
        self.start_but.setCheckable(True)
        self.pause_but.setCheckable(True)
        self.stop_but.setCheckable(True)
        self.spacer.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self.tool_bar.addWidget(self.spacer)
        self.tool_bar.addWidget(self.start_but)
        self.tool_bar.addWidget(self.stop_but)
        self.tool_bar.addWidget(self.pause_but)
        self.tool_bar.addWidget(self.redo_but)
        self.start_but.clicked.connect(self.start_record)
        self.stop_but.clicked.connect(self.stop_record)
        self.pause_but.clicked.connect(self.pause_record)
        self.redo_but.clicked.connect(self.redo_record)
        # --- Left panel: per-rail checkbox grid (P / V / I / resistor). ---
        self.rail_control = QtGui.QLabel("Rails")
        self.rail_control.setFont(QtGui.QFont("Arial", 8, QtGui.QFont.Black))
        self.button_lay.setAlignment(QtCore.Qt.AlignTop)
        self.label_loc = QtGui.QLabel("Rail name")
        self.label_p = QtGui.QCheckBox("P")
        self.label_v = QtGui.QCheckBox("V")
        self.label_c = QtGui.QCheckBox("I")
        self.label_res = QtGui.QLabel("Res")
        self.label_p.setChecked(True)
        # Header checkboxes bulk-toggle the whole column.
        self.label_p.stateChanged.connect(self.hide_all_power)
        self.label_v.stateChanged.connect(self.hide_all_voltage)
        self.label_c.stateChanged.connect(self.hide_all_current)
        self.button_lay.addWidget(self.label_loc, 0, 0)
        self.button_lay.addWidget(self.label_p, 0, 2)
        self.button_lay.addWidget(self.label_v, 0, 3)
        self.button_lay.addWidget(self.label_c, 0, 4)
        self.button_lay.addWidget(self.label_res, 0, 5)
        for i, rail in enumerate(self.b.rails_to_display):
            self.list_rails_label.append(QtGui.QPushButton(rail['name']))
            self.list_menu.append(QtGui.QMenu())
            if not self.args.load:
                # Rails with two distinct sense resistors get a context-menu
                # action to switch between them ('H'/'L'); others show 'X'.
                if rail['rsense'][0] == rail['rsense'][1]:
                    self.list_switch_res.append(QtGui.QLabel('X'))
                else:
                    self.switch = self.list_menu[i].addAction("Switch resistance")
                    self.list_rails_label[i].setMenu(self.list_menu[i])
                    self.list_switch_res.append(QtGui.QLabel('H'))
                    # i=i default binds the loop variable at definition time.
                    self.switch.triggered.connect(lambda init, i=i: self.switch_res_changed(i))
                self.button_lay.addWidget(self.list_switch_res[i], i + 1, 5)
            self.list_color_rails.append(pg.ColorButton(color=COLORS[i]))
            self.list_color_rails[i].setMinimumHeight(30)
            self.list_color_rails[i].setMinimumWidth(30)
            self.list_rails_p.append(QtGui.QCheckBox())
            self.list_rails_p[i].setChecked(True)
            self.list_rails_v.append(QtGui.QCheckBox())
            self.list_rails_c.append(QtGui.QCheckBox())
            self.button_lay.addWidget(self.list_rails_label[i], i + 1, 0)
            self.button_lay.addWidget(self.list_color_rails[i], i + 1, 1)
            self.button_lay.addWidget(self.list_rails_p[i], i + 1, 2)
            self.button_lay.addWidget(self.list_rails_v[i], i + 1, 3)
            self.button_lay.addWidget(self.list_rails_c[i], i + 1, 4)
            self.list_color_rails[i].sigColorChanged.connect(lambda init, i=i: self.change_color(i))
            self.list_rails_p[i].stateChanged.connect(self.power_changed)
            self.list_rails_v[i].stateChanged.connect(self.voltage_changed)
            self.list_rails_c[i].stateChanged.connect(self.current_changed)
            self.list_right_lay_n.append(QtGui.QLabel(rail['name']))
            self.list_right_lay_p.append(QtGui.QLabel(""))
            self.list_right_lay_v.append(QtGui.QLabel(""))
            self.list_right_lay_c.append(QtGui.QLabel(""))
            self.right_lay_rail.addWidget(self.list_right_lay_n[i], i + 1, 0)
            self.right_lay_rail.addWidget(self.list_right_lay_p[i], i + 1, 1)
            self.right_lay_rail.addWidget(self.list_right_lay_v[i], i + 1, 2)
            self.right_lay_rail.addWidget(self.list_right_lay_c[i], i + 1, 3)
        self.wid_rail_scrollbar.setLayout(self.button_lay)
        self.rail_scrollbar.setWidget(self.wid_rail_scrollbar)
        self.left_lay.addWidget(self.rail_control)
        self.left_lay.addWidget(self.rail_scrollbar)
        # --- Left panel: per-group checkbox grid (only if groups exist). ---
        if self.b.power_groups:
            self.group_control = QtGui.QLabel("Groups")
            self.group_control.setFont(QtGui.QFont("Arial", 8, QtGui.QFont.Black))
            self.left_lay.addWidget(self.group_control)
            for i, group in enumerate(self.b.power_groups):
                self.list_groups_label.append(QtGui.QPushButton(group['name']))
                self.list_color_groups.append(pg.ColorButton(color=GROUPS_COLORS[i]))
                self.list_color_groups[i].setMinimumHeight(30)
                self.list_color_groups[i].setMinimumWidth(30)
                self.list_menu_g.append(QtGui.QMenu())
                self.list_groups_label[i].setMenu(self.list_menu_g[i])
                self.list_groups_p.append(QtGui.QCheckBox())
                self.list_groups_p[i].setChecked(False)
                self.list_groups_p[i].stateChanged.connect(self.g_power_changed)
                self.group_lay.addWidget(self.list_groups_label[i], i + 1, 0)
                self.group_lay.addWidget(self.list_color_groups[i], i + 1, 1)
                self.group_lay.addWidget(self.list_groups_p[i], i + 1, 2)
                self.list_color_groups[i].sigColorChanged.connect(lambda init, i=i: self.change_color_g(i))
                self.list_right_lay_group_n.append(QtGui.QLabel(group['name']))
                self.list_right_lay_group_p.append(QtGui.QLabel(""))
                self.right_lay_group.addWidget(self.list_right_lay_group_n[i], i + 1, 0)
                self.right_lay_group.addWidget(self.list_right_lay_group_p[i], i + 1, 1)
            self.group_lay.setAlignment(QtCore.Qt.AlignTop)
            self.left_lay.addLayout(self.group_lay)
        self.global_lay.addLayout(self.left_lay)
        # --- Center: main plot (top) and zoom plot (bottom). ---
        self.global_graph.setDownsampling(ds=True, auto=True, mode='peak')
        self.global_graph.setClipToView(True)
        self.global_graph.setMouseEnabled(x=True, y=False)
        self.global_graph_pi.showAxis('right')
        self.global_graph_pi.scene().addItem(self.global_graph_vb)
        self.global_graph_pi.getAxis('right').linkToView(self.global_graph_vb)
        self.global_graph_vb.setXLink(self.global_graph_pi)
        self.global_graph.setLabels(left='Power (mW) / Current (mA) ', bottom='Time (sec)', right='Voltage (V)')
        self.global_graph.addLine(y=0)
        self.global_graph.showGrid(x=True, y=True, alpha=0.30)
        self.plot_lay.addWidget(self.global_graph, 0, 0)
        self.zoom_graph.setDownsampling(ds=True, auto=True, mode='peak')
        self.zoom_graph.setClipToView(False)
        self.zoom_graph.setMouseEnabled(x=True, y=False)
        self.zoom_graph_pi.showAxis('right')
        self.zoom_graph_pi.scene().addItem(self.zoom_graph_vb)
        self.zoom_graph_pi.getAxis('right').linkToView(self.zoom_graph_vb)
        self.zoom_graph_vb.setXLink(self.zoom_graph_pi)
        self.zoom_graph.setLabels(left='Power (mW) / Current (mA)', bottom='Time (sec)', right='Voltage (V)')
        self.zoom_graph.enableAutoRange('y')
        self.zoom_graph.setAutoVisible(y=True)
        self.zoom_graph.addLine(y=0)
        self.zoom_graph.showGrid(x=True, y=True, alpha=0.30)
        self.plot_lay.addWidget(self.zoom_graph, 1, 0)
        self.zoom_region.setZValue(10)
        self.global_lay.addLayout(self.plot_lay, 2)
        # Keep zoom region and zoom graph mutually in sync.
        self.zoom_graph.sigRangeChanged.connect(self.update_zoom_region)
        self.zoom_region.sigRegionChanged.connect(self.update_zoom_graph)
        self.zoom_graph_pi.vb.sigResized.connect(self.update_zoom_view)
        self.global_graph_pi.vb.sigResized.connect(self.update_global_view)
        # --- Right panel: live average readouts for groups and rails. ---
        if self.b.power_groups:
            self.right_group = QtGui.QLabel("GROUPS")
            self.right_group.setFont(QtGui.QFont("Arial", 8, QtGui.QFont.Black))
            self.right_lay_group.addWidget(QtGui.QLabel("Name"), 0, 0)
            self.right_lay_group.addWidget(QtGui.QLabel("P_avg"), 0, 1)
            self.right_lay.addWidget(self.right_group)
            self.right_lay.addLayout(self.right_lay_group)
        self.right_rail = QtGui.QLabel("RAILS")
        self.right_rail.setFont(QtGui.QFont("Arial", 8, QtGui.QFont.Black))
        self.right_lay_rail.addWidget(QtGui.QLabel("Name"), 0, 0)
        self.right_lay_rail.addWidget(QtGui.QLabel("P_avg"), 0, 1)
        self.right_lay_rail.addWidget(QtGui.QLabel("V_avg"), 0, 2)
        self.right_lay_rail.addWidget(QtGui.QLabel("I_avg"), 0, 3)
        self.right_lay.addWidget(self.right_rail)
        self.right_lay.addLayout(self.right_lay_rail)
        self.right_lay.addStretch(1)
        self.global_lay.addLayout(self.right_lay)
        # --- Auxiliary data windows. ---
        self.global_data_window = GlobalDataWin(self)
        self.zoom_data_window = ZoomDataWin(self)
        self.mouse_pointer_window = MPDataWin(self)
        self.zoom_data_window.setWindowTitle('Zoom Data Window')
        self.mouse_pointer_window.setWindowTitle('Mouse Pointer Data Window')
        if not self.args.load:
            # Live capture: start the acquisition worker thread and the
            # 1 Hz UI refresh timer.
            self.worker.moveToThread(self.thread_data)
            self.thread_data.started.connect(self.worker.do_work)
            self.thread_data.finished.connect(self.thread_data.deleteLater)
            self.thread_data.start()
            self.timer.timeout.connect(self.global_update)
            self.timer.start(1000)
            self.start_but.setChecked(True)
        else:
            # Offline: just render the loaded buffers once.
            self.traces_update()
def run_ui(board, args):
    """Start the Qt event loop and show the main PMT window.

    Parameters
    ----------
    board : the power-measurement board handle passed through to `GUI`.
    args : parsed command-line arguments passed through to `GUI`.
    """
    qt_app = QtGui.QApplication([])
    main_window = GUI(board, args)
    main_window.show()
    # Block until the user closes the window.
    QtGui.QApplication.instance().exec_()
| [
"pyqtgraph.Qt.QtGui.QVBoxLayout",
"pyqtgraph.Qt.QtGui.QWidget",
"numpy.column_stack",
"pyqtgraph.Qt.QtGui.QDialog",
"time.sleep",
"pyqtgraph.Qt.QtWidgets.QAction",
"pyqtgraph.Qt.QtGui.QApplication",
"pyqtgraph.Qt.QtGui.QFileDialog.getSaveFileName",
"pyqtgraph.mkPen",
"copy.deepcopy",
"pyqtgraph.... | [((2325, 2362), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""background"""', '"""w"""'], {}), "('background', 'w')\n", (2343, 2362), True, 'import pyqtgraph as pg\n'), ((2363, 2400), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""foreground"""', '"""k"""'], {}), "('foreground', 'k')\n", (2381, 2400), True, 'import pyqtgraph as pg\n'), ((55694, 55716), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (55712, 55716), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((2490, 2512), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (2508, 2512), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((2536, 2551), 'pyqtgraph.Qt.QtGui.QDialog', 'QtGui.QDialog', ([], {}), '()\n', (2549, 2551), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((2906, 2950), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Power Measurement Tool (PMT)"""'], {}), "('Power Measurement Tool (PMT)')\n", (2918, 2950), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((3216, 3270), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (["(PROGRAM_VERSION + ' - ' + COPYRIGHT_INFO)"], {}), "(PROGRAM_VERSION + ' - ' + COPYRIGHT_INFO)\n", (3228, 3270), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((3480, 3506), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['self.windows'], {}), '(self.windows)\n', (3492, 3506), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((3646, 3664), 'pyqtgraph.QtCore.QTimer', 'pg.QtCore.QTimer', ([], {}), '()\n', (3662, 3664), True, 'import pyqtgraph as pg\n'), ((3726, 3745), 'pyqtgraph.Qt.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (3743, 3745), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((4170, 4199), 'pyqtgraph.Qt.QtCore.QObject.__init__', 'QtCore.QObject.__init__', (['self'], {}), '(self)\n', (4193, 4199), False, 'from pyqtgraph.Qt import QtGui, QtCore, 
QtWidgets\n'), ((5555, 5574), 'pyqtgraph.Qt.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (5572, 5574), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((5603, 5625), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Time :"""'], {}), "('Time :')\n", (5615, 5625), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((10073, 10092), 'pyqtgraph.Qt.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (10090, 10092), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((10121, 10143), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Time :"""'], {}), "('Time :')\n", (10133, 10143), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((13400, 13419), 'pyqtgraph.Qt.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (13417, 13419), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16387, 16402), 'pyqtgraph.Qt.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (16400, 16402), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16424, 16442), 'pyqtgraph.QtCore.QTimer', 'pg.QtCore.QTimer', ([], {}), '()\n', (16440, 16442), True, 'import pyqtgraph as pg\n'), ((16505, 16518), 'pyqtgraph.Qt.QtGui.QMenu', 'QtGui.QMenu', ([], {}), '()\n', (16516, 16518), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16584, 16599), 'pyqtgraph.Qt.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (16597, 16599), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16634, 16649), 'pyqtgraph.Qt.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (16647, 16649), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16680, 16703), 'pyqtgraph.Qt.QtWidgets.QScrollArea', 'QtWidgets.QScrollArea', ([], {}), '()\n', (16701, 16703), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16730, 16749), 'pyqtgraph.Qt.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', ([], {}), '()\n', (16747, 16749), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), 
((16776, 16795), 'pyqtgraph.Qt.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (16793, 16795), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16821, 16840), 'pyqtgraph.Qt.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (16838, 16840), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16865, 16884), 'pyqtgraph.Qt.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (16882, 16884), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16909, 16928), 'pyqtgraph.Qt.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (16926, 16928), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((16954, 16973), 'pyqtgraph.Qt.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (16971, 16973), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((17005, 17024), 'pyqtgraph.Qt.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (17022, 17024), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((17055, 17074), 'pyqtgraph.Qt.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (17072, 17074), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((17103, 17137), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {'title': '"""Main Window"""'}), "(title='Main Window')\n", (17116, 17137), True, 'import pyqtgraph as pg\n'), ((17169, 17181), 'pyqtgraph.ViewBox', 'pg.ViewBox', ([], {}), '()\n', (17179, 17181), True, 'import pyqtgraph as pg\n'), ((17266, 17298), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {'title': '"""Zoom Area"""'}), "(title='Zoom Area')\n", (17279, 17298), True, 'import pyqtgraph as pg\n'), ((17328, 17340), 'pyqtgraph.ViewBox', 'pg.ViewBox', ([], {}), '()\n', (17338, 17340), True, 'import pyqtgraph as pg\n'), ((17422, 17443), 'pyqtgraph.LinearRegionItem', 'pg.LinearRegionItem', ([], {}), '()\n', (17441, 17443), True, 'import pyqtgraph as pg\n'), ((17596, 17623), 'pyqtgraph.Qt.QtCore.QThread', 'QtCore.QThread', ([], {'parent': 'self'}), 
'(parent=self)\n', (17610, 17623), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((17669, 17718), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.sigint_handler'], {}), '(signal.SIGINT, self.sigint_handler)\n', (17682, 17718), False, 'import signal\n'), ((18555, 18580), 'pyqtgraph.Qt.QtGui.QApplication.quit', 'QtGui.QApplication.quit', ([], {}), '()\n', (18578, 18580), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((19342, 19370), 'drv_ftdi.DATA_LOCK.acquire', 'drv_ftdi.DATA_LOCK.acquire', ([], {}), '()\n', (19368, 19370), False, 'import drv_ftdi\n'), ((19395, 19425), 'copy.deepcopy', 'copy.deepcopy', (['self.b.data_buf'], {}), '(self.b.data_buf)\n', (19408, 19425), False, 'import copy\n'), ((19434, 19462), 'drv_ftdi.DATA_LOCK.release', 'drv_ftdi.DATA_LOCK.release', ([], {}), '()\n', (19460, 19462), False, 'import drv_ftdi\n'), ((29640, 29759), 'pyqtgraph.Qt.QtGui.QFileDialog.getSaveFileName', 'QtGui.QFileDialog.getSaveFileName', ([], {'caption': '"""Save captured data as binary file"""', 'filter': '"""PMT captures .pmt (*.pmt)"""'}), "(caption=\n 'Save captured data as binary file', filter='PMT captures .pmt (*.pmt)')\n", (29673, 29759), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((30318, 30411), 'pyqtgraph.Qt.QtGui.QFileDialog.getSaveFileName', 'QtGui.QFileDialog.getSaveFileName', ([], {'caption': '"""Save captured data as csv file"""', 'filter': '"""csv"""'}), "(caption='Save captured data as csv file',\n filter='csv')\n", (30351, 30411), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((32786, 32885), 'pyqtgraph.Qt.QtGui.QFileDialog.getSaveFileName', 'QtGui.QFileDialog.getSaveFileName', ([], {'caption': '"""Capture plot picture to (.png) file"""', 'filter': '"""png"""'}), "(caption=\n 'Capture plot picture to (.png) file', filter='png')\n", (32819, 32885), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((33260, 33283), 'pyqtgraph.Qt.QtWidgets.QMessageBox', 
'QtWidgets.QMessageBox', ([], {}), '()\n', (33281, 33283), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((36028, 36056), 'drv_ftdi.DATA_LOCK.acquire', 'drv_ftdi.DATA_LOCK.acquire', ([], {}), '()\n', (36054, 36056), False, 'import drv_ftdi\n'), ((36232, 36260), 'drv_ftdi.DATA_LOCK.release', 'drv_ftdi.DATA_LOCK.release', ([], {}), '()\n', (36258, 36260), False, 'import drv_ftdi\n'), ((46733, 46754), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Rails"""'], {}), "('Rails')\n", (46745, 46754), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((46916, 46941), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Rail name"""'], {}), "('Rail name')\n", (46928, 46941), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((46965, 46985), 'pyqtgraph.Qt.QtGui.QCheckBox', 'QtGui.QCheckBox', (['"""P"""'], {}), "('P')\n", (46980, 46985), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((47009, 47029), 'pyqtgraph.Qt.QtGui.QCheckBox', 'QtGui.QCheckBox', (['"""V"""'], {}), "('V')\n", (47024, 47029), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((47053, 47073), 'pyqtgraph.Qt.QtGui.QCheckBox', 'QtGui.QCheckBox', (['"""I"""'], {}), "('I')\n", (47068, 47073), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((47099, 47118), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Res"""'], {}), "('Res')\n", (47111, 47118), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((54312, 54333), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""RAILS"""'], {}), "('RAILS')\n", (54324, 54333), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((3587, 3623), 'pyqtgraph.Qt.QtGui.QPixmap', 'QtGui.QPixmap', (['"""docs/images/nxp.png"""'], {}), "('docs/images/nxp.png')\n", (3600, 3623), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((17871, 18014), 'pyqtgraph.Qt.QtGui.QMessageBox.question', 'QtGui.QMessageBox.question', (['None', '""""""', '"""Are you sure you want to 
quit?"""', '(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)', 'QtGui.QMessageBox.No'], {}), "(None, '', 'Are you sure you want to quit?', \n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)\n", (17897, 18014), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((18277, 18302), 'pyqtgraph.Qt.QtGui.QApplication.quit', 'QtGui.QApplication.quit', ([], {}), '()\n', (18300, 18302), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((19819, 19853), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': 'np.float16'}), '([1, 2], dtype=np.float16)\n', (19827, 19853), True, 'import numpy as np\n'), ((21247, 21269), 'numpy.empty_like', 'np.empty_like', (['voltage'], {}), '(voltage)\n', (21260, 21269), True, 'import numpy as np\n'), ((30023, 30063), 'pickle.dump', 'pickle.dump', (['self.rail_buf', 'file_out', '(-1)'], {}), '(self.rail_buf, file_out, -1)\n', (30034, 30063), False, 'import pickle\n'), ((30076, 30118), 'pickle.dump', 'pickle.dump', (['self.groups_buf', 'file_out', '(-1)'], {}), '(self.groups_buf, file_out, -1)\n', (30087, 30118), False, 'import pickle\n'), ((30451, 30476), 'os.path.splitext', 'os.path.splitext', (['name[0]'], {}), '(name[0])\n', (30467, 30476), False, 'import os\n'), ((32996, 33009), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (33006, 33009), False, 'import time\n'), ((33031, 33065), 'pyqtgraph.Qt.QtGui.QApplication.primaryScreen', 'QtGui.QApplication.primaryScreen', ([], {}), '()\n', (33063, 33065), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((36124, 36158), 'numpy.empty', 'np.empty', (['[1, 2]'], {'dtype': 'np.float16'}), '([1, 2], dtype=np.float16)\n', (36132, 36158), True, 'import numpy as np\n'), ((36189, 36223), 'numpy.empty', 'np.empty', (['[1, 2]'], {'dtype': 'np.float16'}), '([1, 2], dtype=np.float16)\n', (36197, 36223), True, 'import numpy as np\n'), ((43640, 43726), 'pyqtgraph.Qt.QtWidgets.QAction', 'QtWidgets.QAction', (['"""Enable PAC hardware filter"""', 
'self.settingmenu'], {'checkable': '(True)'}), "('Enable PAC hardware filter', self.settingmenu, checkable\n =True)\n", (43657, 43726), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((43860, 43945), 'pyqtgraph.Qt.QtWidgets.QAction', 'QtWidgets.QAction', (['"""Enable PAC bipolar values"""', 'self.settingmenu'], {'checkable': '(True)'}), "('Enable PAC bipolar values', self.settingmenu, checkable=True\n )\n", (43877, 43945), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((45257, 45282), 'pyqtgraph.Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['""""""'], {}), "('')\n", (45278, 45282), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((45440, 45465), 'pyqtgraph.Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['""""""'], {}), "('')\n", (45461, 45465), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((45626, 45651), 'pyqtgraph.Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['""""""'], {}), "('')\n", (45647, 45651), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((45815, 45840), 'pyqtgraph.Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['""""""'], {}), "('')\n", (45836, 45840), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((46789, 46831), 'pyqtgraph.Qt.QtGui.QFont', 'QtGui.QFont', (['"""Arial"""', '(8)', 'QtGui.QFont.Black'], {}), "('Arial', 8, QtGui.QFont.Black)\n", (46800, 46831), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((50331, 50353), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Groups"""'], {}), "('Groups')\n", (50343, 50353), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((53924, 53946), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""GROUPS"""'], {}), "('GROUPS')\n", (53936, 53946), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((54366, 54408), 'pyqtgraph.Qt.QtGui.QFont', 'QtGui.QFont', (['"""Arial"""', '(8)', 'QtGui.QFont.Black'], {}), "('Arial', 8, QtGui.QFont.Black)\n", (54377, 
54408), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((54448, 54468), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Name"""'], {}), "('Name')\n", (54460, 54468), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((54514, 54535), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""P_avg"""'], {}), "('P_avg')\n", (54526, 54535), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((54581, 54602), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""V_avg"""'], {}), "('V_avg')\n", (54593, 54602), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((54648, 54669), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""I_avg"""'], {}), "('I_avg')\n", (54660, 54669), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((55771, 55800), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (55798, 55800), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((5150, 5186), 'pyqtgraph.Qt.QtGui.QTableWidgetItem', 'QtGui.QTableWidgetItem', (["rail['name']"], {}), "(rail['name'])\n", (5172, 5186), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((5394, 5431), 'pyqtgraph.Qt.QtGui.QTableWidgetItem', 'QtGui.QTableWidgetItem', (["group['name']"], {}), "(group['name'])\n", (5416, 5431), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((5496, 5526), 'pyqtgraph.Qt.QtGui.QColor', 'QtGui.QColor', (['(169)', '(169)', '(0)', '(169)'], {}), '(169, 169, 0, 169)\n', (5508, 5526), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((6257, 6279), 'numpy.empty_like', 'np.empty_like', (['voltage'], {}), '(voltage)\n', (6270, 6279), True, 'import numpy as np\n'), ((9668, 9704), 'pyqtgraph.Qt.QtGui.QTableWidgetItem', 'QtGui.QTableWidgetItem', (["rail['name']"], {}), "(rail['name'])\n", (9690, 9704), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((9912, 9949), 'pyqtgraph.Qt.QtGui.QTableWidgetItem', 'QtGui.QTableWidgetItem', 
(["group['name']"], {}), "(group['name'])\n", (9934, 9949), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((10014, 10044), 'pyqtgraph.Qt.QtGui.QColor', 'QtGui.QColor', (['(169)', '(169)', '(0)', '(169)'], {}), '(169, 169, 0, 169)\n', (10026, 10044), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((10774, 10796), 'numpy.empty_like', 'np.empty_like', (['voltage'], {}), '(voltage)\n', (10787, 10796), True, 'import numpy as np\n'), ((12995, 13031), 'pyqtgraph.Qt.QtGui.QTableWidgetItem', 'QtGui.QTableWidgetItem', (["rail['name']"], {}), "(rail['name'])\n", (13017, 13031), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((13239, 13276), 'pyqtgraph.Qt.QtGui.QTableWidgetItem', 'QtGui.QTableWidgetItem', (["group['name']"], {}), "(group['name'])\n", (13261, 13276), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((13341, 13371), 'pyqtgraph.Qt.QtGui.QColor', 'QtGui.QColor', (['(169)', '(169)', '(0)', '(169)'], {}), '(169, 169, 0, 169)\n', (13353, 13371), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((20033, 20067), 'numpy.empty_like', 'np.empty_like', (["rail['voltage'][1:]"], {}), "(rail['voltage'][1:])\n", (20046, 20067), True, 'import numpy as np\n'), ((29847, 29872), 'os.path.splitext', 'os.path.splitext', (['name[0]'], {}), '(name[0])\n', (29863, 29872), False, 'import os\n'), ((32924, 32949), 'os.path.splitext', 'os.path.splitext', (['name[0]'], {}), '(name[0])\n', (32940, 32949), False, 'import os\n'), ((33796, 33807), 'time.time', 'time.time', ([], {}), '()\n', (33805, 33807), False, 'import time\n'), ((35084, 35095), 'time.time', 'time.time', ([], {}), '()\n', (35093, 35095), False, 'import time\n'), ((45318, 45355), 'pyqtgraph.Qt.QtGui.QIcon', 'QtGui.QIcon', (['"""docs/images/record.png"""'], {}), "('docs/images/record.png')\n", (45329, 45355), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((45500, 45535), 'pyqtgraph.Qt.QtGui.QIcon', 'QtGui.QIcon', 
(['"""docs/images/stop.png"""'], {}), "('docs/images/stop.png')\n", (45511, 45535), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((45687, 45723), 'pyqtgraph.Qt.QtGui.QIcon', 'QtGui.QIcon', (['"""docs/images/pause.png"""'], {}), "('docs/images/pause.png')\n", (45698, 45723), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((45875, 45911), 'pyqtgraph.Qt.QtGui.QIcon', 'QtGui.QIcon', (['"""docs/images/trash.png"""'], {}), "('docs/images/trash.png')\n", (45886, 45911), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((47726, 47757), 'pyqtgraph.Qt.QtGui.QPushButton', 'QtGui.QPushButton', (["rail['name']"], {}), "(rail['name'])\n", (47743, 47757), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((47793, 47806), 'pyqtgraph.Qt.QtGui.QMenu', 'QtGui.QMenu', ([], {}), '()\n', (47804, 47806), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((48428, 48459), 'pyqtgraph.ColorButton', 'pg.ColorButton', ([], {'color': 'COLORS[i]'}), '(color=COLORS[i])\n', (48442, 48459), True, 'import pyqtgraph as pg\n'), ((48613, 48630), 'pyqtgraph.Qt.QtGui.QCheckBox', 'QtGui.QCheckBox', ([], {}), '()\n', (48628, 48630), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((48719, 48736), 'pyqtgraph.Qt.QtGui.QCheckBox', 'QtGui.QCheckBox', ([], {}), '()\n', (48734, 48736), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((48775, 48792), 'pyqtgraph.Qt.QtGui.QCheckBox', 'QtGui.QCheckBox', ([], {}), '()\n', (48790, 48792), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((49521, 49547), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (["rail['name']"], {}), "(rail['name'])\n", (49533, 49547), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((49590, 49606), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['""""""'], {}), "('')\n", (49602, 49606), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((49649, 49665), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', 
(['""""""'], {}), "('')\n", (49661, 49665), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((49708, 49724), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['""""""'], {}), "('')\n", (49720, 49724), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((50393, 50435), 'pyqtgraph.Qt.QtGui.QFont', 'QtGui.QFont', (['"""Arial"""', '(8)', 'QtGui.QFont.Black'], {}), "('Arial', 8, QtGui.QFont.Black)\n", (50404, 50435), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((53984, 54026), 'pyqtgraph.Qt.QtGui.QFont', 'QtGui.QFont', (['"""Arial"""', '(8)', 'QtGui.QFont.Black'], {}), "('Arial', 8, QtGui.QFont.Black)\n", (53995, 54026), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((54071, 54091), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""Name"""'], {}), "('Name')\n", (54083, 54091), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((54142, 54163), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""P_avg"""'], {}), "('P_avg')\n", (54154, 54163), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((19756, 19790), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': 'np.float16'}), '([1, 2], dtype=np.float16)\n', (19764, 19790), True, 'import numpy as np\n'), ((31439, 31473), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': 'np.float16'}), '([1, 2], dtype=np.float16)\n', (31447, 31473), True, 'import numpy as np\n'), ((32521, 32542), 'numpy.column_stack', 'np.column_stack', (['data'], {}), '(data)\n', (32536, 32542), True, 'import numpy as np\n'), ((34180, 34191), 'time.time', 'time.time', ([], {}), '()\n', (34189, 34191), False, 'import time\n'), ((38693, 38728), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (38703, 38728), False, 'import csv\n'), ((50600, 50632), 'pyqtgraph.Qt.QtGui.QPushButton', 'QtGui.QPushButton', (["group['name']"], {}), "(group['name'])\n", (50617, 50632), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), 
((50680, 50718), 'pyqtgraph.ColorButton', 'pg.ColorButton', ([], {'color': 'GROUPS_COLORS[i]'}), '(color=GROUPS_COLORS[i])\n', (50694, 50718), True, 'import pyqtgraph as pg\n'), ((50885, 50898), 'pyqtgraph.Qt.QtGui.QMenu', 'QtGui.QMenu', ([], {}), '()\n', (50896, 50898), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((51013, 51030), 'pyqtgraph.Qt.QtGui.QCheckBox', 'QtGui.QCheckBox', ([], {}), '()\n', (51028, 51030), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((51558, 51585), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (["group['name']"], {}), "(group['name'])\n", (51570, 51585), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((51638, 51654), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['""""""'], {}), "('')\n", (51650, 51654), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((42226, 42247), 'pickle.load', 'pickle.load', (['pkl_file'], {}), '(pkl_file)\n', (42237, 42247), False, 'import pickle\n'), ((47950, 47967), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""X"""'], {}), "('X')\n", (47962, 47967), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((48194, 48211), 'pyqtgraph.Qt.QtGui.QLabel', 'QtGui.QLabel', (['"""H"""'], {}), "('H')\n", (48206, 48211), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((22428, 22472), 'pyqtgraph.mkPen', 'pg.mkPen', (['COLORS[i]'], {'style': 'QtCore.Qt.DotLine'}), '(COLORS[i], style=QtCore.Qt.DotLine)\n', (22436, 22472), True, 'import pyqtgraph as pg\n'), ((22528, 22572), 'pyqtgraph.mkPen', 'pg.mkPen', (['COLORS[i]'], {'style': 'QtCore.Qt.DotLine'}), '(COLORS[i], style=QtCore.Qt.DotLine)\n', (22536, 22572), True, 'import pyqtgraph as pg\n'), ((22788, 22823), 'pyqtgraph.mkPen', 'pg.mkPen', (['GROUPS_COLORS[j]'], {'width': '(3)'}), '(GROUPS_COLORS[j], width=3)\n', (22796, 22823), True, 'import pyqtgraph as pg\n'), ((22886, 22921), 'pyqtgraph.mkPen', 'pg.mkPen', (['GROUPS_COLORS[j]'], {'width': '(3)'}), '(GROUPS_COLORS[j], 
width=3)\n', (22894, 22921), True, 'import pyqtgraph as pg\n'), ((31853, 31887), 'numpy.empty_like', 'np.empty_like', (["rail['voltage'][1:]"], {}), "(rail['voltage'][1:])\n", (31866, 31887), True, 'import numpy as np\n'), ((34275, 34302), 'pyqtgraph.Qt.QtGui.QColor', 'QtGui.QColor', (['(255)', '(0)', '(0)', '(50)'], {}), '(255, 0, 0, 50)\n', (34287, 34302), False, 'from pyqtgraph.Qt import QtGui, QtCore, QtWidgets\n'), ((42496, 42517), 'pickle.load', 'pickle.load', (['pkl_file'], {}), '(pkl_file)\n', (42507, 42517), False, 'import pickle\n'), ((21838, 21898), 'pyqtgraph.mkPen', 'pg.mkPen', (['COLORS[i]'], {'width': '(2)', 'style': 'QtCore.Qt.DashDotDotLine'}), '(COLORS[i], width=2, style=QtCore.Qt.DashDotDotLine)\n', (21846, 21898), True, 'import pyqtgraph as pg\n'), ((22142, 22202), 'pyqtgraph.mkPen', 'pg.mkPen', (['COLORS[i]'], {'width': '(2)', 'style': 'QtCore.Qt.DashDotDotLine'}), '(COLORS[i], width=2, style=QtCore.Qt.DashDotDotLine)\n', (22150, 22202), True, 'import pyqtgraph as pg\n'), ((40386, 40461), 'numpy.empty', 'np.empty', (["[self.rail_buf[ind]['current'].shape[0] + 1, 2]"], {'dtype': 'np.float16'}), "([self.rail_buf[ind]['current'].shape[0] + 1, 2], dtype=np.float16)\n", (40394, 40461), True, 'import numpy as np\n'), ((40601, 40676), 'numpy.empty', 'np.empty', (["[self.rail_buf[ind]['voltage'].shape[0] + 1, 2]"], {'dtype': 'np.float16'}), "([self.rail_buf[ind]['voltage'].shape[0] + 1, 2], dtype=np.float16)\n", (40609, 40676), True, 'import numpy as np\n'), ((41604, 41681), 'numpy.empty', 'np.empty', (["[self.groups_buf[ind_g]['power'].shape[0] + 1, 2]"], {'dtype': 'np.float16'}), "([self.groups_buf[ind_g]['power'].shape[0] + 1, 2], dtype=np.float16)\n", (41612, 41681), True, 'import numpy as np\n'), ((39436, 39470), 'numpy.empty', 'np.empty', (['[1, 2]'], {'dtype': 'np.float16'}), '([1, 2], dtype=np.float16)\n', (39444, 39470), True, 'import numpy as np\n'), ((39524, 39558), 'numpy.empty', 'np.empty', (['[1, 2]'], {'dtype': 'np.float16'}), '([1, 2], 
dtype=np.float16)\n', (39532, 39558), True, 'import numpy as np\n'), ((39892, 39926), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {'dtype': 'np.float16'}), '([1, 2], dtype=np.float16)\n', (39900, 39926), True, 'import numpy as np\n')] |
from __future__ import print_function # Python 2.x
import os
import numpy as np
import pandas as pd
import h5py
import sys
import math
from fnmatch import fnmatch
# helper functions klusta analysis pipeline
def get_param_file(filename, params_folder):
    """Search *params_folder* recursively for a parameter file matching *filename*.

    A file matches when any non-empty path component of *filename* occurs as
    a substring of the candidate file's name.

    Parameters
    ----------
    filename : str
        '/'-separated path of the recording whose parameter file is wanted.
    params_folder : str
        Root folder that is walked recursively.

    Returns
    -------
    (found, params) : tuple
        ``(True, <matching file name>)`` for the first match found, otherwise
        ``(False, [])`` (the empty list is kept for backward compatibility).
    """
    # Drop empty tokens produced by leading or doubled slashes: '' is a
    # substring of every name and would match any file.
    tokens = [tok for tok in filename.split("/") if tok]
    for _path, _subdirs, files in os.walk(params_folder):
        # BUG FIX: the original returned unconditionally after inspecting the
        # first file of the first directory; now the whole tree is searched.
        for name in files:
            if any(tok in name for tok in tokens):
                return True, name  # first match wins
    return False, []
def n_user(filename):
    """Return the N-drive username embedded in *filename*'s path.

    The username is the path element that directly follows the mount marker:
    'MosersServer' on macOS/Linux mounts or 'N:' on Windows drive paths.

    Returns
    -------
    str
        The username, or 'non_identified' when no marker is present
        (the original code raised IndexError in that case).
    """
    # NOTE(review): paths are split on '/' only; Windows paths using
    # backslashes are not handled here — matches the original behaviour,
    # confirm callers normalise separators.
    parts = filename.split('/')
    for marker in ('MosersServer', 'N:'):
        if marker in parts:
            idx = parts.index(marker)
            if idx + 1 < len(parts):
                # Username is the path element right after the marker.
                return parts[idx + 1]
    return 'non_identified'  # marker missing: report unknown instead of crashing
def create_export_folders(filename):
    """Create the KLUSTA export directory tree next to *filename*.

    Creates (if missing) KLUSTA/, KLUSTA/base_session, KLUSTA/other_session
    and KLUSTA/LFP in the directory containing *filename*.

    Parameters
    ----------
    filename : str
        '/'-separated path of the recording file.

    Returns
    -------
    tuple of str
        (export_folder, export_folder_basesession,
         export_folder_othersessions, export_folder_lfp)
        — same values and trailing-slash conventions as before.
    """
    base_dir = "/".join(filename.split("/")[:-1])
    export_folder = base_dir + "/KLUSTA/"
    export_folder_basesession = base_dir + "/KLUSTA/base_session"
    export_folder_othersessions = base_dir + "/KLUSTA/other_session"
    export_folder_lfp = base_dir + "/KLUSTA/LFP"
    # BUG FIX: the original duplicated the other_session creation block
    # verbatim; one loop now handles all four folders.
    for folder in (export_folder, export_folder_basesession,
                   export_folder_othersessions, export_folder_lfp):
        if not os.path.exists(folder):
            os.makedirs(folder)
            print('Created export folder: {:s}'.format(folder))
    return export_folder, export_folder_basesession, export_folder_othersessions, export_folder_lfp
def get_clusters(filename,key=None):
    """Return the ids of all clusters in *filename* whose cluster group is *key*.

    Parameters
    ----------
    filename : str
        Path to a .kwik file.
    key : str
        Cluster-group name to select (e.g. 'Good'); mandatory despite the
        default — the process exits when it is missing.

    Exits the interpreter when no key is given or no matching cluster exists.
    """
    if not key:
        sys.stdout.write('No cluster group key given.')
        sys.exit()
    cluster_group_names = []
    key_clusters =[]
    with h5py.File(filename, mode="r") as f:
        # Build the index -> group-name table first.
        cluster_groups = f['channel_groups/1/cluster_groups/main/'].keys()
        for clusterg in cluster_groups:
            # NOTE(review): relies on attrs.values() yielding the group name
            # first — h5py-version dependent; confirm on upgrade.
            name = f['channel_groups/1/cluster_groups/main/'+clusterg].attrs.values()
            cluster_group_names.append(name[0][0])
        # Collect every cluster whose group name equals the requested key.
        # .iteritems() is the Python 2 API (file header marks this as Py2).
        for cluster in f['/channel_groups/1/clusters/main'].iteritems():
            name = f['channel_groups/1/clusters/main/'+cluster[0]].attrs.get('cluster_group')
            if cluster_group_names[int(name)] == key:
                key_clusters.append(int(cluster[0]))
    print('{} clusters: {}'.format(key,key_clusters))
    if not key_clusters:
        print('None found :(')
        sys.exit() # apparently this is a bad way to abort the execution ... but it does the job.
    return key_clusters
def get_clusters_dont_exit(filename,key=None):
    '''
    Non-aborting variant of get_clusters: returns an (possibly empty) list of
    cluster ids in *filename* whose cluster group is *key*, swallowing
    KeyError when the file has no valid cluster groups.

    Had to add this function to debug stimulus artefacts ... it serves no other purpose...
    '''
    if not key:
        sys.stdout.write('No cluster group key given.')
        sys.exit()
    cluster_group_names = []
    key_clusters =[]
    try:
        with h5py.File(filename, mode="r") as f:
            # Build the index -> group-name table first.
            cluster_groups = f['channel_groups/1/cluster_groups/main/'].keys()
            for clusterg in cluster_groups:
                # NOTE(review): relies on attrs.values() yielding the group
                # name first — h5py-version dependent; confirm on upgrade.
                name = f['channel_groups/1/cluster_groups/main/'+clusterg].attrs.values()
                cluster_group_names.append(name[0][0])
            # Collect every cluster whose group name equals the requested key.
            # .iteritems() is the Python 2 API (file header marks this as Py2).
            for cluster in f['/channel_groups/1/clusters/main'].iteritems():
                name = f['channel_groups/1/clusters/main/'+cluster[0]].attrs.get('cluster_group')
                if cluster_group_names[int(name)] == key:
                    key_clusters.append(int(cluster[0]))
        print('{} clusters: {}'.format(key,key_clusters))
        if not key_clusters:
            print('None found :(')
    except KeyError as err:
        # File lacks the expected hdf5 layout: report and return [].
        print('No valid cluster groups found.')
    return key_clusters
def get_basenames(filename):
    """Read the list of session basenames stored in the .kwik file."""
    with h5py.File(filename, mode="r") as kwik:
        return kwik['basenames'][:]
def extract_times(filename):
    """Pull spike times, sample rate and session boundaries from a .kwik file.

    Returns
    -------
    (spiketimes, sample_rate, time_stamps, time_stamps_sessions,
     time_stamps_sessions_sample_rate) — spike times in samples, the
    spikedetekt sample rate, raw session event stamps, cumulative per-session
    sample counts and the (hard-coded) timebase of those counts.
    """
    with h5py.File(filename, mode="r") as kwik:
        spike_samples = np.array(kwik['/channel_groups/1/spikes/time_samples'][:], dtype=float)
        fs = float(kwik['/application_data/spikedetekt'].attrs.get('sample_rate'))
        event_stamps = np.array(kwik['/event_types/sessions/events/time_samples'][:], dtype=float)
        # Per-session sample counts accumulated into absolute boundaries
        # ("yet another way" of storing the session time stamps).
        session_boundaries = np.cumsum(np.array(kwik['/time_stamps_sessions'][:], dtype=float))
    fs_boundaries = 96000.  # hard-coded: cannot be read from the hdf5 file
    print('Extracted spiketimes.')
    return spike_samples, fs, event_stamps, session_boundaries, fs_boundaries
def extract_input(filename):
    """Extract the recorded input channel (TTL/stimulus) from a .kwik file.

    Returns
    -------
    (input_data, time_inp, time_stamps_sessions_input, sample_rate_inp,
     num_inp_samples, duration_inp)
        Times are in milliseconds.  When the file stores no input sample
        rate, every element of the tuple is np.nan instead.
    """
    with h5py.File(filename, mode="r") as f:
        input_data = f['/input/input_data'][:]
        sample_rate_inp = f['/input'].attrs.get('sample_rate_inp')
        status=True
        try:
            sample_rate_inp = float(sample_rate_inp)
        except TypeError as error:
            # attrs.get returned None -> no input was recorded for this file.
            status=False
        if not status:
            # Keep the 6-tuple return signature but fill everything with NaN.
            input_data=np.nan;time_inp=np.nan;time_stamps_sessions_input=np.nan;
            sample_rate_inp=np.nan;num_inp_samples=np.nan;duration_inp=np.nan
            return input_data,time_inp,time_stamps_sessions_input,sample_rate_inp,num_inp_samples,duration_inp
        # process:
        # NOTE(review): assumes input_data is a structured array with a
        # 'time' field — confirm against the writer of /input/input_data.
        time_inp = np.array(input_data['time'],dtype=float)
        time_inp = time_inp/sample_rate_inp*1000. # in ms
        num_inp_samples = len(input_data)
        time_stamps_sessions_input = f['/input/time_stamps_sessions_input'][:]
        time_stamps_sessions_input = np.array(time_stamps_sessions_input,dtype=np.float64)
        # Per-session sample counts -> cumulative session boundaries in ms.
        time_stamps_sessions_input = np.cumsum(time_stamps_sessions_input)
        time_stamps_sessions_input = time_stamps_sessions_input/sample_rate_inp*1000. # in ms
        duration_inp = (time_stamps_sessions_input)[-1] # in ms
    print('Extracted input data.')
    return input_data,time_inp,time_stamps_sessions_input,sample_rate_inp,num_inp_samples,duration_inp
def extract_waveforms(filename):
    """Load raw spike waveforms from the .kwx file accompanying a .kwik file.

    Parameters
    ----------
    filename : str
        Path to the .kwik file; the matching .kwx path is derived by
        swapping the '.kwik' extension for '.kwx'.

    Returns
    -------
    waveforms : ndarray
        Contents of '/channel_groups/1/waveforms_raw'.
    """
    # extract waveforms:
    kwx_filename = filename[:-5] + '.kwx'
    # Bug fix: the original code opened the file a first time via
    # h5py.File(kwx_filename, 'r') into an unused variable that was never
    # closed (leaked file handle), then opened it again. Open it once.
    with h5py.File(kwx_filename, mode="r") as f:
        waveforms = f['/channel_groups/1/waveforms_raw'][:]
    print('Extracted waveforms.')
    return waveforms
def extract_positions(filename):
    """Load animal tracking data from a .kwik HDF5 file.

    Returns (data_pos, time_stamps_sessions_pos, timebase_pos) where the
    session time stamps are cumulative.
    """
    with h5py.File(filename, mode="r") as handle:
        data_pos = handle['/positions/data_pos'][:]
        raw_stamps = np.array(handle['/positions/time_stamps_sessions_pos'][:])
        time_stamps_sessions_pos = np.cumsum(raw_stamps, dtype=float)
        timebase_pos = float(handle['/positions/'].attrs.get('timebase_pos'))
    print('Extracted tracking data.')
    return data_pos, time_stamps_sessions_pos, timebase_pos
#print('Found position samples at {} Hz ({} seconds over {} session(s))'.format(timebase_pos,time_stamps_sessions_pos[-1]/timebase_pos,len(time_stamps_sessions_pos)-1))
def extract_lfp(filename):
    """Load raw LFP/EEG traces and their cumulative per-session sample counts."""
    with h5py.File(filename, mode="r") as handle:
        eeg_raw = handle['/eeg/data_eeg'][:]
        samples_sessions_eeg = handle['/eeg/samples_sessions_eeg'][:]
        sample_rate_eeg = handle['/eeg/'].attrs.get('sample_rate_eeg')
    # Cumulative counts mark the session boundaries in EEG samples.
    samples_sessions_eeg = np.cumsum(samples_sessions_eeg)
    print('Extracted LFP data.')
    #print('Found EEG: {} samples at {} Hz ({} s). Separate LFPs recorded: {}'.format(eeg_raw.shape[1],sample_rate_eeg,len_eeg_s,len(eeg_raw)))
    return eeg_raw, samples_sessions_eeg, sample_rate_eeg
def eeg_make_df(eeg_raw, data_pos_df, sample_rate_eeg, timebase_pos):
    """Assemble a per-sample EEG DataFrame indexed by time in seconds.

    Parameters
    ----------
    eeg_raw : mapping
        Must contain key 'eeg' with an array of shape (n_channels, n_samples).
    data_pos_df : pandas.DataFrame
        Position data; its 'speed_filtered' column is upsampled to the EEG rate.
    sample_rate_eeg : number
        EEG sampling rate in Hz.
    timebase_pos : number
        Position sampling rate in Hz.

    Returns
    -------
    pandas.DataFrame
        One 'eegN' column per channel, plus 'eeg_mean' and 'speed',
        indexed by time in seconds.
    """
    eeg_df = pd.DataFrame()
    # Bug fix: `xrange` is Python 2 only; `range` keeps this working on Python 3.
    for eeg_no in range(eeg_raw['eeg'].shape[0]):
        eeg_df['eeg{}'.format(eeg_no)] = eeg_raw['eeg'][eeg_no]
    eeg_df['eeg_mean'] = np.mean(eeg_raw['eeg'], axis=0)
    # np.repeat requires an integer repeat count; the rate ratio may be a float.
    upsample = int(round(sample_rate_eeg / timebase_pos))
    eeg_df['speed'] = np.repeat(data_pos_df['speed_filtered'].values, upsample)
    eeg_df['time'] = eeg_df.index.values.astype(float) / float(sample_rate_eeg)  # in sec
    eeg_df.set_index('time', drop=True, append=False, inplace=True, verify_integrity=False)
    return eeg_df
def sanity_check(spiketimes=None,sample_rate=None,time_stamps=None,time_stamps_sessions=None,time_stamps_sessions_sample_rate=None,
                 waveforms=None,time_stamps_sessions_pos=None,timebase_pos=None,data_pos=None,time_stamps_sessions_input=None,
                 samples_sessions_eeg=None,sample_rate_eeg=None):
    """Cross-check session lengths between spikes, waveforms, tracking, input and LFP data.

    Each of the five checks is skipped with an informational message when
    its inputs are missing. A detected inconsistency (or missing data that
    is required downstream, i.e. input/LFP metadata) terminates the
    process via sys.exit().
    """
    # 1) Spike record vs. session time stamps.
    try:
        sys.stdout.write('\nComparing recorded session lengths...')
        if time_stamps is not None and sample_rate and time_stamps_sessions is not None and time_stamps_sessions_sample_rate:
            length_session1 = float(time_stamps[-1])/sample_rate
            length_session2 = float(time_stamps_sessions[-1])/time_stamps_sessions_sample_rate
            # NOTE(review): exact float comparison; assumes both lengths derive
            # from the same integer sample counts — confirm if rates can differ.
            if length_session1 != length_session2:
                sys.stdout.write('\rInconsistency in calculated session lengths.')
                sys.exit()
            else:
                sys.stdout.write('Success.')
        else:
            sys.stdout.write('\rNo basic session information found.')
    except AttributeError:
        sys.stdout.write('\rNo basic session information found.')
    except TypeError:
        sys.stdout.write('\rNo basic session information found.')
    # 2) One waveform recorded per spike?
    try:
        sys.stdout.write('\nComparing waveform and spike numbers...')
        if waveforms.shape[0] != len(spiketimes): # as many waveforms recorded as spikes?
            sys.stdout.write('\rNumber of recorded waveforms does not match length of recorded spikes.')
            sys.exit()
        else:
            sys.stdout.write('Success.')
    except AttributeError:
        sys.stdout.write('\rNo waveforms loaded.')
    except TypeError:
        sys.stdout.write('\rNo waveforms loaded.')
    # 3) Session lengths vs. position record.
    try:
        sys.stdout.write('\nComparing recorded session lengths and position record...')
        if time_stamps_sessions_pos is not None and timebase_pos and time_stamps is not None and sample_rate and data_pos is not None:
            if not (time_stamps/sample_rate == time_stamps_sessions_pos/timebase_pos).all():
                sys.stdout.write('\rLength of sessions and position file do not match.')
                sys.exit()
            elif not len(data_pos)/timebase_pos == (time_stamps/sample_rate)[-1]:
                sys.stdout.write('\rLength of sessions and position file do not match.')
                sys.exit()
            else:
                sys.stdout.write('Success.')
        else:
            sys.stdout.write('\rNo position data loaded.')
    except AttributeError:
        sys.stdout.write('\rNo position data loaded.')
    except TypeError:
        sys.stdout.write('\rNo position data loaded.')
    # 4) Session lengths vs. input data (missing metadata here is fatal).
    try:
        sys.stdout.write('\nComparing recorded session lengths and input data...')
        if time_stamps is not None and sample_rate and time_stamps_sessions_input is not None:
            time_stamps_session = np.cumsum(time_stamps)/sample_rate
            if (time_stamps_session != np.cumsum(time_stamps_sessions_input)/1000).any():
                sys.stdout.write('\rInconsistency in calculated session lengths (session vs. input)')
                sys.exit()
            else:
                sys.stdout.write('Success.')
        else:
            sys.stdout.write('\rNo basic session or input information found! ')
    except AttributeError:
        sys.stdout.write('\rNo basic session or input information found! ')
        sys.exit() # this is fatal ...
    except TypeError:
        sys.stdout.write('\rNo basic session or input information found! ')
        sys.exit() # this is fatal ...
    # 5) Session lengths vs. LFP data (missing metadata here is fatal).
    try:
        sys.stdout.write('\nComparing recorded session lengths and LFP data...')
        if time_stamps is not None and sample_rate and samples_sessions_eeg is not None and sample_rate_eeg:
            # Bug fix: use .any() as in the input check above — the previous
            # .all() only fired when EVERY session boundary differed, so a
            # single mismatching boundary went undetected.
            if (time_stamps/sample_rate != samples_sessions_eeg/sample_rate_eeg).any():
                sys.stdout.write('\rInconsistency in calculated session lengths (session vs. LFP)')
                sys.exit()
            else:
                sys.stdout.write('Success.')
        else:
            sys.stdout.write('\rNo basic session or LFP information found!')
    except AttributeError:
        sys.stdout.write('\rNo basic session or LFP information found!')
        sys.exit() # this is fatal ...
    except TypeError:
        sys.stdout.write('\rNo basic session or LFP information found!')
        sys.exit() # this is fatal ...
def find_nearest(array, value):
    """Return the index of the element of sorted `array` closest to `value`.

    Ties between the two neighbours go to the right-hand (larger) element.
    """
    pos = np.searchsorted(array, value, side="left")
    if pos == 0:
        return pos
    if pos == len(array):
        return pos - 1
    # Interior position: compare distances to the two candidate neighbours.
    if math.fabs(value - array[pos - 1]) < math.fabs(value - array[pos]):
        return pos - 1
    return pos
print('Loaded analysis helpers: General')
| [
"numpy.mean",
"os.path.exists",
"numpy.repeat",
"os.makedirs",
"numpy.searchsorted",
"h5py.File",
"numpy.array",
"math.fabs",
"sys.exit",
"pandas.DataFrame",
"numpy.cumsum",
"os.walk",
"sys.stdout.write"
] | [((320, 342), 'os.walk', 'os.walk', (['params_folder'], {}), '(params_folder)\n', (327, 342), False, 'import os\n'), ((6986, 7014), 'h5py.File', 'h5py.File', (['kwx_filename', '"""r"""'], {}), "(kwx_filename, 'r')\n", (6995, 7014), False, 'import h5py\n'), ((8032, 8063), 'numpy.cumsum', 'np.cumsum', (['samples_sessions_eeg'], {}), '(samples_sessions_eeg)\n', (8041, 8063), True, 'import numpy as np\n'), ((8378, 8392), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8390, 8392), True, 'import pandas as pd\n'), ((8533, 8564), 'numpy.mean', 'np.mean', (["eeg_raw['eeg']"], {'axis': '(0)'}), "(eeg_raw['eeg'], axis=0)\n", (8540, 8564), True, 'import numpy as np\n'), ((8586, 8665), 'numpy.repeat', 'np.repeat', (["data_pos_df['speed_filtered'].values", '(sample_rate_eeg / timebase_pos)'], {}), "(data_pos_df['speed_filtered'].values, sample_rate_eeg / timebase_pos)\n", (8595, 8665), True, 'import numpy as np\n'), ((13395, 13437), 'numpy.searchsorted', 'np.searchsorted', (['array', 'value'], {'side': '"""left"""'}), "(array, value, side='left')\n", (13410, 13437), True, 'import numpy as np\n'), ((1207, 1236), 'os.path.exists', 'os.path.exists', (['export_folder'], {}), '(export_folder)\n', (1221, 1236), False, 'import os\n'), ((1246, 1272), 'os.makedirs', 'os.makedirs', (['export_folder'], {}), '(export_folder)\n', (1257, 1272), False, 'import os\n'), ((1441, 1482), 'os.path.exists', 'os.path.exists', (['export_folder_basesession'], {}), '(export_folder_basesession)\n', (1455, 1482), False, 'import os\n'), ((1492, 1530), 'os.makedirs', 'os.makedirs', (['export_folder_basesession'], {}), '(export_folder_basesession)\n', (1503, 1530), False, 'import os\n'), ((1726, 1769), 'os.path.exists', 'os.path.exists', (['export_folder_othersessions'], {}), '(export_folder_othersessions)\n', (1740, 1769), False, 'import os\n'), ((1779, 1819), 'os.makedirs', 'os.makedirs', (['export_folder_othersessions'], {}), '(export_folder_othersessions)\n', (1790, 1819), False, 'import 
os\n'), ((2019, 2062), 'os.path.exists', 'os.path.exists', (['export_folder_othersessions'], {}), '(export_folder_othersessions)\n', (2033, 2062), False, 'import os\n'), ((2072, 2112), 'os.makedirs', 'os.makedirs', (['export_folder_othersessions'], {}), '(export_folder_othersessions)\n', (2083, 2112), False, 'import os\n'), ((2293, 2326), 'os.path.exists', 'os.path.exists', (['export_folder_lfp'], {}), '(export_folder_lfp)\n', (2307, 2326), False, 'import os\n'), ((2336, 2366), 'os.makedirs', 'os.makedirs', (['export_folder_lfp'], {}), '(export_folder_lfp)\n', (2347, 2366), False, 'import os\n'), ((2605, 2652), 'sys.stdout.write', 'sys.stdout.write', (['"""No cluster group key given."""'], {}), "('No cluster group key given.')\n", (2621, 2652), False, 'import sys\n'), ((2661, 2671), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2669, 2671), False, 'import sys\n'), ((2732, 2761), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (2741, 2761), False, 'import h5py\n'), ((3413, 3423), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3421, 3423), False, 'import sys\n'), ((3707, 3754), 'sys.stdout.write', 'sys.stdout.write', (['"""No cluster group key given."""'], {}), "('No cluster group key given.')\n", (3723, 3754), False, 'import sys\n'), ((3763, 3773), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3771, 3773), False, 'import sys\n'), ((4704, 4733), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (4713, 4733), False, 'import h5py\n'), ((4838, 4867), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (4847, 4867), False, 'import h5py\n'), ((4895, 4963), 'numpy.array', 'np.array', (["f['/channel_groups/1/spikes/time_samples'][:]"], {'dtype': 'float'}), "(f['/channel_groups/1/spikes/time_samples'][:], dtype=float)\n", (4903, 4963), True, 'import numpy as np\n'), ((5074, 5146), 'numpy.array', 'np.array', (["f['/event_types/sessions/events/time_samples'][:]"], 
{'dtype': 'float'}), "(f['/event_types/sessions/events/time_samples'][:], dtype=float)\n", (5082, 5146), True, 'import numpy as np\n'), ((5557, 5586), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (5566, 5586), False, 'import h5py\n'), ((6185, 6226), 'numpy.array', 'np.array', (["input_data['time']"], {'dtype': 'float'}), "(input_data['time'], dtype=float)\n", (6193, 6226), True, 'import numpy as np\n'), ((6443, 6497), 'numpy.array', 'np.array', (['time_stamps_sessions_input'], {'dtype': 'np.float64'}), '(time_stamps_sessions_input, dtype=np.float64)\n', (6451, 6497), True, 'import numpy as np\n'), ((6534, 6571), 'numpy.cumsum', 'np.cumsum', (['time_stamps_sessions_input'], {}), '(time_stamps_sessions_input)\n', (6543, 6571), True, 'import numpy as np\n'), ((7024, 7057), 'h5py.File', 'h5py.File', (['kwx_filename'], {'mode': '"""r"""'}), "(kwx_filename, mode='r')\n", (7033, 7057), False, 'import h5py\n'), ((7222, 7251), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (7231, 7251), False, 'import h5py\n'), ((7798, 7827), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (7807, 7827), False, 'import h5py\n'), ((9255, 9317), 'sys.stdout.write', 'sys.stdout.write', (['"""\nComparing recorded session lengths..."""'], {}), '("""\nComparing recorded session lengths...""")\n', (9271, 9317), False, 'import sys\n'), ((10108, 10172), 'sys.stdout.write', 'sys.stdout.write', (['"""\nComparing waveform and spike numbers..."""'], {}), '("""\nComparing waveform and spike numbers...""")\n', (10124, 10172), False, 'import sys\n'), ((10612, 10699), 'sys.stdout.write', 'sys.stdout.write', (['"""\nComparing recorded session lengths and position record..."""'], {}), '(\n """\nComparing recorded session lengths and position record...""")\n', (10628, 10699), False, 'import sys\n'), ((11566, 11643), 'sys.stdout.write', 'sys.stdout.write', (['"""\nComparing recorded 
session lengths and input data..."""'], {}), '("""\nComparing recorded session lengths and input data...""")\n', (11582, 11643), False, 'import sys\n'), ((12527, 12602), 'sys.stdout.write', 'sys.stdout.write', (['"""\nComparing recorded session lengths and LFP data..."""'], {}), '("""\nComparing recorded session lengths and LFP data...""")\n', (12543, 12602), False, 'import sys\n'), ((3847, 3876), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (3856, 3876), False, 'import h5py\n'), ((5242, 5294), 'numpy.array', 'np.array', (["f['/time_stamps_sessions'][:]"], {'dtype': 'float'}), "(f['/time_stamps_sessions'][:], dtype=float)\n", (5250, 5294), True, 'import numpy as np\n'), ((7350, 7403), 'numpy.array', 'np.array', (["f['/positions/time_stamps_sessions_pos'][:]"], {}), "(f['/positions/time_stamps_sessions_pos'][:])\n", (7358, 7403), True, 'import numpy as np\n'), ((9851, 9908), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo basic session information found.'"], {}), "('\\rNo basic session information found.')\n", (9867, 9908), False, 'import sys\n'), ((9944, 10001), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo basic session information found.'"], {}), "('\\rNo basic session information found.')\n", (9960, 10001), False, 'import sys\n'), ((10032, 10089), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo basic session information found.'"], {}), "('\\rNo basic session information found.')\n", (10048, 10089), False, 'import sys\n'), ((10272, 10369), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNumber of recorded waveforms does not match length of recorded spikes.'"], {}), "(\n '\\rNumber of recorded waveforms does not match length of recorded spikes.')\n", (10288, 10369), False, 'import sys\n'), ((10377, 10387), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10385, 10387), False, 'import sys\n'), ((10414, 10442), 'sys.stdout.write', 'sys.stdout.write', (['"""Success."""'], {}), "('Success.')\n", (10430, 10442), False, 
'import sys\n'), ((10478, 10520), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo waveforms loaded.'"], {}), "('\\rNo waveforms loaded.')\n", (10494, 10520), False, 'import sys\n'), ((10551, 10593), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo waveforms loaded.'"], {}), "('\\rNo waveforms loaded.')\n", (10567, 10593), False, 'import sys\n'), ((11323, 11369), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo position data loaded.'"], {}), "('\\rNo position data loaded.')\n", (11339, 11369), False, 'import sys\n'), ((11406, 11452), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo position data loaded.'"], {}), "('\\rNo position data loaded.')\n", (11422, 11452), False, 'import sys\n'), ((11483, 11529), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo position data loaded.'"], {}), "('\\rNo position data loaded.')\n", (11499, 11529), False, 'import sys\n'), ((12113, 12188), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo basic session or input information found! '"], {}), "('\\rNo basic session or input information found! ')\n", (12129, 12188), False, 'import sys\n'), ((12225, 12304), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo basic session or input information found! '"], {}), "('\\rNo basic session or input information found! ')\n", (12241, 12304), False, 'import sys\n'), ((12313, 12323), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12321, 12323), False, 'import sys\n'), ((12374, 12453), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo basic session or input information found! '"], {}), "('\\rNo basic session or input information found! 
')\n", (12390, 12453), False, 'import sys\n'), ((12462, 12472), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12470, 12472), False, 'import sys\n'), ((13013, 13077), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo basic session or LFP information found!'"], {}), "('\\rNo basic session or LFP information found!')\n", (13029, 13077), False, 'import sys\n'), ((13114, 13178), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo basic session or LFP information found!'"], {}), "('\\rNo basic session or LFP information found!')\n", (13130, 13178), False, 'import sys\n'), ((13187, 13197), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13195, 13197), False, 'import sys\n'), ((13248, 13312), 'sys.stdout.write', 'sys.stdout.write', (["'\\rNo basic session or LFP information found!'"], {}), "('\\rNo basic session or LFP information found!')\n", (13264, 13312), False, 'import sys\n'), ((13321, 13331), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13329, 13331), False, 'import sys\n'), ((9668, 9734), 'sys.stdout.write', 'sys.stdout.write', (["'\\rInconsistency in calculated session lengths.'"], {}), "('\\rInconsistency in calculated session lengths.')\n", (9684, 9734), False, 'import sys\n'), ((9751, 9761), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9759, 9761), False, 'import sys\n'), ((9796, 9824), 'sys.stdout.write', 'sys.stdout.write', (['"""Success."""'], {}), "('Success.')\n", (9812, 9824), False, 'import sys\n'), ((10936, 11008), 'sys.stdout.write', 'sys.stdout.write', (["'\\rLength of sessions and position file do not match.'"], {}), "('\\rLength of sessions and position file do not match.')\n", (10952, 11008), False, 'import sys\n'), ((11025, 11035), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11033, 11035), False, 'import sys\n'), ((11770, 11792), 'numpy.cumsum', 'np.cumsum', (['time_stamps'], {}), '(time_stamps)\n', (11779, 11792), True, 'import numpy as np\n'), ((11911, 12001), 'sys.stdout.write', 'sys.stdout.write', (["'\\rInconsistency in calculated session lengths (session 
vs. input)'"], {}), "(\n '\\rInconsistency in calculated session lengths (session vs. input)')\n", (11927, 12001), False, 'import sys\n'), ((12013, 12023), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12021, 12023), False, 'import sys\n'), ((12058, 12086), 'sys.stdout.write', 'sys.stdout.write', (['"""Success."""'], {}), "('Success.')\n", (12074, 12086), False, 'import sys\n'), ((12813, 12901), 'sys.stdout.write', 'sys.stdout.write', (["'\\rInconsistency in calculated session lengths (session vs. LFP)'"], {}), "(\n '\\rInconsistency in calculated session lengths (session vs. LFP)')\n", (12829, 12901), False, 'import sys\n'), ((12913, 12923), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12921, 12923), False, 'import sys\n'), ((12958, 12986), 'sys.stdout.write', 'sys.stdout.write', (['"""Success."""'], {}), "('Success.')\n", (12974, 12986), False, 'import sys\n'), ((13479, 13512), 'math.fabs', 'math.fabs', (['(value - array[idx - 1])'], {}), '(value - array[idx - 1])\n', (13488, 13512), False, 'import math\n'), ((13513, 13542), 'math.fabs', 'math.fabs', (['(value - array[idx])'], {}), '(value - array[idx])\n', (13522, 13542), False, 'import math\n'), ((11134, 11206), 'sys.stdout.write', 'sys.stdout.write', (["'\\rLength of sessions and position file do not match.'"], {}), "('\\rLength of sessions and position file do not match.')\n", (11150, 11206), False, 'import sys\n'), ((11223, 11233), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11231, 11233), False, 'import sys\n'), ((11268, 11296), 'sys.stdout.write', 'sys.stdout.write', (['"""Success."""'], {}), "('Success.')\n", (11284, 11296), False, 'import sys\n'), ((11844, 11881), 'numpy.cumsum', 'np.cumsum', (['time_stamps_sessions_input'], {}), '(time_stamps_sessions_input)\n', (11853, 11881), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 12:31:14 2020
@author: nastavirs
"""
import tensorflow as tf
import numpy as np
def xavier_init(self, size):
    """Create a TF variable with Xavier/Glorot-style truncated-normal init.

    `size` is a (fan_in, fan_out) pair; the standard deviation is
    sqrt(2 / (fan_in + fan_out)).
    """
    fan_in = size[0]
    fan_out = size[1]
    stddev = np.sqrt(2 / (fan_in + fan_out))
    initial = tf.truncated_normal([fan_in, fan_out], stddev=stddev)
    return tf.Variable(initial, dtype=tf.float32)
"numpy.sqrt",
"tensorflow.truncated_normal"
] | [((254, 285), 'numpy.sqrt', 'np.sqrt', (['(2 / (in_dim + out_dim))'], {}), '(2 / (in_dim + out_dim))\n', (261, 285), True, 'import numpy as np\n'), ((312, 372), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[in_dim, out_dim]'], {'stddev': 'xavier_stddev'}), '([in_dim, out_dim], stddev=xavier_stddev)\n', (331, 372), True, 'import tensorflow as tf\n')] |
import logging
import numpy as np
from scipy.stats import nbinom, poisson, binom
from scipy.special import gamma, factorial, gammaln, logsumexp, hyp2f1, hyp1f1, hyperu, factorial
class CountModel:
    # Shared base class for the count models below. `error_rate` is a small
    # per-variant background rate added to every expected count, so that a
    # zero expected count never yields a zero Poisson/NB rate.
    error_rate=0.01
class MultiplePoissonModel(CountModel):
    """Poisson-mixture count model over the possible number of duplicate copies.

    The observed count for a variant is modelled as Poisson with rate
    (certain_count + n_copies + d + error_rate) * base_lambda, marginalized
    over the number of extra duplicates d with the distribution derived from
    the per-duplicate allele frequencies.
    """
    def __init__(self, base_lambda, repeat_dist, certain_counts):
        # repeat_dist: log-probabilities over 0..max_duplicates extra copies,
        # one row per variant (as produced by calc_repeat_log_dist_fast).
        self._base_lambda = base_lambda
        self._repeat_dist = repeat_dist
        self._certain_counts = certain_counts[:, None]
        self._n_variants = self._certain_counts.size
        self._max_duplicates = self._repeat_dist.shape[1]-1
    @staticmethod
    def calc_repeat_log_dist_fast(allele_frequencies):
        """Log-distribution of the number of present duplicates.

        Treats each duplicate as an independent Bernoulli with the given
        frequency; tiling by 2 accounts for the two haplotypes. The loop is
        an in-place convolution that extends a Poisson-binomial distribution
        by one Bernoulli per column; statement order matters.
        """
        allele_frequencies = np.tile(allele_frequencies, 2)
        n_variants, n_duplicates = allele_frequencies.shape
        # NOTE(review): `ns` is unused.
        ns = np.arange(n_duplicates)
        repeat_dist = np.zeros((n_variants, n_duplicates+1))
        repeat_dist[:, 0] = 1
        for i, col in enumerate(allele_frequencies.T):
            # Shift probability mass up by one count with probability `col`.
            repeat_dist[:, 1:] = (repeat_dist[:, :-1]*col[:, None]+repeat_dist[:, 1:]*(1-col[:, None]))
            repeat_dist[:, 0]*=(1-col)
        assert np.allclose(repeat_dist.sum(axis=1), 1), repeat_dist.sum(axis=1)
        return np.log(repeat_dist)
    @classmethod
    def from_counts(cls, base_lambda, certain_counts, allele_frequencies):
        """Build the model; certain counts are doubled for the two haplotypes."""
        repeat_dist = cls.calc_repeat_log_dist_fast(allele_frequencies)
        return cls(base_lambda, repeat_dist, 2*certain_counts)
    def logpmf(self, k, n_copies=1):
        """Log P(observed counts k | n_copies), marginalized over duplicates."""
        assert k.shape == (self._n_variants, ), (k.shape, self._n_variants)
        # One Poisson rate per possible duplicate count 0..max_duplicates.
        rates = (self._certain_counts + n_copies + np.arange(self._max_duplicates+1)[None, :]+self.error_rate)*self._base_lambda
        log_probs = poisson.logpmf(k[:, None], rates)
        tot_probs = log_probs+self._repeat_dist
        # Marginalize over the duplicate count in log space.
        return logsumexp(tot_probs, axis=1)
class NegativeBinomialModel(CountModel):
    """Negative-binomial (Poisson-gamma) approximation of the count model.

    Used when the duplicate distribution is too large to enumerate; the
    Poisson mixture is approximated by matching moments to a gamma prior.
    """
    def __init__(self, base_lambda, r, p, certain_counts):
        # r, p: per-variant NB shape and success-probability parameters.
        self._base_lambda = base_lambda
        self._r = r[:, None]
        self._p = p[:, None]
        self._certain_counts = certain_counts[:, None]
    @classmethod
    def from_counts(cls, base_lambda, p_sum, p_sq_sum, certain_counts):
        """Moment-match a gamma prior from allele-frequency sums.

        The factor 2 accounts for the two haplotypes; alpha/beta are the
        gamma shape/rate matching mean p_sum and variance p_sum - p_sq_sum.
        """
        p_sum = p_sum*2
        p_sq_sum = p_sq_sum*2
        alpha = (p_sum)**2/(p_sum-p_sq_sum)
        beta = p_sum/(base_lambda*(p_sum-p_sq_sum))
        return cls(base_lambda, alpha, 1/(1+beta), 2*certain_counts)
    def logpmf(self, k, n_copies=1):
        """Log P(observed counts k | n_copies) under the NB approximation.

        Evaluated via Tricomi's confluent hypergeometric function `hyperu`;
        the debug prints below fire when the evaluation degenerates
        (h == 0) or a rate/probability is exactly zero.
        """
        k = k[:, None]
        mu = (n_copies+self._certain_counts+self.error_rate)*self._base_lambda
        r, p = (self._r, self._p)
        h = hyperu(r, r + k + 1, mu / p)
        invalid = (h==0) | (mu==0) | (p==0)
        if np.any(invalid):
            # Diagnostic output only; the (invalid) log(h) below will be -inf/nan.
            print(r[invalid])
            print(p[invalid])
            print(k[invalid])
            print(mu[invalid])
            print(h[invalid])
        result = -r * np.log(p / (1 - p)) - mu + (r + k) * np.log(mu) - gammaln(k + 1) + np.log(h)
        return result.flatten()
class PoissonModel(CountModel):
    """Plain Poisson count model with a fixed per-variant expected count."""
    def __init__(self, base_lambda, expected_count):
        self._base_lambda = base_lambda
        self._expected_count = expected_count
    @classmethod
    def from_counts(cls, base_lambda, certain_counts, p_sum):
        # Both the certain and the probabilistic counts occur on two haplotypes.
        return cls(base_lambda, 2 * (certain_counts + p_sum))
    def logpmf(self, k, n_copies=1):
        """Log P(observed counts k | n_copies) under a single Poisson."""
        rate = (self._expected_count + n_copies + self.error_rate) * self._base_lambda
        return poisson.logpmf(k, rate)
class ComboModel(CountModel):
    """Routes each variant to one of three count models.

    Model index 0: MultiplePoissonModel (exact mixture, few duplicates),
    index 1: NegativeBinomialModel (overdispersed approximation),
    index 2: PoissonModel (remaining variants).
    """
    def __init__(self, models, model_indexes):
        # model_indexes[i] selects which entry of `models` handles variant i.
        self._models = models
        self._model_indexes = model_indexes
        self._n_variants = self._models[0]._n_variants
    def diagnostics(self, idx):
        # Only the PoissonModel (last entry) exposes a simple expected count.
        return {"E": self._models[-1]._expected_count}
    @classmethod
    def from_counts(cls, base_lambda, p_sum, p_sq_sum, do_gamma_calc, certain_counts, allele_frequencies):
        """Partition variants between the three sub-models by dispersion."""
        models = []
        model_indices = np.empty(certain_counts.size, dtype="int")
        # Few enough duplicates to enumerate: exact Poisson mixture.
        multi_poisson_mask = ~do_gamma_calc
        models.append(
            MultiplePoissonModel.from_counts(base_lambda, certain_counts[multi_poisson_mask], allele_frequencies[multi_poisson_mask]))
        model_indices[multi_poisson_mask] = 0
        # Sufficient variance for the gamma moment match: negative binomial.
        nb_mask = do_gamma_calc & (p_sum**2 <= (p_sum-p_sq_sum)*10)
        models.append(
            NegativeBinomialModel.from_counts(base_lambda, p_sum[nb_mask], p_sq_sum[nb_mask], certain_counts[nb_mask]))
        model_indices[nb_mask] = 1
        # Everything else: plain Poisson on the expected count.
        poisson_mask = do_gamma_calc & (~nb_mask)
        models.append(
            PoissonModel.from_counts(base_lambda, certain_counts[poisson_mask], p_sum[poisson_mask]))
        model_indices[poisson_mask] = 2
        return cls(models, model_indices)
    @classmethod
    def from_kmers(cls, kmers, base_lambda=7.5):
        """Build a ComboModel from (certain_count, allele_frequencies) pairs."""
        max_duplicates = 5
        certain_counts = [kmer[0] for kmer in kmers]
        p_sums = [np.sum(kmer[1]) for kmer in kmers]
        p_sq_sums = [np.sum(np.square(kmer[1])) for kmer in kmers]
        allele_frequencies = np.zeros((len(kmers), max_duplicates))
        # Variants with more than max_duplicates duplicates use the gamma path.
        do_gamma_calc = [len(a) > max_duplicates for _, a in kmers]
        for i, (_, a) in enumerate(kmers):
            n = min(len(a), max_duplicates)
            allele_frequencies[i, :n] = a[:n]
        # Bug fix: forward the caller-supplied base_lambda; the original
        # passed the hard-coded literal 7.5, silently ignoring the parameter.
        return cls.from_counts(base_lambda, np.array(p_sums), np.array(p_sq_sums),
                               np.array(do_gamma_calc), np.array(certain_counts), allele_frequencies)
    def logpmf(self, k, n_copies=1):
        """Log-pmf of counts k, each evaluated by its assigned sub-model."""
        logpmf = np.zeros(k.size)
        for i, model in enumerate(self._models):
            mask = (self._model_indexes == i)
            logpmf[mask] = model.logpmf(k[mask], n_copies)
        return logpmf
| [
"scipy.special.hyperu",
"numpy.tile",
"scipy.special.gammaln",
"numpy.log",
"numpy.any",
"numpy.square",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.empty",
"scipy.stats.poisson.logpmf",
"scipy.special.logsumexp",
"numpy.arange"
] | [((676, 706), 'numpy.tile', 'np.tile', (['allele_frequencies', '(2)'], {}), '(allele_frequencies, 2)\n', (683, 706), True, 'import numpy as np\n'), ((780, 803), 'numpy.arange', 'np.arange', (['n_duplicates'], {}), '(n_duplicates)\n', (789, 803), True, 'import numpy as np\n'), ((826, 866), 'numpy.zeros', 'np.zeros', (['(n_variants, n_duplicates + 1)'], {}), '((n_variants, n_duplicates + 1))\n', (834, 866), True, 'import numpy as np\n'), ((1188, 1207), 'numpy.log', 'np.log', (['repeat_dist'], {}), '(repeat_dist)\n', (1194, 1207), True, 'import numpy as np\n'), ((1699, 1732), 'scipy.stats.poisson.logpmf', 'poisson.logpmf', (['k[:, None]', 'rates'], {}), '(k[:, None], rates)\n', (1713, 1732), False, 'from scipy.stats import nbinom, poisson, binom\n'), ((1796, 1824), 'scipy.special.logsumexp', 'logsumexp', (['tot_probs'], {'axis': '(1)'}), '(tot_probs, axis=1)\n', (1805, 1824), False, 'from scipy.special import gamma, factorial, gammaln, logsumexp, hyp2f1, hyp1f1, hyperu, factorial\n'), ((2574, 2602), 'scipy.special.hyperu', 'hyperu', (['r', '(r + k + 1)', '(mu / p)'], {}), '(r, r + k + 1, mu / p)\n', (2580, 2602), False, 'from scipy.special import gamma, factorial, gammaln, logsumexp, hyp2f1, hyp1f1, hyperu, factorial\n'), ((2658, 2673), 'numpy.any', 'np.any', (['invalid'], {}), '(invalid)\n', (2664, 2673), True, 'import numpy as np\n'), ((3322, 3416), 'scipy.stats.poisson.logpmf', 'poisson.logpmf', (['k', '((self._expected_count + n_copies + self.error_rate) * self._base_lambda)'], {}), '(k, (self._expected_count + n_copies + self.error_rate) *\n self._base_lambda)\n', (3336, 3416), False, 'from scipy.stats import nbinom, poisson, binom\n'), ((3870, 3912), 'numpy.empty', 'np.empty', (['certain_counts.size'], {'dtype': '"""int"""'}), "(certain_counts.size, dtype='int')\n", (3878, 3912), True, 'import numpy as np\n'), ((5430, 5446), 'numpy.zeros', 'np.zeros', (['k.size'], {}), '(k.size)\n', (5438, 5446), True, 'import numpy as np\n'), ((2916, 2925), 'numpy.log', 
'np.log', (['h'], {}), '(h)\n', (2922, 2925), True, 'import numpy as np\n'), ((4829, 4844), 'numpy.sum', 'np.sum', (['kmer[1]'], {}), '(kmer[1])\n', (4835, 4844), True, 'import numpy as np\n'), ((5234, 5250), 'numpy.array', 'np.array', (['p_sums'], {}), '(p_sums)\n', (5242, 5250), True, 'import numpy as np\n'), ((5252, 5271), 'numpy.array', 'np.array', (['p_sq_sums'], {}), '(p_sq_sums)\n', (5260, 5271), True, 'import numpy as np\n'), ((5304, 5327), 'numpy.array', 'np.array', (['do_gamma_calc'], {}), '(do_gamma_calc)\n', (5312, 5327), True, 'import numpy as np\n'), ((5329, 5353), 'numpy.array', 'np.array', (['certain_counts'], {}), '(certain_counts)\n', (5337, 5353), True, 'import numpy as np\n'), ((2899, 2913), 'scipy.special.gammaln', 'gammaln', (['(k + 1)'], {}), '(k + 1)\n', (2906, 2913), False, 'from scipy.special import gamma, factorial, gammaln, logsumexp, hyp2f1, hyp1f1, hyperu, factorial\n'), ((4892, 4910), 'numpy.square', 'np.square', (['kmer[1]'], {}), '(kmer[1])\n', (4901, 4910), True, 'import numpy as np\n'), ((1601, 1636), 'numpy.arange', 'np.arange', (['(self._max_duplicates + 1)'], {}), '(self._max_duplicates + 1)\n', (1610, 1636), True, 'import numpy as np\n'), ((2886, 2896), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (2892, 2896), True, 'import numpy as np\n'), ((2849, 2868), 'numpy.log', 'np.log', (['(p / (1 - p))'], {}), '(p / (1 - p))\n', (2855, 2868), True, 'import numpy as np\n')] |
import numpy as np
import ds_format as ds
from rstoollib.algorithms import *
def postprocess(d):
	"""Postprocess profile (prof) dataset d by calculating derived
	variables.

	Mutates d in place, adding derived fields computed by the calc_*
	helpers from rstoollib.algorithms. Which fields are added depends on
	which inputs are present (e.g. 'ts', 'ua'/'va', 'hus', 'hur').
	"""
	# Fill in whichever of z / zg is missing from the other (needs latitude).
	if 'zg' not in d and 'z' in d and 'lat' in d:
		d['zg'] = calc_zg(d['z'], d['lat'])
	if 'z' not in d and 'zg' in d and 'lat' in d:
		d['z'] = calc_z(d['zg'], d['lat'])
	# Potential temperature and Brunt-Väisälä frequency profile.
	d['theta'] = calc_theta(d['p'], d['ta'])
	d['p2'], d['bvf'] = calc_bvf(d['theta'], d['zg'], d['p'])
	# Saturation vapour pressure and parcel temperature from the lowest level.
	d['es'] = calc_es(d['ta'])
	d['ta_par'] = calc_ta_par(d['p'], d['ta'][0])
	if 'ts' in d:
		d['llp'] = calc_llp(d['ts'], d['p'], d['theta'])
	# Wind: derive speed/direction from components, or vice versa.
	if 'ua' in d and 'va' in d:
		d['wds'] = calc_wds(d['ua'], d['va'])
		d['wdd'] = calc_wdd(d['ua'], d['va'])
	elif 'wds' in d and 'wdd' in d:
		d['ua'] = calc_ua(d['wds'], d['wdd'])
		d['va'] = calc_va(d['wds'], d['wdd'])
	# Relative humidity from specific humidity via saturation mixing ratio.
	if 'hus' in d:
		ws = calc_w(d['p'], d['es'])
		qs = 1./(1./ws + 1)
		d['hur'] = 100.*d['hus']/qs
	if 'hur' in d:
		# Vapour pressure, lifting condensation level pressure and its
		# geopotential height (interpolated; p decreases with height, hence
		# the reversed arrays).
		d['e'] = d['hur']/100.*d['es']
		d['p_lcl'] = calc_lclp(d['p'][0], d['e'][0], d['ta'][0])
		d['zg_lcl'] = np.interp(d['p_lcl'], d['p'][::-1], d['zg'][::-1])
		#d['clp'] = calc_clp(d['p'], d['e'], d['ta'])
		#d['cl'] = np.interp(d['clp'], d['p'][::-1], d['zg'][::-1])
		# Saturated parcel temperature profiles from the surface level.
		d['ta_par_s'] = calc_ta_par_s(d['p'], d['ta'][0], d['e'][0])
		if 'ts' in d:
			d['ta_surf_par'] = calc_ta_par(d['p'], d['ts'])
			d['ta_surf_par_s'] = calc_ta_par_s(d['p'], d['ts'], d['e'][0])
			#d['ta_surf_par_x'] = calc_ta_par(d['p'], d['ts'] + 0.5)
			#d['ta_surf_par_s_x'] = calc_ta_par_s(d['p'], d['ts'] + 0.5, d['e'][0])
| [
"numpy.interp"
] | [((1065, 1115), 'numpy.interp', 'np.interp', (["d['p_lcl']", "d['p'][::-1]", "d['zg'][::-1]"], {}), "(d['p_lcl'], d['p'][::-1], d['zg'][::-1])\n", (1074, 1115), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import re
import sounddevice as sd
from utils import read_wav, write_wav
import numpy as np
from threading import Thread
import argparse
class DeviceNotFoundError(Exception):
    """Raised when no audio device matches the requested name/kind/host API."""
    pass
def record_target(file_path, length, fs, channels=2, append=False):
    """Records audio and writes it to a file.

    Args:
        file_path: Path to output file
        length: Audio recording length in samples
        fs: Sampling rate
        channels: Number of channels in the recording
        append: Add track(s) to an existing file? Silence will be added to end of each track to make all equal in
                length

    Returns:
        None
    """
    recording = sd.rec(length, samplerate=fs, channels=channels, blocking=True)
    recording = np.transpose(recording)  # (samples, channels) -> (channels, samples)
    max_gain = 20 * np.log10(np.max(np.abs(recording)))
    if append and os.path.isfile(file_path):
        # Adding to existing file, read the file
        _fs, data = read_wav(file_path, expand=True)
        # Zero pad shorter to the length of the longer
        if recording.shape[1] > data.shape[1]:
            n = recording.shape[1] - data.shape[1]
            data = np.pad(data, [(0, 0), (0, n)])
        elif data.shape[1] > recording.shape[1]:
            # Bug fix: pad the NEW recording. The original code did
            # np.pad(data, ...) and assigned it to `recording`, which
            # discarded the fresh recording and stacked the old data with a
            # padded copy of itself.
            recording = np.pad(recording, [(0, 0), (0, data.shape[1] - recording.shape[1])])
        # Add recording to the end of the existing data
        recording = np.vstack([data, recording])
    write_wav(file_path, fs, recording)
    print(f'Headroom: {-1.0*max_gain:.1f} dB')
def get_host_api_names():
    """Gets names of available host APIs in a list"""
    names = []
    for api in sd.query_hostapis():
        names.append(api['name'])
    return names
def get_device(device_name, kind, host_api=None, min_channels=1):
    """Finds device with name, kind and host API

    Args:
        device_name: Device name
        kind: Device type. "input" or "output"
        host_api: Host API name
        min_channels: Minimum number of channels in the device

    Returns:
        Device dict as returned by sounddevice.

    Raises:
        TypeError: If device_name or kind is None.
        DeviceNotFoundError: If no device satisfies name/kind/host API/channel
            requirements.
    """
    if device_name is None:
        raise TypeError('Device name is required and cannot be None')
    if kind is None:
        raise TypeError('Kind is required and cannot be None')
    # Available host APIs, with the "Windows " prefix stripped for matching
    host_api_names = [name.replace('Windows ', '') for name in get_host_api_names()]
    if host_api is not None:
        host_api = host_api.replace('Windows ', '')
    # Pattern matching any known host API name at the end of the device name
    host_api_pattern = f'({"|".join([re.escape(name) for name in host_api_names])})$'
    # Find with the given name
    device = None
    if re.search(host_api_pattern, device_name):
        # Host API in the name, this should return only one device
        device = sd.query_devices(device_name, kind=kind)
        if device[f'max_{kind}_channels'] < min_channels:
            # Channel count not satisfied.
            # FIX: message had a stray doubled quote and a dangling "1";
            # now consistent with the branch below.
            raise DeviceNotFoundError(f'Found {kind} device "{device["name"]} {host_api_names[device["hostapi"]]}" '
                                      f'but minimum number of channels is not satisfied.')
    elif host_api is not None:
        # Host API not specified in the name but host API is given as parameter.
        # (The redundant `not re.search(...)` guard was dropped: this branch is
        # only reached when the `if` above already failed.)
        try:
            # This should give one or zero devices
            device = sd.query_devices(f'{device_name} {host_api}', kind=kind)
        except ValueError:
            # Zero devices
            raise DeviceNotFoundError(f'No device found with name "{device_name}" and host API "{host_api}". ')
        if device[f'max_{kind}_channels'] < min_channels:
            # Channel count not satisfied
            raise DeviceNotFoundError(f'Found {kind} device "{device["name"]} {host_api_names[device["hostapi"]]}" '
                                      f'but minimum number of channels is not satisfied.')
    else:
        # Host API not in the name and host API is not given as parameter
        host_api_preference = [x for x in ['DirectSound', 'MME', 'WASAPI'] if x in host_api_names]
        for host_api_name in host_api_preference:
            # Looping in the order of preference
            try:
                device = sd.query_devices(f'{device_name} {host_api_name}', kind=kind)
                if device[f'max_{kind}_channels'] >= min_channels:
                    break
                else:
                    device = None
            except ValueError:
                pass
    if device is None:
        raise DeviceNotFoundError('Could not find any device which satisfies minimum channel count.')
    return device
def get_devices(input_device=None, output_device=None, host_api=None, min_channels=1):
    """Finds input and output devices

    Args:
        input_device: Input device name. System default is used if not given.
        output_device: Output device name. System default is used if not given.
        host_api: Host API name
        min_channels: Minimum number of output channels that the output device needs to support

    Returns:
        - Input device object
        - Output device object
    """
    all_devices = sd.query_devices()
    # Fall back to the system default device names when none were given
    if input_device is None:
        input_device = all_devices[sd.default.device[0]]['name']
    if output_device is None:
        output_device = all_devices[sd.default.device[1]]['name']
    # Resolve the names to concrete device objects
    resolved_input = get_device(input_device, 'input', host_api=host_api)
    resolved_output = get_device(output_device, 'output', host_api=host_api, min_channels=min_channels)
    return resolved_input, resolved_output
def set_default_devices(input_device, output_device):
    """Sets sounddevice default devices

    Args:
        input_device: Input device object
        output_device: Output device object

    Returns:
        - Input device name and host API as string
        - Output device name and host API as string
    """
    api_names = get_host_api_names()
    # Build "<device name> <host API name>" strings for both devices
    labels = []
    for device in (input_device, output_device):
        labels.append(f'{device["name"]} {api_names[device["hostapi"]]}')
    sd.default.device = (labels[0], labels[1])
    return labels[0], labels[1]
def play_and_record(
        play=None,
        record=None,
        input_device=None,
        output_device=None,
        host_api=None,
        channels=2,
        append=False):
    """Plays one file and records another at the same time
    Args:
        play: File path to playback file
        record: File path to output recording file
        input_device: Name of the input device (resolved via get_device)
        output_device: Name of the output device (resolved via get_device)
        host_api: Host API name
        channels: Number of output channels
        append: Add track(s) to an existing file? Silence will be added to end of each track to make all equal in
            length
    Returns:
        None
    """
    # Create output directory
    out_dir, out_file = os.path.split(os.path.abspath(record))
    os.makedirs(out_dir, exist_ok=True)
    # Read playback file
    fs, data = read_wav(play)
    n_channels = data.shape[0]
    # Find and set devices as default
    input_device, output_device = get_devices(
        input_device=input_device,
        output_device=output_device,
        host_api=host_api,
        min_channels=n_channels
    )
    input_device_str, output_device_str = set_default_devices(input_device, output_device)
    print(f'Input device: "{input_device_str}"')
    print(f'Output device: "{output_device_str}"')
    # Record on a background thread so capture overlaps with playback.
    # NOTE(review): the recorder thread is never joined; correctness relies on
    # sd.rec's blocking capture finishing with the blocking playback — confirm.
    recorder = Thread(
        target=record_target,
        args=(record, data.shape[1], fs),
        kwargs={'channels': channels, 'append': append}
    )
    recorder.start()
    sd.play(np.transpose(data), samplerate=fs, blocking=True)
def create_cli():
    """Create command line interface

    Returns:
        Parsed CLI arguments as a plain dict (keyword arguments for play_and_record)
    """
    arg_parser = argparse.ArgumentParser()
    # Playback / recording file paths
    arg_parser.add_argument('--play', type=str, required=True, help='File path to WAV file to play.')
    arg_parser.add_argument('--record', type=str, required=True,
                            help='File path to write the recording. This must have ".wav" extension and be either'
                                 '"headphones.wav" or any combination of supported speaker names separated by commas '
                                 'eg. FL,FC,FR.wav to be recognized by Impulcifer as a recording file. It\'s '
                                 'convenient to point the file path directly to the recording directory such as '
                                 '"data\\my_hrir\\FL,FR.wav".')
    # Device selection; argparse.SUPPRESS keeps the key out of the dict when unset
    arg_parser.add_argument('--input_device', type=str, default=argparse.SUPPRESS,
                            help='Name or number of the input device. Use "python -m sounddevice to '
                                 'find out which devices are available. It\'s possible to add host API at the end of '
                                 'the input device name separated by space to specify which host API to use. For '
                                 'example: "Zoom H1n DirectSound".')
    arg_parser.add_argument('--output_device', type=str, default=argparse.SUPPRESS,
                            help='Name or number of the output device. Use "python -m sounddevice to '
                                 'find out which devices are available. It\'s possible to add host API at the end of '
                                 'the output device name separated by space to specify which host API to use. For '
                                 'example: "Zoom H1n WASAPI"')
    arg_parser.add_argument('--host_api', type=str, default=argparse.SUPPRESS,
                            help='Host API name to prefer for input and output devices. Supported options on Windows '
                                 'are: "MME", "DirectSound" and "WASAPI". This is used when input and '
                                 'output devices have not been specified (using system defaults) or if they have no '
                                 'host API specified.')
    arg_parser.add_argument('--channels', type=int, default=2, help='Number of output channels.')
    arg_parser.add_argument('--append', action='store_true',
                            help='Add track(s) to existing file? Silence will be added to the end of all tracks to '
                                 'make the equal in length.')
    args = vars(arg_parser.parse_args())
    return args
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run playback + recording.
    play_and_record(**create_cli())
| [
"numpy.abs",
"re.escape",
"sounddevice.rec",
"os.makedirs",
"argparse.ArgumentParser",
"sounddevice.query_hostapis",
"utils.read_wav",
"sounddevice.query_devices",
"os.path.isfile",
"utils.write_wav",
"numpy.vstack",
"os.path.abspath",
"threading.Thread",
"numpy.pad",
"numpy.transpose",
... | [((705, 768), 'sounddevice.rec', 'sd.rec', (['length'], {'samplerate': 'fs', 'channels': 'channels', 'blocking': '(True)'}), '(length, samplerate=fs, channels=channels, blocking=True)\n', (711, 768), True, 'import sounddevice as sd\n'), ((785, 808), 'numpy.transpose', 'np.transpose', (['recording'], {}), '(recording)\n', (797, 808), True, 'import numpy as np\n'), ((1461, 1496), 'utils.write_wav', 'write_wav', (['file_path', 'fs', 'recording'], {}), '(file_path, fs, recording)\n', (1470, 1496), False, 'from utils import read_wav, write_wav\n'), ((2707, 2747), 're.search', 're.search', (['host_api_pattern', 'device_name'], {}), '(host_api_pattern, device_name)\n', (2716, 2747), False, 'import re\n'), ((5240, 5258), 'sounddevice.query_devices', 'sd.query_devices', ([], {}), '()\n', (5256, 5258), True, 'import sounddevice as sd\n'), ((7267, 7302), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (7278, 7302), False, 'import os\n'), ((7344, 7358), 'utils.read_wav', 'read_wav', (['play'], {}), '(play)\n', (7352, 7358), False, 'from utils import read_wav, write_wav\n'), ((7822, 7938), 'threading.Thread', 'Thread', ([], {'target': 'record_target', 'args': '(record, data.shape[1], fs)', 'kwargs': "{'channels': channels, 'append': append}"}), "(target=record_target, args=(record, data.shape[1], fs), kwargs={\n 'channels': channels, 'append': append})\n", (7828, 7938), False, 'from threading import Thread\n'), ((8172, 8197), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8195, 8197), False, 'import argparse\n'), ((883, 908), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (897, 908), False, 'import os\n'), ((979, 1011), 'utils.read_wav', 'read_wav', (['file_path'], {'expand': '(True)'}), '(file_path, expand=True)\n', (987, 1011), False, 'from utils import read_wav, write_wav\n'), ((1428, 1456), 'numpy.vstack', 'np.vstack', (['[data, recording]'], {}), '([data, 
recording])\n', (1437, 1456), True, 'import numpy as np\n'), ((2833, 2873), 'sounddevice.query_devices', 'sd.query_devices', (['device_name'], {'kind': 'kind'}), '(device_name, kind=kind)\n', (2849, 2873), True, 'import sounddevice as sd\n'), ((7238, 7261), 'os.path.abspath', 'os.path.abspath', (['record'], {}), '(record)\n', (7253, 7261), False, 'import os\n'), ((7997, 8015), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (8009, 8015), True, 'import numpy as np\n'), ((1184, 1214), 'numpy.pad', 'np.pad', (['data', '[(0, 0), (0, n)]'], {}), '(data, [(0, 0), (0, n)])\n', (1190, 1214), True, 'import numpy as np\n'), ((1669, 1688), 'sounddevice.query_hostapis', 'sd.query_hostapis', ([], {}), '()\n', (1686, 1688), True, 'import sounddevice as sd\n'), ((845, 862), 'numpy.abs', 'np.abs', (['recording'], {}), '(recording)\n', (851, 862), True, 'import numpy as np\n'), ((1288, 1351), 'numpy.pad', 'np.pad', (['data', '[(0, 0), (0, data.shape[1] - recording.shape[1])]'], {}), '(data, [(0, 0), (0, data.shape[1] - recording.shape[1])])\n', (1294, 1351), True, 'import numpy as np\n'), ((3198, 3238), 're.search', 're.search', (['host_api_pattern', 'device_name'], {}), '(host_api_pattern, device_name)\n', (3207, 3238), False, 'import re\n'), ((3430, 3486), 'sounddevice.query_devices', 'sd.query_devices', (['f"""{device_name} {host_api}"""'], {'kind': 'kind'}), "(f'{device_name} {host_api}', kind=kind)\n", (3446, 3486), True, 'import sounddevice as sd\n'), ((2601, 2616), 're.escape', 're.escape', (['name'], {}), '(name)\n', (2610, 2616), False, 'import re\n'), ((4285, 4346), 'sounddevice.query_devices', 'sd.query_devices', (['f"""{device_name} {host_api_name}"""'], {'kind': 'kind'}), "(f'{device_name} {host_api_name}', kind=kind)\n", (4301, 4346), True, 'import sounddevice as sd\n')] |
import pickle
from os.path import join
import numpy as np
import sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')
from util import print_stats
# Load the pickled timing info produced by a detection run and report
# overall runtime statistics in milliseconds.
file_path = '/data2/mengtial/Exp/ArgoVerse1.1/output/rt_htc_dconv2_ms_nm_s1.0/val/time_info.pkl'
time_info = pickle.load(open(file_path, 'rb'))
runtime_all_np = np.array(time_info['runtime_all'])
# Converter from seconds to milliseconds for reporting
s2ms = lambda x: 1e3*x
# print_stats(det1_runtime, 'det1 (ms)', cvt=s2ms)
# print_stats(det2_runtime, 'det2 (ms)', cvt=s2ms)
print_stats(runtime_all_np, 'overall (ms)', cvt=s2ms)
# file_path = '/data2/mengtial/Exp/ArgoVerse-pgt-gpupre/output/rt_dat_mrcnn50_nm_d15_s0.5/val/time_info.pkl'
# time_info = pickle.load(open(file_path, 'rb'))
# runtime_all_np = np.array(time_info['runtime_all'])
# which_cfg = np.array(time_info['which_cfg_all'], np.bool)
# det1_runtime = runtime_all_np[np.logical_not(which_cfg)]
# det2_runtime = runtime_all_np[which_cfg]
# s2ms = lambda x: 1e3*x
# print_stats(det1_runtime, 'det1 (ms)', cvt=s2ms)
# print_stats(det2_runtime, 'det2 (ms)', cvt=s2ms)
# print_stats(runtime_all_np, 'overall (ms)', cvt=s2ms)
# print('Init (ms): mean: %g; std: %g; min: %g; max: %g' % (
# runtime_all_np.mean(),
# runtime_all_np.std(ddof=1),
# runtime_all_np.min(),
# runtime_all_np.max(),
# ))
# aa = pickle.load(open('/data2/mengtial/Exp/ArgoVerse/output/cv2csrdcf_mrcnn50_d10/s1_val/time_tracker.pkl', 'rb'))
# runtime_all_np = 1e3*np.array(aa['rt_tracker_init'])[100:10101]
# print(len(runtime_all_np))
# print('Init (ms): mean: %g; std: %g; min: %g; max: %g' % (
# runtime_all_np.mean(),
# runtime_all_np.std(ddof=1),
# runtime_all_np.min(),
# runtime_all_np.max(),
# ))
# runtime_all_np = 1e3*np.array(aa['rt_tracker_update'])[100:10101]
# print('Update (ms): mean: %g; std: %g; min: %g; max: %g' % (
# runtime_all_np.mean(),
# runtime_all_np.std(ddof=1),
# runtime_all_np.min(),
# runtime_all_np.max(),
# ))
# status = np.array(aa['tracker_status'])
# print('%d; %g' % (len(status), status.mean()))
| [
"numpy.array",
"sys.path.insert",
"util.print_stats"
] | [((72, 96), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (87, 96), False, 'import sys\n'), ((98, 121), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (113, 121), False, 'import sys\n'), ((314, 348), 'numpy.array', 'np.array', (["time_info['runtime_all']"], {}), "(time_info['runtime_all'])\n", (322, 348), True, 'import numpy as np\n'), ((475, 528), 'util.print_stats', 'print_stats', (['runtime_all_np', '"""overall (ms)"""'], {'cvt': 's2ms'}), "(runtime_all_np, 'overall (ms)', cvt=s2ms)\n", (486, 528), False, 'from util import print_stats\n')] |
"""
Test for fake_data_generator.py
"""
import numpy as np
from deepchem.utils.fake_data_generator import FakeGraphGenerator, generate_edge_index, remove_self_loops
def test_fake_graph_dataset():
  """Checks FakeGraphGenerator output shapes for graph- and node-level tasks."""
  n_graphs = 10
  n_node_features = 5
  n_edge_features = 3
  n_classes = 2
  z_shape = 5
  # graph-level labels
  fgg = FakeGraphGenerator(
      min_nodes=3,
      max_nodes=10,
      n_node_features=n_node_features,
      avg_degree=4,
      n_edge_features=n_edge_features,
      n_classes=n_classes,
      task='graph',
      z=z_shape)
  graphs = fgg.sample(n_graphs=n_graphs)
  assert len(graphs) == n_graphs
  # every class should appear among the sampled graph labels
  assert np.unique(graphs.y).shape == (n_classes,)
  graph = graphs.X[0]
  assert graph.node_features.shape[1] == n_node_features
  assert graph.edge_features.shape[1] == n_edge_features
  assert graph.z.shape == (1, z_shape)
  # node-level labels
  fgg = FakeGraphGenerator(
      min_nodes=3,
      max_nodes=10,
      n_node_features=n_node_features,
      avg_degree=4,
      n_edge_features=n_edge_features,
      n_classes=n_classes,
      task='node',
      z=z_shape)
  graphs = fgg.sample(n_graphs=n_graphs)
  assert len(graphs) == n_graphs
  graph = graphs.X[0]
  # graph.y contains node-labels and graph.node_features.shape[0]
  # holds number of nodes in that graph
  assert graph.y.shape[0] == graph.node_features.shape[0]
  assert graph.node_features.shape[1] == n_node_features
  assert graph.edge_features.shape[1] == n_edge_features
  assert graph.z.shape == (1, z_shape)
def test_generate_edge_index():
  """Without self-loop removal, exactly n_nodes * avg_degree edges are produced."""
  n_nodes, avg_degree = 5, 3
  edge_indices = generate_edge_index(n_nodes, avg_degree, remove_loops=False)
  # edge index is in COO format: 2 rows (source, target)
  assert edge_indices.shape[0] == 2
  assert edge_indices.shape[1] == n_nodes * avg_degree
def test_remove_self_loops():
  """remove_self_loops drops edges whose source equals target."""
  edge_indices = np.array([[1, 2, 3], [1, 2, 4]])
  edge_indices = remove_self_loops(edge_indices)
  # only the (3, 4) edge survives; (1, 1) and (2, 2) are self loops
  assert edge_indices.shape[0] == 2
  assert edge_indices.shape[1] == 1
  edge_indices = np.ones((2, 3))
  edge_indices = remove_self_loops(edge_indices)
  # all edges are (1, 1) self loops, so none remain
  assert edge_indices.shape[0] == 2
  assert edge_indices.shape[1] == 0
| [
"numpy.ones",
"numpy.unique",
"deepchem.utils.fake_data_generator.FakeGraphGenerator",
"numpy.array",
"deepchem.utils.fake_data_generator.remove_self_loops",
"deepchem.utils.fake_data_generator.generate_edge_index"
] | [((320, 500), 'deepchem.utils.fake_data_generator.FakeGraphGenerator', 'FakeGraphGenerator', ([], {'min_nodes': '(3)', 'max_nodes': '(10)', 'n_node_features': 'n_node_features', 'avg_degree': '(4)', 'n_edge_features': 'n_edge_features', 'n_classes': 'n_classes', 'task': '"""graph"""', 'z': 'z_shape'}), "(min_nodes=3, max_nodes=10, n_node_features=\n n_node_features, avg_degree=4, n_edge_features=n_edge_features,\n n_classes=n_classes, task='graph', z=z_shape)\n", (338, 500), False, 'from deepchem.utils.fake_data_generator import FakeGraphGenerator, generate_edge_index, remove_self_loops\n'), ((874, 1053), 'deepchem.utils.fake_data_generator.FakeGraphGenerator', 'FakeGraphGenerator', ([], {'min_nodes': '(3)', 'max_nodes': '(10)', 'n_node_features': 'n_node_features', 'avg_degree': '(4)', 'n_edge_features': 'n_edge_features', 'n_classes': 'n_classes', 'task': '"""node"""', 'z': 'z_shape'}), "(min_nodes=3, max_nodes=10, n_node_features=\n n_node_features, avg_degree=4, n_edge_features=n_edge_features,\n n_classes=n_classes, task='node', z=z_shape)\n", (892, 1053), False, 'from deepchem.utils.fake_data_generator import FakeGraphGenerator, generate_edge_index, remove_self_loops\n'), ((1589, 1649), 'deepchem.utils.fake_data_generator.generate_edge_index', 'generate_edge_index', (['n_nodes', 'avg_degree'], {'remove_loops': '(False)'}), '(n_nodes, avg_degree, remove_loops=False)\n', (1608, 1649), False, 'from deepchem.utils.fake_data_generator import FakeGraphGenerator, generate_edge_index, remove_self_loops\n'), ((1790, 1822), 'numpy.array', 'np.array', (['[[1, 2, 3], [1, 2, 4]]'], {}), '([[1, 2, 3], [1, 2, 4]])\n', (1798, 1822), True, 'import numpy as np\n'), ((1840, 1871), 'deepchem.utils.fake_data_generator.remove_self_loops', 'remove_self_loops', (['edge_indices'], {}), '(edge_indices)\n', (1857, 1871), False, 'from deepchem.utils.fake_data_generator import FakeGraphGenerator, generate_edge_index, remove_self_loops\n'), ((1962, 1977), 'numpy.ones', 'np.ones', 
(['(2, 3)'], {}), '((2, 3))\n', (1969, 1977), True, 'import numpy as np\n'), ((1995, 2026), 'deepchem.utils.fake_data_generator.remove_self_loops', 'remove_self_loops', (['edge_indices'], {}), '(edge_indices)\n', (2012, 2026), False, 'from deepchem.utils.fake_data_generator import FakeGraphGenerator, generate_edge_index, remove_self_loops\n'), ((625, 644), 'numpy.unique', 'np.unique', (['graphs.y'], {}), '(graphs.y)\n', (634, 644), True, 'import numpy as np\n')] |
import datetime
import math
import re
import types
from typing import Any
from git import List
import numpy as np
import torch
from pandas import DataFrame as df
from PIL import Image
from torchvision import transforms
def getprice(img:List[Any], transform:Any, info:int,types:List[Any]):
    """Extract one normalized 1-D price curve per image kind.

    For each entry in `types`, opens the image at img[kind], applies
    `transform`, averages the first column and first row of channel `info`,
    and min-max normalizes the result.

    Returns:
        (dict of normalized curves keyed by kind, img['date'])
    """
    curves = {}
    for kind in types:  # renamed from `type`, which shadowed the builtin
        pixels = transform(Image.open(img[kind])).numpy()
        column = pixels[info, :, 0]
        row = pixels[info, 0, :]
        curves[kind] = min_max((column + row) / 2)
    return curves, img['date']
def GetSignal(item):
    """Ratio of the mean over item[192:222] to the mean over item[162:192]."""
    earlier = np.mean(item[162:192])
    later = np.mean(item[192:222])
    return later / earlier
def min_max(x, axis=None):
    """Min-max normalize `x` to (approximately) the [0, 1] range.

    Args:
        x: numpy array to normalize.
        axis: Axis along which min/max are computed (None = whole array).

    Returns:
        Array of the same shape scaled to [0, 1]. The tiny epsilon in the
        denominator avoids division by zero for constant input.
    """
    # Renamed locals: the original shadowed the builtins `min` and `max`.
    lo = x.min(axis=axis, keepdims=True)
    hi = x.max(axis=axis, keepdims=True)
    return (x - lo) / (hi - lo + 0.0000000000010)
# Create an image from the data frames
# box_size = crop size
# foot_slide = slide width
def imagemake(dfspan1: df,
              dfspan2: df,
              dfspan3: df,
              mode: str,
              size: int=64,
              slide: int=32
              ):
    """Build an RGB PIL image from three price dataframes.

    Each channel is the outer product of the normalized highs and lows of one
    dataframe; two crops (offset by `slide`) are concatenated horizontally and
    upscaled to 256x256 per crop via a Kronecker product.
    """
    # Round the crop size down to the nearest power of two
    wsize = int(math.pow(2, int(math.log2(size))))
    # Normalized high/low series for each of the three timeframes.
    # assumes each dataframe row has `high` and `low` attributes — TODO confirm
    a = min_max(np.array([r.high for r in dfspan1]))
    b = min_max(np.array([r.high for r in dfspan2]))
    c = min_max(np.array([r.high for r in dfspan3]))
    d = min_max(np.array([r.low for r in dfspan1]))
    e = min_max(np.array([r.low for r in dfspan2]))
    f = min_max(np.array([r.low for r in dfspan3]))
    # Outer products give one 2-D "texture" per timeframe (one per channel)
    m = np.outer(a, d).astype(np.float32)
    n = np.outer(b, e).astype(np.float32)
    o = np.outer(c, f).astype(np.float32)
    # Two square crops per channel: top-left and shifted by `slide`
    m1 = min_max(m[0:wsize, 0:wsize])
    m2 = min_max(m[slide:wsize+slide, slide:wsize+slide])
    n1 = min_max(n[0:wsize, 0:wsize])
    n2 = min_max(n[slide:wsize+slide, slide:wsize+slide])
    o1 = min_max(o[0:wsize, 0:wsize])
    o2 = min_max(o[slide:wsize+slide, slide:wsize+slide])
    te1 = np.stack([m1, n1, o1])
    te2 = np.stack([m2, n2, o2])
    # NOTE(review): the non-'A' branch concatenates te2 with itself; possibly
    # te1/te2 was intended — confirm with the author.
    te3 = np.concatenate([te1, te2], 2) if mode == 'A' \
        else np.concatenate([te2, te2], 2)
    # Upscale each crop to 256x256 by pixel replication (Kronecker product)
    a = 256/wsize, 256/wsize
    te4 = np.kron(te3, np.ones(tuple(map(int,a))))
    tmp = torch.from_numpy(te4).clone()
    return transforms.ToPILImage(mode='RGB')(tmp)
def getprice2(img, transform):
    """Extract normalized fake/real 1-D signals from an image pair.

    Args:
        img: Mapping with 'fakeB' and 'realB' image paths and a 'date' key;
             the 'fakeB' path is expected to contain a YYYY-MM-DD date.
        transform: Callable turning a PIL image into a tensor with .numpy().

    Returns:
        (fake_signal, real_signal, img['date']) on success, (0, 0, 0) on any
        failure (missing file, no date in the path, unreadable image, ...).
    """
    try:
        match = re.search(r'\d{4}-\d{2}-\d{2}', img['fakeB'])
        # `date` is unused, but parsing validates that a real date is present
        # (a missing match raises and falls through to the sentinel return).
        date = datetime.datetime.strptime(match.group(), '%Y-%m-%d').date()
        fake = transform(Image.open(img['fakeB']))
        real = transform(Image.open(img['realB']))
        fake = fake.numpy()
        real = real.numpy()
        reala = min_max(np.diag(real[0, :, :]))  # diagonal component
        real1x = min_max(real[0, :, 0])
        real1y = min_max(real[0, 0, :])
        real2 = (reala+real1x+real1y)/3
        fake1a = min_max(np.diag(fake[0, :, :]))  # diagonal component
        fake1x = min_max(fake[0, :, 0])
        fake1y = min_max(fake[0, 0, :])
        fake2 = (fake1a+fake1x+fake1y)/3
        return min_max(fake2), min_max(real2), img['date']
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Best-effort sentinel return is preserved.
        return 0, 0, 0
| [
"numpy.mean",
"PIL.Image.open",
"torchvision.transforms.ToPILImage",
"math.log2",
"torch.from_numpy",
"numpy.diag",
"numpy.array",
"numpy.stack",
"numpy.outer",
"numpy.concatenate",
"re.search"
] | [((584, 606), 'numpy.mean', 'np.mean', (['item[162:192]'], {}), '(item[162:192])\n', (591, 606), True, 'import numpy as np\n'), ((616, 638), 'numpy.mean', 'np.mean', (['item[192:222]'], {}), '(item[192:222])\n', (623, 638), True, 'import numpy as np\n'), ((1868, 1890), 'numpy.stack', 'np.stack', (['[m1, n1, o1]'], {}), '([m1, n1, o1])\n', (1876, 1890), True, 'import numpy as np\n'), ((1901, 1923), 'numpy.stack', 'np.stack', (['[m2, n2, o2]'], {}), '([m2, n2, o2])\n', (1909, 1923), True, 'import numpy as np\n'), ((1142, 1177), 'numpy.array', 'np.array', (['[r.high for r in dfspan1]'], {}), '([r.high for r in dfspan1])\n', (1150, 1177), True, 'import numpy as np\n'), ((1195, 1230), 'numpy.array', 'np.array', (['[r.high for r in dfspan2]'], {}), '([r.high for r in dfspan2])\n', (1203, 1230), True, 'import numpy as np\n'), ((1248, 1283), 'numpy.array', 'np.array', (['[r.high for r in dfspan3]'], {}), '([r.high for r in dfspan3])\n', (1256, 1283), True, 'import numpy as np\n'), ((1301, 1335), 'numpy.array', 'np.array', (['[r.low for r in dfspan1]'], {}), '([r.low for r in dfspan1])\n', (1309, 1335), True, 'import numpy as np\n'), ((1353, 1387), 'numpy.array', 'np.array', (['[r.low for r in dfspan2]'], {}), '([r.low for r in dfspan2])\n', (1361, 1387), True, 'import numpy as np\n'), ((1405, 1439), 'numpy.array', 'np.array', (['[r.low for r in dfspan3]'], {}), '([r.low for r in dfspan3])\n', (1413, 1439), True, 'import numpy as np\n'), ((1935, 1964), 'numpy.concatenate', 'np.concatenate', (['[te1, te2]', '(2)'], {}), '([te1, te2], 2)\n', (1949, 1964), True, 'import numpy as np\n'), ((1995, 2024), 'numpy.concatenate', 'np.concatenate', (['[te2, te2]', '(2)'], {}), '([te2, te2], 2)\n', (2009, 2024), True, 'import numpy as np\n'), ((2157, 2190), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {'mode': '"""RGB"""'}), "(mode='RGB')\n", (2178, 2190), False, 'from torchvision import transforms\n'), ((2254, 2301), 're.search', 're.search', 
(['"""\\\\d{4}-\\\\d{2}-\\\\d{2}"""', "img['fakeB']"], {}), "('\\\\d{4}-\\\\d{2}-\\\\d{2}', img['fakeB'])\n", (2263, 2301), False, 'import re\n'), ((347, 368), 'PIL.Image.open', 'Image.open', (['img[type]'], {}), '(img[type])\n', (357, 368), False, 'from PIL import Image\n'), ((1450, 1464), 'numpy.outer', 'np.outer', (['a', 'd'], {}), '(a, d)\n', (1458, 1464), True, 'import numpy as np\n'), ((1492, 1506), 'numpy.outer', 'np.outer', (['b', 'e'], {}), '(b, e)\n', (1500, 1506), True, 'import numpy as np\n'), ((1534, 1548), 'numpy.outer', 'np.outer', (['c', 'f'], {}), '(c, f)\n', (1542, 1548), True, 'import numpy as np\n'), ((2115, 2136), 'torch.from_numpy', 'torch.from_numpy', (['te4'], {}), '(te4)\n', (2131, 2136), False, 'import torch\n'), ((2401, 2425), 'PIL.Image.open', 'Image.open', (["img['fakeB']"], {}), "(img['fakeB'])\n", (2411, 2425), False, 'from PIL import Image\n'), ((2452, 2476), 'PIL.Image.open', 'Image.open', (["img['realB']"], {}), "(img['realB'])\n", (2462, 2476), False, 'from PIL import Image\n'), ((2559, 2581), 'numpy.diag', 'np.diag', (['real[0, :, :]'], {}), '(real[0, :, :])\n', (2566, 2581), True, 'import numpy as np\n'), ((2737, 2759), 'numpy.diag', 'np.diag', (['fake[0, :, :]'], {}), '(fake[0, :, :])\n', (2744, 2759), True, 'import numpy as np\n'), ((1106, 1121), 'math.log2', 'math.log2', (['size'], {}), '(size)\n', (1115, 1121), False, 'import math\n')] |
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests completeness axiom, batching, and error handling for blur_ig."""
import unittest
import unittest.mock as mock
from . import blur_ig
import numpy as np
from scipy.ndimage import gaussian_filter
import tensorflow.compat.v1 as tf
class BlurIgTest(unittest.TestCase):
  """To run: "python -m saliency.tf1.blur_ig_test" top-level saliency directory."""
  def setUp(self):
    # Build a tiny 5x5x1 graph (y = sin(x)) whose BlurIG attribution has a
    # closed-form expected value: y(input) - y(blurred baseline).
    super().setUp()
    self.max_sigma = 10
    with tf.Graph().as_default() as graph:
      self.x = tf.placeholder(shape=[None, 5, 5, 1], dtype=tf.float32)
      y = tf.sin(self.x)
      y_sum = tf.reduce_sum(y, [1,2,3])
      self.gradients_node = tf.gradients(y, self.x)[0]
    self.sess = tf.Session(graph=graph)
    # Spy wrapper lets the tests count sess.run calls (one per batch).
    self.sess_spy = mock.MagicMock(wraps=self.sess)
    # All black except 2 pixels near the center.
    self.x_input_val = np.array([
        [0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.5, 0.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0],
    ],
                                dtype=float)
    self.x_input_val = self.x_input_val.reshape((5, 5, 1))
    # Calculate the value of `y` at the input.
    y_input_val = self.sess.run(y, feed_dict={self.x: [self.x_input_val]})
    # Baseline is the fully blurred version of the input.
    x_baseline_val = gaussian_filter(
        self.x_input_val,
        sigma=[self.max_sigma, self.max_sigma, 0],
        mode='constant')
    y_baseline_val = self.sess.run(y, feed_dict={self.x: [x_baseline_val]})
    # The expected BlurIG value is equal to the difference between
    # the `y` value at the input and the `y` value at the baseline. Because
    # each value is independent, we can calculate the expected blur_ig value
    # of each.
    #
    # Expected: [[-0, -0, -0, -0, -0],
    #            [-0, 0.641, -0, -0, -0],
    #            [-0, -0, 0.838, -0, -0],
    #            [-0, -0, -0, -0, -0],
    #            [-0, -0, -0, -0, -0]
    self.expected_val = y_input_val[0] - y_baseline_val[0]
    self.blur_ig_instance = blur_ig.BlurIG(graph,
                                           self.sess_spy,
                                           y_sum,
                                           self.x)
  def testBlurIGGetMask(self):
    """Tests that BlurIG steps are created and aggregated correctly."""
    x_steps = 2000
    # Calculate the Blur IG attribution of the input.
    mask = self.blur_ig_instance.GetMask(self.x_input_val,
                                         feed_dict=None,
                                         max_sigma=self.max_sigma,
                                         steps=x_steps)
    # Because the baseline is blurred, all zero values should still have some
    # attribution (introduced noise).
    self.assertEqual(np.count_nonzero(mask), mask.size)
    # Verify the result (for accuracy and therefore completeness).
    np.testing.assert_almost_equal(mask, self.expected_val, decimal=2)
    # No batching requested, so one sess.run per step.
    self.assertEqual(self.sess_spy.run.call_count, x_steps)
  def testBlurIGGetMaskBatched(self):
    """Tests that multiple BlurIG batches are created and aggregated correctly."""
    x_steps = 1001
    batch_size = 500
    expected_calls = 3  # batch size is 500, ceil(1001/500)=3
    self.blur_ig_instance.validate_xy_tensor_shape = mock.MagicMock()
    expected_validate_args = (x_steps, batch_size)
    mask = self.blur_ig_instance.GetMask(self.x_input_val,
                                         feed_dict=None,
                                         max_sigma=self.max_sigma,
                                         steps=x_steps,
                                         batch_size=batch_size)
    validate_args = self.blur_ig_instance.validate_xy_tensor_shape.call_args[0]
    # Because the baseline is blurred, all zero values should still have some
    # attribution (introduced noise).
    self.assertEqual(np.count_nonzero(mask), mask.size)
    # Verify the result (for accuracy and therefore completeness).
    np.testing.assert_almost_equal(mask, self.expected_val, decimal=2)
    self.assertEqual(self.sess_spy.run.call_count, expected_calls)
    self.assertEqual(validate_args, expected_validate_args)
  def testBlurIGGetMaskSingleBatch(self):
    """Tests that a single BlurIG batch is created and aggregated correctly."""
    x_steps = 999
    batch_size = 1000
    expected_calls = 1  # batch size is 1000, ceil(999/1000)=1
    mask = self.blur_ig_instance.GetMask(self.x_input_val,
                                         feed_dict=None,
                                         max_sigma=self.max_sigma,
                                         steps=x_steps,
                                         batch_size=batch_size)
    # Because the baseline is blurred, all zero values should still have some
    # attribution (introduced noise).
    self.assertEqual(np.count_nonzero(mask), mask.size)
    # Verify the result (for accuracy and therefore completeness).
    np.testing.assert_almost_equal(mask, self.expected_val, decimal=2)
    self.assertEqual(self.sess_spy.run.call_count, expected_calls)
  def testBlurIGGetMaskArgs(self):
    """Tests that call_model_function receives correct inputs."""
    x_steps = 5
    feed_dict = {'foo': 'bar'}
    # Stub run() so GetMask executes without a real graph evaluation.
    self.sess_spy.run.return_value = [self.x_input_val.reshape((1, 5, 5, 1))]
    self.blur_ig_instance.GetMask(self.x_input_val,
                                  feed_dict=feed_dict,
                                  max_sigma=self.max_sigma,
                                  steps=x_steps)
    actual_feed_dict = self.sess_spy.run.call_args[1]['feed_dict']
    self.assertEqual(actual_feed_dict['foo'], feed_dict['foo'])
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| [
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.reduce_sum",
"unittest.mock.MagicMock",
"numpy.count_nonzero",
"numpy.testing.assert_almost_equal",
"numpy.array",
"tensorflow.compat.v1.sin",
"tensorflow.compat.v1.gradients",
"scipy.ndimage.gaussian_filter",
... | [((6359, 6374), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6372, 6374), False, 'import unittest\n'), ((3551, 3617), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['mask', 'self.expected_val'], {'decimal': '(2)'}), '(mask, self.expected_val, decimal=2)\n', (3581, 3617), True, 'import numpy as np\n'), ((3955, 3971), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3969, 3971), True, 'import unittest.mock as mock\n'), ((4651, 4717), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['mask', 'self.expected_val'], {'decimal': '(2)'}), '(mask, self.expected_val, decimal=2)\n', (4681, 4717), True, 'import numpy as np\n'), ((5619, 5685), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['mask', 'self.expected_val'], {'decimal': '(2)'}), '(mask, self.expected_val, decimal=2)\n', (5649, 5685), True, 'import numpy as np\n'), ((1078, 1133), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'shape': '[None, 5, 5, 1]', 'dtype': 'tf.float32'}), '(shape=[None, 5, 5, 1], dtype=tf.float32)\n', (1092, 1133), True, 'import tensorflow.compat.v1 as tf\n'), ((1144, 1158), 'tensorflow.compat.v1.sin', 'tf.sin', (['self.x'], {}), '(self.x)\n', (1150, 1158), True, 'import tensorflow.compat.v1 as tf\n'), ((1173, 1200), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['y', '[1, 2, 3]'], {}), '(y, [1, 2, 3])\n', (1186, 1200), True, 'import tensorflow.compat.v1 as tf\n'), ((1272, 1295), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (1282, 1295), True, 'import tensorflow.compat.v1 as tf\n'), ((1318, 1349), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'wraps': 'self.sess'}), '(wraps=self.sess)\n', (1332, 1349), True, 'import unittest.mock as mock\n'), ((1424, 1591), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.5, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0,\n 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 
0.0]]'], {'dtype': 'float'}), '([[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.5, 0.0, 0.0, 0.0], [0.0, 0.0, \n 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]],\n dtype=float)\n', (1432, 1591), True, 'import numpy as np\n'), ((1947, 2044), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['self.x_input_val'], {'sigma': '[self.max_sigma, self.max_sigma, 0]', 'mode': '"""constant"""'}), "(self.x_input_val, sigma=[self.max_sigma, self.max_sigma, 0],\n mode='constant')\n", (1962, 2044), False, 'from scipy.ndimage import gaussian_filter\n'), ((3445, 3467), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (3461, 3467), True, 'import numpy as np\n'), ((4545, 4567), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (4561, 4567), True, 'import numpy as np\n'), ((5513, 5535), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (5529, 5535), True, 'import numpy as np\n'), ((1227, 1250), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['y', 'self.x'], {}), '(y, self.x)\n', (1239, 1250), True, 'import tensorflow.compat.v1 as tf\n'), ((1029, 1039), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (1037, 1039), True, 'import tensorflow.compat.v1 as tf\n')] |
import numpy as np
# ==============================================================================
# Funcion que calcula el coeficiente de arrastre de una esfera en caida libre
# Funciona con Reynolds descde 0 hasta m'as all'a de 3e6
# ==============================================================================
def Cd(u, D, nu):
    """Drag coefficient of a sphere in free fall.

    Piecewise empirical correlation, valid from Re = 0 up past 3e6.

    Args:
        u: fall speed of the sphere.
        D: sphere diameter.
        nu: kinematic viscosity of the surrounding fluid.

    Returns:
        Dimensionless drag coefficient C_d.
    """
    # Reynolds number
    Re = u * D / nu
    # Chained comparisons replace np.logical_and on scalars (same branch
    # coverage, including the fall-through to 0.18 when no range matches).
    if Re == 0:
        cd = 0  # no relative motion -> no drag
    elif 0 < Re <= 1:
        cd = 24 / Re  # Stokes (creeping-flow) regime
    elif 1 < Re <= 400:
        cd = 24 / Re ** 0.646  # intermediate empirical fit
    elif 400 < Re <= 3e5:
        cd = 0.5  # Newton (inertia-dominated) regime
    elif 3e5 < Re <= 2e6:
        cd = 0.000366 * Re ** 0.4275  # transcritical regime
    else:
        cd = 0.18  # supercritical regime
    return cd
# ==============================================================================
# Funcion para calcular el paso incremental de RK4.
# Funciona con el codigo de caida libre y es una funcion que es llamada desde
# la siguiente funcion de este archivo
# ==============================================================================
def Fi(A, B, C, CD, x):
    """Evaluate the velocity ODE right-hand side: (1/A) * (B - C * x**2 * CD).

    Used by dv_dz as the stage function for the RK4 integrator.

    Args:
        A, B, C: constant coefficients of the free-fall equation.
        CD: drag coefficient.
        x: velocity at which the derivative is evaluated.

    Returns:
        The acceleration dv/dt at velocity x.
    """
    # Same operation order as a hand-expanded (1/A)*(B - C*v^2*CD).
    return (1 / A) * (B - C * x ** 2 * CD)
# ==============================================================================
# Funcion para definir los valores dvi y dzi. La funcion devuelve dos vectores
# que almacenan los valores de los incrementales
# ==============================================================================
def dv_dz(h, A, B, C, D, CD, V):
    """Compute the four RK4 stage increments for the free-fall system.

    Integrates z' = v, v' = Fi(A, B, C, CD, v) one step of size h.

    Args:
        h: step size.
        A, B, C: coefficients forwarded to Fi.
        D: unused here; kept for interface compatibility with callers.
        CD: drag coefficient.
        V: current velocity.

    Returns:
        (dv, dz): two length-4 arrays with the RK4 velocity and position
        stage increments.
    """
    # Stage-increment storage
    dv = np.zeros(4)
    dz = np.zeros_like(dv)
    dz[0] = h * V
    dv[0] = h * Fi(A, B, C, CD, V)
    dz[1] = h * (V + 0.5 * dv[0])
    # BUG FIX: stage 2 must evaluate Fi at the midpoint velocity, like
    # stages 1, 3 and 4 do; the original assigned h*(V + 0.5*dv[0]) here.
    dv[1] = h * Fi(A, B, C, CD, V + 0.5 * dv[0])
    dz[2] = h * (V + 0.5 * dv[1])
    dv[2] = h * Fi(A, B, C, CD, V + 0.5 * dv[1])
    dz[3] = h * (V + dv[2])
    dv[3] = h * Fi(A, B, C, CD, V + dv[2])
    return dv, dz
"numpy.zeros",
"numpy.zeros_like",
"numpy.logical_and"
] | [((1750, 1761), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1758, 1761), True, 'import numpy as np\n'), ((1771, 1788), 'numpy.zeros_like', 'np.zeros_like', (['dv'], {}), '(dv)\n', (1784, 1788), True, 'import numpy as np\n'), ((505, 536), 'numpy.logical_and', 'np.logical_and', (['(Re > 0)', '(Re <= 1)'], {}), '(Re > 0, Re <= 1)\n', (519, 536), True, 'import numpy as np\n'), ((586, 619), 'numpy.logical_and', 'np.logical_and', (['(Re > 1)', '(Re <= 400)'], {}), '(Re > 1, Re <= 400)\n', (600, 619), True, 'import numpy as np\n'), ((678, 718), 'numpy.logical_and', 'np.logical_and', (['(Re > 400)', '(Re <= 300000.0)'], {}), '(Re > 400, Re <= 300000.0)\n', (692, 718), True, 'import numpy as np\n'), ((759, 805), 'numpy.logical_and', 'np.logical_and', (['(Re > 300000.0)', '(Re <= 2000000.0)'], {}), '(Re > 300000.0, Re <= 2000000.0)\n', (773, 805), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np

# Average log-likelihoods for increasing numbers of training sequences.
train_ll = [-7.827264757844233, -6.539122052193318, -5.46885741580931, -4.860724952141639]
test_ll = [-7.624574522874662, -6.11743100622369, -5.002220748941359, -4.422560242520135]
train_ll = np.round(train_ll, decimals=3)
test_ll = np.round(test_ll, decimals=3)
print(train_ll, test_ll)
# NOTE: the "#Sequences vs Avg. Log-Likelihood" plot (train/test curves with
# markers, labels and legend) is disabled; currently we plot y=x and y=x^2.
x = np.linspace(0, 1, 100)
y = x ** 2
plt.plot(x, x)
plt.plot(x, y)
plt.show()
"numpy.linspace",
"matplotlib.pyplot.plot",
"numpy.round",
"matplotlib.pyplot.show"
] | [((257, 291), 'numpy.round', 'np.round', (['trainmetrics'], {'decimals': '(3)'}), '(trainmetrics, decimals=3)\n', (265, 291), True, 'import numpy as np\n'), ((306, 339), 'numpy.round', 'np.round', (['testmetrics'], {'decimals': '(3)'}), '(testmetrics, decimals=3)\n', (314, 339), True, 'import numpy as np\n'), ((396, 418), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (407, 418), True, 'import numpy as np\n'), ((695, 709), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {}), '(x, x)\n', (703, 709), True, 'import matplotlib.pyplot as plt\n'), ((709, 723), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (717, 723), True, 'import matplotlib.pyplot as plt\n'), ((723, 733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (731, 733), True, 'import matplotlib.pyplot as plt\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.